Compare commits


26 Commits
v1.0 ... v1.2.2

SHA1 Message Date
17c0a99bda Merge pull request #67 from jkaninda/develop
Develop
2024-08-15 05:02:56 +02:00
b1c9abf931 Clean up 2024-08-14 22:28:16 +02:00
a70a893c11 Fix encryption permission issue on Openshift 2024-08-14 22:19:35 +02:00
243e25f4fb Fix encryption permission issue on Openshift 2024-08-14 22:19:02 +02:00
cb0dcf4104 Update docs 2024-08-11 09:49:41 +02:00
d26d8d31c9 Merge pull request #65 from jkaninda/docs
Merge Docs
2024-08-11 09:48:08 +02:00
71d438ba76 Merge branch 'main' of github.com:jkaninda/mysql-bkup into develop 2024-08-11 09:44:00 +02:00
a3fc58af96 Add delete /tmp directory after backup or restore and update docs 2024-08-11 09:38:31 +02:00
08ca6d4a39 Merge pull request #64 from jkaninda/develop
docs: update readme
2024-08-10 11:30:53 +02:00
27b9ab5f36 docs: update readme 2024-08-10 11:29:58 +02:00
6d6db7061b Merge pull request #63 from jkaninda/develop
Develop
2024-08-10 11:28:06 +02:00
d90647aae7 Update app version 2024-08-10 11:22:08 +02:00
5c2c05499f docs: update example 2024-08-10 11:12:43 +02:00
88ada6fefd docs: update example 2024-08-10 11:12:17 +02:00
e6c8b0923d Add Docker entrypont, update docs 2024-08-10 10:50:00 +02:00
59a136039c Merge pull request #62 from jkaninda/docs
docs: update stable version
2024-08-04 23:45:36 +02:00
db835e81c4 docs: update stable version 2024-08-04 23:44:49 +02:00
5b05bcbf0c Merge pull request #61 from jkaninda/docs
docs: add Kubernetes restore Job example
2024-08-04 13:38:10 +02:00
b8277c8464 docs: add Kubernetes restore Job example 2024-08-04 13:37:45 +02:00
70338b6ae6 Merge pull request #60 from jkaninda/develop
Develop
2024-08-04 13:12:50 +02:00
33b1acf7c0 docs: add Kubernetes restore example 2024-08-04 11:42:07 +02:00
9a4d02f648 fix: Fix AWS S3 and SSH backup in scheduled mode on Docker and Docker Swarm mode 2024-08-04 11:30:28 +02:00
1e06600c43 fix: Fix AWS S3 and SSH backup in scheduled mode on Docker and Docker Swarm mode 2024-08-04 11:30:03 +02:00
365ab8dfff Merge pull request #59 from jkaninda/develop
Develop
2024-08-04 01:43:49 +02:00
e4ca97b99e Fix log 2024-08-04 01:42:51 +02:00
ae7eb7a159 Fix log, add verification of required environment 2024-08-04 01:36:22 +02:00
31 changed files with 691 additions and 260 deletions

----
.github/workflows/build.yml
@@ -0,0 +1,32 @@
name: Build
on:
  push:
    branches: ['develop']
env:
  BUILDKIT_IMAGE: jkaninda/mysql-bkup
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v3
        with:
          push: true
          file: "./docker/Dockerfile"
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          tags: |
            "${{env.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"

----

@@ -1,4 +1,6 @@
 BINARY_NAME=mysql-bkup
+IMAGE_NAME=jkaninda/mysql-bkup
 include .env
 export
 run:
@@ -17,30 +19,30 @@ docker-build:
 	docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
 docker-run: docker-build
-	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2
+	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --prune --keep-last 2
 docker-restore: docker-build
-	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore -f ${FILE_NAME}
+	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore -f ${FILE_NAME}
-docker-run-scheduled: docker-build
+docker-run-scheduled: #docker-build
-	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --mode scheduled --period "* * * * *"
+	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --mode scheduled --period "* * * * *"
 docker-run-scheduled-s3: docker-build
-	docker run --rm --network web --user 1000:1000 --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
+	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
 docker-run-s3: docker-build
-	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --path /custom-path
+	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --path /custom-path
 docker-restore-s3: docker-build
-	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore --storage s3 -f ${FILE_NAME} --path /custom-path
+	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore --storage s3 -f ${FILE_NAME} --path /custom-path
 docker-run-ssh: docker-build
-	docker run --rm --network web -v "${SSH_IDENTIFY_FILE_LOCAL}:" --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage ssh
+	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage ssh
 docker-restore-ssh: docker-build
-	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/mysql-bkup bkup restore --storage ssh -f ${FILE_NAME}
+	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" ${IMAGE_NAME} restore --storage ssh -f ${FILE_NAME}
 run-docs:
 	cd docs && bundle exec jekyll serve -H 0.0.0.0 -t

----

@@ -1,5 +1,5 @@
 # MySQL Backup
-mysql-bkup is a Docker container image that can be used to backup and restore Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+MySQL Backup is a Docker container image that can be used to backup and restore MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
 It also supports __encrypting__ your backups using GPG.
 The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
@@ -13,6 +13,7 @@ It also supports __encrypting__ your backups using GPG.
 ![Docker Pulls](https://img.shields.io/docker/pulls/jkaninda/mysql-bkup?style=flat-square)
 - Docker
+- Docker Swarm
 - Kubernetes
 ## Documentation is found at <https://jkaninda.github.io/mysql-bkup>
@@ -36,7 +37,7 @@ It also supports __encrypting__ your backups using GPG.
 ### Simple backup using Docker CLI
-To run a one time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
+To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:
 ```shell
 docker run --rm --network your_network_name \
@@ -44,11 +45,17 @@ To run a one time backup, bind your local volume to `/backup` in the container a
-e "DB_HOST=dbhost" \ -e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \ -e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \ -e "DB_PASSWORD=password" \
jkaninda/mysql-bkup mysql-bkup backup -d database_name jkaninda/mysql-bkup backup -d database_name
``` ```
Alternatively, pass a `--env-file` in order to use a full config as described below. Alternatively, pass a `--env-file` in order to use a full config as described below.
```yaml
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
### Simple backup in docker compose file ### Simple backup in docker compose file
@@ -61,10 +68,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup
+    command: backup
     volumes:
       - ./backup:/backup
     environment:
@@ -81,53 +85,45 @@ networks:
 ```
 ## Deploy on Kubernetes
-For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
+For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob.
-### Simple Kubernetes CronJob usage:
+### Simple Kubernetes backup Job :
 ```yaml
 apiVersion: batch/v1
-kind: CronJob
+kind: Job
 metadata:
-  name: bkup-job
+  name: backup
 spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - mysql-bkup backup -s s3 --path /custom_path
-              env:
-                - name: DB_PORT
-                  value: "5432"
-                - name: DB_HOST
-                  value: ""
-                - name: DB_NAME
-                  value: ""
-                - name: DB_USERNAME
-                  value: ""
-                # Please use secret!
-                - name: DB_PASSWORD
-                  value: ""
-                - name: AWS_S3_ENDPOINT
-                  value: "https://s3.amazonaws.com"
-                - name: AWS_S3_BUCKET_NAME
-                  value: "xxx"
-                - name: AWS_REGION
-                  value: "us-west-2"
-                - name: AWS_ACCESS_KEY
-                  value: "xxxx"
-                - name: AWS_SECRET_KEY
-                  value: "xxxx"
-                - name: AWS_DISABLE_SSL
-                  value: "false"
-          restartPolicy: Never
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - bkup
+            - backup
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: "dbname"
+            - name: DB_USERNAME
+              value: "username"
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+      restartPolicy: Never
 ```
 ## Available image registries
@@ -135,8 +131,8 @@ This Docker image is published to both Docker Hub and the GitHub container regis
 Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
 ```
-docker pull jkaninda/mysql-bkup:v1.0
-docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+docker pull jkaninda/mysql-bkup
+docker pull ghcr.io/jkaninda/mysql-bkup
 ```
 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.

----

@@ -21,6 +21,7 @@ ENV AWS_S3_BUCKET_NAME=""
 ENV AWS_ACCESS_KEY=""
 ENV AWS_SECRET_KEY=""
 ENV AWS_REGION="us-west-2"
+ENV AWS_S3_PATH=""
 ENV AWS_DISABLE_SSL="false"
 ENV GPG_PASSPHRASE=""
 ENV SSH_USER=""
@@ -30,7 +31,9 @@ ENV SSH_HOST_NAME=""
 ENV SSH_IDENTIFY_FILE=""
 ENV SSH_PORT="22"
 ARG DEBIAN_FRONTEND=noninteractive
-ENV VERSION="v1.0"
+ENV VERSION="v1.2.2"
+ENV BACKUP_CRON_EXPRESSION=""
+ENV GNUPGHOME="/tmp/gnupg"
 ARG WORKDIR="/app"
 ARG BACKUPDIR="/backup"
 ARG BACKUP_TMP_DIR="/tmp/backup"
@@ -39,7 +42,6 @@ ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
 LABEL author="Jonas Kaninda"
 RUN apt-get update -qq
-#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
 RUN apt install mysql-client supervisor cron gnupg -y
 # Clear cache
@@ -47,14 +49,16 @@ RUN apt-get clean && rm -rf /var/lib/apt/lists/*
 RUN mkdir $WORKDIR
 RUN mkdir $BACKUPDIR
-RUN mkdir -p $BACKUP_TMP_DIR
+RUN mkdir -p $BACKUP_TMP_DIR && \
+    mkdir -p $GNUPGHOME
 RUN chmod 777 $WORKDIR
 RUN chmod 777 $BACKUPDIR
 RUN chmod 777 $BACKUP_TMP_DIR
 RUN touch $BACKUP_CRON && \
     touch $BACKUP_CRON_SCRIPT && \
     chmod 777 $BACKUP_CRON && \
-    chmod 777 $BACKUP_CRON_SCRIPT
+    chmod 777 $BACKUP_CRON_SCRIPT && \
+    chmod 777 $GNUPGHOME
 COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
 RUN chmod +x /usr/local/bin/mysql-bkup
@@ -63,4 +67,19 @@ RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
 ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
 WORKDIR $WORKDIR
+# Create backup shell script
+COPY <<EOF /usr/local/bin/backup
+#!/bin/sh
+# shellcheck disable=SC2068
+/usr/local/bin/mysql-bkup backup $@
+EOF
+# Create restore shell script
+COPY <<EOF /usr/local/bin/restore
+#!/bin/sh
+# shellcheck disable=SC2068
+/usr/local/bin/mysql-bkup restore $@
+EOF
+RUN chmod +x /usr/local/bin/backup && \
+    chmod +x /usr/local/bin/restore
+ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
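Taken together, the new `ENTRYPOINT` and the `backup`/`restore` wrapper scripts are what let the docs in this changeset shorten `mysql-bkup backup` to plain `backup`. A before/after sketch (network and database names are placeholders):

```shell
# v1.0 style: image name followed by the full CLI invocation
docker run --rm --network your_network_name jkaninda/mysql-bkup mysql-bkup backup -d database_name
# v1.2.x style: the arguments are appended to the mysql-bkup entrypoint
docker run --rm --network your_network_name jkaninda/mysql-bkup backup -d database_name
```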

----

@@ -22,10 +22,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d database --path /my-custom-path
+    command: backup --storage s3 -d database --path /my-custom-path
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -62,10 +59,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+    command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql

----

@@ -7,7 +7,7 @@ nav_order: 3
 # Backup to SSH remote server
-As described for s3 backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage ssh` or `--storage remote`.
+As described for s3 backup section, to change the storage of your backup and use SSH Remote server as storage. You need to add `--storage ssh` or `--storage remote`.
 You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `SSH_REMOTE_PATH` environment variable.
 {: .note }
@@ -23,16 +23,13 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage remote -d database
+    command: backup --storage remote -d database
     volumes:
       - ./id_ed25519:/tmp/id_ed25519"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
-      - DB_NAME=database
+      #- DB_NAME=database
       - DB_USERNAME=username
       - DB_PASSWORD=password
 ## SSH config
@@ -66,10 +63,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database --storage s3 --mode scheduled --period "0 1 * * *"
+    command: backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
     volumes:
       - ./id_ed25519:/tmp/id_ed25519"
     environment:
@@ -117,7 +111,7 @@ spec:
           command:
             - /bin/sh
             - -c
-            - mysql-bkup backup -s s3 --path /custom_path
+            - mysql-bkup backup -s ssh
           env:
             - name: DB_PORT
               value: "3306"
@@ -141,6 +135,6 @@ spec:
             - name: AWS_ACCESS_KEY
               value: "xxxx"
             - name: SSH_IDENTIFY_FILE
-              value: "/home/jkaninda/backups"
+              value: "/tmp/id_ed25519"
-          restartPolicy: OnFailure
+          restartPolicy: Never
 ```

----

@@ -7,7 +7,7 @@ nav_order: 1
 # Backup database
-To backup the database, you need to add `backup` subcommand to `mysql-bkup` or `bkup`.
+To backup the database, you need to add `backup` command.
 {: .note }
 The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression.
@@ -27,10 +27,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database
+    command: backup -d database
     volumes:
       - ./backup:/backup
     environment:
@@ -54,7 +51,7 @@ networks:
-e "DB_HOST=dbhost" \ -e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \ -e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \ -e "DB_PASSWORD=password" \
jkaninda/mysql-bkup mysql-bkup backup -d database_name jkaninda/mysql-bkup backup -d database_name
``` ```
In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below. In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
@@ -68,10 +65,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database --mode scheduled --period "0 1 * * *"
+    command: backup -d database --mode scheduled --period "0 1 * * *"
     volumes:
       - ./backup:/backup
     environment:
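This page's note also mentions the `--disable-compression` flag without a standalone example; a sketch of an uncompressed one-off backup (host and credentials are placeholders):

```shell
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=dbhost" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d database_name --disable-compression
```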

----

@@ -0,0 +1,296 @@
---
title: Deploy on Kubernetes
layout: default
parent: How Tos
nav_order: 8
---
## Deploy on Kubernetes
To deploy MySQL Backup on Kubernetes, you can use a Job to backup or restore your database.
For recurring backups you can use a CronJob; you don't need to run it in scheduled mode, as described below.
## Backup to S3 storage
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: backup
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - bkup
            - backup
            - --storage
            - s3
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Please use secret!
            - name: DB_PASSWORD
              value: ""
            - name: AWS_S3_ENDPOINT
              value: "https://s3.amazonaws.com"
            - name: AWS_S3_BUCKET_NAME
              value: "xxx"
            - name: AWS_REGION
              value: "us-west-2"
            - name: AWS_ACCESS_KEY
              value: "xxxx"
            - name: AWS_SECRET_KEY
              value: "xxxx"
            - name: AWS_DISABLE_SSL
              value: "false"
      restartPolicy: Never
```
## Backup Job to SSH remote server
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: backup
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - bkup
            - backup
            - --storage
            - ssh
            - --disable-compression
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Please use secret!
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "xxx"
            - name: SSH_PORT
              value: "22"
            - name: SSH_USER
              value: "xxx"
            - name: SSH_PASSWORD
              value: "xxxx"
            - name: SSH_REMOTE_PATH
              value: "/home/toto/backup"
            # Optional, required if you want to encrypt your backup
            - name: GPG_PASSPHRASE
              value: "xxxx"
      restartPolicy: Never
```
## Restore Job
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: restore-job
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - bkup
            - restore
            - --storage
            - ssh
            - --file store_20231219_022941.sql.gz
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Please use secret!
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "xxx"
            - name: SSH_PORT
              value: "22"
            - name: SSH_USER
              value: "xxx"
            - name: SSH_PASSWORD
              value: "xxxx"
            - name: SSH_REMOTE_PATH
              value: "/home/xxxx/backup"
            # Optional, required if your backup was encrypted
            #- name: GPG_PASSPHRASE
            #  value: "xxxx"
      restartPolicy: Never
```
## Recurring backup
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-job
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: mysql-bkup
              image: jkaninda/mysql-bkup
              command:
                - bkup
                - backup
                - --storage
                - ssh
                - --disable-compression
              resources:
                limits:
                  memory: "128Mi"
                  cpu: "500m"
              env:
                - name: DB_PORT
                  value: "3306"
                - name: DB_HOST
                  value: ""
                - name: DB_NAME
                  value: "username"
                - name: DB_USERNAME
                  value: "username"
                # Please use secret!
                - name: DB_PASSWORD
                  value: ""
                - name: SSH_HOST_NAME
                  value: "xxx"
                - name: SSH_PORT
                  value: "xxx"
                - name: SSH_USER
                  value: "jkaninda"
                - name: SSH_REMOTE_PATH
                  value: "/home/jkaninda/backup"
                - name: SSH_PASSWORD
                  value: "password"
                # Optional, required if you want to encrypt your backup
                #- name: GPG_PASSPHRASE
                #  value: "xxx"
          restartPolicy: Never
```
## Kubernetes Rootless
This image also supports the Kubernetes security context, so you can run it in a rootless environment. It has been tested on OpenShift and works well.
Deployment on OpenShift is supported; you need to remove the `securityContext` section from your YAML file.
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-job
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          securityContext:
            runAsUser: 1000
            runAsGroup: 3000
            fsGroup: 2000
          containers:
            # In production, it is advised to lock your image tag to a proper
            # release version instead of using `latest`.
            # Check https://github.com/jkaninda/mysql-bkup/releases
            # for a list of available releases.
            - name: mysql-bkup
              image: jkaninda/mysql-bkup
              command:
                - bkup
                - backup
                - --storage
                - ssh
                - --disable-compression
              resources:
                limits:
                  memory: "128Mi"
                  cpu: "500m"
              env:
                - name: DB_PORT
                  value: "3306"
                - name: DB_HOST
                  value: ""
                - name: DB_NAME
                  value: "xxx"
                - name: DB_USERNAME
                  value: "xxx"
                # Please use secret!
                - name: DB_PASSWORD
                  value: ""
                - name: SSH_HOST_NAME
                  value: "xxx"
                - name: SSH_PORT
                  value: "22"
                - name: SSH_USER
                  value: "jkaninda"
                - name: SSH_REMOTE_PATH
                  value: "/home/jkaninda/backup"
                - name: SSH_PASSWORD
                  value: "password"
                # Optional, required if you want to encrypt your backup
                #- name: GPG_PASSPHRASE
                #  value: "xxx"
          restartPolicy: OnFailure
```
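As a usage sketch for the Job examples on this page (assuming the S3 backup Job above is saved as `backup-job.yaml` and creates a Job named `backup`):

```shell
kubectl apply -f backup-job.yaml
# Block until the Job finishes, then read the backup logs.
kubectl wait --for=condition=complete job/backup --timeout=300s
kubectl logs job/backup
```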

----

@@ -11,7 +11,7 @@ The image supports encrypting backups using GPG out of the box. In case a `GPG_P
 {: .warning }
 To restore an encrypted backup, you need to provide the same GPG passphrase used during backup process.
-To decrypt manually, you need to install gnupg
+To decrypt manually, you need to install `gnupg`
 ### Decrypt backup
@@ -32,10 +32,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup -d database
+    command: backup -d database
     volumes:
       - ./backup:/backup
     environment:
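The manual decrypt command itself falls outside the shown hunks; a minimal GnuPG sketch for a passphrase-encrypted archive (file names and passphrase are illustrative):

```shell
# Assumes symmetric GPG encryption with the passphrase used at backup time.
gpg --batch --passphrase "my-passphrase" \
  --output backup.sql.gz --decrypt backup.sql.gz.gpg
```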

----

@@ -7,7 +7,7 @@ nav_order: 5
 # Restore database from S3 storage
-To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed file.
@@ -23,10 +23,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
+    command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
     volumes:
       - ./backup:/backup
     environment:
@@ -48,4 +45,51 @@ services:
     - web
 networks:
   web:
 ```
+## Restore on Kubernetes
+Simple Kubernetes restore Job:
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: restore-db
+spec:
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - bkup restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: ""
+            - name: DB_USERNAME
+              value: ""
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: AWS_S3_ENDPOINT
+              value: "https://s3.amazonaws.com"
+            - name: AWS_S3_BUCKET_NAME
+              value: "xxx"
+            - name: AWS_REGION
+              value: "us-west-2"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: AWS_SECRET_KEY
+              value: "xxxx"
+            - name: AWS_DISABLE_SSL
+              value: "false"
+      restartPolicy: Never
+  backoffLimit: 4
+```

----

@@ -6,7 +6,7 @@ nav_order: 6
 ---
 # Restore database from SSH remote server
-To restore the database from your remote server, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database from your remote server, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed file.
@@ -22,10 +22,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
+    command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
     volumes:
       - ./backup:/backup
     environment:
@@ -47,4 +44,50 @@ services:
     - web
 networks:
   web:
 ```
+## Restore on Kubernetes
+Simple Kubernetes restore Job:
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: restore-db
+spec:
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - bkup restore -s ssh -f store_20231219_022941.sql.gz
+          env:
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_HOST
+              value: ""
+            - name: DB_NAME
+              value: ""
+            - name: DB_USERNAME
+              value: ""
+            # Please use secret!
+            - name: DB_PASSWORD
+              value: ""
+            - name: SSH_HOST_NAME
+              value: ""
+            - name: SSH_PORT
+              value: "22"
+            - name: SSH_USER
+              value: "xxx"
+            - name: SSH_REMOTE_PATH
+              value: "/home/jkaninda/backups"
+            - name: AWS_ACCESS_KEY
+              value: "xxxx"
+            - name: SSH_IDENTIFY_FILE
+              value: "/tmp/id_ed25519"
+      restartPolicy: Never
+  backoffLimit: 4
+```

----

@@ -7,7 +7,7 @@ nav_order: 4
 # Restore database
-To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
+To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
 {: .note }
 It supports __.sql__ and __.sql.gz__ compressed file.
@@ -23,10 +23,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup restore -d database -f store_20231219_022941.sql.gz
+    command: restore -d database -f store_20231219_022941.sql.gz
     volumes:
       - ./backup:/backup
     environment:

----

@@ -6,7 +6,7 @@ nav_order: 1
 # About mysql-bkup
 {:.no_toc}
-mysql-bkup is a Docker container image that can be used to backup and restore MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+MySQL Backup is a Docker container image that can be used to backup and restore MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH remote storage.
 It also supports __encrypting__ your backups using GPG.
 We are open to receiving stars, PRs, and issues!
@@ -32,7 +32,7 @@ Code and documentation for `v1` version on [this branch][v1-branch].
 ### Simple backup using Docker CLI
-To run a one time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
+To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:
 ```shell
 docker run --rm --network your_network_name \
@@ -40,11 +40,18 @@ To run a one time backup, bind your local volume to `/backup` in the container a
-e "DB_HOST=dbhost" \ -e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \ -e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \ -e "DB_PASSWORD=password" \
jkaninda/mysql-bkup mysql-bkup backup -d database_name jkaninda/mysql-bkup backup -d database_name
``` ```
Alternatively, pass a `--env-file` in order to use a full config as described below. Alternatively, pass a `--env-file` in order to use a full config as described below.
```yaml
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
### Simple backup in docker compose file ### Simple backup in docker compose file
```yaml ```yaml
@@ -56,10 +63,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup
+    command: backup
     volumes:
       - ./backup:/backup
     environment:
@@ -81,8 +85,8 @@ This Docker image is published to both Docker Hub and the GitHub container regis
 Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
 ```
-docker pull jkaninda/mysql-bkup:v1.0
-docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
+docker pull jkaninda/mysql-bkup
+docker pull ghcr.io/jkaninda/mysql-bkup
 ```
 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.

----

@@ -21,7 +21,7 @@ In the old version, S3 storage was mounted using s3fs, so we decided to migrate
 | Options    | Shorts | Usage                      |
 |------------|--------|----------------------------|
 | mysql-bkup | bkup   | CLI utility                |
 | backup     |        | Backup database operation  |
 | restore    |        | Restore database operation |
 | history    |        | Show the history of backup |
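Per the first row of this table (and the `RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup` line visible in the Dockerfile diff above), `bkup` is an alias for `mysql-bkup`, so inside the container these two calls are equivalent:

```shell
mysql-bkup backup -d database_name
bkup backup -d database_name
```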

----

@@ -6,10 +6,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database"
+    command: backup --storage s3 -d my-database"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql

----

@@ -1,12 +1,11 @@
version: "3" version: "3"
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: command: backup --dbname database_name --mode scheduled --period "0 1 * * *"
- /bin/sh
- -c
- mysql-bkup backup --dbname database_name --mode scheduled --period "0 1 * * *"
volumes: volumes:
- ./backup:/backup - ./backup:/backup
environment: environment:

----

@@ -6,10 +6,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+    command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql

----

@@ -3,10 +3,7 @@ services:
   mysql-bkup:
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command:
-      - /bin/sh
-      - -c
-      - mysql-bkup backup --dbname database_name
+    command: backup --dbname database_name
     volumes:
       - ./backup:/backup
     environment:

----

@@ -1,4 +1,4 @@
-piVersion: batch/v1
+apiVersion: batch/v1
 kind: CronJob
 metadata:
   name: bkup-job

----
go.mod

@@ -10,7 +10,6 @@ require (
 	github.com/hpcloud/tail v1.0.0
 	github.com/spf13/cobra v1.8.0
 	golang.org/x/crypto v0.18.0
-	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
 )
 require (

----

@@ -22,10 +22,9 @@ func StartBackup(cmd *cobra.Command) {
 	utils.SetEnv("STORAGE_PATH", storagePath)
 	utils.GetEnv(cmd, "dbname", "DB_NAME")
 	utils.GetEnv(cmd, "port", "DB_PORT")
-	utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
+	utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
 	//Get flag value and set env
-	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
 	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
@@ -35,6 +34,8 @@ func StartBackup(cmd *cobra.Command) {
 	executionMode, _ = cmd.Flags().GetString("mode")
 	dbName = os.Getenv("DB_NAME")
 	gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
+	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
 	//
 	if gpqPassphrase != "" {
 		encryption = true
@@ -49,7 +50,7 @@ func StartBackup(cmd *cobra.Command) {
 	if executionMode == "default" {
 		switch storage {
 		case "s3":
-			s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
+			s3Backup(backupFileName, disableCompression, prune, backupRetention, encryption)
 		case "local":
 			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
 		case "ssh", "remote":
@@ -61,7 +62,7 @@ func StartBackup(cmd *cobra.Command) {
 		}
 	} else if executionMode == "scheduled" {
-		scheduledMode()
+		scheduledMode(storage)
 	} else {
 		utils.Fatal("Error, unknown execution mode!")
 	}
@@ -69,14 +70,15 @@ func StartBackup(cmd *cobra.Command) {
 }
 // Run in scheduled mode
-func scheduledMode() {
+func scheduledMode(storage string) {
 	fmt.Println()
 	fmt.Println("**********************************")
 	fmt.Println("     Starting MySQL Bkup...       ")
 	fmt.Println("***********************************")
 	utils.Info("Running in Scheduled mode")
-	utils.Info("Execution period ", os.Getenv("SCHEDULE_PERIOD"))
+	utils.Info("Execution period %s", os.Getenv("BACKUP_CRON_EXPRESSION"))
+	utils.Info("Storage type %s ", storage)
 	//Test database connexion
 	utils.TestDatabaseConnection()
@@ -123,13 +125,6 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 	dbPort = os.Getenv("DB_PORT")
 	storagePath = os.Getenv("STORAGE_PATH")
-	// dbHVars Required environment variables for database
-	var dbHVars = []string{
-		"DB_HOST",
-		"DB_PASSWORD",
-		"DB_USERNAME",
-		"DB_NAME",
-	}
 	err := utils.CheckEnvVars(dbHVars)
 	if err != nil {
 		utils.Error("Please make sure all required environment variables for database are set")
@@ -208,10 +203,13 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
 	if prune {
 		deleteOldBackup(backupRetention)
 	}
+	//Delete temp
+	deleteTemp()
 }
-func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func s3Backup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
 	utils.Info("Backup database to s3 storage")
 	//Backup database
 	BackupDatabase(backupFileName, disableCompression)
@@ -220,7 +218,7 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru
 		encryptBackup(backupFileName)
 		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
 	}
-	utils.Info("Uploading backup file to S3 storage...")
+	utils.Info("Uploading backup archive to remote storage S3 ... ")
 	utils.Info("Backup name is %s", finalFileName)
 	err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
 	if err != nil {
@@ -241,7 +239,9 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru
utils.Fatal("Error deleting old backup from S3: %s ", err) utils.Fatal("Error deleting old backup from S3: %s ", err)
} }
} }
utils.Done("Database has been backed up and uploaded to s3 ") utils.Done("Uploading backup archive to remote storage S3 ... done ")
//Delete temp
deleteTemp()
} }
func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to Remote server") utils.Info("Backup database to Remote server")
@@ -252,7 +252,7 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune
 		encryptBackup(backupFileName)
 		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
 	}
-	utils.Info("Uploading backup file to remote server...")
+	utils.Info("Uploading backup archive to remote storage ... ")
 	utils.Info("Backup name is %s", finalFileName)
 	err := CopyToRemote(finalFileName, remotePath)
 	if err != nil {
@@ -272,9 +272,10 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune
 	}
-	utils.Done("Database has been backed up and uploaded to remote server ")
+	utils.Done("Uploading backup archive to remote storage ... done ")
+	//Delete temp
+	deleteTemp()
 }
 func encryptBackup(backupFileName string) {
 	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
 	err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
----

@@ -1,7 +1,6 @@
 package pkg
 import (
-	"fmt"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"os"
 	"os/exec"
@@ -16,7 +15,6 @@ func Decrypt(inputFile string, passphrase string) error {
 	err := cmd.Run()
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
 		return err
 	}
@@ -32,7 +30,6 @@ func Encrypt(inputFile string, passphrase string) error {
 	err := cmd.Run()
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
 		return err
 	}

----

@@ -71,4 +71,28 @@ func deleteOldBackup(retentionDays int) {
 		utils.Fatal(fmt.Sprintf("Error: %s", err))
 		return
 	}
+	utils.Done("Deleting old backups...done")
+}
+func deleteTemp() {
+	utils.Info("Deleting %s ...", tmpPath)
+	err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		// Check if the current item is a file
+		if !info.IsDir() {
+			// Delete the file
+			err = os.Remove(path)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		utils.Error("Error deleting files: %v", err)
+	} else {
+		utils.Info("Deleting %s ... done", tmpPath)
+	}
 }

----

@@ -44,7 +44,7 @@ func restoreFromS3(file, bucket, s3Path string) {
utils.Info("Restore database from s3") utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path) err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil { if err != nil {
utils.Fatal(fmt.Sprintf("Error download file from s3 %s %s", file, err)) utils.Fatal("Error download file from s3 %s %v", file, err)
} }
RestoreDatabase(file) RestoreDatabase(file)
} }
@@ -52,7 +52,7 @@ func restoreFromRemote(file, remotePath string) {
utils.Info("Restore database from remote server") utils.Info("Restore database from remote server")
err := CopyFromRemote(file, remotePath) err := CopyFromRemote(file, remotePath)
if err != nil { if err != nil {
utils.Fatal(fmt.Sprintf("Error download file from remote server: ", filepath.Join(remotePath, file), err)) utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
} }
RestoreDatabase(file) RestoreDatabase(file)
} }
@@ -68,13 +68,7 @@ func RestoreDatabase(file string) {
if file == "" { if file == "" {
utils.Fatal("Error, file required") utils.Fatal("Error, file required")
} }
// dbHVars Required environment variables for database
var dbHVars = []string{
"DB_HOST",
"DB_PASSWORD",
"DB_USERNAME",
"DB_NAME",
}
err := utils.CheckEnvVars(dbHVars) err := utils.CheckEnvVars(dbHVars)
if err != nil { if err != nil {
utils.Error("Please make sure all required environment variables for database are set") utils.Error("Please make sure all required environment variables for database are set")
@@ -90,7 +84,7 @@ func RestoreDatabase(file string) {
 		//Decrypt file
 		err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
 		if err != nil {
-			utils.Fatal("Error decrypting file ", file, err)
+			utils.Fatal("Error decrypting file %s %v", file, err)
 		}
 		//Update file name
 		file = RemoveLastExtension(file)
@@ -99,12 +93,8 @@ func RestoreDatabase(file string) {
 	}
 	if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
-		err := os.Setenv("mysqlPASSWORD", dbPassword)
-		if err != nil {
-			return
-		}
 		utils.TestDatabaseConnection()
-		utils.Info("Restoring database...")
 		extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
 		// Restore from compressed file / .sql.gz
@@ -112,9 +102,12 @@ func RestoreDatabase(file string) {
str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME") str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
_, err := exec.Command("bash", "-c", str).Output() _, err := exec.Command("bash", "-c", str).Output()
if err != nil { if err != nil {
utils.Fatal(fmt.Sprintf("Error, in restoring the database %s", err)) utils.Fatal("Error, in restoring the database %v", err)
} }
utils.Info("Restoring database... done")
utils.Done("Database has been restored") utils.Done("Database has been restored")
//Delete temp
deleteTemp()
} else if extension == ".sql" { } else if extension == ".sql" {
//Restore from sql file //Restore from sql file
@@ -123,7 +116,10 @@ func RestoreDatabase(file string) {
 			if err != nil {
 				utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
 			}
+			utils.Info("Restoring database... done")
 			utils.Done("Database has been restored")
+			//Delete temp
+			deleteTemp()
 		} else {
 			utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
 		}

----

@@ -8,7 +8,6 @@ import (
"github.com/bramvdbogaerde/go-scp/auth" "github.com/bramvdbogaerde/go-scp/auth"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh"
"golang.org/x/exp/slog"
"os" "os"
"path/filepath" "path/filepath"
) )
@@ -20,16 +19,9 @@ func CopyToRemote(fileName, remotePath string) error {
 	sshPort := os.Getenv("SSH_PORT")
 	sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
-	// SSSHVars Required environment variables for SSH remote server storage
-	var sshHVars = []string{
-		"SSH_USER",
-		"SSH_REMOTE_PATH",
-		"SSH_HOST_NAME",
-		"SSH_PORT",
-	}
 	err := utils.CheckEnvVars(sshHVars)
 	if err != nil {
-		slog.Error(fmt.Sprintf("Error checking environment variables\n: %s", err))
+		utils.Error("Error checking environment variables: %s", err)
 		os.Exit(1)
 	}
@@ -39,9 +31,9 @@ func CopyToRemote(fileName, remotePath string) error {
 	} else {
 		if sshPassword == "" {
-			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
+			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
 		}
-		slog.Warn("Accessing the remote server using password, password is not recommended\n")
+		utils.Warn("Accessing the remote server using password, password is not recommended")
 		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
 	}
@@ -51,7 +43,7 @@ func CopyToRemote(fileName, remotePath string) error {
 	// Connect to the remote server
 	err = client.Connect()
 	if err != nil {
-		return errors.New("Couldn't establish a connection to the remote server\n")
+		return errors.New("Couldn't establish a connection to the remote server")
 	}
 	// Open a file
@@ -77,6 +69,12 @@ func CopyFromRemote(fileName, remotePath string) error {
 	sshPort := os.Getenv("SSH_PORT")
 	sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
+	err := utils.CheckEnvVars(sshHVars)
+	if err != nil {
+		utils.Error("Error checking environment variables\n: %s", err)
+		os.Exit(1)
+	}
 	clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
 	if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
 		clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
@@ -85,7 +83,7 @@ func CopyFromRemote(fileName, remotePath string) error {
if sshPassword == "" { if sshPassword == "" {
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n") return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
} }
slog.Warn("Accessing the remote server using password, password is not recommended\n") utils.Warn("Accessing the remote server using password, password is not recommended")
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
} }
@@ -93,7 +91,7 @@ func CopyFromRemote(fileName, remotePath string) error {
 	client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
 	// Connect to the remote server
-	err := client.Connect()
+	err = client.Connect()
 	if err != nil {
 		return errors.New("Couldn't establish a connection to the remote server\n")
 	}
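With `CheckEnvVars(sshHVars)` now enforced on both upload and download (the `sshHVars` list is added in the vars diff below), SSH storage requires these four variables to be set; for example (the host name is a placeholder):

```shell
export SSH_USER="jkaninda"
export SSH_HOST_NAME="example.com"
export SSH_PORT="22"
export SSH_REMOTE_PATH="/home/jkaninda/backups"
# Plus either SSH_IDENTIFY_FILE or SSH_PASSWORD for authentication.
```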

----

@@ -22,19 +22,10 @@ func CreateCrontabScript(disableCompression bool, storage string) {
 		disableC = "--disable-compression"
 	}
-	var scriptContent string
-	if storage == "s3" {
-		scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
+	scriptContent := fmt.Sprintf(`#!/usr/bin/env bash
 set -e
-bkup backup --dbname %s --port %s --storage s3 --path %s %v
-`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), os.Getenv("S3_PATH"), disableC)
-	} else {
-		scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
-set -e
-bkup backup --dbname %s --port %s %v
-`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), disableC)
-	}
+/usr/local/bin/mysql-bkup backup --dbname %s --port %s --storage %s %v
+`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), storage, disableC)
 	if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
 		utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
@@ -63,7 +54,7 @@ bkup backup --dbname %s --port %s %v
 	}
 	cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
-`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
+`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)
 	if err := utils.WriteToFile(cronJob, cronContent); err != nil {
 		utils.Fatal("Error writing to %s: %v\n", cronJob, err)

----

@@ -19,3 +19,19 @@ var (
 	disableCompression = false
 	encryption         = false
 )
+// dbHVars Required environment variables for database
+var dbHVars = []string{
+	"DB_HOST",
+	"DB_PASSWORD",
+	"DB_USERNAME",
+	"DB_NAME",
+}
+// sshHVars Required environment variables for SSH remote server storage
+var sshHVars = []string{
+	"SSH_USER",
+	"SSH_REMOTE_PATH",
+	"SSH_HOST_NAME",
+	"SSH_PORT",
+}

----

@@ -1,8 +0,0 @@
-#!/bin/sh
-DB_USERNAME='db_username'
-DB_PASSWORD='password'
-DB_HOST='db_hostname'
-DB_NAME='db_name'
-BACKUP_DIR="$PWD/backup"
-docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:latest backup -d $DB_NAME

----
utils/logger.go

@@ -0,0 +1,56 @@
package utils

import (
	"fmt"
	"os"
	"time"
)

var currentTime = time.Now().Format("2006/01/02 15:04:05")

func Info(msg string, args ...any) {
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s INFO: %s\n", currentTime, msg)
	} else {
		fmt.Printf("%s INFO: %s\n", currentTime, formattedMessage)
	}
}

// Warn warning message
func Warn(msg string, args ...any) {
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s WARN: %s\n", currentTime, msg)
	} else {
		fmt.Printf("%s WARN: %s\n", currentTime, formattedMessage)
	}
}

func Error(msg string, args ...any) {
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
	} else {
		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
	}
}

func Done(msg string, args ...any) {
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s INFO: %s\n", currentTime, msg)
	} else {
		fmt.Printf("%s INFO: %s\n", currentTime, formattedMessage)
	}
}

// Fatal logs an error message and exits the program
func Fatal(msg string, args ...any) {
	formattedMessage := fmt.Sprintf(msg, args...)
	if len(args) == 0 {
		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
	} else {
		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
	}
	os.Exit(1)
}

----

@@ -8,7 +8,6 @@ import (
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/aws/aws-sdk-go/service/s3/s3manager"
"golang.org/x/exp/slog"
"log" "log"
"net/http" "net/http"
"os" "os"
@@ -43,8 +42,7 @@ func CreateSession() (*session.Session, error) {
 	err = CheckEnvVars(awsVars)
 	if err != nil {
-		slog.Error(fmt.Sprintf("Error checking environment variables\n: %s", err))
-		os.Exit(1)
+		Fatal("Error checking environment variables\n: %s", err)
 	}
 	// S3 Config
 	s3Config := &aws.Config{

----

@@ -10,51 +10,12 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"golang.org/x/exp/slog"
"io" "io"
"io/fs" "io/fs"
"os" "os"
"os/exec" "os/exec"
) )
func Info(msg string, args ...any) {
if len(args) == 0 {
slog.Info(msg)
} else {
slog.Info(fmt.Sprintf(msg, args...))
}
}
func Worn(msg string, args ...any) {
if len(args) == 0 {
slog.Warn(msg)
} else {
slog.Warn(fmt.Sprintf(msg, args...))
}
}
func Error(msg string, args ...any) {
if len(args) == 0 {
slog.Error(msg)
} else {
slog.Error(fmt.Sprintf(msg, args...))
}
}
func Done(msg string, args ...any) {
if len(args) == 0 {
slog.Info(msg)
} else {
slog.Info(fmt.Sprintf(msg, args...))
}
}
func Fatal(msg string, args ...any) {
// Fatal logs an error message and exits the program.
if len(args) == 0 {
slog.Error(msg)
} else {
slog.Error(fmt.Sprintf(msg, args...))
}
os.Exit(1)
}
func FileExists(filename string) bool { func FileExists(filename string) bool {
info, err := os.Stat(filename) info, err := os.Stat(filename)
if os.IsNotExist(err) { if os.IsNotExist(err) {
@@ -150,7 +111,7 @@ func TestDatabaseConnection() {
 	cmd.Stderr = &out
 	err := cmd.Run()
 	if err != nil {
-		slog.Error(fmt.Sprintf("Error testing database connection: %v\nOutput: %s\n", err, out.String()))
+		Error("Error testing database connection: %v\nOutput: %s", err, out.String())
 		os.Exit(1)
 	}
@@ -196,7 +157,7 @@ func GetEnvVariable(envName, oldEnvName string) string {
 	if err != nil {
 		return value
 	}
-	Worn("%s is deprecated, please use %s instead!\n", oldEnvName, envName)
+	Warn("%s is deprecated, please use %s instead!", oldEnvName, envName)
 }
} }