Compare commits


29 Commits

Author SHA1 Message Date
40557af437 Merge pull request #130 from jkaninda/refactor
Refactor
2024-10-20 06:58:20 +02:00
Jonas Kaninda
1dcb9586a6 chore: add command usage error 2024-10-20 06:57:06 +02:00
Jonas Kaninda
2c6336e84a chore: add backup prune, replace period flag by BACKUP_RETENTION_DAYS environment variable 2024-10-20 06:52:36 +02:00
c16ee3a492 Merge pull request #129 from jkaninda/refactor
chore: replace prune and keep-last flags by BACKUP_RETENTION_DAYS env…
2024-10-19 05:31:31 +02:00
Jonas Kaninda
3f7d28ea49 chore: replace prune and keep-last flags by BACKUP_RETENTION_DAYS env variable 2024-10-19 05:30:57 +02:00
cea1ef9c3b Merge pull request #128 from jkaninda/refactor
doc: update using s3 storage example
2024-10-18 08:51:17 +02:00
Jonas Kaninda
56c271bc29 doc: update using s3 storage example 2024-10-18 08:51:04 +02:00
45c30dca5f Merge pull request #127 from jkaninda/refactor
Refactor
2024-10-15 16:57:44 +02:00
Jonas Kaninda
b0ae212578 docs: update scheduled backup docker deployment example 2024-10-15 16:53:37 +02:00
Jonas Kaninda
6e2d3a9f21 add env.example 2024-10-15 16:52:25 +02:00
Jonas Kaninda
dd314aa4cb chore: clean up Dockerfile 2024-10-15 16:50:37 +02:00
Jonas Kaninda
24ccdaa671 refactor: add default env variable value 2024-10-15 16:43:02 +02:00
45e3452376 Merge pull request #126 from jkaninda/refactor
docs: update readme
2024-10-13 14:53:31 +02:00
Jonas Kaninda
3527b4cdcd docs: update readme 2024-10-13 14:53:04 +02:00
dc6fe2f4b9 Merge pull request #125 from jkaninda/refactor
chore: switch to encryptor module
2024-10-13 14:34:18 +02:00
Jonas Kaninda
f0afc0f4e0 chore: switch to encryptor module 2024-10-13 14:33:54 +02:00
7d7c813bb0 Merge pull request #124 from jkaninda/template
docs: update restoration supported extensions
2024-10-12 11:14:08 +02:00
Jonas Kaninda
6b8491cdc0 docs: update restoration supported extensions 2024-10-12 11:13:45 +02:00
a1dd6e3f58 Merge pull request #123 from jkaninda/template
docs: update email notification example
2024-10-12 10:47:28 +02:00
Jonas Kaninda
86ba3530c9 docs: update email notification example 2024-10-12 10:47:14 +02:00
e1f3b15003 Merge pull request #122 from jkaninda/template
docs: update Kubernetes deployments
2024-10-10 21:22:08 +02:00
Jonas Kaninda
1577e92a66 docs: update Kubernetes deployments 2024-10-10 21:21:52 +02:00
7b67f88769 Merge pull request #121 from jkaninda/template
Template
2024-10-10 21:11:14 +02:00
Jonas Kaninda
043233dabe docs: update Kubernetes deployments 2024-10-10 21:10:40 +02:00
Jonas Kaninda
d6652cfb75 chore: update github link 2024-10-10 21:03:07 +02:00
140ed608ab Merge pull request #120 from jkaninda/fix-dockerfile
fix: Dockerfile backup, restore, and migrate scripts since the migration of base image from Ubuntu to alpine
2024-10-10 10:03:44 +02:00
Jonas Kaninda
98211a27b8 fix: Dockerfile backup, restore, and migrate scripts since the migration of base image from Ubuntu to alpine 2024-10-10 10:02:42 +02:00
4e4d45e555 Merge pull request #119 from jkaninda/fix-notification
fix: fix multi backup s3 path
2024-10-10 05:51:46 +02:00
Jonas Kaninda
01e41acb5c fix: fix multi backup s3 path 2024-10-10 05:51:18 +02:00
30 changed files with 259 additions and 524 deletions

69
.env.example Normal file
View File

@@ -0,0 +1,69 @@
### Database
DB_HOST=
DB_PORT=3306
DB_USERNAME=
DB_PASSWORD=
DB_NAME=
TZ=Europe/Paris
### Database Migration
#TARGET_DB_HOST=
#TARGET_DB_PORT=3306
#TARGET_DB_NAME=
#TARGET_DB_USERNAME=
#TARGET_DB_PASSWORD=
### Backup restoration
#FILE_NAME=
### AWS S3 Storage
#ACCESS_KEY=
#SECRET_KEY=
#AWS_S3_BUCKET_NAME=
#AWS_S3_ENDPOINT=
#AWS_REGION=
#AWS_S3_PATH=
#AWS_DISABLE_SSL=false
#AWS_FORCE_PATH_STYLE=true
### Backup Cron Expression
#BACKUP_CRON_EXPRESSION=@midnight
## Delete old backups created more than the specified number of days ago
#BACKUP_RETENTION_DAYS=7
####SSH Storage
#SSH_HOST_NAME=
#SSH_PORT=22
#SSH_USER=
#SSH_PASSWORD=
#SSH_IDENTIFY_FILE=/tmp/id_ed25519
####FTP Storage
#FTP_PASSWORD=
#FTP_HOST_NAME=
#FTP_USER=
#FTP_PORT=21
#REMOTE_PATH=
#### Backup encryption
#GPG_PUBLIC_KEY=/config/public_key.asc
#GPG_PRIVATE_KEY=/config/private_key.asc
#GPG_PASSPHRASE=Your strong passphrase
## For multiple database backup on Docker or Docker in Swarm mode
#BACKUP_CONFIG_FILE=/config/config.yaml
### Database restoration
#FILE_NAME=
### Notification
#BACKUP_REFERENCE=K8s/Paris cluster
## Telegram
#TG_TOKEN=
#TG_CHAT_ID=
### Email
#MAIL_HOST=
#MAIL_PORT=
#MAIL_USERNAME=
#MAIL_PASSWORD=
#MAIL_FROM=Backup Jobs <backup-jobs@example.com>
#MAIL_TO=backup@example.com,me@example.com,team@example.com
#MAIL_SKIP_TLS=false

View File

@@ -10,50 +10,18 @@ RUN go mod download
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
FROM alpine:3.20.3
ENV DB_HOST=""
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT=3306
ENV STORAGE=local
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
ENV AWS_ACCESS_KEY=""
ENV AWS_SECRET_KEY=""
ENV AWS_S3_PATH=""
ENV AWS_REGION="us-west-2"
ENV AWS_DISABLE_SSL="false"
ENV AWS_FORCE_PATH_STYLE="true"
ENV GPG_PASSPHRASE=""
ENV SSH_USER=""
ENV SSH_PASSWORD=""
ENV SSH_HOST=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT=22
ENV REMOTE_PATH=""
ENV FTP_HOST=""
ENV FTP_PORT=21
ENV FTP_USER=""
ENV FTP_PASSWORD=""
ENV TARGET_DB_HOST=""
ENV TARGET_DB_PORT=3306
ENV TARGET_DB_NAME=""
ENV TARGET_DB_USERNAME=""
ENV TARGET_DB_PASSWORD=""
ENV BACKUP_CRON_EXPRESSION=""
ENV TG_TOKEN=""
ENV TG_CHAT_ID=""
ENV TZ=UTC
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG TEMPLATES_DIR="/config/templates"
ARG appVersion="v1.2.12"
ARG appVersion=""
ENV VERSION=${appVersion}
LABEL author="Jonas Kaninda"
LABEL version=${appVersion}
LABEL github="github.com/jkaninda/mysql-bkup"
RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata
RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata ca-certificates
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir $TEMPLATES_DIR
@@ -70,13 +38,13 @@ RUN chmod +x /usr/local/bin/mysql-bkup
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
# Create backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
chmod +x /usr/local/bin/migrate
WORKDIR $WORKDIR

View File

@@ -3,10 +3,13 @@ MySQL Backup is a Docker container image that can be used to backup, restore and
It also supports __encrypting__ your backups using GPG.
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3, FTP or SSH compatible storage.
It handles __recurring__ backups of MySQL or MariaDB databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3, FTP, or SSH-compatible storage.
It also supports database __encryption__ using GPG.
It sends Telegram and email notifications on successful and failed backups.
[![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
[![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square)
@@ -96,7 +99,7 @@ networks:
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" #@midnight
```
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules

View File

@@ -20,7 +20,7 @@ var BackupCmd = &cobra.Command{
if len(args) == 0 {
pkg.StartBackup(cmd)
} else {
utils.Fatal("Error, no argument required")
utils.Fatal(`"backup" accepts no argument %q`, args)
}
},
}
@@ -30,8 +30,6 @@ func init() {
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
}
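
For context, the manual `len(args)` check shown in the command diffs above mirrors what cobra can already enforce. A minimal sketch of the same guard using cobra's built-in `cobra.NoArgs` validator — an alternative, not what the commit actually does:

```go
package cmd

import (
	"github.com/jkaninda/mysql-bkup/pkg"
	"github.com/spf13/cobra"
)

// Sketch only: cobra.NoArgs rejects stray positional arguments with a
// usage error before Run is ever called, replacing the manual check.
var BackupCmd = &cobra.Command{
	Use:  "backup",
	Args: cobra.NoArgs,
	Run: func(cmd *cobra.Command, _ []string) {
		pkg.StartBackup(cmd)
	},
}
```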

View File

@@ -19,7 +19,7 @@ var MigrateCmd = &cobra.Command{
if len(args) == 0 {
pkg.StartMigration(cmd)
} else {
utils.Fatal("Error, no argument required")
utils.Fatal(`"migrate" accepts no argument %q`, args)
}

View File

@@ -14,7 +14,7 @@ var RestoreCmd = &cobra.Command{
if len(args) == 0 {
pkg.StartRestore(cmd)
} else {
utils.Fatal("Error, no argument required")
utils.Fatal(`"restore" accepts no argument %q`, args)
}

View File

@@ -37,6 +37,7 @@ services:
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
- AWS_FORCE_PATH_STYLE="false"
# mysql-bkup container must be connected to the same network as your database
networks:
@@ -73,6 +74,8 @@ services:
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
# Delete old backups created more than the specified number of days ago
#- BACKUP_RETENTION_DAYS=7
## If you are using an S3 alternative such as MinIO and your instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network as your database
@@ -82,53 +85,3 @@ networks:
web:
```
## Deploy on Kubernetes
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
### Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: OnFailure
```

View File

@@ -79,6 +79,8 @@ services:
- REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
# Delete old backups created more than the specified number of days ago
#- BACKUP_RETENTION_DAYS=7
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network as your database
@@ -87,55 +89,3 @@ services:
networks:
web:
```
## Deploy on Kubernetes
For Kubernetes, you don't need to run it in scheduled mode.
You can deploy it as CronJob.
Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -s ssh
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST
value: ""
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: REMOTE_PATH
value: "/home/jkaninda/backups"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: SSH_IDENTIFY_FILE
value: "/tmp/id_ed25519"
restartPolicy: Never
```

View File

@@ -75,6 +75,8 @@ services:
- DB_USERNAME=username
- DB_PASSWORD=password
- BACKUP_CRON_EXPRESSION=0 1 * * *
# Delete old backups created more than the specified number of days ago
#- BACKUP_RETENTION_DAYS=7
# mysql-bkup container must be connected to the same network as your database
networks:
- web

View File

@@ -59,6 +59,8 @@ spec:
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
- name: AWS_FORCE_PATH_STYLE
value: "false"
restartPolicy: Never
```
@@ -81,13 +83,9 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- ssh
- --disable-compression
- /bin/sh
- -c
- backup --storage ssh
resources:
limits:
memory: "128Mi"
@@ -116,7 +114,7 @@ spec:
value: "/home/toto/backup"
# Optional, required if you want to encrypt your backup
- name: GPG_PASSPHRASE
value: "xxxx"
value: "secure-passphrase"
restartPolicy: Never
```
@@ -139,13 +137,9 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- restore
- --storage
- ssh
- --file store_20231219_022941.sql.gz
- /bin/sh
- -c
- restore --storage ssh --file store_20231219_022941.sql.gz
resources:
limits:
memory: "128Mi"
@@ -238,7 +232,6 @@ spec:
This image also supports the Kubernetes security context and can run in a rootless environment.
It has been tested on Openshift, it works well.
Deployment on OpenShift is supported; you need to remove the `securityContext` section from your YAML file.
```yaml
apiVersion: batch/v1
@@ -301,3 +294,55 @@ spec:
# value: "xxx"
restartPolicy: OnFailure
```
## Migrate database
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: migrate-db
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- migrate
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
## Source Database
- name: DB_HOST
value: "mysql"
- name: DB_PORT
value: "3306"
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
- name: DB_PASSWORD
value: "password"
## Target Database
- name: TARGET_DB_HOST
value: "target-mysql"
- name: TARGET_DB_PORT
value: "3306"
- name: TARGET_DB_NAME
value: "dbname"
- name: TARGET_DB_USERNAME
value: "username"
- name: TARGET_DB_PASSWORD
value: "password"
restartPolicy: Never
```

View File

@@ -78,54 +78,3 @@ TARGET_DB_PASSWORD=password
jkaninda/mysql-bkup migrate
```
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: migrate-db
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- migrate
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
## Source Database
- name: DB_HOST
value: "mysql"
- name: DB_PORT
value: "3306"
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
- name: DB_PASSWORD
value: "password"
## Target Database
- name: TARGET_DB_HOST
value: "target-mysql"
- name: TARGET_DB_PORT
value: "3306"
- name: TARGET_DB_NAME
value: "dbname"
- name: TARGET_DB_USERNAME
value: "username"
- name: TARGET_DB_PASSWORD
value: "password"
restartPolicy: Never
```

View File

@@ -27,7 +27,8 @@ services:
- MAIL_PORT=587
- MAIL_USERNAME=
- MAIL_PASSWORD=!
- MAIL_FROM=
- MAIL_FROM=Backup Jobs <backup@example.com>
## Multiple recipients separated by a comma
- MAIL_TO=me@example.com,team@example.com,manager@example.com
- MAIL_SKIP_TLS=false
## Time format for notification
@@ -93,13 +94,6 @@ Here is a list of all data passed to the template:
```html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>✅ Database Backup Notification {{.Database}}</title>
</head>
<body>
<h2>Hi,</h2>
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
<h3>Backup Details:</h3>
@@ -113,8 +107,6 @@ Here is a list of all data passed to the template:
<li>Backup Reference: {{.BackupReference}} </li>
</ul>
<p>Best regards,</p>
</body>
</html>
```
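
For context, placeholders like `{{.Database}}` above are rendered with Go's `html/template`. A minimal, self-contained sketch, assuming a hypothetical `templateData` struct with the three fields visible in the template (the project's real struct and field set may differ):

```go
package main

import (
	"html/template"
	"os"
)

// templateData is hypothetical; the fields mirror the {{.Database}},
// {{.EndTime}} and {{.BackupReference}} placeholders visible above.
type templateData struct {
	Database        string
	EndTime         string
	BackupReference string
}

func main() {
	const body = `<p>Backup of the {{.Database}} database completed on {{.EndTime}} ({{.BackupReference}}).</p>`
	tmpl := template.Must(template.New("email").Parse(body))
	if err := tmpl.Execute(os.Stdout, templateData{
		Database:        "store",
		EndTime:         "2024-10-20 at 06:58:20",
		BackupReference: "K8s/Paris cluster",
	}); err != nil {
		panic(err)
	}
}
```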
> telegram.template

View File

@@ -10,7 +10,7 @@ nav_order: 6
To restore the database, add the `restore` command and specify the file to restore with `--file store_20231219_022941.sql.gz`.
{: .note }
It supports __.sql__ and __.sql.gz__ compressed file.
It supports __.sql__, __.sql.gpg__, __.sql.gz__, and __.sql.gz.gpg__ files.
### Restore
@@ -40,56 +40,10 @@ services:
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
- AWS_FORCE_PATH_STYLE="false"
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
## Restore on Kubernetes
Simple Kubernetes restore Job:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: restore-db
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
backoffLimit: 4
```

View File

@@ -9,7 +9,7 @@ nav_order: 7
To restore the database from your remote server, add the `restore` command and specify the file to restore with `--file store_20231219_022941.sql.gz`.
{: .note }
It supports __.sql__ and __.sql.gz__ compressed file.
It supports __.sql__, __.sql.gpg__, __.sql.gz__, and __.sql.gz.gpg__ files.
### Restore
@@ -44,50 +44,4 @@ services:
- web
networks:
web:
```
## Restore on Kubernetes
Simple Kubernetes restore Job:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: restore-db
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- restore -s ssh -f store_20231219_022941.sql.gz
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: ""
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backups"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: SSH_IDENTIFY_FILE
value: "/tmp/id_ed25519"
restartPolicy: Never
backoffLimit: 4
```

View File

@@ -10,7 +10,7 @@ nav_order: 5
To restore the database, add the `restore` command and specify the file to restore with `--file store_20231219_022941.sql.gz`.
{: .note }
It supports __.sql__ and __.sql.gz__ compressed file.
It supports __.sql__, __.sql.gpg__, __.sql.gz__, and __.sql.gz.gpg__ files.
### Restore

View File

@@ -9,6 +9,9 @@ nav_order: 1
MySQL Backup is a Docker container image that can be used to back up, restore, and migrate MySQL databases. It supports local storage, AWS S3 (or any S3-compatible object storage), FTP, and SSH remote storage.
It also supports __encrypting__ your backups using GPG.
It sends Telegram and email notifications on successful and failed backups.
We are open to receiving stars, PRs, and issues!
@@ -88,7 +91,7 @@ networks:
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" #@midnight
```
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules

View File

@@ -26,8 +26,6 @@ Backup, restore and migrate targets, schedule and retention are configured using
| --dbname | -d | Database name |
| --port | -p | Database port (default: 3306) |
| --disable-compression | | Disable database backup compression |
| --prune | | Delete old backup, default disabled |
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
@@ -52,6 +50,7 @@ Backup, restore and migrate targets, schedule and retention are configured using
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| GPG_PUBLIC_KEY | Optional, required to encrypt backup | GPG public key, used to encrypt backup (/config/public_key.asc) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode |
| BACKUP_RETENTION_DAYS | Optional | Delete old backups created more than the specified number of days ago |
| SSH_HOST | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |

View File

@@ -44,4 +44,6 @@ spec:
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
- name: AWS_FORCE_PATH_STYLE
value: "false"
restartPolicy: Never
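
The `AWS_DISABLE_SSL` and `AWS_FORCE_PATH_STYLE` variables presumably map to the corresponding aws-sdk-go client options; path-style addressing (bucket in the URL path rather than the hostname) is what MinIO-style endpoints typically require. A hedged sketch of that mapping — illustrative only, not the project's actual wiring:

```go
package main

import (
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Illustrative mapping of the AWS_* variables to aws-sdk-go options.
	sess, err := session.NewSession(&aws.Config{
		Endpoint:    aws.String(os.Getenv("AWS_S3_ENDPOINT")),
		Region:      aws.String(os.Getenv("AWS_REGION")),
		Credentials: credentials.NewStaticCredentials(os.Getenv("AWS_ACCESS_KEY"), os.Getenv("AWS_SECRET_KEY"), ""),
		// MinIO-style endpoints usually need path-style URLs and
		// sometimes plain HTTP.
		DisableSSL:       aws.Bool(os.Getenv("AWS_DISABLE_SSL") == "true"),
		S3ForcePathStyle: aws.Bool(os.Getenv("AWS_FORCE_PATH_STYLE") == "true"),
	})
	if err != nil {
		panic(err)
	}
	_ = sess // hand the session to s3.New(sess) in real code
}
```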

1
go.mod
View File

@@ -24,6 +24,7 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
golang.org/x/sys v0.22.0 // indirect

2
go.sum
View File

@@ -28,6 +28,8 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221 h1:AwkCf7el1kzeCJ89A+gUAK0ero5JYnvLOKsYMzq+rs4=
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221/go.mod h1:9F8ZJ+ZXE8DZBo77+aneGj8LMjrYXX6eFUCC/uqZOUo=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=

View File

@@ -8,6 +8,7 @@ package pkg
import (
"fmt"
"github.com/jkaninda/encryptor"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
@@ -45,6 +46,7 @@ func StartBackup(cmd *cobra.Command) {
func scheduledMode(db *dbConfig, config *BackupConfig) {
utils.Info("Running in Scheduled mode")
utils.Info("Backup cron expression: %s", config.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", config.storage)
//Test backup
@@ -57,6 +59,8 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
_, err := c.AddFunc(config.cronExpression, func() {
BackupTask(db, config)
utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
})
if err != nil {
return
@@ -118,6 +122,7 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
if utils.IsValidCronExpression(bkConfig.cronExpression) {
utils.Info("Running MultiBackup in Scheduled mode")
utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", bkConfig.storage)
//Test backup
@@ -131,6 +136,8 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
_, err := c.AddFunc(bkConfig.cronExpression, func() {
// Create a channel
multiBackupTask(conf.Databases, bkConfig)
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
})
if err != nil {
return
@@ -302,7 +309,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
BackupLocation: filepath.Join(s3Path, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
@@ -417,16 +424,30 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
}
func encryptBackup(config *BackupConfig) {
backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
if err != nil {
utils.Fatal("Error reading backup file: %s ", err)
}
if config.usingKey {
err := encryptWithGPGPublicKey(filepath.Join(tmpPath, config.backupFileName), config.publicKey)
utils.Info("Encrypting backup using public key...")
pubKey, err := os.ReadFile(config.publicKey)
if err != nil {
utils.Fatal("error during encrypting backup %v", err)
utils.Fatal("Error reading public key: %s ", err)
}
err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
if err != nil {
utils.Fatal("Error encrypting backup file: %v ", err)
}
utils.Info("Encrypting backup using public key...done")
} else if config.passphrase != "" {
err := encryptWithGPG(filepath.Join(tmpPath, config.backupFileName), config.passphrase)
utils.Info("Encrypting backup using passphrase...")
err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
if err != nil {
utils.Fatal("error during encrypting backup %v", err)
}
utils.Info("Encrypting backup using passphrase...done")
}
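
Based on the calls visible in this hunk, the `jkaninda/encryptor` module takes the plaintext bytes, an output path, and a passphrase. A minimal usage sketch built only from the signatures shown above:

```go
package main

import (
	"os"
	"path/filepath"

	"github.com/jkaninda/encryptor"
)

func main() {
	// Read the dump produced by the backup step...
	in := filepath.Join("/tmp/backup", "store_20231219_022941.sql.gz")
	data, err := os.ReadFile(in)
	if err != nil {
		panic(err)
	}
	// ...and write the encrypted copy next to it with a .gpg suffix,
	// as the hunk above does for passphrase-based encryption.
	if err := encryptor.Encrypt(data, in+".gpg", os.Getenv("GPG_PASSPHRASE")); err != nil {
		panic(err)
	}
}
```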

View File

@@ -154,11 +154,11 @@ func initAWSConfig() *AWSConfig {
aConfig.region = os.Getenv("AWS_REGION")
disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil {
utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
disableSsl = false
}
forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
if err != nil {
utils.Fatal("Unable to parse AWS_FORCE_PATH_STYLE env var: %s", err)
forcePathStyle = false
}
aConfig.disableSsl = disableSsl
aConfig.forcePathStyle = forcePathStyle
@@ -172,13 +172,15 @@ func initAWSConfig() *AWSConfig {
func initBackupConfig(cmd *cobra.Command) *BackupConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
backupRetention, _ := cmd.Flags().GetInt("keep-last")
prune, _ := cmd.Flags().GetBool("prune")
prune := false
backupRetention := utils.GetIntEnv("BACKUP_RETENTION_DAYS")
if backupRetention > 0 {
prune = true
}
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
_, _ = cmd.Flags().GetString("mode")
passphrase := os.Getenv("GPG_PASSPHRASE")
@@ -253,7 +255,7 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
func initTargetDbConfig() *targetDbConfig {
tdbConfig := targetDbConfig{}
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "3306")
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
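
With `--prune`/`--keep-last` gone, pruning is driven entirely by `BACKUP_RETENTION_DAYS`, as the `initBackupConfig` hunk above shows. A hypothetical sketch of what a retention pass could look like — `pruneOldBackups` is illustrative, not the project's implementation:

```go
package main

import (
	"os"
	"path/filepath"
	"strconv"
	"time"
)

// pruneOldBackups (illustrative) deletes regular files in dir whose
// modification time is older than retentionDays days.
func pruneOldBackups(dir string, retentionDays int) error {
	cutoff := time.Now().AddDate(0, 0, -retentionDays)
	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() && info.ModTime().Before(cutoff) {
			return os.Remove(path)
		}
		return nil
	})
}

func main() {
	// Mirrors the hunk above: prune only when BACKUP_RETENTION_DAYS > 0.
	if days, err := strconv.Atoi(os.Getenv("BACKUP_RETENTION_DAYS")); err == nil && days > 0 {
		if err := pruneOldBackups("/backup", days); err != nil {
			panic(err)
		}
	}
}
```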

View File

@@ -1,182 +0,0 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"errors"
"fmt"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"strings"
)
// decryptWithGPG decrypts backup file using a passphrase
func decryptWithGPG(inputFile string, passphrase string) error {
utils.Info("Decrypting backup using passphrase...")
// Read the encrypted file
encFileContent, err := os.ReadFile(inputFile)
if err != nil {
return errors.New(fmt.Sprintf("Error reading encrypted file: %s", err))
}
// Define the passphrase used to encrypt the file
_passphrase := []byte(passphrase)
// Create a PGP message object from the encrypted file content
encryptedMessage := crypto.NewPGPMessage(encFileContent)
// Decrypt the message using the passphrase
plainMessage, err := crypto.DecryptMessageWithPassword(encryptedMessage, _passphrase)
if err != nil {
return errors.New(fmt.Sprintf("Error decrypting file: %s", err))
}
// Save the decrypted file (restore it)
err = os.WriteFile(RemoveLastExtension(inputFile), plainMessage.GetBinary(), 0644)
if err != nil {
return errors.New(fmt.Sprintf("Error saving decrypted file: %s", err))
}
utils.Info("Decrypting backup using passphrase...done")
utils.Info("Backup file decrypted successful!")
return nil
}
// encryptWithGPG encrypts backup using a passphrase
func encryptWithGPG(inputFile string, passphrase string) error {
utils.Info("Encrypting backup using passphrase...")
// Read the file to be encrypted
plainFileContent, err := os.ReadFile(inputFile)
if err != nil {
return errors.New(fmt.Sprintf("Error reading file: %s", err))
}
// Define the passphrase to encrypt the file
_passphrase := []byte(passphrase)
// Create a message object from the file content
message := crypto.NewPlainMessage(plainFileContent)
// Encrypt the message using the passphrase
encryptedMessage, err := crypto.EncryptMessageWithPassword(message, _passphrase)
if err != nil {
return errors.New(fmt.Sprintf("Error encrypting backup file: %s", err))
}
// Save the encrypted .tar file
err = os.WriteFile(fmt.Sprintf("%s.%s", inputFile, gpgExtension), encryptedMessage.GetBinary(), 0644)
if err != nil {
return errors.New(fmt.Sprintf("Error saving encrypted filee: %s", err))
}
utils.Info("Encrypting backup using passphrase...done")
utils.Info("Backup file encrypted successful!")
return nil
}
// encryptWithGPGPublicKey encrypts backup using a public key
func encryptWithGPGPublicKey(inputFile string, publicKey string) error {
utils.Info("Encrypting backup using public key...")
// Read the public key
pubKeyBytes, err := os.ReadFile(publicKey)
if err != nil {
return errors.New(fmt.Sprintf("Error reading public key: %s", err))
}
// Create a new keyring with the public key
publicKeyObj, err := crypto.NewKeyFromArmored(string(pubKeyBytes))
if err != nil {
return errors.New(fmt.Sprintf("Error parsing public key: %s", err))
}
keyRing, err := crypto.NewKeyRing(publicKeyObj)
if err != nil {
return errors.New(fmt.Sprintf("Error creating key ring: %v", err))
}
// Read the file to encryptWithGPGPublicKey
fileContent, err := os.ReadFile(inputFile)
if err != nil {
return errors.New(fmt.Sprintf("Error reading file: %v", err))
}
// encryptWithGPG the file
message := crypto.NewPlainMessage(fileContent)
encMessage, err := keyRing.Encrypt(message, nil)
if err != nil {
return errors.New(fmt.Sprintf("Error encrypting file: %v", err))
}
// Save the encrypted file
err = os.WriteFile(fmt.Sprintf("%s.%s", inputFile, gpgExtension), encMessage.GetBinary(), 0644)
if err != nil {
return errors.New(fmt.Sprintf("Error saving encrypted file: %v", err))
}
utils.Info("Encrypting backup using public key...done")
utils.Info("Backup file encrypted successful!")
return nil
}
// decryptWithGPGPrivateKey decrypts backup file using a private key and passphrase.
// privateKey GPG private key
// passphrase GPG passphrase
func decryptWithGPGPrivateKey(inputFile, privateKey, passphrase string) error {
utils.Info("Encrypting backup using private key...")
// Read the private key
priKeyBytes, err := os.ReadFile(privateKey)
if err != nil {
return errors.New(fmt.Sprintf("Error reading private key: %s", err))
}
// Read the password for the private key (if its password-protected)
password := []byte(passphrase)
// Create a key object from the armored private key
privateKeyObj, err := crypto.NewKeyFromArmored(string(priKeyBytes))
if err != nil {
return errors.New(fmt.Sprintf("Error parsing private key: %s", err))
}
// Unlock the private key with the password
if passphrase != "" {
// Unlock the private key with the password
_, err = privateKeyObj.Unlock(password)
if err != nil {
return errors.New(fmt.Sprintf("Error unlocking private key: %s", err))
}
}
// Create a new keyring with the private key
keyRing, err := crypto.NewKeyRing(privateKeyObj)
if err != nil {
return errors.New(fmt.Sprintf("Error creating key ring: %v", err))
}
// Read the encrypted file
encFileContent, err := os.ReadFile(inputFile)
if err != nil {
return errors.New(fmt.Sprintf("Error reading encrypted file: %s", err))
}
// decryptWithGPG the file
encryptedMessage := crypto.NewPGPMessage(encFileContent)
message, err := keyRing.Decrypt(encryptedMessage, nil, 0)
if err != nil {
return errors.New(fmt.Sprintf("Error decrypting file: %s", err))
}
// Save the decrypted file
err = os.WriteFile(RemoveLastExtension(inputFile), message.GetBinary(), 0644)
if err != nil {
return errors.New(fmt.Sprintf("Error saving decrypted file: %s", err))
}
utils.Info("Encrypting backup using public key...done")
fmt.Println("File successfully decrypted!")
return nil
}
func RemoveLastExtension(filename string) string {
if idx := strings.LastIndex(filename, "."); idx != -1 {
return filename[:idx]
}
return filename
}

View File

@@ -14,6 +14,7 @@ import (
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
@@ -211,3 +212,9 @@ func checkConfigFile(filePath string) (string, error) {
// Return an error if neither file exists
return "", fmt.Errorf("no config file found")
}
func RemoveLastExtension(filename string) string {
if idx := strings.LastIndex(filename, "."); idx != -1 {
return filename[:idx]
}
return filename
}
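
A quick usage sketch showing why the helper strips only the final extension — decrypting `store.sql.gz.gpg` should yield `store.sql.gz`, not `store`:

```go
package main

import (
	"fmt"
	"strings"
)

// Same helper as above, duplicated here so the snippet runs standalone.
func RemoveLastExtension(filename string) string {
	if idx := strings.LastIndex(filename, "."); idx != -1 {
		return filename[:idx]
	}
	return filename
}

func main() {
	fmt.Println(RemoveLastExtension("store.sql.gz.gpg")) // store.sql.gz
	fmt.Println(RemoveLastExtension("store.sql.gz"))     // store.sql
}
```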

View File

@@ -7,6 +7,7 @@
package pkg
import (
"github.com/jkaninda/encryptor"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
@@ -68,24 +69,38 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
utils.Fatal("Error, file required")
}
extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file))
outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file))
if err != nil {
utils.Fatal("Error reading backup file: %s ", err)
}
if extension == ".gpg" {
if conf.usingKey {
utils.Info("Decrypting backup using private key...")
utils.Warn("Backup decryption using a private key is not fully supported")
err := decryptWithGPGPrivateKey(filepath.Join(tmpPath, conf.file), conf.privateKey, conf.passphrase)
prKey, err := os.ReadFile(conf.privateKey)
if err != nil {
utils.Fatal("Error reading public key: %s ", err)
}
err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
if err != nil {
utils.Fatal("error during decrypting backup %v", err)
}
utils.Info("Decrypting backup using private key...done")
} else {
if conf.passphrase == "" {
utils.Error("Error, passphrase or private key required")
utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
} else {
utils.Info("Decrypting backup using passphrase...")
//decryptWithGPG file
err := decryptWithGPG(filepath.Join(tmpPath, conf.file), conf.passphrase)
err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
if err != nil {
utils.Fatal("Error decrypting file %s %v", file, err)
}
utils.Info("Decrypting backup using passphrase...done")
//Update file name
conf.file = RemoveLastExtension(file)
}
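
The decryption side mirrors the encryption change: judging by this hunk, `encryptor.Decrypt` takes the ciphertext bytes, an output path, and the passphrase, while `DecryptWithPrivateKey` additionally takes the armored private key bytes. A minimal sketch using only the calls shown above:

```go
package main

import (
	"os"

	"github.com/jkaninda/encryptor"
)

func main() {
	enc, err := os.ReadFile("/tmp/backup/store_20231219_022941.sql.gz.gpg")
	if err != nil {
		panic(err)
	}
	// Passphrase-based decryption; strip the trailing .gpg for the
	// output path, as RemoveLastExtension does in the hunk above.
	out := "/tmp/backup/store_20231219_022941.sql.gz"
	if err := encryptor.Decrypt(enc, out, os.Getenv("GPG_PASSPHRASE")); err != nil {
		panic(err)
	}
}
```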

View File

@@ -12,6 +12,7 @@ const algorithm = "aes256"
const gpgHome = "/config/gnupg"
const gpgExtension = "gpg"
const workingDir = "/config"
const timeFormat = "2006-01-02 at 15:04:05"
var (
storage = "local"

View File

@@ -13,6 +13,6 @@
<li>Date: {{.EndTime}}</li>
<li>Backup Reference: {{.BackupReference}} </li>
</ul>
<p>©2024 <a href="github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
<p>©2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
</body>
</html>

View File

@@ -18,7 +18,7 @@
<li>Backup Reference: {{.BackupReference}} </li>
</ul>
<p>Best regards,</p>
<p>©2024 <a href="github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
<p>©2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
<href>
</body>
</html>

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"github.com/go-mail/mail"
"github.com/robfig/cron/v3"
"html/template"
"io/ioutil"
"net/http"
@@ -177,7 +176,3 @@ func getTgUrl() string {
return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
}
func IsValidCronExpression(cronExpr string) bool {
_, err := cron.ParseStandard(cronExpr)
return err == nil
}

View File

@@ -8,11 +8,13 @@ package utils
import (
"fmt"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"io"
"io/fs"
"os"
"strconv"
"time"
)
// FileExists checks if the file does exist
@@ -180,3 +182,33 @@ func GetIntEnv(envName string) int {
}
return ret
}
func EnvWithDefault(envName string, defaultValue string) string {
value := os.Getenv(envName)
if value == "" {
return defaultValue
}
return value
}
// IsValidCronExpression verify cronExpression and returns boolean
func IsValidCronExpression(cronExpr string) bool {
// Parse the cron expression
_, err := cron.ParseStandard(cronExpr)
return err == nil
}
// CronNextTime returns cronExpression next time
func CronNextTime(cronExpr string) time.Time {
// Parse the cron expression
schedule, err := cron.ParseStandard(cronExpr)
if err != nil {
Error("Error parsing cron expression:", err)
return time.Time{}
}
// Get the current time
now := time.Now()
// Get the next scheduled time
next := schedule.Next(now)
//Info("The next scheduled time is: %v\n", next)
return next
}
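
`CronNextTime` builds on `cron.ParseStandard`, which accepts both five-field expressions and descriptors such as `@midnight` and `@every 15m` — the forms used throughout the docs above. A small runnable sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	for _, expr := range []string{"0 1 * * *", "@midnight", "@every 15m"} {
		// ParseStandard accepts five-field expressions and descriptors alike.
		schedule, err := cron.ParseStandard(expr)
		if err != nil {
			fmt.Println("invalid cron expression:", expr)
			continue
		}
		fmt.Printf("%-12s next run: %v\n", expr, schedule.Next(time.Now()))
	}
}
```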