mirror of
https://github.com/jkaninda/mysql-bkup.git
synced 2025-12-06 13:39:41 +01:00
Compare commits
48 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 40557af437 | |||
|
|
1dcb9586a6 | ||
|
|
2c6336e84a | ||
| c16ee3a492 | |||
|
|
3f7d28ea49 | ||
| cea1ef9c3b | |||
|
|
56c271bc29 | ||
| 45c30dca5f | |||
|
|
b0ae212578 | ||
|
|
6e2d3a9f21 | ||
|
|
dd314aa4cb | ||
|
|
24ccdaa671 | ||
| 45e3452376 | |||
|
|
3527b4cdcd | ||
| dc6fe2f4b9 | |||
|
|
f0afc0f4e0 | ||
| 7d7c813bb0 | |||
|
|
6b8491cdc0 | ||
| a1dd6e3f58 | |||
|
|
86ba3530c9 | ||
| e1f3b15003 | |||
|
|
1577e92a66 | ||
| 7b67f88769 | |||
|
|
043233dabe | ||
|
|
d6652cfb75 | ||
| 140ed608ab | |||
|
|
98211a27b8 | ||
| 4e4d45e555 | |||
|
|
01e41acb5c | ||
| 3dce2017f8 | |||
|
|
ed2f1b8d9c | ||
| b64875df21 | |||
|
|
fc90507b3f | ||
| df0efd24d3 | |||
|
|
e5dd7e76ce | ||
| 12fbb67a09 | |||
|
|
df490af7b6 | ||
| d930c3e2f6 | |||
|
|
e4258cb12e | ||
| 4c44166921 | |||
| 554df819ab | |||
|
|
ca5633882e | ||
| c5cca82841 | |||
|
|
bbd5422089 | ||
|
|
d72156f890 | ||
|
|
909a50dbe7 | ||
|
|
94ceb71da2 | ||
|
|
fe05fe5110 |
69
.env.example
Normal file
69
.env.example
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
### Database
|
||||||
|
DB_HOST=
|
||||||
|
DB_PORT=3306
|
||||||
|
DB_USERNAME=
|
||||||
|
DB_PASSWORD=
|
||||||
|
DB_NAME=
|
||||||
|
TZ=Europe/Paris
|
||||||
|
|
||||||
|
### Database Migration
|
||||||
|
#TARGET_DB_HOST=
|
||||||
|
#TARGET_DB_PORT=3306
|
||||||
|
#TARGET_DB_NAME=
|
||||||
|
#TARGET_DB_USERNAME=
|
||||||
|
#TARGET_DB_PASSWORD=
|
||||||
|
|
||||||
|
### Backup restoration
|
||||||
|
#FILE_NAME=
|
||||||
|
### AWS S3 Storage
|
||||||
|
#ACCESS_KEY=
|
||||||
|
#SECRET_KEY=
|
||||||
|
#AWS_S3_BUCKET_NAME=
|
||||||
|
#AWS_S3_ENDPOINT=
|
||||||
|
#AWS_REGION=
|
||||||
|
#AWS_S3_PATH=
|
||||||
|
#AWS_DISABLE_SSL=false
|
||||||
|
#AWS_FORCE_PATH_STYLE=true
|
||||||
|
|
||||||
|
### Backup Cron Expression
|
||||||
|
#BACKUP_CRON_EXPRESSION=@midnight
|
||||||
|
##Delete old backup created more than specified days ago
|
||||||
|
#BACKUP_RETENTION_DAYS=7
|
||||||
|
|
||||||
|
####SSH Storage
|
||||||
|
#SSH_HOST_NAME=
|
||||||
|
#SSH_PORT=22
|
||||||
|
#SSH_USER=
|
||||||
|
#SSH_PASSWORD=
|
||||||
|
#SSH_IDENTIFY_FILE=/tmp/id_ed25519
|
||||||
|
|
||||||
|
####FTP Storage
|
||||||
|
#FTP_PASSWORD=
|
||||||
|
#FTP_HOST_NAME=
|
||||||
|
#FTP_USER=
|
||||||
|
#FTP_PORT=21
|
||||||
|
#REMOTE_PATH=
|
||||||
|
#### Backup encryption
|
||||||
|
#GPG_PUBLIC_KEY=/config/public_key.asc
|
||||||
|
#GPG_PRIVATE_KEY=/config/private_key.asc
|
||||||
|
#GPG_PASSPHRASE=Your strong passphrase
|
||||||
|
## For multiple database backup on Docker or Docker in Swarm mode
|
||||||
|
#BACKUP_CONFIG_FILE=/config/config.yaml
|
||||||
|
### Database restoration
|
||||||
|
#FILE_NAME=
|
||||||
|
### Notification
|
||||||
|
#BACKUP_REFERENCE=K8s/Paris cluster
|
||||||
|
## Telegram
|
||||||
|
#TG_TOKEN=
|
||||||
|
#TG_CHAT_ID=
|
||||||
|
### Email
|
||||||
|
#MAIL_HOST=
|
||||||
|
#MAIL_PORT=
|
||||||
|
#MAIL_USERNAME=
|
||||||
|
#MAIL_PASSWORD=
|
||||||
|
#MAIL_FROM=Backup Jobs <backup-jobs@example.com>
|
||||||
|
#MAIL_TO=backup@example.com,me@example.com,team@example.com
|
||||||
|
#MAIL_SKIP_TLS=false
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
2
.github/workflows/build.yml
vendored
2
.github/workflows/build.yml
vendored
@@ -25,7 +25,7 @@ jobs:
|
|||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v3
|
||||||
with:
|
with:
|
||||||
push: true
|
push: true
|
||||||
file: "./docker/Dockerfile"
|
file: "./Dockerfile"
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
build-args: |
|
build-args: |
|
||||||
appVersion=develop-${{ github.sha }}
|
appVersion=develop-${{ github.sha }}
|
||||||
|
|||||||
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -39,7 +39,7 @@ jobs:
|
|||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v3
|
||||||
with:
|
with:
|
||||||
push: true
|
push: true
|
||||||
file: "./docker/Dockerfile"
|
file: "./Dockerfile"
|
||||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
build-args: |
|
build-args: |
|
||||||
appVersion=${{ env.TAG_NAME }}
|
appVersion=${{ env.TAG_NAME }}
|
||||||
|
|||||||
@@ -10,51 +10,21 @@ RUN go mod download
|
|||||||
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
|
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
|
||||||
|
|
||||||
FROM alpine:3.20.3
|
FROM alpine:3.20.3
|
||||||
ENV DB_HOST=""
|
|
||||||
ENV DB_NAME=""
|
|
||||||
ENV DB_USERNAME=""
|
|
||||||
ENV DB_PASSWORD=""
|
|
||||||
ENV DB_PORT=3306
|
|
||||||
ENV STORAGE=local
|
|
||||||
ENV AWS_S3_ENDPOINT=""
|
|
||||||
ENV AWS_S3_BUCKET_NAME=""
|
|
||||||
ENV AWS_ACCESS_KEY=""
|
|
||||||
ENV AWS_SECRET_KEY=""
|
|
||||||
ENV AWS_S3_PATH=""
|
|
||||||
ENV AWS_REGION="us-west-2"
|
|
||||||
ENV AWS_DISABLE_SSL="false"
|
|
||||||
ENV AWS_FORCE_PATH_STYLE="true"
|
|
||||||
ENV GPG_PASSPHRASE=""
|
|
||||||
ENV SSH_USER=""
|
|
||||||
ENV SSH_PASSWORD=""
|
|
||||||
ENV SSH_HOST=""
|
|
||||||
ENV SSH_IDENTIFY_FILE=""
|
|
||||||
ENV SSH_PORT=22
|
|
||||||
ENV REMOTE_PATH=""
|
|
||||||
ENV FTP_HOST=""
|
|
||||||
ENV FTP_PORT=21
|
|
||||||
ENV FTP_USER=""
|
|
||||||
ENV FTP_PASSWORD=""
|
|
||||||
ENV TARGET_DB_HOST=""
|
|
||||||
ENV TARGET_DB_PORT=3306
|
|
||||||
ENV TARGET_DB_NAME=""
|
|
||||||
ENV TARGET_DB_USERNAME=""
|
|
||||||
ENV TARGET_DB_PASSWORD=""
|
|
||||||
ENV BACKUP_CRON_EXPRESSION=""
|
|
||||||
ENV TG_TOKEN=""
|
|
||||||
ENV TG_CHAT_ID=""
|
|
||||||
ENV TZ=UTC
|
ENV TZ=UTC
|
||||||
ARG WORKDIR="/config"
|
ARG WORKDIR="/config"
|
||||||
ARG BACKUPDIR="/backup"
|
ARG BACKUPDIR="/backup"
|
||||||
ARG BACKUP_TMP_DIR="/tmp/backup"
|
ARG BACKUP_TMP_DIR="/tmp/backup"
|
||||||
ARG appVersion="v1.2.12"
|
ARG TEMPLATES_DIR="/config/templates"
|
||||||
|
ARG appVersion=""
|
||||||
ENV VERSION=${appVersion}
|
ENV VERSION=${appVersion}
|
||||||
LABEL author="Jonas Kaninda"
|
LABEL author="Jonas Kaninda"
|
||||||
LABEL version=${appVersion}
|
LABEL version=${appVersion}
|
||||||
|
LABEL github="github.com/jkaninda/mysql-bkup"
|
||||||
|
|
||||||
RUN apk --update add --no-cache mysql-client mariadb-connector-c gnupg tzdata
|
RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata ca-certificates
|
||||||
RUN mkdir $WORKDIR
|
RUN mkdir $WORKDIR
|
||||||
RUN mkdir $BACKUPDIR
|
RUN mkdir $BACKUPDIR
|
||||||
|
RUN mkdir $TEMPLATES_DIR
|
||||||
RUN mkdir -p $BACKUP_TMP_DIR
|
RUN mkdir -p $BACKUP_TMP_DIR
|
||||||
RUN chmod 777 $WORKDIR
|
RUN chmod 777 $WORKDIR
|
||||||
RUN chmod 777 $BACKUPDIR
|
RUN chmod 777 $BACKUPDIR
|
||||||
@@ -62,18 +32,19 @@ RUN chmod 777 $BACKUP_TMP_DIR
|
|||||||
RUN chmod 777 $WORKDIR
|
RUN chmod 777 $WORKDIR
|
||||||
|
|
||||||
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
|
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
|
||||||
|
COPY ./templates/* $TEMPLATES_DIR/
|
||||||
RUN chmod +x /usr/local/bin/mysql-bkup
|
RUN chmod +x /usr/local/bin/mysql-bkup
|
||||||
|
|
||||||
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
|
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
|
||||||
|
|
||||||
# Create backup script and make it executable
|
# Create backup script and make it executable
|
||||||
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
|
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
|
||||||
chmod +x /usr/local/bin/backup
|
chmod +x /usr/local/bin/backup
|
||||||
# Create restore script and make it executable
|
# Create restore script and make it executable
|
||||||
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
|
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
|
||||||
chmod +x /usr/local/bin/restore
|
chmod +x /usr/local/bin/restore
|
||||||
# Create migrate script and make it executable
|
# Create migrate script and make it executable
|
||||||
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
|
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
|
||||||
chmod +x /usr/local/bin/migrate
|
chmod +x /usr/local/bin/migrate
|
||||||
|
|
||||||
WORKDIR $WORKDIR
|
WORKDIR $WORKDIR
|
||||||
@@ -3,10 +3,13 @@ MySQL Backup is a Docker container image that can be used to backup, restore and
|
|||||||
It also supports __encrypting__ your backups using GPG.
|
It also supports __encrypting__ your backups using GPG.
|
||||||
|
|
||||||
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
|
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
|
||||||
It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3, FTP or SSH compatible storage.
|
It handles __recurring__ backups of MySQL or MariaDB database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3, FTP or SSH compatible storage.
|
||||||
|
|
||||||
It also supports database __encryption__ using GPG.
|
It also supports database __encryption__ using GPG.
|
||||||
|
|
||||||
|
Telegram and Email notifications on successful and failed backups.
|
||||||
|
|
||||||
|
|
||||||
[](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
|
[](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
|
||||||
[](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
|
[](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
|
||||||

|

|
||||||
@@ -96,7 +99,7 @@ networks:
|
|||||||
-e "DB_HOST=hostname" \
|
-e "DB_HOST=hostname" \
|
||||||
-e "DB_USERNAME=user" \
|
-e "DB_USERNAME=user" \
|
||||||
-e "DB_PASSWORD=password" \
|
-e "DB_PASSWORD=password" \
|
||||||
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
|
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" #@midnight
|
||||||
```
|
```
|
||||||
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
|
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
|
||||||
|
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ var BackupCmd = &cobra.Command{
|
|||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
pkg.StartBackup(cmd)
|
pkg.StartBackup(cmd)
|
||||||
} else {
|
} else {
|
||||||
utils.Fatal("Error, no argument required")
|
utils.Fatal(`"backup" accepts no argument %q`, args)
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -30,8 +30,6 @@ func init() {
|
|||||||
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
|
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
|
||||||
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
|
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
|
||||||
BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
|
BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
|
||||||
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
|
|
||||||
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
|
|
||||||
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
|
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ var MigrateCmd = &cobra.Command{
|
|||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
pkg.StartMigration(cmd)
|
pkg.StartMigration(cmd)
|
||||||
} else {
|
} else {
|
||||||
utils.Fatal("Error, no argument required")
|
utils.Fatal(`"migrate" accepts no argument %q`, args)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ var RestoreCmd = &cobra.Command{
|
|||||||
if len(args) == 0 {
|
if len(args) == 0 {
|
||||||
pkg.StartRestore(cmd)
|
pkg.StartRestore(cmd)
|
||||||
} else {
|
} else {
|
||||||
utils.Fatal("Error, no argument required")
|
utils.Fatal(`"restore" accepts no argument %q`, args)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -37,6 +37,7 @@ services:
|
|||||||
- AWS_SECRET_KEY=xxxxx
|
- AWS_SECRET_KEY=xxxxx
|
||||||
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
||||||
- AWS_DISABLE_SSL="false"
|
- AWS_DISABLE_SSL="false"
|
||||||
|
- AWS_FORCE_PATH_STYLE="false"
|
||||||
|
|
||||||
# mysql-bkup container must be connected to the same network with your database
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
networks:
|
networks:
|
||||||
@@ -73,6 +74,8 @@ services:
|
|||||||
- AWS_ACCESS_KEY=xxxx
|
- AWS_ACCESS_KEY=xxxx
|
||||||
- AWS_SECRET_KEY=xxxxx
|
- AWS_SECRET_KEY=xxxxx
|
||||||
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
|
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
|
||||||
|
#Delete old backup created more than specified days ago
|
||||||
|
#- BACKUP_RETENTION_DAYS=7
|
||||||
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
||||||
- AWS_DISABLE_SSL="false"
|
- AWS_DISABLE_SSL="false"
|
||||||
# mysql-bkup container must be connected to the same network with your database
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
@@ -82,53 +85,3 @@ networks:
|
|||||||
web:
|
web:
|
||||||
```
|
```
|
||||||
|
|
||||||
## Deploy on Kubernetes
|
|
||||||
|
|
||||||
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
|
|
||||||
|
|
||||||
### Simple Kubernetes CronJob usage:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: CronJob
|
|
||||||
metadata:
|
|
||||||
name: bkup-job
|
|
||||||
spec:
|
|
||||||
schedule: "0 1 * * *"
|
|
||||||
jobTemplate:
|
|
||||||
spec:
|
|
||||||
template:
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: mysql-bkup
|
|
||||||
image: jkaninda/mysql-bkup
|
|
||||||
command:
|
|
||||||
- /bin/sh
|
|
||||||
- -c
|
|
||||||
- backup -s s3 --path /custom_path
|
|
||||||
env:
|
|
||||||
- name: DB_PORT
|
|
||||||
value: "3306"
|
|
||||||
- name: DB_HOST
|
|
||||||
value: ""
|
|
||||||
- name: DB_NAME
|
|
||||||
value: ""
|
|
||||||
- name: DB_USERNAME
|
|
||||||
value: ""
|
|
||||||
# Please use secret!
|
|
||||||
- name: DB_PASSWORD
|
|
||||||
value: ""
|
|
||||||
- name: AWS_S3_ENDPOINT
|
|
||||||
value: "https://s3.amazonaws.com"
|
|
||||||
- name: AWS_S3_BUCKET_NAME
|
|
||||||
value: "xxx"
|
|
||||||
- name: AWS_REGION
|
|
||||||
value: "us-west-2"
|
|
||||||
- name: AWS_ACCESS_KEY
|
|
||||||
value: "xxxx"
|
|
||||||
- name: AWS_SECRET_KEY
|
|
||||||
value: "xxxx"
|
|
||||||
- name: AWS_DISABLE_SSL
|
|
||||||
value: "false"
|
|
||||||
restartPolicy: OnFailure
|
|
||||||
```
|
|
||||||
@@ -79,6 +79,8 @@ services:
|
|||||||
- REMOTE_PATH=/home/jkaninda/backups
|
- REMOTE_PATH=/home/jkaninda/backups
|
||||||
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
|
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
|
||||||
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
|
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
|
||||||
|
#Delete old backup created more than specified days ago
|
||||||
|
#- BACKUP_RETENTION_DAYS=7
|
||||||
## We advise you to use a private jey instead of password
|
## We advise you to use a private jey instead of password
|
||||||
#- SSH_PASSWORD=password
|
#- SSH_PASSWORD=password
|
||||||
# mysql-bkup container must be connected to the same network with your database
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
@@ -87,55 +89,3 @@ services:
|
|||||||
networks:
|
networks:
|
||||||
web:
|
web:
|
||||||
```
|
```
|
||||||
|
|
||||||
## Deploy on Kubernetes
|
|
||||||
|
|
||||||
For Kubernetes, you don't need to run it in scheduled mode.
|
|
||||||
You can deploy it as CronJob.
|
|
||||||
|
|
||||||
Simple Kubernetes CronJob usage:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: CronJob
|
|
||||||
metadata:
|
|
||||||
name: bkup-job
|
|
||||||
spec:
|
|
||||||
schedule: "0 1 * * *"
|
|
||||||
jobTemplate:
|
|
||||||
spec:
|
|
||||||
template:
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: mysql-bkup
|
|
||||||
image: jkaninda/mysql-bkup
|
|
||||||
command:
|
|
||||||
- /bin/sh
|
|
||||||
- -c
|
|
||||||
- backup -s ssh
|
|
||||||
env:
|
|
||||||
- name: DB_PORT
|
|
||||||
value: "3306"
|
|
||||||
- name: DB_HOST
|
|
||||||
value: ""
|
|
||||||
- name: DB_NAME
|
|
||||||
value: ""
|
|
||||||
- name: DB_USERNAME
|
|
||||||
value: ""
|
|
||||||
# Please use secret!
|
|
||||||
- name: DB_PASSWORD
|
|
||||||
value: ""
|
|
||||||
- name: SSH_HOST
|
|
||||||
value: ""
|
|
||||||
- name: SSH_PORT
|
|
||||||
value: "22"
|
|
||||||
- name: SSH_USER
|
|
||||||
value: "xxx"
|
|
||||||
- name: REMOTE_PATH
|
|
||||||
value: "/home/jkaninda/backups"
|
|
||||||
- name: AWS_ACCESS_KEY
|
|
||||||
value: "xxxx"
|
|
||||||
- name: SSH_IDENTIFY_FILE
|
|
||||||
value: "/tmp/id_ed25519"
|
|
||||||
restartPolicy: Never
|
|
||||||
```
|
|
||||||
@@ -75,6 +75,8 @@ services:
|
|||||||
- DB_USERNAME=username
|
- DB_USERNAME=username
|
||||||
- DB_PASSWORD=password
|
- DB_PASSWORD=password
|
||||||
- BACKUP_CRON_EXPRESSION=0 1 * * *
|
- BACKUP_CRON_EXPRESSION=0 1 * * *
|
||||||
|
#Delete old backup created more than specified days ago
|
||||||
|
#- BACKUP_RETENTION_DAYS=7
|
||||||
# mysql-bkup container must be connected to the same network with your database
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
networks:
|
networks:
|
||||||
- web
|
- web
|
||||||
|
|||||||
@@ -59,6 +59,8 @@ spec:
|
|||||||
value: "xxxx"
|
value: "xxxx"
|
||||||
- name: AWS_DISABLE_SSL
|
- name: AWS_DISABLE_SSL
|
||||||
value: "false"
|
value: "false"
|
||||||
|
- name: AWS_FORCE_PATH_STYLE
|
||||||
|
value: "false"
|
||||||
restartPolicy: Never
|
restartPolicy: Never
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -81,13 +83,9 @@ spec:
|
|||||||
# for a list of available releases.
|
# for a list of available releases.
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- -c
|
||||||
- bkup
|
- backup --storage ssh
|
||||||
- backup
|
|
||||||
- --storage
|
|
||||||
- ssh
|
|
||||||
- --disable-compression
|
|
||||||
resources:
|
resources:
|
||||||
limits:
|
limits:
|
||||||
memory: "128Mi"
|
memory: "128Mi"
|
||||||
@@ -116,7 +114,7 @@ spec:
|
|||||||
value: "/home/toto/backup"
|
value: "/home/toto/backup"
|
||||||
# Optional, required if you want to encrypt your backup
|
# Optional, required if you want to encrypt your backup
|
||||||
- name: GPG_PASSPHRASE
|
- name: GPG_PASSPHRASE
|
||||||
value: "xxxx"
|
value: "secure-passphrase"
|
||||||
restartPolicy: Never
|
restartPolicy: Never
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -139,13 +137,9 @@ spec:
|
|||||||
# for a list of available releases.
|
# for a list of available releases.
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- -c
|
||||||
- bkup
|
- backup --storage ssh --file store_20231219_022941.sql.gz
|
||||||
- restore
|
|
||||||
- --storage
|
|
||||||
- ssh
|
|
||||||
- --file store_20231219_022941.sql.gz
|
|
||||||
resources:
|
resources:
|
||||||
limits:
|
limits:
|
||||||
memory: "128Mi"
|
memory: "128Mi"
|
||||||
@@ -238,7 +232,6 @@ spec:
|
|||||||
|
|
||||||
This image also supports Kubernetes security context, you can run it in Rootless environment.
|
This image also supports Kubernetes security context, you can run it in Rootless environment.
|
||||||
It has been tested on Openshift, it works well.
|
It has been tested on Openshift, it works well.
|
||||||
Deployment on OpenShift is supported, you need to remove `securityContext` section on your yaml file.
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: batch/v1
|
apiVersion: batch/v1
|
||||||
@@ -301,3 +294,55 @@ spec:
|
|||||||
# value: "xxx"
|
# value: "xxx"
|
||||||
restartPolicy: OnFailure
|
restartPolicy: OnFailure
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Migrate database
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: batch/v1
|
||||||
|
kind: Job
|
||||||
|
metadata:
|
||||||
|
name: migrate-db
|
||||||
|
spec:
|
||||||
|
ttlSecondsAfterFinished: 100
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: mysql-bkup
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- migrate
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
memory: "128Mi"
|
||||||
|
cpu: "500m"
|
||||||
|
env:
|
||||||
|
## Source Database
|
||||||
|
- name: DB_HOST
|
||||||
|
value: "mysql"
|
||||||
|
- name: DB_PORT
|
||||||
|
value: "3306"
|
||||||
|
- name: DB_NAME
|
||||||
|
value: "dbname"
|
||||||
|
- name: DB_USERNAME
|
||||||
|
value: "username"
|
||||||
|
- name: DB_PASSWORD
|
||||||
|
value: "password"
|
||||||
|
## Target Database
|
||||||
|
- name: TARGET_DB_HOST
|
||||||
|
value: "target-mysql"
|
||||||
|
- name: TARGET_DB_PORT
|
||||||
|
value: "3306"
|
||||||
|
- name: TARGET_DB_NAME
|
||||||
|
value: "dbname"
|
||||||
|
- name: TARGET_DB_USERNAME
|
||||||
|
value: "username"
|
||||||
|
- name: TARGET_DB_PASSWORD
|
||||||
|
value: "password"
|
||||||
|
restartPolicy: Never
|
||||||
|
```
|
||||||
6
docs/how-tos/deprecated-configs.md
Normal file
6
docs/how-tos/deprecated-configs.md
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
title: Update deprecated configurations
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 11
|
||||||
|
---
|
||||||
@@ -1,30 +1,39 @@
|
|||||||
---
|
---
|
||||||
title: Encrypt backups using GPG
|
title: Encrypt backups
|
||||||
layout: default
|
layout: default
|
||||||
parent: How Tos
|
parent: How Tos
|
||||||
nav_order: 8
|
nav_order: 8
|
||||||
---
|
---
|
||||||
# Encrypt backup
|
# Encrypt backup
|
||||||
|
|
||||||
The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a sql.gpg file instead or sql.gz.gpg.
|
The image supports encrypting backups using one of two available methods: GPG with passphrase or GPG with a public key.
|
||||||
|
|
||||||
|
|
||||||
|
The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted using the given key and saved as a sql.gpg file instead or sql.gz.gpg.
|
||||||
|
|
||||||
{: .warning }
|
{: .warning }
|
||||||
To restore an encrypted backup, you need to provide the same GPG passphrase or key used during backup process.
|
To restore an encrypted backup, you need to provide the same GPG passphrase used during backup process.
|
||||||
|
|
||||||
- GPG home directory `/config/gnupg`
|
- GPG home directory `/config/gnupg`
|
||||||
- Cipher algorithm `aes256`
|
- Cipher algorithm `aes256`
|
||||||
-
|
|
||||||
To decrypt manually, you need to install `gnupg`
|
|
||||||
|
|
||||||
### Decrypt backup
|
{: .note }
|
||||||
|
The backup encrypted using `GPG passphrase` method can be restored automatically, no need to decrypt it before restoration.
|
||||||
|
Suppose you used a GPG public key during the backup process. In that case, you need to decrypt your backup before restoration because decryption using a `GPG private` key is not fully supported.
|
||||||
|
|
||||||
|
To decrypt manually, you need to install `gnupg`
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
gpg --batch --passphrase "my-passphrase" \
|
gpg --batch --passphrase "my-passphrase" \
|
||||||
--output database_20240730_044201.sql.gz \
|
--output database_20240730_044201.sql.gz \
|
||||||
--decrypt database_20240730_044201.sql.gz.gpg
|
--decrypt database_20240730_044201.sql.gz.gpg
|
||||||
```
|
```
|
||||||
|
Using your private key
|
||||||
|
|
||||||
### Backup
|
```shell
|
||||||
|
gpg --output database_20240730_044201.sql.gz --decrypt database_20240730_044201.sql.gz.gpg
|
||||||
|
```
|
||||||
|
## Using GPG passphrase
|
||||||
|
|
||||||
```yml
|
```yml
|
||||||
services:
|
services:
|
||||||
@@ -51,4 +60,32 @@ services:
|
|||||||
- web
|
- web
|
||||||
networks:
|
networks:
|
||||||
web:
|
web:
|
||||||
|
```
|
||||||
|
## Using GPG Public Key
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command: backup -d database
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
## Required to encrypt backup
|
||||||
|
- GPG_PUBLIC_KEY=/config/public_key.asc
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
```
|
```
|
||||||
@@ -78,54 +78,3 @@ TARGET_DB_PASSWORD=password
|
|||||||
jkaninda/mysql-bkup migrate
|
jkaninda/mysql-bkup migrate
|
||||||
```
|
```
|
||||||
|
|
||||||
## Kubernetes
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: Job
|
|
||||||
metadata:
|
|
||||||
name: migrate-db
|
|
||||||
spec:
|
|
||||||
ttlSecondsAfterFinished: 100
|
|
||||||
template:
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: mysql-bkup
|
|
||||||
# In production, it is advised to lock your image tag to a proper
|
|
||||||
# release version instead of using `latest`.
|
|
||||||
# Check https://github.com/jkaninda/mysql-bkup/releases
|
|
||||||
# for a list of available releases.
|
|
||||||
image: jkaninda/mysql-bkup
|
|
||||||
command:
|
|
||||||
- /bin/sh
|
|
||||||
- -c
|
|
||||||
- migrate
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
memory: "128Mi"
|
|
||||||
cpu: "500m"
|
|
||||||
env:
|
|
||||||
## Source Database
|
|
||||||
- name: DB_HOST
|
|
||||||
value: "mysql"
|
|
||||||
- name: DB_PORT
|
|
||||||
value: "3306"
|
|
||||||
- name: DB_NAME
|
|
||||||
value: "dbname"
|
|
||||||
- name: DB_USERNAME
|
|
||||||
value: "username"
|
|
||||||
- name: DB_PASSWORD
|
|
||||||
value: "password"
|
|
||||||
## Target Database
|
|
||||||
- name: TARGET_DB_HOST
|
|
||||||
value: "target-mysql"
|
|
||||||
- name: TARGET_DB_PORT
|
|
||||||
value: "3306"
|
|
||||||
- name: TARGET_DB_NAME
|
|
||||||
value: "dbname"
|
|
||||||
- name: TARGET_DB_USERNAME
|
|
||||||
value: "username"
|
|
||||||
- name: TARGET_DB_PASSWORD
|
|
||||||
value: "password"
|
|
||||||
restartPolicy: Never
|
|
||||||
```
|
|
||||||
63
docs/how-tos/mutli-backup.md
Normal file
63
docs/how-tos/mutli-backup.md
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
---
|
||||||
|
title: Run multiple backup schedules in the same container
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 11
|
||||||
|
---
|
||||||
|
|
||||||
|
Multiple backup schedules with different configuration can be configured by mounting a configuration file into `/config/config.yaml` `/config/config.yml` or by defining an environment variable `BACKUP_CONFIG_FILE=/backup/config.yaml`.
|
||||||
|
|
||||||
|
## Configuration file
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
#cronExpression: "@every 20m" //Optional for scheduled backups
|
||||||
|
cronExpression: ""
|
||||||
|
databases:
|
||||||
|
- host: mysql1
|
||||||
|
port: 3306
|
||||||
|
name: database1
|
||||||
|
user: database1
|
||||||
|
password: password
|
||||||
|
path: /s3-path/database1 #For SSH or FTP you need to define the full path (/home/toto/backup/)
|
||||||
|
- host: mysql2
|
||||||
|
port: 3306
|
||||||
|
name: lldap
|
||||||
|
user: lldap
|
||||||
|
password: password
|
||||||
|
path: /s3-path/lldap #For SSH or FTP you need to define the full path (/home/toto/backup/)
|
||||||
|
- host: mysql3
|
||||||
|
port: 3306
|
||||||
|
name: keycloak
|
||||||
|
user: keycloak
|
||||||
|
password: password
|
||||||
|
path: /s3-path/keycloak #For SSH or FTP you need to define the full path (/home/toto/backup/)
|
||||||
|
- host: mysql4
|
||||||
|
port: 3306
|
||||||
|
name: joplin
|
||||||
|
user: joplin
|
||||||
|
password: password
|
||||||
|
path: /s3-path/joplin #For SSH or FTP you need to define the full path (/home/toto/backup/)
|
||||||
|
```
|
||||||
|
## Docker compose file
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command: backup
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
## Multi backup config file
|
||||||
|
- BACKUP_CONFIG_FILE=/backup/config.yaml
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
162
docs/how-tos/receive-notification.md
Normal file
162
docs/how-tos/receive-notification.md
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
---
|
||||||
|
title: Receive notifications
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 12
|
||||||
|
---
|
||||||
|
Send Email or Telegram notifications on successfully or failed backup.
|
||||||
|
|
||||||
|
### Email
|
||||||
|
To send out email notifications on failed or successfully backup runs, provide SMTP credentials, a sender and a recipient:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command: backup
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
- MAIL_HOST=
|
||||||
|
- MAIL_PORT=587
|
||||||
|
- MAIL_USERNAME=
|
||||||
|
- MAIL_PASSWORD=!
|
||||||
|
- MAIL_FROM=Backup Jobs <backup@example.com>
|
||||||
|
## Multiple recipients separated by a comma
|
||||||
|
- MAIL_TO=me@example.com,team@example.com,manager@example.com
|
||||||
|
- MAIL_SKIP_TLS=false
|
||||||
|
## Time format for notification
|
||||||
|
- TIME_FORMAT=2006-01-02 at 15:04:05
|
||||||
|
## Backup reference, in case you want to identify every backup instance
|
||||||
|
- BACKUP_REFERENCE=database/Paris cluster
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
|
|
||||||
|
### Telegram
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command: backup
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
- TG_TOKEN=[BOT ID]:[BOT TOKEN]
|
||||||
|
- TG_CHAT_ID=
|
||||||
|
## Time format for notification
|
||||||
|
- TIME_FORMAT=2006-01-02 at 15:04:05
|
||||||
|
## Backup reference, in case you want to identify every backup instance
|
||||||
|
- BACKUP_REFERENCE=database/Paris cluster
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
|
|
||||||
|
### Customize notifications
|
||||||
|
|
||||||
|
The title and body of the notifications can be tailored to your needs using Go templates.
|
||||||
|
Template sources must be mounted inside the container in /config/templates:
|
||||||
|
|
||||||
|
- email.template: Email notification template
|
||||||
|
- telegram.template: Telegram notification template
|
||||||
|
- email-error.template: Error notification template
|
||||||
|
- telegram-error.template: Error notification template
|
||||||
|
|
||||||
|
### Data
|
||||||
|
|
||||||
|
Here is a list of all data passed to the template:
|
||||||
|
- `Database` : Database name
|
||||||
|
- `StartTime`: Backup start time process
|
||||||
|
- `EndTime`: Backup start time process
|
||||||
|
- `Storage`: Backup storage
|
||||||
|
- `BackupLocation`: Backup location
|
||||||
|
- `BackupSize`: Backup size
|
||||||
|
- `BackupReference`: Backup reference(eg: database/cluster name or server name)
|
||||||
|
|
||||||
|
> email.template:
|
||||||
|
|
||||||
|
|
||||||
|
```html
|
||||||
|
<h2>Hi,</h2>
|
||||||
|
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
|
||||||
|
<h3>Backup Details:</h3>
|
||||||
|
<ul>
|
||||||
|
<li>Database Name: {{.Database}}</li>
|
||||||
|
<li>Backup Start Time: {{.StartTime}}</li>
|
||||||
|
<li>Backup End Time: {{.EndTime}}</li>
|
||||||
|
<li>Backup Storage: {{.Storage}}</li>
|
||||||
|
<li>Backup Location: {{.BackupLocation}}</li>
|
||||||
|
<li>Backup Size: {{.BackupSize}} bytes</li>
|
||||||
|
<li>Backup Reference: {{.BackupReference}} </li>
|
||||||
|
</ul>
|
||||||
|
<p>Best regards,</p>
|
||||||
|
```
|
||||||
|
|
||||||
|
> telegram.template
|
||||||
|
|
||||||
|
```html
|
||||||
|
✅ Database Backup Notification – {{.Database}}
|
||||||
|
Hi,
|
||||||
|
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.
|
||||||
|
|
||||||
|
Backup Details:
|
||||||
|
- Database Name: {{.Database}}
|
||||||
|
- Backup Start Time: {{.StartTime}}
|
||||||
|
- Backup EndTime: {{.EndTime}}
|
||||||
|
- Backup Storage: {{.Storage}}
|
||||||
|
- Backup Location: {{.BackupLocation}}
|
||||||
|
- Backup Size: {{.BackupSize}} bytes
|
||||||
|
- Backup Reference: {{.BackupReference}}
|
||||||
|
```
|
||||||
|
|
||||||
|
> email-error.template
|
||||||
|
|
||||||
|
```html
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<title>🔴 Urgent: Database Backup Failure Notification</title>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h2>Hi,</h2>
|
||||||
|
<p>An error occurred during database backup.</p>
|
||||||
|
<h3>Failure Details:</h3>
|
||||||
|
<ul>
|
||||||
|
<li>Error Message: {{.Error}}</li>
|
||||||
|
<li>Date: {{.EndTime}}</li>
|
||||||
|
<li>Backup Reference: {{.BackupReference}} </li>
|
||||||
|
</ul>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
```
|
||||||
|
|
||||||
|
> telegram-error.template
|
||||||
|
|
||||||
|
|
||||||
|
```html
|
||||||
|
🔴 Urgent: Database Backup Failure Notification
|
||||||
|
|
||||||
|
An error occurred during database backup.
|
||||||
|
Failure Details:
|
||||||
|
|
||||||
|
Error Message: {{.Error}}
|
||||||
|
Date: {{.EndTime}}
|
||||||
|
```
|
||||||
@@ -10,7 +10,7 @@ nav_order: 6
|
|||||||
To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
|
To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
|
||||||
|
|
||||||
{: .note }
|
{: .note }
|
||||||
It supports __.sql__ and __.sql.gz__ compressed file.
|
It supports __.sql__,__.sql.gpg__ and __.sql.gz__,__.sql.gz.gpg__ compressed file.
|
||||||
|
|
||||||
### Restore
|
### Restore
|
||||||
|
|
||||||
@@ -40,56 +40,10 @@ services:
|
|||||||
- AWS_SECRET_KEY=xxxxx
|
- AWS_SECRET_KEY=xxxxx
|
||||||
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
||||||
- AWS_DISABLE_SSL="false"
|
- AWS_DISABLE_SSL="false"
|
||||||
|
- AWS_FORCE_PATH_STYLE="false"
|
||||||
# mysql-bkup container must be connected to the same network with your database
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
networks:
|
networks:
|
||||||
- web
|
- web
|
||||||
networks:
|
networks:
|
||||||
web:
|
web:
|
||||||
```
|
```
|
||||||
|
|
||||||
## Restore on Kubernetes
|
|
||||||
|
|
||||||
Simple Kubernetes restore Job:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: Job
|
|
||||||
metadata:
|
|
||||||
name: restore-db
|
|
||||||
spec:
|
|
||||||
template:
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: mysql-bkup
|
|
||||||
image: jkaninda/mysql-bkup
|
|
||||||
command:
|
|
||||||
- /bin/sh
|
|
||||||
- -c
|
|
||||||
- restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
|
|
||||||
env:
|
|
||||||
- name: DB_PORT
|
|
||||||
value: "3306"
|
|
||||||
- name: DB_HOST
|
|
||||||
value: ""
|
|
||||||
- name: DB_NAME
|
|
||||||
value: ""
|
|
||||||
- name: DB_USERNAME
|
|
||||||
value: ""
|
|
||||||
# Please use secret!
|
|
||||||
- name: DB_PASSWORD
|
|
||||||
value: ""
|
|
||||||
- name: AWS_S3_ENDPOINT
|
|
||||||
value: "https://s3.amazonaws.com"
|
|
||||||
- name: AWS_S3_BUCKET_NAME
|
|
||||||
value: "xxx"
|
|
||||||
- name: AWS_REGION
|
|
||||||
value: "us-west-2"
|
|
||||||
- name: AWS_ACCESS_KEY
|
|
||||||
value: "xxxx"
|
|
||||||
- name: AWS_SECRET_KEY
|
|
||||||
value: "xxxx"
|
|
||||||
- name: AWS_DISABLE_SSL
|
|
||||||
value: "false"
|
|
||||||
restartPolicy: Never
|
|
||||||
backoffLimit: 4
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ nav_order: 7
|
|||||||
To restore the database from your remote server, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
|
To restore the database from your remote server, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
|
||||||
|
|
||||||
{: .note }
|
{: .note }
|
||||||
It supports __.sql__ and __.sql.gz__ compressed file.
|
It supports __.sql__,__.sql.gpg__ and __.sql.gz__,__.sql.gz.gpg__ compressed file.
|
||||||
|
|
||||||
### Restore
|
### Restore
|
||||||
|
|
||||||
@@ -44,50 +44,4 @@ services:
|
|||||||
- web
|
- web
|
||||||
networks:
|
networks:
|
||||||
web:
|
web:
|
||||||
```
|
|
||||||
## Restore on Kubernetes
|
|
||||||
|
|
||||||
Simple Kubernetes restore Job:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
apiVersion: batch/v1
|
|
||||||
kind: Job
|
|
||||||
metadata:
|
|
||||||
name: restore-db
|
|
||||||
spec:
|
|
||||||
template:
|
|
||||||
spec:
|
|
||||||
containers:
|
|
||||||
- name: mysql-bkup
|
|
||||||
image: jkaninda/mysql-bkup
|
|
||||||
command:
|
|
||||||
- /bin/sh
|
|
||||||
- -c
|
|
||||||
- restore -s ssh -f store_20231219_022941.sql.gz
|
|
||||||
env:
|
|
||||||
- name: DB_PORT
|
|
||||||
value: "3306"
|
|
||||||
- name: DB_HOST
|
|
||||||
value: ""
|
|
||||||
- name: DB_NAME
|
|
||||||
value: ""
|
|
||||||
- name: DB_USERNAME
|
|
||||||
value: ""
|
|
||||||
# Please use secret!
|
|
||||||
- name: DB_PASSWORD
|
|
||||||
value: ""
|
|
||||||
- name: SSH_HOST_NAME
|
|
||||||
value: ""
|
|
||||||
- name: SSH_PORT
|
|
||||||
value: "22"
|
|
||||||
- name: SSH_USER
|
|
||||||
value: "xxx"
|
|
||||||
- name: SSH_REMOTE_PATH
|
|
||||||
value: "/home/jkaninda/backups"
|
|
||||||
- name: AWS_ACCESS_KEY
|
|
||||||
value: "xxxx"
|
|
||||||
- name: SSH_IDENTIFY_FILE
|
|
||||||
value: "/tmp/id_ed25519"
|
|
||||||
restartPolicy: Never
|
|
||||||
backoffLimit: 4
|
|
||||||
```
|
```
|
||||||
@@ -10,7 +10,7 @@ nav_order: 5
|
|||||||
To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
|
To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
|
||||||
|
|
||||||
{: .note }
|
{: .note }
|
||||||
It supports __.sql__ and __.sql.gz__ compressed file.
|
It supports __.sql__,__.sql.gpg__ and __.sql.gz__,__.sql.gz.gpg__ compressed file.
|
||||||
|
|
||||||
### Restore
|
### Restore
|
||||||
|
|
||||||
|
|||||||
@@ -9,6 +9,9 @@ nav_order: 1
|
|||||||
MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH remote storage.
|
MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH remote storage.
|
||||||
It also supports __encrypting__ your backups using GPG.
|
It also supports __encrypting__ your backups using GPG.
|
||||||
|
|
||||||
|
Telegram and Email notifications on successful and failed backups.
|
||||||
|
|
||||||
|
|
||||||
We are open to receiving stars, PRs, and issues!
|
We are open to receiving stars, PRs, and issues!
|
||||||
|
|
||||||
|
|
||||||
@@ -88,7 +91,7 @@ networks:
|
|||||||
-e "DB_HOST=hostname" \
|
-e "DB_HOST=hostname" \
|
||||||
-e "DB_USERNAME=user" \
|
-e "DB_USERNAME=user" \
|
||||||
-e "DB_PASSWORD=password" \
|
-e "DB_PASSWORD=password" \
|
||||||
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
|
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" #@midnight
|
||||||
```
|
```
|
||||||
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
|
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
|
||||||
|
|
||||||
|
|||||||
@@ -26,49 +26,49 @@ Backup, restore and migrate targets, schedule and retention are configured using
|
|||||||
| --dbname | -d | Database name |
|
| --dbname | -d | Database name |
|
||||||
| --port | -p | Database port (default: 3306) |
|
| --port | -p | Database port (default: 3306) |
|
||||||
| --disable-compression | | Disable database backup compression |
|
| --disable-compression | | Disable database backup compression |
|
||||||
| --prune | | Delete old backup, default disabled |
|
|
||||||
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
|
|
||||||
| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily |
|
| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily |
|
||||||
| --help | -h | Print this help message and exit |
|
| --help | -h | Print this help message and exit |
|
||||||
| --version | -V | Print version information and exit |
|
| --version | -V | Print version information and exit |
|
||||||
|
|
||||||
## Environment variables
|
## Environment variables
|
||||||
|
|
||||||
| Name | Requirement | Description |
|
| Name | Requirement | Description |
|
||||||
|------------------------|---------------------------------------------------------------|------------------------------------------------------|
|
|------------------------|---------------------------------------------------------------|-----------------------------------------------------------------|
|
||||||
| DB_PORT | Optional, default 3306 | Database port number |
|
| DB_PORT | Optional, default 3306 | Database port number |
|
||||||
| DB_HOST | Required | Database host |
|
| DB_HOST | Required | Database host |
|
||||||
| DB_NAME | Optional if it was provided from the -d flag | Database name |
|
| DB_NAME | Optional if it was provided from the -d flag | Database name |
|
||||||
| DB_USERNAME | Required | Database user name |
|
| DB_USERNAME | Required | Database user name |
|
||||||
| DB_PASSWORD | Required | Database password |
|
| DB_PASSWORD | Required | Database password |
|
||||||
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
|
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
|
||||||
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
|
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
|
||||||
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
|
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
|
||||||
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
|
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
|
||||||
| AWS_REGION | Optional, required for S3 storage | AWS Region |
|
| AWS_REGION | Optional, required for S3 storage | AWS Region |
|
||||||
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
|
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
|
||||||
| AWS_FORCE_PATH_STYLE | Optional, required for S3 storage | Force path style |
|
| AWS_FORCE_PATH_STYLE | Optional, required for S3 storage | Force path style |
|
||||||
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
|
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
|
||||||
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
|
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
|
||||||
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode |
|
| GPG_PUBLIC_KEY | Optional, required to encrypt backup | GPG public key, used to encrypt backup (/config/public_key.asc) |
|
||||||
| SSH_HOST | Optional, required for SSH storage | ssh remote hostname or ip |
|
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode |
|
||||||
| SSH_USER | Optional, required for SSH storage | ssh remote user |
|
| BACKUP_RETENTION_DAYS | Optional | Delete old backup created more than specified days ago |
|
||||||
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
|
| SSH_HOST | Optional, required for SSH storage | ssh remote hostname or ip |
|
||||||
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
|
| SSH_USER | Optional, required for SSH storage | ssh remote user |
|
||||||
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
|
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
|
||||||
| REMOTE_PATH | Optional, required for SSH or FTP storage | remote path (/home/toto/backup) |
|
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
|
||||||
| FTP_HOST | Optional, required for FTP storage | FTP host name |
|
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
|
||||||
| FTP_PORT | Optional, required for FTP storage | FTP server port number |
|
| REMOTE_PATH | Optional, required for SSH or FTP storage | remote path (/home/toto/backup) |
|
||||||
| FTP_USER | Optional, required for FTP storage | FTP user |
|
| FTP_HOST | Optional, required for FTP storage | FTP host name |
|
||||||
| FTP_PASSWORD | Optional, required for FTP storage | FTP user password |
|
| FTP_PORT | Optional, required for FTP storage | FTP server port number |
|
||||||
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
|
| FTP_USER | Optional, required for FTP storage | FTP user |
|
||||||
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
|
| FTP_PASSWORD | Optional, required for FTP storage | FTP user password |
|
||||||
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
|
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
|
||||||
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
|
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
|
||||||
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
|
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
|
||||||
| TG_TOKEN | Optional, required for Telegram notification | Telegram token (`BOT-ID:BOT-TOKEN`) |
|
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
|
||||||
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
|
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
|
||||||
| TZ | Optional | Time Zone |
|
| TG_TOKEN | Optional, required for Telegram notification | Telegram token (`BOT-ID:BOT-TOKEN`) |
|
||||||
|
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
|
||||||
|
| TZ | Optional | Time Zone |
|
||||||
|
|
||||||
---
|
---
|
||||||
## Run in Scheduled mode
|
## Run in Scheduled mode
|
||||||
|
|||||||
@@ -44,4 +44,6 @@ spec:
|
|||||||
value: "xxxx"
|
value: "xxxx"
|
||||||
- name: AWS_DISABLE_SSL
|
- name: AWS_DISABLE_SSL
|
||||||
value: "false"
|
value: "false"
|
||||||
|
- name: AWS_FORCE_PATH_STYLE
|
||||||
|
value: "false"
|
||||||
restartPolicy: Never
|
restartPolicy: Never
|
||||||
14
go.mod
14
go.mod
@@ -5,21 +5,31 @@ go 1.22.5
|
|||||||
require github.com/spf13/pflag v1.0.5
|
require github.com/spf13/pflag v1.0.5
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
github.com/ProtonMail/gopenpgp/v2 v2.7.5
|
||||||
github.com/aws/aws-sdk-go v1.55.3
|
github.com/aws/aws-sdk-go v1.55.3
|
||||||
github.com/bramvdbogaerde/go-scp v1.5.0
|
github.com/bramvdbogaerde/go-scp v1.5.0
|
||||||
github.com/hpcloud/tail v1.0.0
|
github.com/hpcloud/tail v1.0.0
|
||||||
|
github.com/jlaffaye/ftp v0.2.0
|
||||||
|
github.com/robfig/cron/v3 v3.0.1
|
||||||
github.com/spf13/cobra v1.8.0
|
github.com/spf13/cobra v1.8.0
|
||||||
golang.org/x/crypto v0.18.0
|
golang.org/x/crypto v0.18.0
|
||||||
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
|
||||||
|
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
|
||||||
|
github.com/cloudflare/circl v1.3.3 // indirect
|
||||||
|
github.com/go-mail/mail v2.3.1+incompatible // indirect
|
||||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/jlaffaye/ftp v0.2.0 // indirect
|
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221 // indirect
|
||||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
golang.org/x/sys v0.22.0 // indirect
|
golang.org/x/sys v0.22.0 // indirect
|
||||||
|
golang.org/x/text v0.14.0 // indirect
|
||||||
|
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
|
||||||
gopkg.in/fsnotify.v1 v1.4.7 // indirect
|
gopkg.in/fsnotify.v1 v1.4.7 // indirect
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
64
go.sum
64
go.sum
@@ -1,12 +1,24 @@
|
|||||||
|
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
|
||||||
|
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||||
|
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
|
||||||
|
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
|
||||||
|
github.com/ProtonMail/gopenpgp/v2 v2.7.5 h1:STOY3vgES59gNgoOt2w0nyHBjKViB/qSg7NjbQWPJkA=
|
||||||
|
github.com/ProtonMail/gopenpgp/v2 v2.7.5/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
|
||||||
github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
|
github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
|
||||||
github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||||
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||||
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||||
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
|
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
|
||||||
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
|
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
|
||||||
|
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||||
|
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
|
||||||
|
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/go-mail/mail v2.3.1+incompatible h1:UzNOn0k5lpfVtO31cK3hn6I4VEVGhe3lX8AJBAxXExM=
|
||||||
|
github.com/go-mail/mail v2.3.1+incompatible/go.mod h1:VPWjmmNyRsWXQZHVHT3g0YbIINUkSmuKOiLIDkWbL6M=
|
||||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
@@ -16,11 +28,15 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
|||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
|
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221 h1:AwkCf7el1kzeCJ89A+gUAK0ero5JYnvLOKsYMzq+rs4=
|
||||||
|
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221/go.mod h1:9F8ZJ+ZXE8DZBo77+aneGj8LMjrYXX6eFUCC/uqZOUo=
|
||||||
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
|
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
|
||||||
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
|
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
|
||||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||||
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
@@ -32,16 +48,64 @@ github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3k
|
|||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||||
|
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||||
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
|
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
|
||||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||||
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
|
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||||
|
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
|
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||||
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
|
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
|
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
|
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
|
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
|
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||||
|
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
|
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
|
||||||
|
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|||||||
234
pkg/backup.go
234
pkg/backup.go
@@ -8,6 +8,7 @@ package pkg
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/jkaninda/encryptor"
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
"github.com/robfig/cron/v3"
|
"github.com/robfig/cron/v3"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
@@ -20,18 +21,23 @@ import (
|
|||||||
|
|
||||||
func StartBackup(cmd *cobra.Command) {
|
func StartBackup(cmd *cobra.Command) {
|
||||||
intro()
|
intro()
|
||||||
dbConf = initDbConfig(cmd)
|
|
||||||
//Initialize backup configs
|
//Initialize backup configs
|
||||||
config := initBackupConfig(cmd)
|
config := initBackupConfig(cmd)
|
||||||
|
//Load backup configuration file
|
||||||
if config.cronExpression == "" {
|
configFile, err := loadConfigFile()
|
||||||
BackupTask(dbConf, config)
|
if err != nil {
|
||||||
} else {
|
dbConf = initDbConfig(cmd)
|
||||||
if utils.IsValidCronExpression(config.cronExpression) {
|
if config.cronExpression == "" {
|
||||||
scheduledMode(dbConf, config)
|
BackupTask(dbConf, config)
|
||||||
} else {
|
} else {
|
||||||
utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
|
if utils.IsValidCronExpression(config.cronExpression) {
|
||||||
|
scheduledMode(dbConf, config)
|
||||||
|
} else {
|
||||||
|
utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
startMultiBackup(config, configFile)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -40,6 +46,7 @@ func StartBackup(cmd *cobra.Command) {
|
|||||||
func scheduledMode(db *dbConfig, config *BackupConfig) {
|
func scheduledMode(db *dbConfig, config *BackupConfig) {
|
||||||
utils.Info("Running in Scheduled mode")
|
utils.Info("Running in Scheduled mode")
|
||||||
utils.Info("Backup cron expression: %s", config.cronExpression)
|
utils.Info("Backup cron expression: %s", config.cronExpression)
|
||||||
|
utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
|
||||||
utils.Info("Storage type %s ", config.storage)
|
utils.Info("Storage type %s ", config.storage)
|
||||||
|
|
||||||
//Test backup
|
//Test backup
|
||||||
@@ -52,6 +59,8 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
|
|||||||
|
|
||||||
_, err := c.AddFunc(config.cronExpression, func() {
|
_, err := c.AddFunc(config.cronExpression, func() {
|
||||||
BackupTask(db, config)
|
BackupTask(db, config)
|
||||||
|
utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
|
||||||
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
@@ -64,7 +73,8 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
|
|||||||
select {}
|
select {}
|
||||||
}
|
}
|
||||||
func BackupTask(db *dbConfig, config *BackupConfig) {
|
func BackupTask(db *dbConfig, config *BackupConfig) {
|
||||||
//Generate backup file name
|
utils.Info("Starting backup task...")
|
||||||
|
//Generate file name
|
||||||
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
|
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
|
||||||
if config.disableCompression {
|
if config.disableCompression {
|
||||||
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
|
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
|
||||||
@@ -79,31 +89,89 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
|
|||||||
sshBackup(db, config)
|
sshBackup(db, config)
|
||||||
case "ftp", "FTP":
|
case "ftp", "FTP":
|
||||||
ftpBackup(db, config)
|
ftpBackup(db, config)
|
||||||
|
//utils.Fatal("Not supported storage type: %s", config.storage)
|
||||||
default:
|
default:
|
||||||
localBackup(db, config)
|
localBackup(db, config)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
|
||||||
|
for _, db := range databases {
|
||||||
|
//Check if path is defined in config file
|
||||||
|
if db.Path != "" {
|
||||||
|
bkConfig.remotePath = db.Path
|
||||||
|
}
|
||||||
|
BackupTask(getDatabase(db), bkConfig)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
|
||||||
|
utils.Info("Starting multiple backup jobs...")
|
||||||
|
var conf = &Config{}
|
||||||
|
conf, err := readConf(configFile)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error reading config file: %s", err)
|
||||||
|
}
|
||||||
|
//Check if cronExpression is defined in config file
|
||||||
|
if conf.CronExpression != "" {
|
||||||
|
bkConfig.cronExpression = conf.CronExpression
|
||||||
|
}
|
||||||
|
// Check if cronExpression is defined
|
||||||
|
if bkConfig.cronExpression == "" {
|
||||||
|
multiBackupTask(conf.Databases, bkConfig)
|
||||||
|
} else {
|
||||||
|
// Check if cronExpression is valid
|
||||||
|
if utils.IsValidCronExpression(bkConfig.cronExpression) {
|
||||||
|
utils.Info("Running MultiBackup in Scheduled mode")
|
||||||
|
utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
|
||||||
|
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
|
||||||
|
utils.Info("Storage type %s ", bkConfig.storage)
|
||||||
|
|
||||||
|
//Test backup
|
||||||
|
utils.Info("Testing backup configurations...")
|
||||||
|
multiBackupTask(conf.Databases, bkConfig)
|
||||||
|
utils.Info("Testing backup configurations...done")
|
||||||
|
utils.Info("Creating multi backup job...")
|
||||||
|
// Create a new cron instance
|
||||||
|
c := cron.New()
|
||||||
|
|
||||||
|
_, err := c.AddFunc(bkConfig.cronExpression, func() {
|
||||||
|
// Create a channel
|
||||||
|
multiBackupTask(conf.Databases, bkConfig)
|
||||||
|
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
|
||||||
|
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Start the cron scheduler
|
||||||
|
c.Start()
|
||||||
|
utils.Info("Creating multi backup job...done")
|
||||||
|
utils.Info("Backup job started")
|
||||||
|
defer c.Stop()
|
||||||
|
select {}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
// BackupDatabase backup database
|
// BackupDatabase backup database
|
||||||
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
|
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
|
||||||
|
|
||||||
storagePath = os.Getenv("STORAGE_PATH")
|
storagePath = os.Getenv("STORAGE_PATH")
|
||||||
|
|
||||||
err := utils.CheckEnvVars(dbHVars)
|
|
||||||
if err != nil {
|
|
||||||
utils.Error("Please make sure all required environment variables for database are set")
|
|
||||||
utils.Fatal("Error checking environment variables: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
utils.Info("Starting database backup...")
|
utils.Info("Starting database backup...")
|
||||||
err = os.Setenv("MYSQL_PWD", db.dbPassword)
|
|
||||||
|
err := os.Setenv("MYSQL_PWD", db.dbPassword)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
testDatabaseConnection(db)
|
testDatabaseConnection(db)
|
||||||
|
|
||||||
// Backup Database database
|
// Backup Database database
|
||||||
utils.Info("Backing up database...")
|
utils.Info("Backing up database...")
|
||||||
|
|
||||||
|
// Verify is compression is disabled
|
||||||
if disableCompression {
|
if disableCompression {
|
||||||
// Execute mysqldump
|
// Execute mysqldump
|
||||||
cmd := exec.Command("mysqldump",
|
cmd := exec.Command("mysqldump",
|
||||||
@@ -118,7 +186,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
// save output
|
// save output
|
||||||
file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
|
file, err := os.Create(filepath.Join(tmpPath, backupFileName))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -139,7 +207,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
|
|||||||
}
|
}
|
||||||
gzipCmd := exec.Command("gzip")
|
gzipCmd := exec.Command("gzip")
|
||||||
gzipCmd.Stdin = stdout
|
gzipCmd.Stdin = stdout
|
||||||
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
|
gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
|
||||||
gzipCmd.Start()
|
gzipCmd.Start()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
@@ -157,43 +225,70 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
|
|||||||
}
|
}
|
||||||
func localBackup(db *dbConfig, config *BackupConfig) {
|
func localBackup(db *dbConfig, config *BackupConfig) {
|
||||||
utils.Info("Backup database to local storage")
|
utils.Info("Backup database to local storage")
|
||||||
|
startTime = time.Now().Format(utils.TimeFormat())
|
||||||
BackupDatabase(db, config.backupFileName, disableCompression)
|
BackupDatabase(db, config.backupFileName, disableCompression)
|
||||||
finalFileName := config.backupFileName
|
finalFileName := config.backupFileName
|
||||||
if config.encryption {
|
if config.encryption {
|
||||||
encryptBackup(config.backupFileName, config.passphrase)
|
encryptBackup(config)
|
||||||
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
|
||||||
}
|
}
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error:", err)
|
||||||
|
}
|
||||||
|
//Get backup info
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
utils.Info("Backup name is %s", finalFileName)
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
moveToBackup(finalFileName, storagePath)
|
moveToBackup(finalFileName, storagePath)
|
||||||
//Send notification
|
//Send notification
|
||||||
utils.NotifySuccess(finalFileName)
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: backupSize,
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(config.remotePath, finalFileName),
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: time.Now().Format(utils.TimeFormat()),
|
||||||
|
})
|
||||||
//Delete old backup
|
//Delete old backup
|
||||||
if config.prune {
|
if config.prune {
|
||||||
deleteOldBackup(config.backupRetention)
|
deleteOldBackup(config.backupRetention)
|
||||||
}
|
}
|
||||||
//Delete temp
|
//Delete temp
|
||||||
deleteTemp()
|
deleteTemp()
|
||||||
|
utils.Info("Backup completed successfully")
|
||||||
}
|
}
|
||||||
|
|
||||||
func s3Backup(db *dbConfig, config *BackupConfig) {
|
func s3Backup(db *dbConfig, config *BackupConfig) {
|
||||||
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
|
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
|
||||||
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
|
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
|
||||||
|
if config.remotePath != "" {
|
||||||
|
s3Path = config.remotePath
|
||||||
|
}
|
||||||
utils.Info("Backup database to s3 storage")
|
utils.Info("Backup database to s3 storage")
|
||||||
|
startTime = time.Now().Format(utils.TimeFormat())
|
||||||
|
|
||||||
//Backup database
|
//Backup database
|
||||||
BackupDatabase(db, config.backupFileName, disableCompression)
|
BackupDatabase(db, config.backupFileName, disableCompression)
|
||||||
finalFileName := config.backupFileName
|
finalFileName := config.backupFileName
|
||||||
if config.encryption {
|
if config.encryption {
|
||||||
encryptBackup(config.backupFileName, config.passphrase)
|
encryptBackup(config)
|
||||||
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
||||||
}
|
}
|
||||||
utils.Info("Uploading backup archive to remote storage S3 ... ")
|
utils.Info("Uploading backup archive to remote storage S3 ... ")
|
||||||
|
|
||||||
utils.Info("Backup name is %s", finalFileName)
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
|
err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error uploading file to S3: %s ", err)
|
utils.Fatal("Error uploading backup archive to S3: %s ", err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
//Get backup info
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error:", err)
|
||||||
|
}
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
//Delete backup file from tmp folder
|
//Delete backup file from tmp folder
|
||||||
err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
|
err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -209,19 +304,29 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
|
|||||||
}
|
}
|
||||||
utils.Done("Uploading backup archive to remote storage S3 ... done ")
|
utils.Done("Uploading backup archive to remote storage S3 ... done ")
|
||||||
//Send notification
|
//Send notification
|
||||||
utils.NotifySuccess(finalFileName)
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: backupSize,
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(s3Path, finalFileName),
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: time.Now().Format(utils.TimeFormat()),
|
||||||
|
})
|
||||||
//Delete temp
|
//Delete temp
|
||||||
deleteTemp()
|
deleteTemp()
|
||||||
}
|
utils.Info("Backup completed successfully")
|
||||||
|
|
||||||
// sshBackup backup database to SSH remote server
|
}
|
||||||
func sshBackup(db *dbConfig, config *BackupConfig) {
|
func sshBackup(db *dbConfig, config *BackupConfig) {
|
||||||
utils.Info("Backup database to Remote server")
|
utils.Info("Backup database to Remote server")
|
||||||
|
startTime = time.Now().Format(utils.TimeFormat())
|
||||||
|
|
||||||
//Backup database
|
//Backup database
|
||||||
BackupDatabase(db, config.backupFileName, disableCompression)
|
BackupDatabase(db, config.backupFileName, disableCompression)
|
||||||
finalFileName := config.backupFileName
|
finalFileName := config.backupFileName
|
||||||
if config.encryption {
|
if config.encryption {
|
||||||
encryptBackup(config.backupFileName, config.passphrase)
|
encryptBackup(config)
|
||||||
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
||||||
}
|
}
|
||||||
utils.Info("Uploading backup archive to remote storage ... ")
|
utils.Info("Uploading backup archive to remote storage ... ")
|
||||||
@@ -231,11 +336,16 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
|
|||||||
utils.Fatal("Error uploading file to the remote server: %s ", err)
|
utils.Fatal("Error uploading file to the remote server: %s ", err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
//Get backup info
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error:", err)
|
||||||
|
}
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
//Delete backup file from tmp folder
|
//Delete backup file from tmp folder
|
||||||
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println("Error deleting file: ", err)
|
utils.Error("Error deleting file: %v", err)
|
||||||
|
|
||||||
}
|
}
|
||||||
if config.prune {
|
if config.prune {
|
||||||
@@ -246,17 +356,29 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
|
|||||||
|
|
||||||
utils.Done("Uploading backup archive to remote storage ... done ")
|
utils.Done("Uploading backup archive to remote storage ... done ")
|
||||||
//Send notification
|
//Send notification
|
||||||
utils.NotifySuccess(finalFileName)
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: backupSize,
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(config.remotePath, finalFileName),
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: time.Now().Format(utils.TimeFormat()),
|
||||||
|
})
|
||||||
//Delete temp
|
//Delete temp
|
||||||
deleteTemp()
|
deleteTemp()
|
||||||
|
utils.Info("Backup completed successfully")
|
||||||
|
|
||||||
}
|
}
|
||||||
func ftpBackup(db *dbConfig, config *BackupConfig) {
|
func ftpBackup(db *dbConfig, config *BackupConfig) {
|
||||||
utils.Info("Backup database to the remote FTP server")
|
utils.Info("Backup database to the remote FTP server")
|
||||||
|
startTime = time.Now().Format(utils.TimeFormat())
|
||||||
|
|
||||||
//Backup database
|
//Backup database
|
||||||
BackupDatabase(db, config.backupFileName, disableCompression)
|
BackupDatabase(db, config.backupFileName, disableCompression)
|
||||||
finalFileName := config.backupFileName
|
finalFileName := config.backupFileName
|
||||||
if config.encryption {
|
if config.encryption {
|
||||||
encryptBackup(config.backupFileName, config.passphrase)
|
encryptBackup(config)
|
||||||
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
||||||
}
|
}
|
||||||
utils.Info("Uploading backup archive to the remote FTP server ... ")
|
utils.Info("Uploading backup archive to the remote FTP server ... ")
|
||||||
@@ -266,7 +388,12 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
|
|||||||
utils.Fatal("Error uploading file to the remote FTP server: %s ", err)
|
utils.Fatal("Error uploading file to the remote FTP server: %s ", err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
//Get backup info
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error:", err)
|
||||||
|
}
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
//Delete backup file from tmp folder
|
//Delete backup file from tmp folder
|
||||||
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -281,16 +408,47 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
|
|||||||
|
|
||||||
utils.Done("Uploading backup archive to the remote FTP server ... done ")
|
utils.Done("Uploading backup archive to the remote FTP server ... done ")
|
||||||
//Send notification
|
//Send notification
|
||||||
utils.NotifySuccess(finalFileName)
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: backupSize,
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(config.remotePath, finalFileName),
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: time.Now().Format(utils.TimeFormat()),
|
||||||
|
})
|
||||||
//Delete temp
|
//Delete temp
|
||||||
deleteTemp()
|
deleteTemp()
|
||||||
|
utils.Info("Backup completed successfully")
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// encryptBackup encrypt backup
|
func encryptBackup(config *BackupConfig) {
|
||||||
func encryptBackup(backupFileName, passphrase string) {
|
backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
|
||||||
err := Encrypt(filepath.Join(tmpPath, backupFileName), passphrase)
|
outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error during encrypting backup %s", err)
|
utils.Fatal("Error reading backup file: %s ", err)
|
||||||
|
}
|
||||||
|
if config.usingKey {
|
||||||
|
utils.Info("Encrypting backup using public key...")
|
||||||
|
pubKey, err := os.ReadFile(config.publicKey)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error reading public key: %s ", err)
|
||||||
|
}
|
||||||
|
err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error encrypting backup file: %v ", err)
|
||||||
|
}
|
||||||
|
utils.Info("Encrypting backup using public key...done")
|
||||||
|
|
||||||
|
} else if config.passphrase != "" {
|
||||||
|
utils.Info("Encrypting backup using passphrase...")
|
||||||
|
err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("error during encrypting backup %v", err)
|
||||||
|
}
|
||||||
|
utils.Info("Encrypting backup using passphrase...done")
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,7 +14,17 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type Database struct {
|
||||||
|
Host string `yaml:"host"`
|
||||||
|
Port string `yaml:"port"`
|
||||||
|
Name string `yaml:"name"`
|
||||||
|
User string `yaml:"user"`
|
||||||
|
Password string `yaml:"password"`
|
||||||
|
Path string `yaml:"path"`
|
||||||
|
}
|
||||||
type Config struct {
|
type Config struct {
|
||||||
|
Databases []Database `yaml:"databases"`
|
||||||
|
CronExpression string `yaml:"cronExpression"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type dbConfig struct {
|
type dbConfig struct {
|
||||||
@@ -40,9 +50,11 @@ type BackupConfig struct {
|
|||||||
backupRetention int
|
backupRetention int
|
||||||
disableCompression bool
|
disableCompression bool
|
||||||
prune bool
|
prune bool
|
||||||
encryption bool
|
|
||||||
remotePath string
|
remotePath string
|
||||||
|
encryption bool
|
||||||
|
usingKey bool
|
||||||
passphrase string
|
passphrase string
|
||||||
|
publicKey string
|
||||||
storage string
|
storage string
|
||||||
cronExpression string
|
cronExpression string
|
||||||
}
|
}
|
||||||
@@ -90,6 +102,16 @@ func initDbConfig(cmd *cobra.Command) *dbConfig {
|
|||||||
return &dConf
|
return &dConf
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getDatabase(database Database) *dbConfig {
|
||||||
|
return &dbConfig{
|
||||||
|
dbHost: database.Host,
|
||||||
|
dbPort: database.Port,
|
||||||
|
dbName: database.Name,
|
||||||
|
dbUserName: database.User,
|
||||||
|
dbPassword: database.Password,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// loadSSHConfig loads the SSH configuration from environment variables
|
// loadSSHConfig loads the SSH configuration from environment variables
|
||||||
func loadSSHConfig() (*SSHConfig, error) {
|
func loadSSHConfig() (*SSHConfig, error) {
|
||||||
utils.GetEnvVariable("SSH_HOST", "SSH_HOST_NAME")
|
utils.GetEnvVariable("SSH_HOST", "SSH_HOST_NAME")
|
||||||
@@ -132,11 +154,11 @@ func initAWSConfig() *AWSConfig {
|
|||||||
aConfig.region = os.Getenv("AWS_REGION")
|
aConfig.region = os.Getenv("AWS_REGION")
|
||||||
disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
|
disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
|
disableSsl = false
|
||||||
}
|
}
|
||||||
forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
|
forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Unable to parse AWS_FORCE_PATH_STYLE env var: %s", err)
|
forcePathStyle = false
|
||||||
}
|
}
|
||||||
aConfig.disableSsl = disableSsl
|
aConfig.disableSsl = disableSsl
|
||||||
aConfig.forcePathStyle = forcePathStyle
|
aConfig.forcePathStyle = forcePathStyle
|
||||||
@@ -150,23 +172,29 @@ func initAWSConfig() *AWSConfig {
|
|||||||
func initBackupConfig(cmd *cobra.Command) *BackupConfig {
|
func initBackupConfig(cmd *cobra.Command) *BackupConfig {
|
||||||
utils.SetEnv("STORAGE_PATH", storagePath)
|
utils.SetEnv("STORAGE_PATH", storagePath)
|
||||||
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
|
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
|
||||||
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
|
|
||||||
utils.GetEnv(cmd, "path", "REMOTE_PATH")
|
utils.GetEnv(cmd, "path", "REMOTE_PATH")
|
||||||
//Get flag value and set env
|
//Get flag value and set env
|
||||||
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
|
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
|
||||||
storage = utils.GetEnv(cmd, "storage", "STORAGE")
|
storage = utils.GetEnv(cmd, "storage", "STORAGE")
|
||||||
backupRetention, _ := cmd.Flags().GetInt("keep-last")
|
prune := false
|
||||||
prune, _ := cmd.Flags().GetBool("prune")
|
backupRetention := utils.GetIntEnv("BACKUP_RETENTION_DAYS")
|
||||||
|
if backupRetention > 0 {
|
||||||
|
prune = true
|
||||||
|
}
|
||||||
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
|
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
|
||||||
_, _ = cmd.Flags().GetString("mode")
|
_, _ = cmd.Flags().GetString("mode")
|
||||||
passphrase := os.Getenv("GPG_PASSPHRASE")
|
passphrase := os.Getenv("GPG_PASSPHRASE")
|
||||||
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
|
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
|
||||||
cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
|
cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
|
||||||
|
|
||||||
if passphrase != "" {
|
publicKeyFile, err := checkPubKeyFile(os.Getenv("GPG_PUBLIC_KEY"))
|
||||||
|
if err == nil {
|
||||||
encryption = true
|
encryption = true
|
||||||
|
usingKey = true
|
||||||
|
} else if passphrase != "" {
|
||||||
|
encryption = true
|
||||||
|
usingKey = false
|
||||||
}
|
}
|
||||||
|
|
||||||
//Initialize backup configs
|
//Initialize backup configs
|
||||||
config := BackupConfig{}
|
config := BackupConfig{}
|
||||||
config.backupRetention = backupRetention
|
config.backupRetention = backupRetention
|
||||||
@@ -176,17 +204,21 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
|
|||||||
config.encryption = encryption
|
config.encryption = encryption
|
||||||
config.remotePath = remotePath
|
config.remotePath = remotePath
|
||||||
config.passphrase = passphrase
|
config.passphrase = passphrase
|
||||||
|
config.publicKey = publicKeyFile
|
||||||
|
config.usingKey = usingKey
|
||||||
config.cronExpression = cronExpression
|
config.cronExpression = cronExpression
|
||||||
return &config
|
return &config
|
||||||
}
|
}
|
||||||
|
|
||||||
type RestoreConfig struct {
|
type RestoreConfig struct {
|
||||||
s3Path string
|
s3Path string
|
||||||
remotePath string
|
remotePath string
|
||||||
storage string
|
storage string
|
||||||
file string
|
file string
|
||||||
bucket string
|
bucket string
|
||||||
gpqPassphrase string
|
usingKey bool
|
||||||
|
passphrase string
|
||||||
|
privateKey string
|
||||||
}
|
}
|
||||||
|
|
||||||
func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
|
func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
|
||||||
@@ -199,7 +231,14 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
|
|||||||
storage = utils.GetEnv(cmd, "storage", "STORAGE")
|
storage = utils.GetEnv(cmd, "storage", "STORAGE")
|
||||||
file = utils.GetEnv(cmd, "file", "FILE_NAME")
|
file = utils.GetEnv(cmd, "file", "FILE_NAME")
|
||||||
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
|
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
|
||||||
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
|
passphrase := os.Getenv("GPG_PASSPHRASE")
|
||||||
|
privateKeyFile, err := checkPrKeyFile(os.Getenv("GPG_PRIVATE_KEY"))
|
||||||
|
if err == nil {
|
||||||
|
usingKey = true
|
||||||
|
} else if passphrase != "" {
|
||||||
|
usingKey = false
|
||||||
|
}
|
||||||
|
|
||||||
//Initialize restore configs
|
//Initialize restore configs
|
||||||
rConfig := RestoreConfig{}
|
rConfig := RestoreConfig{}
|
||||||
rConfig.s3Path = s3Path
|
rConfig.s3Path = s3Path
|
||||||
@@ -208,13 +247,15 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
|
|||||||
rConfig.bucket = bucket
|
rConfig.bucket = bucket
|
||||||
rConfig.file = file
|
rConfig.file = file
|
||||||
rConfig.storage = storage
|
rConfig.storage = storage
|
||||||
rConfig.gpqPassphrase = gpqPassphrase
|
rConfig.passphrase = passphrase
|
||||||
|
rConfig.usingKey = usingKey
|
||||||
|
rConfig.privateKey = privateKeyFile
|
||||||
return &rConfig
|
return &rConfig
|
||||||
}
|
}
|
||||||
func initTargetDbConfig() *targetDbConfig {
|
func initTargetDbConfig() *targetDbConfig {
|
||||||
tdbConfig := targetDbConfig{}
|
tdbConfig := targetDbConfig{}
|
||||||
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
|
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
|
||||||
tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
|
tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "3306")
|
||||||
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
|
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
|
||||||
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
|
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
|
||||||
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
|
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
|
||||||
@@ -226,3 +267,10 @@ func initTargetDbConfig() *targetDbConfig {
|
|||||||
}
|
}
|
||||||
return &tdbConfig
|
return &tdbConfig
|
||||||
}
|
}
|
||||||
|
func loadConfigFile() (string, error) {
|
||||||
|
backupConfigFile, err := checkConfigFile(os.Getenv("BACKUP_CONFIG_FILE"))
|
||||||
|
if err == nil {
|
||||||
|
return backupConfigFile, nil
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("backup config file not found")
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,63 +0,0 @@
|
|||||||
// Package pkg /
|
|
||||||
/*****
|
|
||||||
@author Jonas Kaninda
|
|
||||||
@license MIT License <https://opensource.org/licenses/MIT>
|
|
||||||
@Copyright © 2024 Jonas Kaninda
|
|
||||||
**/
|
|
||||||
package pkg
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Decrypt(inputFile string, passphrase string) error {
|
|
||||||
utils.Info("Decrypting backup file: " + inputFile + " ...")
|
|
||||||
//Create gpg home dir
|
|
||||||
err := utils.MakeDirAll(gpgHome)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
utils.SetEnv("GNUPGHOME", gpgHome)
|
|
||||||
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
|
|
||||||
err = cmd.Run()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
utils.Info("Backup file decrypted successful!")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func Encrypt(inputFile string, passphrase string) error {
|
|
||||||
utils.Info("Encrypting backup...")
|
|
||||||
//Create gpg home dir
|
|
||||||
err := utils.MakeDirAll(gpgHome)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
utils.SetEnv("GNUPGHOME", gpgHome)
|
|
||||||
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
|
|
||||||
err = cmd.Run()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
utils.Info("Backup file encrypted successful!")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func RemoveLastExtension(filename string) string {
|
|
||||||
if idx := strings.LastIndex(filename, "."); idx != -1 {
|
|
||||||
return filename[:idx]
|
|
||||||
}
|
|
||||||
return filename
|
|
||||||
}
|
|
||||||
@@ -10,9 +10,11 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
|
"gopkg.in/yaml.v3"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -129,3 +131,90 @@ func intro() {
|
|||||||
utils.Info("Starting MySQL Backup...")
|
utils.Info("Starting MySQL Backup...")
|
||||||
utils.Info("Copyright (c) 2024 Jonas Kaninda ")
|
utils.Info("Copyright (c) 2024 Jonas Kaninda ")
|
||||||
}
|
}
|
||||||
|
func checkPubKeyFile(pubKey string) (string, error) {
|
||||||
|
// Define possible key file names
|
||||||
|
keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey}
|
||||||
|
|
||||||
|
// Loop through key file names and check if they exist
|
||||||
|
for _, keyFile := range keyFiles {
|
||||||
|
if _, err := os.Stat(keyFile); err == nil {
|
||||||
|
// File exists
|
||||||
|
return keyFile, nil
|
||||||
|
} else if os.IsNotExist(err) {
|
||||||
|
// File does not exist, continue to the next one
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
// An unexpected error occurred
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an error if neither file exists
|
||||||
|
return "", fmt.Errorf("no public key file found")
|
||||||
|
}
|
||||||
|
func checkPrKeyFile(prKey string) (string, error) {
|
||||||
|
// Define possible key file names
|
||||||
|
keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey}
|
||||||
|
|
||||||
|
// Loop through key file names and check if they exist
|
||||||
|
for _, keyFile := range keyFiles {
|
||||||
|
if _, err := os.Stat(keyFile); err == nil {
|
||||||
|
// File exists
|
||||||
|
return keyFile, nil
|
||||||
|
} else if os.IsNotExist(err) {
|
||||||
|
// File does not exist, continue to the next one
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
// An unexpected error occurred
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an error if neither file exists
|
||||||
|
return "", fmt.Errorf("no public key file found")
|
||||||
|
}
|
||||||
|
func readConf(configFile string) (*Config, error) {
|
||||||
|
//configFile := filepath.Join("./", filename)
|
||||||
|
if utils.FileExists(configFile) {
|
||||||
|
buf, err := os.ReadFile(configFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
c := &Config{}
|
||||||
|
err = yaml.Unmarshal(buf, c)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("in file %q: %w", configFile, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return c, err
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("config file %q not found", configFile)
|
||||||
|
}
|
||||||
|
func checkConfigFile(filePath string) (string, error) {
|
||||||
|
// Define possible config file names
|
||||||
|
configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}
|
||||||
|
|
||||||
|
// Loop through config file names and check if they exist
|
||||||
|
for _, configFile := range configFiles {
|
||||||
|
if _, err := os.Stat(configFile); err == nil {
|
||||||
|
// File exists
|
||||||
|
return configFile, nil
|
||||||
|
} else if os.IsNotExist(err) {
|
||||||
|
// File does not exist, continue to the next one
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
// An unexpected error occurred
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an error if neither file exists
|
||||||
|
return "", fmt.Errorf("no config file found")
|
||||||
|
}
|
||||||
|
func RemoveLastExtension(filename string) string {
|
||||||
|
if idx := strings.LastIndex(filename, "."); idx != -1 {
|
||||||
|
return filename[:idx]
|
||||||
|
}
|
||||||
|
return filename
|
||||||
|
}
|
||||||
|
|||||||
@@ -30,11 +30,13 @@ func StartMigration(cmd *cobra.Command) {
|
|||||||
|
|
||||||
//Generate file name
|
//Generate file name
|
||||||
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
|
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
|
||||||
|
conf := &RestoreConfig{}
|
||||||
|
conf.file = backupFileName
|
||||||
//Backup source Database
|
//Backup source Database
|
||||||
BackupDatabase(dbConf, backupFileName, true)
|
BackupDatabase(dbConf, backupFileName, true)
|
||||||
//Restore source database into target database
|
//Restore source database into target database
|
||||||
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
|
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
|
||||||
RestoreDatabase(&newDbConfig, backupFileName)
|
RestoreDatabase(&newDbConfig, conf)
|
||||||
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
|
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
|
||||||
utils.Info("Database migration completed.")
|
utils.Info("Database migration completed.")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"github.com/jkaninda/encryptor"
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"os"
|
"os"
|
||||||
@@ -24,87 +24,102 @@ func StartRestore(cmd *cobra.Command) {
|
|||||||
case "local":
|
case "local":
|
||||||
utils.Info("Restore database from local")
|
utils.Info("Restore database from local")
|
||||||
copyToTmp(storagePath, restoreConf.file)
|
copyToTmp(storagePath, restoreConf.file)
|
||||||
RestoreDatabase(dbConf, restoreConf.file)
|
RestoreDatabase(dbConf, restoreConf)
|
||||||
case "s3", "S3":
|
case "s3", "S3":
|
||||||
restoreFromS3(dbConf, restoreConf.file, restoreConf.bucket, restoreConf.s3Path)
|
restoreFromS3(dbConf, restoreConf)
|
||||||
case "ssh", "SSH":
|
case "ssh", "SSH", "remote":
|
||||||
restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath)
|
restoreFromRemote(dbConf, restoreConf)
|
||||||
case "ftp", "FTP":
|
case "ftp", "FTP":
|
||||||
restoreFromFTP(dbConf, restoreConf.file, restoreConf.remotePath)
|
restoreFromFTP(dbConf, restoreConf)
|
||||||
default:
|
default:
|
||||||
utils.Info("Restore database from local")
|
utils.Info("Restore database from local")
|
||||||
copyToTmp(storagePath, restoreConf.file)
|
copyToTmp(storagePath, restoreConf.file)
|
||||||
RestoreDatabase(dbConf, restoreConf.file)
|
RestoreDatabase(dbConf, restoreConf)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
|
func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
|
||||||
utils.Info("Restore database from s3")
|
utils.Info("Restore database from s3")
|
||||||
err := DownloadFile(tmpPath, file, bucket, s3Path)
|
err := DownloadFile(tmpPath, conf.file, conf.bucket, conf.s3Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error download file from s3 %s %v", file, err)
|
utils.Fatal("Error download file from s3 %s %v ", conf.file, err)
|
||||||
}
|
}
|
||||||
RestoreDatabase(db, file)
|
RestoreDatabase(db, conf)
|
||||||
}
|
}
|
||||||
func restoreFromRemote(db *dbConfig, file, remotePath string) {
|
func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
|
||||||
utils.Info("Restore database from remote server")
|
utils.Info("Restore database from remote server")
|
||||||
err := CopyFromRemote(file, remotePath)
|
err := CopyFromRemote(conf.file, conf.remotePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
|
utils.Fatal("Error download file from remote server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
|
||||||
}
|
}
|
||||||
RestoreDatabase(db, file)
|
RestoreDatabase(db, conf)
|
||||||
}
|
}
|
||||||
func restoreFromFTP(db *dbConfig, file, remotePath string) {
|
func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
|
||||||
utils.Info("Restore database from FTP server")
|
utils.Info("Restore database from FTP server")
|
||||||
err := CopyFromFTP(file, remotePath)
|
err := CopyFromFTP(conf.file, conf.remotePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(remotePath, file), err)
|
utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
|
||||||
}
|
}
|
||||||
RestoreDatabase(db, file)
|
RestoreDatabase(db, conf)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RestoreDatabase restore database
|
// RestoreDatabase restore database
|
||||||
func RestoreDatabase(db *dbConfig, file string) {
|
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
|
||||||
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
|
if conf.file == "" {
|
||||||
if file == "" {
|
|
||||||
utils.Fatal("Error, file required")
|
utils.Fatal("Error, file required")
|
||||||
}
|
}
|
||||||
|
extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
|
||||||
err := utils.CheckEnvVars(dbHVars)
|
rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file))
|
||||||
|
outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Error("Please make sure all required environment variables for database are set")
|
utils.Fatal("Error reading backup file: %s ", err)
|
||||||
utils.Fatal("Error checking environment variables: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
|
|
||||||
if extension == ".gpg" {
|
if extension == ".gpg" {
|
||||||
if gpgPassphrase == "" {
|
|
||||||
utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.")
|
|
||||||
|
|
||||||
} else {
|
if conf.usingKey {
|
||||||
//Decrypt file
|
utils.Info("Decrypting backup using private key...")
|
||||||
err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
|
utils.Warn("Backup decryption using a private key is not fully supported")
|
||||||
|
prKey, err := os.ReadFile(conf.privateKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error decrypting file %s %v", file, err)
|
utils.Fatal("Error reading public key: %s ", err)
|
||||||
|
}
|
||||||
|
err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("error during decrypting backup %v", err)
|
||||||
|
}
|
||||||
|
utils.Info("Decrypting backup using private key...done")
|
||||||
|
} else {
|
||||||
|
if conf.passphrase == "" {
|
||||||
|
utils.Error("Error, passphrase or private key required")
|
||||||
|
utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
|
||||||
|
} else {
|
||||||
|
utils.Info("Decrypting backup using passphrase...")
|
||||||
|
//decryptWithGPG file
|
||||||
|
err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error decrypting file %s %v", file, err)
|
||||||
|
}
|
||||||
|
utils.Info("Decrypting backup using passphrase...done")
|
||||||
|
//Update file name
|
||||||
|
conf.file = RemoveLastExtension(file)
|
||||||
}
|
}
|
||||||
//Update file name
|
|
||||||
file = RemoveLastExtension(file)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
|
if utils.FileExists(filepath.Join(tmpPath, conf.file)) {
|
||||||
err = os.Setenv("MYSQL_PWD", db.dbPassword)
|
err := os.Setenv("MYSQL_PWD", db.dbPassword)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
testDatabaseConnection(db)
|
testDatabaseConnection(db)
|
||||||
utils.Info("Restoring database...")
|
utils.Info("Restoring database...")
|
||||||
|
|
||||||
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
|
extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
|
||||||
// Restore from compressed file / .sql.gz
|
// Restore from compressed file / .sql.gz
|
||||||
if extension == ".gz" {
|
if extension == ".gz" {
|
||||||
str := "zcat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
|
str := "zcat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
|
||||||
_, err := exec.Command("sh", "-c", str).Output()
|
_, err := exec.Command("sh", "-c", str).Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error, in restoring the database %v", err)
|
utils.Fatal("Error, in restoring the database %v", err)
|
||||||
@@ -116,7 +131,7 @@ func RestoreDatabase(db *dbConfig, file string) {
|
|||||||
|
|
||||||
} else if extension == ".sql" {
|
} else if extension == ".sql" {
|
||||||
//Restore from sql file
|
//Restore from sql file
|
||||||
str := "cat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
|
str := "cat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
|
||||||
_, err := exec.Command("sh", "-c", str).Output()
|
_, err := exec.Command("sh", "-c", str).Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error in restoring the database %v", err)
|
utils.Fatal("Error in restoring the database %v", err)
|
||||||
@@ -130,6 +145,6 @@ func RestoreDatabase(db *dbConfig, file string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
utils.Fatal("File not found in %s", filepath.Join(tmpPath, file))
|
utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
15
pkg/var.go
15
pkg/var.go
@@ -11,13 +11,18 @@ const tmpPath = "/tmp/backup"
|
|||||||
const algorithm = "aes256"
|
const algorithm = "aes256"
|
||||||
const gpgHome = "/config/gnupg"
|
const gpgHome = "/config/gnupg"
|
||||||
const gpgExtension = "gpg"
|
const gpgExtension = "gpg"
|
||||||
|
const workingDir = "/config"
|
||||||
|
const timeFormat = "2006-01-02 at 15:04:05"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
storage = "local"
|
storage = "local"
|
||||||
file = ""
|
file = ""
|
||||||
storagePath = "/backup"
|
storagePath = "/backup"
|
||||||
disableCompression = false
|
disableCompression = false
|
||||||
encryption = false
|
encryption = false
|
||||||
|
usingKey = false
|
||||||
|
backupSize int64 = 0
|
||||||
|
startTime string
|
||||||
)
|
)
|
||||||
|
|
||||||
// dbHVars Required environment variables for database
|
// dbHVars Required environment variables for database
|
||||||
|
|||||||
18
templates/email-error.template
Normal file
18
templates/email-error.template
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<title>🔴 Urgent: Database Backup Failure Notification</title>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h2>Hi,</h2>
|
||||||
|
<p>An error occurred during database backup.</p>
|
||||||
|
<h3>Failure Details:</h3>
|
||||||
|
<ul>
|
||||||
|
<li>Error Message: {{.Error}}</li>
|
||||||
|
<li>Date: {{.EndTime}}</li>
|
||||||
|
<li>Backup Reference: {{.BackupReference}} </li>
|
||||||
|
</ul>
|
||||||
|
<p>©2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
24
templates/email.template
Normal file
24
templates/email.template
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<title>✅ Database Backup Notification – {{.Database}}</title>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h2>Hi,</h2>
|
||||||
|
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
|
||||||
|
<h3>Backup Details:</h3>
|
||||||
|
<ul>
|
||||||
|
<li>Database Name: {{.Database}}</li>
|
||||||
|
<li>Backup Start Time: {{.StartTime}}</li>
|
||||||
|
<li>Backup End Time: {{.EndTime}}</li>
|
||||||
|
<li>Backup Storage: {{.Storage}}</li>
|
||||||
|
<li>Backup Location: {{.BackupLocation}}</li>
|
||||||
|
<li>Backup Size: {{.BackupSize}} bytes</li>
|
||||||
|
<li>Backup Reference: {{.BackupReference}} </li>
|
||||||
|
</ul>
|
||||||
|
<p>Best regards,</p>
|
||||||
|
<p>©2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
|
||||||
|
<href>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
8
templates/telegram-error.template
Normal file
8
templates/telegram-error.template
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
🔴 Urgent: Database Backup Failure Notification
|
||||||
|
Hi,
|
||||||
|
An error occurred during database backup.
|
||||||
|
Failure Details:
|
||||||
|
- Date: {{.EndTime}}
|
||||||
|
- Backup Reference: {{.BackupReference}}
|
||||||
|
- Error Message: {{.Error}}
|
||||||
|
|
||||||
12
templates/telegram.template
Normal file
12
templates/telegram.template
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
[✅ Database Backup Notification – {{.Database}}
|
||||||
|
Hi,
|
||||||
|
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.
|
||||||
|
|
||||||
|
Backup Details:
|
||||||
|
- Database Name: {{.Database}}
|
||||||
|
- Backup Start Time: {{.StartTime}}
|
||||||
|
- Backup EndTime: {{.EndTime}}
|
||||||
|
- Backup Storage: {{.Storage}}
|
||||||
|
- Backup Location: {{.BackupLocation}}
|
||||||
|
- Backup Size: {{.BackupSize}} bytes
|
||||||
|
- Backup Reference: {{.BackupReference}}
|
||||||
59
utils/config.go
Normal file
59
utils/config.go
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
type MailConfig struct {
|
||||||
|
MailHost string
|
||||||
|
MailPort int
|
||||||
|
MailUserName string
|
||||||
|
MailPassword string
|
||||||
|
MailTo string
|
||||||
|
MailFrom string
|
||||||
|
SkipTls bool
|
||||||
|
}
|
||||||
|
type NotificationData struct {
|
||||||
|
File string
|
||||||
|
BackupSize int64
|
||||||
|
Database string
|
||||||
|
StartTime string
|
||||||
|
EndTime string
|
||||||
|
Storage string
|
||||||
|
BackupLocation string
|
||||||
|
BackupReference string
|
||||||
|
}
|
||||||
|
type ErrorMessage struct {
|
||||||
|
Database string
|
||||||
|
EndTime string
|
||||||
|
Error string
|
||||||
|
BackupReference string
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadMailConfig gets mail environment variables and returns MailConfig
|
||||||
|
func loadMailConfig() *MailConfig {
|
||||||
|
return &MailConfig{
|
||||||
|
MailHost: os.Getenv("MAIL_HOST"),
|
||||||
|
MailPort: GetIntEnv("MAIL_PORT"),
|
||||||
|
MailUserName: os.Getenv("MAIL_USERNAME"),
|
||||||
|
MailPassword: os.Getenv("MAIL_PASSWORD"),
|
||||||
|
MailTo: os.Getenv("MAIL_TO"),
|
||||||
|
MailFrom: os.Getenv("MAIL_FROM"),
|
||||||
|
SkipTls: os.Getenv("MAIL_SKIP_TLS") == "false",
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeFormat returns the format of the time
|
||||||
|
func TimeFormat() string {
|
||||||
|
format := os.Getenv("TIME_FORMAT")
|
||||||
|
if format == "" {
|
||||||
|
return "2006-01-02 at 15:04:05"
|
||||||
|
|
||||||
|
}
|
||||||
|
return format
|
||||||
|
}
|
||||||
|
|
||||||
|
func backupReference() string {
|
||||||
|
return os.Getenv("BACKUP_REFERENCE")
|
||||||
|
}
|
||||||
|
|
||||||
|
const templatePath = "/config/templates"
|
||||||
@@ -6,9 +6,9 @@
|
|||||||
**/
|
**/
|
||||||
package utils
|
package utils
|
||||||
|
|
||||||
const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
|
const RestoreExample = "restore --dbname database --file db_20231219_022941.sql.gz\n" +
|
||||||
"restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
|
"restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
|
||||||
const BackupExample = "mysql-bkup backup --dbname database --disable-compression\n" +
|
const BackupExample = "backup --dbname database --disable-compression\n" +
|
||||||
"backup --dbname database --storage s3 --path /custom-path --disable-compression"
|
"backup --dbname database --storage s3 --path /custom-path --disable-compression"
|
||||||
|
|
||||||
const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
|
const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
|
||||||
|
|||||||
178
utils/notification.go
Normal file
178
utils/notification.go
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"github.com/go-mail/mail"
|
||||||
|
"html/template"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func parseTemplate[T any](data T, fileName string) (string, error) {
|
||||||
|
// Open the file
|
||||||
|
tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err = tmpl.Execute(&buf, data); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func SendEmail(subject, body string) error {
|
||||||
|
Info("Start sending email notification....")
|
||||||
|
config := loadMailConfig()
|
||||||
|
emails := strings.Split(config.MailTo, ",")
|
||||||
|
m := mail.NewMessage()
|
||||||
|
m.SetHeader("From", config.MailFrom)
|
||||||
|
m.SetHeader("To", emails...)
|
||||||
|
m.SetHeader("Subject", subject)
|
||||||
|
m.SetBody("text/html", body)
|
||||||
|
d := mail.NewDialer(config.MailHost, config.MailPort, config.MailUserName, config.MailPassword)
|
||||||
|
d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}
|
||||||
|
|
||||||
|
if err := d.DialAndSend(m); err != nil {
|
||||||
|
Error("Error could not send email : %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
Info("Email notification has been sent")
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
func sendMessage(msg string) error {
|
||||||
|
|
||||||
|
Info("Sending Telegram notification... ")
|
||||||
|
chatId := os.Getenv("TG_CHAT_ID")
|
||||||
|
body, _ := json.Marshal(map[string]string{
|
||||||
|
"chat_id": chatId,
|
||||||
|
"text": msg,
|
||||||
|
})
|
||||||
|
url := fmt.Sprintf("%s/sendMessage", getTgUrl())
|
||||||
|
// Create an HTTP post request
|
||||||
|
request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
request.Header.Add("Content-Type", "application/json")
|
||||||
|
client := &http.Client{}
|
||||||
|
response, err := client.Do(request)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
code := response.StatusCode
|
||||||
|
if code == 200 {
|
||||||
|
Info("Telegram notification has been sent")
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
body, _ := ioutil.ReadAll(response.Body)
|
||||||
|
Error("Error could not send message, error: %s", string(body))
|
||||||
|
return fmt.Errorf("error could not send message %s", string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
func NotifySuccess(notificationData *NotificationData) {
|
||||||
|
notificationData.BackupReference = backupReference()
|
||||||
|
var vars = []string{
|
||||||
|
"TG_TOKEN",
|
||||||
|
"TG_CHAT_ID",
|
||||||
|
}
|
||||||
|
var mailVars = []string{
|
||||||
|
"MAIL_HOST",
|
||||||
|
"MAIL_PORT",
|
||||||
|
"MAIL_USERNAME",
|
||||||
|
"MAIL_PASSWORD",
|
||||||
|
"MAIL_FROM",
|
||||||
|
"MAIL_TO",
|
||||||
|
}
|
||||||
|
|
||||||
|
//Email notification
|
||||||
|
err := CheckEnvVars(mailVars)
|
||||||
|
if err == nil {
|
||||||
|
body, err := parseTemplate(*notificationData, "email.template")
|
||||||
|
if err != nil {
|
||||||
|
Error("Could not parse email template: %v", err)
|
||||||
|
}
|
||||||
|
err = SendEmail(fmt.Sprintf("✅ Database Backup Notification – %s", notificationData.Database), body)
|
||||||
|
if err != nil {
|
||||||
|
Error("Could not send email: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
//Telegram notification
|
||||||
|
err = CheckEnvVars(vars)
|
||||||
|
if err == nil {
|
||||||
|
message, err := parseTemplate(*notificationData, "telegram.template")
|
||||||
|
if err != nil {
|
||||||
|
Error("Could not parse telegram template: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sendMessage(message)
|
||||||
|
if err != nil {
|
||||||
|
Error("Could not send Telegram message: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func NotifyError(error string) {
|
||||||
|
var vars = []string{
|
||||||
|
"TG_TOKEN",
|
||||||
|
"TG_CHAT_ID",
|
||||||
|
}
|
||||||
|
var mailVars = []string{
|
||||||
|
"MAIL_HOST",
|
||||||
|
"MAIL_PORT",
|
||||||
|
"MAIL_USERNAME",
|
||||||
|
"MAIL_PASSWORD",
|
||||||
|
"MAIL_FROM",
|
||||||
|
"MAIL_TO",
|
||||||
|
}
|
||||||
|
|
||||||
|
//Email notification
|
||||||
|
err := CheckEnvVars(mailVars)
|
||||||
|
if err == nil {
|
||||||
|
body, err := parseTemplate(ErrorMessage{
|
||||||
|
Error: error,
|
||||||
|
EndTime: time.Now().Format(TimeFormat()),
|
||||||
|
BackupReference: os.Getenv("BACKUP_REFERENCE"),
|
||||||
|
}, "email-error.template")
|
||||||
|
if err != nil {
|
||||||
|
Error("Could not parse error template: %v", err)
|
||||||
|
}
|
||||||
|
err = SendEmail(fmt.Sprintf("🔴 Urgent: Database Backup Failure Notification"), body)
|
||||||
|
if err != nil {
|
||||||
|
Error("Could not send email: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
//Telegram notification
|
||||||
|
err = CheckEnvVars(vars)
|
||||||
|
if err == nil {
|
||||||
|
message, err := parseTemplate(ErrorMessage{
|
||||||
|
Error: error,
|
||||||
|
EndTime: time.Now().Format(TimeFormat()),
|
||||||
|
BackupReference: os.Getenv("BACKUP_REFERENCE"),
|
||||||
|
}, "telegram-error.template")
|
||||||
|
if err != nil {
|
||||||
|
Error("Could not parse error template: %v", err)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
err = sendMessage(message)
|
||||||
|
if err != nil {
|
||||||
|
Error("Could not send telegram message: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getTgUrl builds the base Telegram Bot API URL for the bot identified by
// the TG_TOKEN environment variable.
func getTgUrl() string {
	const base = "https://api.telegram.org/bot"
	token := os.Getenv("TG_TOKEN")
	return fmt.Sprintf("%s%s", base, token)
}
|
||||||
@@ -7,19 +7,17 @@
|
|||||||
package utils
|
package utils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/robfig/cron/v3"
|
"github.com/robfig/cron/v3"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"io"
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// FileExists checks if the file does exist
|
||||||
func FileExists(filename string) bool {
|
func FileExists(filename string) bool {
|
||||||
info, err := os.Stat(filename)
|
info, err := os.Stat(filename)
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
@@ -133,14 +131,11 @@ func GetEnvVariable(envName, oldEnvName string) string {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return value
|
return value
|
||||||
}
|
}
|
||||||
Warn("%s is deprecated, please use %s instead!", oldEnvName, envName)
|
Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return value
|
return value
|
||||||
}
|
}
|
||||||
func ShowHistory() {
|
|
||||||
}
|
|
||||||
|
|
||||||
// CheckEnvVars checks if all the specified environment variables are set
|
// CheckEnvVars checks if all the specified environment variables are set
|
||||||
func CheckEnvVars(vars []string) error {
|
func CheckEnvVars(vars []string) error {
|
||||||
@@ -187,71 +182,33 @@ func GetIntEnv(envName string) int {
|
|||||||
}
|
}
|
||||||
return ret
|
return ret
|
||||||
}
|
}
|
||||||
func sendMessage(msg string) {
|
func EnvWithDefault(envName string, defaultValue string) string {
|
||||||
|
value := os.Getenv(envName)
|
||||||
Info("Sending notification... ")
|
if value == "" {
|
||||||
chatId := os.Getenv("TG_CHAT_ID")
|
return defaultValue
|
||||||
body, _ := json.Marshal(map[string]string{
|
|
||||||
"chat_id": chatId,
|
|
||||||
"text": msg,
|
|
||||||
})
|
|
||||||
url := fmt.Sprintf("%s/sendMessage", getTgUrl())
|
|
||||||
// Create an HTTP post request
|
|
||||||
request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
request.Header.Add("Content-Type", "application/json")
|
|
||||||
client := &http.Client{}
|
|
||||||
response, err := client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
code := response.StatusCode
|
|
||||||
if code == 200 {
|
|
||||||
Info("Notification has been sent")
|
|
||||||
} else {
|
|
||||||
body, _ := ioutil.ReadAll(response.Body)
|
|
||||||
Error("Message not sent, error: %s", string(body))
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
func NotifySuccess(fileName string) {
|
|
||||||
var vars = []string{
|
|
||||||
"TG_TOKEN",
|
|
||||||
"TG_CHAT_ID",
|
|
||||||
}
|
|
||||||
|
|
||||||
//Telegram notification
|
|
||||||
err := CheckEnvVars(vars)
|
|
||||||
if err == nil {
|
|
||||||
message := "[✅ MySQL Backup ]\n" +
|
|
||||||
"Database has been backed up \n" +
|
|
||||||
"Backup name is " + fileName
|
|
||||||
sendMessage(message)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func NotifyError(error string) {
|
|
||||||
var vars = []string{
|
|
||||||
"TG_TOKEN",
|
|
||||||
"TG_CHAT_ID",
|
|
||||||
}
|
|
||||||
|
|
||||||
//Telegram notification
|
|
||||||
err := CheckEnvVars(vars)
|
|
||||||
if err == nil {
|
|
||||||
message := "[🔴 MySQL Backup ]\n" +
|
|
||||||
"An error occurred during database backup \n" +
|
|
||||||
"Error: " + error
|
|
||||||
sendMessage(message)
|
|
||||||
}
|
}
|
||||||
|
return value
|
||||||
}
|
}
|
||||||
|
|
||||||
func getTgUrl() string {
|
// IsValidCronExpression verify cronExpression and returns boolean
|
||||||
return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
|
|
||||||
|
|
||||||
}
|
|
||||||
func IsValidCronExpression(cronExpr string) bool {
|
func IsValidCronExpression(cronExpr string) bool {
|
||||||
|
// Parse the cron expression
|
||||||
_, err := cron.ParseStandard(cronExpr)
|
_, err := cron.ParseStandard(cronExpr)
|
||||||
return err == nil
|
return err == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CronNextTime returns cronExpression next time
|
||||||
|
func CronNextTime(cronExpr string) time.Time {
|
||||||
|
// Parse the cron expression
|
||||||
|
schedule, err := cron.ParseStandard(cronExpr)
|
||||||
|
if err != nil {
|
||||||
|
Error("Error parsing cron expression:", err)
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
// Get the current time
|
||||||
|
now := time.Now()
|
||||||
|
// Get the next scheduled time
|
||||||
|
next := schedule.Next(now)
|
||||||
|
//Info("The next scheduled time is: %v\n", next)
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user