mirror of https://github.com/jkaninda/mysql-bkup.git
synced 2025-12-06 21:49:40 +01:00

Compare commits

11 commits:

- f383f5559d
- 3725809d28
- b1598ef7d0
- e4a83b9851
- 4b2527f416
- e97fc7512a
- 7912ce46ed
- 050f5e81bc
- b39e97b77d
- cbb73ae89b
- 29a58aa26d
```diff
@@ -29,8 +29,9 @@ func init() {
 	//Backup
 	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
 	BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
-	BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
-	BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
+	BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. | Deprecated")
+	BackupCmd.PersistentFlags().StringP("period", "", "", "Schedule period time | Deprecated")
+	BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
 	BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
 	BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
 	BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
```
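The new `cron-expression` flag is read alongside the `BACKUP_CRON_EXPRESSION` environment variable (see `initBackupConfig` further down in this diff). As a rough, hypothetical sketch of how such a flag-to-env fallback can be wired with cobra — the `getEnv` helper below is illustrative only, not the repository's actual `utils.GetEnv`:

```go
// Hypothetical sketch of a flag-with-env-fallback helper in the spirit of
// utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION").
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// getEnv returns the flag value when set, otherwise falls back to the named
// environment variable, and mirrors the chosen value back into the environment.
func getEnv(cmd *cobra.Command, flagName, envName string) string {
	value, _ := cmd.Flags().GetString(flagName)
	if value == "" {
		value = os.Getenv(envName)
	}
	_ = os.Setenv(envName, value)
	return value
}

func main() {
	cmd := &cobra.Command{
		Use: "backup",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("cron expression:", getEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION"))
		},
	}
	cmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
	_ = cmd.Execute()
}
```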
```diff
@@ -36,7 +36,7 @@ ENV TARGET_DB_NAME="localhost"
 ENV TARGET_DB_USERNAME=""
 ENV TARGET_DB_PASSWORD=""
 ARG DEBIAN_FRONTEND=noninteractive
-ENV VERSION="v1.2.7"
+ENV VERSION="v1.2.8"
 ENV BACKUP_CRON_EXPRESSION=""
 ENV TG_TOKEN=""
 ENV TG_CHAT_ID=""
@@ -48,7 +48,7 @@ ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
 LABEL author="Jonas Kaninda"
 
 RUN apt-get update -qq
-RUN apt install mysql-client supervisor cron gnupg -y
+RUN apt install mysql-client cron gnupg -y
 
 # Clear cache
 RUN apt-get clean && rm -rf /var/lib/apt/lists/*
@@ -69,8 +69,6 @@ RUN chmod +x /usr/local/bin/mysql-bkup
 
 RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
 
-ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
-
 # Create backup script and make it executable
 RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
 	chmod +x /usr/local/bin/backup
```
````diff
@@ -48,7 +48,7 @@ networks:
 ### Recurring backups to S3
 
 As explained above, you need just to add AWS environment variables and specify the storage type `--storage s3`.
-In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
+In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
 
 ```yml
 services:
@@ -59,7 +59,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+    command: backup --storage s3 -d my-database --cron-expression "0 1 * * *"
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -72,6 +72,7 @@ services:
       - AWS_REGION="us-west-2"
       - AWS_ACCESS_KEY=xxxx
      - AWS_SECRET_KEY=xxxxx
+      # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
   # mysql-bkup container must be connected to the same network with your database
````
````diff
@@ -52,7 +52,7 @@ networks:
 ### Recurring backups to SSH remote server
 
 As explained above, you need just to add required environment variables and specify the storage type `--storage ssh`.
-You can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
+You can use `--cron-expression "* * * * *"` or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
 
 ```yml
 services:
@@ -63,7 +63,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command: backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
+    command: backup -d database --storage ssh --cron-expression "0 1 * * *"
     volumes:
       - ./id_ed25519:/tmp/id_ed25519"
     environment:
@@ -78,6 +78,7 @@ services:
       - SSH_USER=user
       - SSH_REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
+      # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
       ## We advise you to use a private jey instead of password
       #- SSH_PASSWORD=password
   # mysql-bkup container must be connected to the same network with your database
````
````diff
@@ -54,7 +54,7 @@ networks:
  jkaninda/mysql-bkup backup -d database_name
 ```
 
-In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
+In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
 
 ```yml
 services:
@@ -65,7 +65,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command: backup -d database --mode scheduled --period "0 1 * * *"
+    command: backup -d database --cron-expression "0 1 * * *"
     volumes:
       - ./backup:/backup
     environment:
@@ -74,6 +74,7 @@ services:
       - DB_NAME=database
       - DB_USERNAME=username
       - DB_PASSWORD=password
+      - BACKUP_CRON_EXPRESSION=0 1 * * *
      # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
````
```diff
@@ -9,8 +9,11 @@ nav_order: 7
 The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a sql.gpg file instead or sql.gz.gpg.
 
 {: .warning }
-To restore an encrypted backup, you need to provide the same GPG passphrase used during backup process.
+To restore an encrypted backup, you need to provide the same GPG passphrase or key used during backup process.
 
+- GPG home directory `/config/gnupg`
+- Cipher algorithm `aes256`
+-
 To decrypt manually, you need to install `gnupg`
 
 ### Decrypt backup
```
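The changed docs above note the GPG defaults (home directory `/config/gnupg`, cipher `aes256`, passphrase from `GPG_PASSPHRASE`). For manual decryption outside the container, plain `gpg` is enough; below is a minimal sketch that shells out to it from Go, using standard GnuPG 2.x flags — the file names are placeholders, and the image's internal invocation is not shown in this diff:

```go
// A minimal sketch of manual decryption by shelling out to gpg (symmetric
// passphrase case). File names below are hypothetical examples.
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	passphrase := os.Getenv("GPG_PASSPHRASE")
	// --batch avoids interactive prompts; on GnuPG >= 2.1,
	// --pinentry-mode loopback lets --passphrase work non-interactively.
	cmd := exec.Command("gpg",
		"--batch", "--yes",
		"--pinentry-mode", "loopback",
		"--passphrase", passphrase,
		"--output", "database_20240101_000000.sql.gz",
		"--decrypt", "database_20240101_000000.sql.gz.gpg",
	)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("gpg decryption failed: %v", err)
	}
}
```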
````diff
@@ -25,50 +25,49 @@ Backup, restore and migrate targets, schedule and retention are configured using
 | --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
 | --dbname | -d | Database name |
 | --port | -p | Database port (default: 3306) |
-| --mode | -m | Execution mode. default or scheduled (default: default) |
 | --disable-compression | | Disable database backup compression |
 | --prune | | Delete old backup, default disabled |
 | --keep-last | | Delete old backup created more than specified days ago, default 7 days |
-| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
+| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily |
 | --help | -h | Print this help message and exit |
 | --version | -V | Print version information and exit |
 
 ## Environment variables
 
 | Name | Requirement | Description |
-|------------------------|----------------------------------------------------|------------------------------------------------------|
+|------------------------|--------------------------------------------------------------|------------------------------------------------------|
 | DB_PORT | Optional, default 3306 | Database port number |
 | DB_HOST | Required | Database host |
 | DB_NAME | Optional if it was provided from the -d flag | Database name |
 | DB_USERNAME | Required | Database user name |
 | DB_PASSWORD | Required | Database password |
 | AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
 | AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
 | AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
 | AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
 | AWS_REGION | Optional, required for S3 storage | AWS Region |
 | AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
 | FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
-| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
+| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --cron-expression flag | Backup cron expression for docker in scheduled mode |
 | GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
 | SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
 | SSH_USER | Optional, required for SSH storage | ssh remote user |
 | SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
 | SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
 | SSH_PORT | Optional, required for SSH storage | ssh remote server port |
 | SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
 | TARGET_DB_HOST | Optional, required for database migration | Target database host |
 | TARGET_DB_PORT | Optional, required for database migration | Target database port |
 | TARGET_DB_NAME | Optional, required for database migration | Target database name |
 | TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
 | TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
 | TG_TOKEN | Optional, required for Telegram notification | Telegram token |
 | TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
 ---
 ## Run in Scheduled mode
 
 This image can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources.
-For Docker, you need to run it in scheduled mode by adding `--mode scheduled` flag and specify the periodical backup time by adding `--period "0 1 * * *"` flag.
+For Docker, you need to run it in scheduled mode by adding `--cron-expression "* * * * *"` flag or by defining `BACKUP_CRON_EXPRESSION=0 1 * * *` environment variable.
 
 ## Syntax of crontab (field description)
 
@@ -110,4 +109,22 @@ Easy to remember format:
 
 ```conf
 0 1 * * *
 ```
+## Predefined schedules
+You may use one of several pre-defined schedules in place of a cron expression.
+
+| Entry | Description | Equivalent To |
+|------------------------|--------------------------------------------|---------------|
+| @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * |
+| @monthly | Run once a month, midnight, first of month | 0 0 1 * * |
+| @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 |
+| @daily (or @midnight) | Run once a day, midnight | 0 0 * * * |
+| @hourly | Run once an hour, beginning of hour | 0 * * * * |
+
+### Intervals
+You may also schedule a job to execute at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this:
+
+@every <duration>
+where "duration" is a string accepted by time.
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that.
````
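The predefined descriptors and `@every` intervals documented above come straight from `robfig/cron` (added to `go.mod` in this change). A small, self-contained check of how those specs parse — the schedule strings below are examples only:

```go
// Sketch: robfig/cron's standard parser accepts plain 5-field expressions,
// the @daily-style descriptors, and @every intervals.
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	for _, spec := range []string{"0 1 * * *", "@daily", "@every 1h30m10s"} {
		schedule, err := cron.ParseStandard(spec)
		if err != nil {
			fmt.Println(spec, "is invalid:", err)
			continue
		}
		// Print the next two activation times for each schedule.
		next := schedule.Next(time.Now())
		fmt.Printf("%-16s next run: %s, then: %s\n", spec, next, schedule.Next(next))
	}
}
```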
go.mod (1 line changed)

```diff
@@ -15,6 +15,7 @@ require (
 require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/robfig/cron/v3 v3.0.1 // indirect
 	golang.org/x/sys v0.22.0 // indirect
 	gopkg.in/fsnotify.v1 v1.4.7 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
```
go.sum (2 lines changed)

```diff
@@ -15,6 +15,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
 github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
```
pkg/backup.go (126 lines changed)

```diff
@@ -8,8 +8,8 @@ package pkg
 
 import (
 	"fmt"
-	"github.com/hpcloud/tail"
 	"github.com/jkaninda/mysql-bkup/utils"
+	"github.com/robfig/cron/v3"
 	"github.com/spf13/cobra"
 	"log"
 	"os"
@@ -20,105 +20,67 @@ import (
 
 func StartBackup(cmd *cobra.Command) {
 	intro()
-	//Set env
-	utils.SetEnv("STORAGE_PATH", storagePath)
-	utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
+	dbConf = initDbConfig(cmd)
+	//Initialize backup configs
+	config := initBackupConfig(cmd)
 
-	//Get flag value and set env
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
-	storage = utils.GetEnv(cmd, "storage", "STORAGE")
-	file = utils.GetEnv(cmd, "file", "FILE_NAME")
-	backupRetention, _ := cmd.Flags().GetInt("keep-last")
-	prune, _ := cmd.Flags().GetBool("prune")
-	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
-	executionMode, _ = cmd.Flags().GetString("mode")
-	gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
-	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
-
-	dbConf = getDbConfig(cmd)
-
-	//
-	if gpqPassphrase != "" {
-		encryption = true
-	}
-
-	//Generate file name
-	backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
-	if disableCompression {
-		backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
-	}
-
-	if executionMode == "default" {
-		switch storage {
-		case "s3":
-			s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
-		case "local":
-			localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
-		case "ssh", "remote":
-			sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
-		case "ftp":
-			utils.Fatal("Not supported storage type: %s", storage)
-		default:
-			localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
-		}
-
-	} else if executionMode == "scheduled" {
-		scheduledMode(dbConf, storage)
+	if config.cronExpression == "" {
+		BackupTask(dbConf, config)
 	} else {
-		utils.Fatal("Error, unknown execution mode!")
+		if utils.IsValidCronExpression(config.cronExpression) {
+			scheduledMode(dbConf, config)
+		} else {
+			utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
+		}
 	}
 
 }
 
 // Run in scheduled mode
-func scheduledMode(db *dbConfig, storage string) {
+func scheduledMode(db *dbConfig, config *BackupConfig) {
 
-	fmt.Println()
-	fmt.Println("**********************************")
-	fmt.Println("	Starting MySQL Bkup...	")
-	fmt.Println("***********************************")
 	utils.Info("Running in Scheduled mode")
-	utils.Info("Execution period %s", os.Getenv("BACKUP_CRON_EXPRESSION"))
-	utils.Info("Storage type %s ", storage)
+	utils.Info("Backup cron expression: %s", config.cronExpression)
+	utils.Info("Storage type %s ", config.storage)
 
 	//Test database connexion
 	testDatabaseConnection(db)
 
 	utils.Info("Creating backup job...")
-	CreateCrontabScript(disableCompression, storage)
-
-	supervisorConfig := "/etc/supervisor/supervisord.conf"
-
-	// Start Supervisor
-	cmd := exec.Command("supervisord", "-c", supervisorConfig)
-	err := cmd.Start()
+	// Create a new cron instance
+	c := cron.New()
+
+	_, err := c.AddFunc(config.cronExpression, func() {
+		BackupTask(db, config)
+	})
 	if err != nil {
-		utils.Fatal(fmt.Sprintf("Failed to start supervisord: %v", err))
+		return
 	}
+	// Start the cron scheduler
+	c.Start()
+	utils.Info("Creating backup job...done")
 	utils.Info("Backup job started")
-	defer func() {
-		if err := cmd.Process.Kill(); err != nil {
-			utils.Info("Failed to kill supervisord process: %v", err)
-		} else {
-			utils.Info("Supervisor stopped.")
-		}
-	}()
-	if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
-		utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
-	}
-	t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
-	if err != nil {
-		utils.Fatal("Failed to tail file: %v", err)
-	}
-
-	// Read and print new lines from the log file
-	for line := range t.Lines {
-		fmt.Println(line.Text)
-	}
+	defer c.Stop()
+	select {}
 }
-func intro() {
-	utils.Info("Starting MySQL Backup...")
-	utils.Info("Copyright © 2024 Jonas Kaninda ")
+func BackupTask(db *dbConfig, config *BackupConfig) {
+	//Generate backup file name
+	backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20240102_150405"))
+	if config.disableCompression {
+		backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20240102_150405"))
+	}
+	config.backupFileName = backupFileName
+	switch config.storage {
+	case "s3":
+		s3Backup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
+	case "local":
+		localBackup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
+	case "ssh", "remote":
+		sshBackup(db, config.backupFileName, config.remotePath, config.disableCompression, config.prune, config.backupRetention, config.encryption)
+	case "ftp":
+		utils.Fatal("Not supported storage type: %s", config.storage)
+	default:
+		localBackup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
+	}
 }
 
 // BackupDatabase backup database
```
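The rewritten `scheduledMode` above swaps the supervisord/crontab machinery for an in-process `robfig/cron` scheduler. A stripped-down, self-contained version of the same pattern, with a placeholder in place of `BackupTask`:

```go
package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()
	// AddFunc registers the job and returns an error for an invalid
	// expression, which is why the caller validates with
	// IsValidCronExpression before entering scheduled mode.
	_, err := c.AddFunc("0 1 * * *", func() {
		log.Println("backup task would run here") // placeholder for BackupTask
	})
	if err != nil {
		log.Fatalf("invalid cron expression: %v", err)
	}
	c.Start()
	defer c.Stop()
	// Block forever so the scheduler keeps firing; the diff uses the same
	// `select {}` idiom (note it never returns, so the deferred Stop never runs).
	select {}
}
```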
```diff
@@ -30,7 +30,27 @@ type targetDbConfig struct {
 	targetDbName string
 }
 
-func getDbConfig(cmd *cobra.Command) *dbConfig {
+type BackupConfig struct {
+	backupFileName     string
+	backupRetention    int
+	disableCompression bool
+	prune              bool
+	encryption         bool
+	remotePath         string
+	gpqPassphrase      string
+	storage            string
+	cronExpression     string
+}
+type RestoreConfig struct {
+	s3Path        string
+	remotePath    string
+	storage       string
+	file          string
+	bucket        string
+	gpqPassphrase string
+}
+
+func initDbConfig(cmd *cobra.Command) *dbConfig {
 	//Set env
 	utils.GetEnv(cmd, "dbname", "DB_NAME")
 	dConf := dbConfig{}
@@ -47,7 +67,60 @@ func getDbConfig(cmd *cobra.Command) *dbConfig {
 	}
 	return &dConf
 }
-func getTargetDbConfig() *targetDbConfig {
+func initBackupConfig(cmd *cobra.Command) *BackupConfig {
+	utils.SetEnv("STORAGE_PATH", storagePath)
+	utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
+	utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
+
+	//Get flag value and set env
+	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
+	storage = utils.GetEnv(cmd, "storage", "STORAGE")
+	backupRetention, _ := cmd.Flags().GetInt("keep-last")
+	prune, _ := cmd.Flags().GetBool("prune")
+	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
+	_, _ = cmd.Flags().GetString("mode")
+	gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
+	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
+
+	if gpqPassphrase != "" {
+		encryption = true
+	}
+	//Initialize backup configs
+	config := BackupConfig{}
+	config.backupRetention = backupRetention
+	config.disableCompression = disableCompression
+	config.prune = prune
+	config.storage = storage
+	config.encryption = encryption
+	config.remotePath = remotePath
+	config.gpqPassphrase = gpqPassphrase
+	config.cronExpression = cronExpression
+	return &config
+}
+func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
+	utils.SetEnv("STORAGE_PATH", storagePath)
+
+	//Get flag value and set env
+	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
+	storage = utils.GetEnv(cmd, "storage", "STORAGE")
+	file = utils.GetEnv(cmd, "file", "FILE_NAME")
+	_, _ = cmd.Flags().GetString("mode")
+	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
+	//Initialize restore configs
+	rConfig := RestoreConfig{}
+	rConfig.s3Path = s3Path
+	rConfig.remotePath = remotePath
+	rConfig.storage = storage
+	rConfig.bucket = bucket
+	rConfig.file = file
+	rConfig.storage = storage
+	rConfig.gpqPassphrase = gpqPassphrase
+	return &rConfig
+}
+func initTargetDbConfig() *targetDbConfig {
 	tdbConfig := targetDbConfig{}
 	tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
 	tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
```
```diff
@@ -16,7 +16,7 @@ import (
 func Decrypt(inputFile string, passphrase string) error {
 	utils.Info("Decrypting backup file: " + inputFile + " ...")
 	//Create gpg home dir
-	err := utils.MakeDir(gpgHome)
+	err := utils.MakeDirAll(gpgHome)
 	if err != nil {
 		return err
 	}
@@ -37,7 +37,7 @@ func Decrypt(inputFile string, passphrase string) error {
 func Encrypt(inputFile string, passphrase string) error {
 	utils.Info("Encrypting backup...")
 	//Create gpg home dir
-	err := utils.MakeDir(gpgHome)
+	err := utils.MakeDirAll(gpgHome)
 	if err != nil {
 		return err
 	}
```
```diff
@@ -125,3 +125,7 @@ func testDatabaseConnection(db *dbConfig) {
 	utils.Info("Successfully connected to %s database", db.dbName)
 
 }
+func intro() {
+	utils.Info("Starting MySQL Backup...")
+	utils.Info("Copyright © 2024 Jonas Kaninda ")
+}
```
```diff
@@ -17,8 +17,8 @@ func StartMigration(cmd *cobra.Command) {
 	intro()
 	utils.Info("Starting database migration...")
 	//Get DB config
-	dbConf = getDbConfig(cmd)
-	targetDbConf = getTargetDbConfig()
+	dbConf = initDbConfig(cmd)
+	targetDbConf = initTargetDbConfig()
 
 	//Defining the target database variables
 	newDbConfig := dbConfig{}
```
```diff
@@ -17,33 +17,24 @@ import (
 
 func StartRestore(cmd *cobra.Command) {
 	intro()
-	//Set env
-	utils.SetEnv("STORAGE_PATH", storagePath)
+	dbConf = initDbConfig(cmd)
+	restoreConf := initRestoreConfig(cmd)
 
-	//Get flag value and set env
-	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
-	storage = utils.GetEnv(cmd, "storage", "STORAGE")
-	file = utils.GetEnv(cmd, "file", "FILE_NAME")
-	executionMode, _ = cmd.Flags().GetString("mode")
-	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
-	dbConf = getDbConfig(cmd)
-
-	switch storage {
+	switch restoreConf.storage {
 	case "s3":
-		restoreFromS3(dbConf, file, bucket, s3Path)
+		restoreFromS3(dbConf, restoreConf.file, restoreConf.bucket, restoreConf.s3Path)
 	case "local":
 		utils.Info("Restore database from local")
-		copyToTmp(storagePath, file)
-		RestoreDatabase(dbConf, file)
+		copyToTmp(storagePath, restoreConf.file)
+		RestoreDatabase(dbConf, restoreConf.file)
 	case "ssh":
-		restoreFromRemote(dbConf, file, remotePath)
+		restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath)
 	case "ftp":
 		utils.Fatal("Restore from FTP is not yet supported")
 	default:
 		utils.Info("Restore database from local")
-		copyToTmp(storagePath, file)
-		RestoreDatabase(dbConf, file)
+		copyToTmp(storagePath, restoreConf.file)
+		RestoreDatabase(dbConf, restoreConf.file)
 	}
 }
 
```
```diff
@@ -1,71 +0,0 @@
-// Package pkg /
-/*****
-@author Jonas Kaninda
-@license MIT License <https://opensource.org/licenses/MIT>
-@Copyright © 2024 Jonas Kaninda
-**/
-package pkg
-
-import (
-	"fmt"
-	"github.com/jkaninda/mysql-bkup/utils"
-	"os"
-	"os/exec"
-)
-
-func CreateCrontabScript(disableCompression bool, storage string) {
-	//task := "/usr/local/bin/backup_cron.sh"
-	touchCmd := exec.Command("touch", backupCronFile)
-	if err := touchCmd.Run(); err != nil {
-		utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
-	}
-	var disableC = ""
-	if disableCompression {
-		disableC = "--disable-compression"
-	}
-
-	scriptContent := fmt.Sprintf(`#!/usr/bin/env bash
-set -e
-/usr/local/bin/mysql-bkup backup --dbname %s --storage %s %v
-`, os.Getenv("DB_NAME"), storage, disableC)
-
-	if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
-		utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
-	}
-
-	chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
-	if err := chmodCmd.Run(); err != nil {
-		utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
-	}
-
-	lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
-	if err := lnCmd.Run(); err != nil {
-		utils.Fatal("Error creating symbolic link: %v\n", err)
-	}
-
-	touchLogCmd := exec.Command("touch", cronLogFile)
-	if err := touchLogCmd.Run(); err != nil {
-		utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
-	}
-
-	cronJob := "/etc/cron.d/backup_cron"
-	touchCronCmd := exec.Command("touch", cronJob)
-	if err := touchCronCmd.Run(); err != nil {
-		utils.Fatal("Error creating file %s: %v\n", cronJob, err)
-	}
-
-	cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
-`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)
-
-	if err := utils.WriteToFile(cronJob, cronContent); err != nil {
-		utils.Fatal("Error writing to %s: %v\n", cronJob, err)
-	}
-	utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
-
-	crontabCmd := exec.Command("crontab", "/etc/cron.d/backup_cron")
-	if err := crontabCmd.Run(); err != nil {
-		utils.Fatal("Error updating crontab: ", err)
-	}
-	utils.Info("Backup job created.")
-}
```
```diff
@@ -10,13 +10,12 @@ const cronLogFile = "/var/log/mysql-bkup.log"
 const tmpPath = "/tmp/backup"
 const backupCronFile = "/usr/local/bin/backup_cron.sh"
 const algorithm = "aes256"
-const gpgHome = "gnupg"
+const gpgHome = "/config/gnupg"
 const gpgExtension = "gpg"
 
 var (
 	storage = "local"
 	file    = ""
-	executionMode = "default"
 	storagePath = "/backup"
 	disableCompression = false
 	encryption = false
```
```diff
@@ -7,10 +7,10 @@
 package utils
 
 const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
-	"bkup restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
+	"restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
 const BackupExample = "mysql-bkup backup --dbname database --disable-compression\n" +
-	"mysql-bkup backup --dbname database --storage s3 --path /custom-path --disable-compression"
+	"backup --dbname database --storage s3 --path /custom-path --disable-compression"
 
 const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
-	"mysql-bkup backup --dbname database --storage s3 --path /custom-path\n" +
-	"mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz"
+	"backup --dbname database --storage s3 --path /custom-path\n" +
+	"restore --dbname database --file db_20231219_022941.sql.gz"
```
```diff
@@ -10,6 +10,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"github.com/robfig/cron/v3"
 	"github.com/spf13/cobra"
 	"io"
 	"io/fs"
@@ -224,7 +225,7 @@ func NotifySuccess(fileName string) {
 	//Telegram notification
 	err := CheckEnvVars(vars)
 	if err == nil {
-		message := "MySQL Backup \n" +
+		message := "[✅ MySQL Backup ]\n" +
 			"Database has been backed up \n" +
 			"Backup name is " + fileName
 		sendMessage(message)
@@ -239,7 +240,7 @@ func NotifyError(error string) {
 	//Telegram notification
 	err := CheckEnvVars(vars)
 	if err == nil {
-		message := "MySQL Backup \n" +
+		message := "[🔴 MySQL Backup ]\n" +
 			"An error occurred during database backup \n" +
 			"Error: " + error
 		sendMessage(message)
@@ -250,3 +251,7 @@ func getTgUrl() string {
 	return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
 
 }
+func IsValidCronExpression(cronExpr string) bool {
+	_, err := cron.ParseStandard(cronExpr)
+	return err == nil
+}
```
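`sendMessage` itself is not part of this diff; for orientation, a hypothetical sketch of a Telegram notification helper built on the same `api.telegram.org/bot<TG_TOKEN>` base URL that `getTgUrl` constructs. The `chat_id` and `text` fields are Telegram Bot API parameters; everything else here is an assumption:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
)

// sendMessage posts a message to the Telegram Bot API sendMessage endpoint,
// using the TG_TOKEN and TG_CHAT_ID environment variables from the docs above.
func sendMessage(message string) error {
	url := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", os.Getenv("TG_TOKEN"))
	body, err := json.Marshal(map[string]string{
		"chat_id": os.Getenv("TG_CHAT_ID"),
		"text":    message,
	})
	if err != nil {
		return err
	}
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("telegram API returned status %s", resp.Status)
	}
	return nil
}

func main() {
	if err := sendMessage("[✅ MySQL Backup ]\nDatabase has been backed up"); err != nil {
		log.Fatal(err)
	}
}
```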