Compare commits


16 Commits

Author SHA1 Message Date
Jonas Kaninda
f3c5585051 Merge pull request #74 from jkaninda/docs
Docs
2024-08-30 21:24:50 +02:00
Jonas Kaninda
7163d030a5 chore: remove dbport from command flag 2024-08-30 21:22:18 +02:00
Jonas Kaninda
a2cec86e73 chore: remove dbport from command flag 2024-08-30 21:21:21 +02:00
Jonas Kaninda
662b73579d feat: add migrate database from a source to a target database
fix: gpg encrypt permission warning message, update Kubernetes deployment example
2024-08-30 19:58:12 +02:00
c9f8a32de1 Merge pull request #73 from jkaninda/docs
docs: update Kubernetes deployment
2024-08-28 20:35:31 +02:00
8fb008151c docs: update Kubernetes deployment 2024-08-28 20:35:01 +02:00
113c84c885 Merge pull request #72 from jkaninda/docs
docs: update readme
2024-08-21 03:53:15 +02:00
58deb92953 docs: update readme 2024-08-21 03:52:49 +02:00
c41afb8b57 Merge pull request #71 from jkaninda/docs
docs: update readme
2024-08-21 03:51:25 +02:00
02e51a3933 docs: update readme 2024-08-21 03:50:59 +02:00
db4061b64b Merge pull request #70 from jkaninda/docs
docs: update readme
2024-08-21 03:49:58 +02:00
9467b157aa docs: update readme 2024-08-21 03:49:15 +02:00
c229ebdc9d Merge pull request #69 from jkaninda/docs
docs: fix grammar
2024-08-20 19:21:24 +02:00
7b701d1740 docs: fix grammar 2024-08-20 19:20:54 +02:00
ad6f190bad Merge pull request #68 from jkaninda/docs
docs: update readme
2024-08-15 06:06:26 +02:00
de4dcaaeca docs: update readme 2024-08-15 06:05:39 +02:00
22 changed files with 517 additions and 242 deletions

.gitignore

@@ -8,4 +8,5 @@ test.md
mysql-bkup
/.DS_Store
/.idea
bin
bin
Makefile


@@ -1,48 +0,0 @@
BINARY_NAME=mysql-bkup
IMAGE_NAME=jkaninda/mysql-bkup
include .env
export
run:
go run . backup
build:
go build -o bin/${BINARY_NAME} .
compile:
GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .
docker-build:
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
docker-run: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --prune --keep-last 2
docker-restore: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore -f ${FILE_NAME}
docker-run-scheduled: #docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --mode scheduled --period "* * * * *"
docker-run-scheduled-s3: docker-build
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
docker-run-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --path /custom-path
docker-restore-s3: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore --storage s3 -f ${FILE_NAME} --path /custom-path
docker-run-ssh: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage ssh
docker-restore-ssh: docker-build
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" ${IMAGE_NAME} restore --storage ssh -f ${FILE_NAME}
run-docs:
cd docs && bundle exec jekyll serve -H 0.0.0.0 -t

README.md

@@ -12,9 +12,11 @@ It also supports __encrypting__ your backups using GPG.
![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square)
![Docker Pulls](https://img.shields.io/docker/pulls/jkaninda/mysql-bkup?style=flat-square)
Successfully tested on:
- Docker
- Docker Swarm
- Docker in Swarm mode
- Kubernetes
- OpenShift
## Documentation is found at <https://jkaninda.github.io/mysql-bkup>
@@ -31,7 +33,7 @@ It also supports __encrypting__ your backups using GPG.
## Storage:
- Local
- AWS S3 or any S3 Alternatives for Object Storage
- SSH
- SSH remote server
## Quickstart
@@ -72,8 +74,8 @@ services:
volumes:
- ./backup:/backup
environment:
- DB_PORT=5432
- DB_HOST=postgres
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
@@ -93,36 +95,41 @@ For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as
apiVersion: batch/v1
kind: Job
metadata:
name: backup
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
- name: pg-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- bkup
- backup
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
value: "mysql"
- name: DB_USERNAME
value: "username"
# Please use secret!
value: "user"
- name: DB_PASSWORD
value: ""
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries

cmd/backup.go

@@ -21,6 +21,8 @@ var BackupCmd = &cobra.Command{
func init() {
//Backup
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")

cmd/migrate.go (new file)

@@ -0,0 +1,21 @@
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)
var MigrateCmd = &cobra.Command{
Use: "migrate",
Short: "Migrate database from a source database to a target database",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
pkg.StartMigration(cmd)
} else {
utils.Fatal("Error, no argument required")
}
},
}

cmd/restore.go

@@ -24,5 +24,7 @@ var RestoreCmd = &cobra.Command{
func init() {
//Restore
RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
}

cmd/root.go

@@ -30,13 +30,11 @@ func Execute() {
}
func init() {
rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
rootCmd.AddCommand(VersionCmd)
rootCmd.AddCommand(BackupCmd)
rootCmd.AddCommand(RestoreCmd)
rootCmd.AddCommand(MigrateCmd)
}

docker/Dockerfile

@@ -14,7 +14,7 @@ ENV DB_HOST=""
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT="3306"
ENV DB_PORT=3306
ENV STORAGE=local
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
@@ -30,11 +30,15 @@ ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ENV SOURCE_DB_HOST=""
ENV SOURCE_DB_PORT=3306
ENV SOURCE_DB_NAME=""
ENV SOURCE_DB_USERNAME=""
ENV SOURCE_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.2"
ENV VERSION="v1.2.3"
ENV BACKUP_CRON_EXPRESSION=""
ENV GNUPGHOME="/tmp/gnupg"
ARG WORKDIR="/app"
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
@@ -49,16 +53,14 @@ RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR && \
mkdir -p $GNUPGHOME
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
touch $BACKUP_CRON_SCRIPT && \
chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT && \
chmod 777 $GNUPGHOME
chmod 777 $BACKUP_CRON_SCRIPT
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup
@@ -67,19 +69,15 @@ RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
WORKDIR $WORKDIR
# Create backup shell script
COPY <<EOF /usr/local/bin/backup
#!/bin/sh
# shellcheck disable=SC2068
/usr/local/bin/mysql-bkup backup $@
EOF
# Create restore shell script
COPY <<EOF /usr/local/bin/restore
#!/bin/sh
# shellcheck disable=SC2068
/usr/local/bin/mysql-bkup restore $@
EOF
RUN chmod +x /usr/local/bin/backup && \
# Create backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
chmod +x /usr/local/bin/migrate
WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
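
Each of these generated wrappers is a two-line shell script. Assuming the build shell's `echo` interprets `\n` (as dash, Debian's `/bin/sh`, does), `/usr/local/bin/migrate` ends up containing:

```shell
#!/bin/sh
/usr/local/bin/mysql-bkup migrate "$@"
```

The `backup` and `restore` wrappers are identical apart from the subcommand.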

docs/_config.yml

@@ -13,10 +13,11 @@
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
# You can create any custom variable you would like, and they will be accessible
# in the templates via {{ site.myvariable }}.
title: MySQL database backup
title: MySQL Backup Docker container image
email: hi@jonaskaninda.com
description: >- # this means to ignore newlines until "baseurl:"
MySQL Backup and Restore Docker container image. Backup database to AWS S3 storage or SSH remote server.
MySQL Backup is a Docker container image that can be used to back up and restore MySQL databases.
It supports local storage, AWS S3 (or any S3-compatible object storage), and SSH-compatible storage.
baseurl: "" # the subpath of your site, e.g. /blog
url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com


@@ -28,6 +28,8 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
@@ -82,6 +84,8 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
@@ -138,6 +142,8 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- restore
- --storage
@@ -192,6 +198,8 @@ spec:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
@@ -233,7 +241,7 @@ spec:
This image also supports the Kubernetes security context, so you can run it in a rootless environment.
It has been tested on OpenShift and works well.
Deployment on Openshift is supported, you need to remove `securityContext` section on your yaml file.
Deployment on OpenShift is supported; you need to remove the `securityContext` section from your YAML file.
```yaml
apiVersion: batch/v1
@@ -258,6 +266,8 @@ spec:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage

docs/how-tos/migrate.md (new file)

@@ -0,0 +1,123 @@
---
title: Migrate database
layout: default
parent: How Tos
nav_order: 9
---
# Migrate database
To migrate a database, use the `migrate` command.
{: .note }
MySQL backup has another great feature: migrating your database from a source database to a target database.
Normally, moving a database from a source to a target takes two operations: first back up the source database, then restore that backup into the target database.
Instead, you can use the integrated `migrate` command, which performs the whole migration in a single operation.
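For comparison, the manual two-step flow would look roughly like the sketch below (the network, credentials, and dump file name are illustrative):

```shell
# Step 1: back up the source database into ./backup
docker run --rm --network web -v "$PWD/backup:/backup" \
  -e "DB_HOST=mysql-source" -e "DB_NAME=sourcedb" \
  -e "DB_USERNAME=jonas" -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup

# Step 2: restore that dump into the target database
docker run --rm --network web -v "$PWD/backup:/backup" \
  -e "DB_HOST=mysql-target" -e "DB_NAME=targetdb" \
  -e "DB_USERNAME=jonas" -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup restore -f sourcedb_20240830_195800.sql.gz
```

The `migrate` command replaces both steps.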
### Docker compose
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: migrate
volumes:
- ./backup:/backup
environment:
## Target database
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Source database
- SOURCE_DB_HOST=mysql2
- SOURCE_DB_PORT=3306
- SOURCE_DB_NAME=sourcedb
- SOURCE_DB_USERNAME=jonas
- SOURCE_DB_PASSWORD=password
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
### Migrate database using Docker CLI
```
## Target database
DB_PORT=3306
DB_HOST=mysql
DB_NAME=targetdb
DB_USERNAME=targetuser
DB_PASSWORD=password
## Source database
SOURCE_DB_HOST=mysql2
SOURCE_DB_PORT=3306
SOURCE_DB_NAME=sourcedb
SOURCE_DB_USERNAME=sourceuser
SOURCE_DB_PASSWORD=password
```
```shell
docker run --rm --network your_network_name \
--env-file your-env \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup migrate -d database_name
```
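Here `your-env` is a file containing the variables listed above, and the `-d` flag overrides `DB_NAME` for the target database.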
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: migrate-db
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- migrate -d targetdb
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
## Target DB
- name: DB_HOST
value: "postgres-target"
- name: DB_USERNAME
value: "mysql"
- name: DB_PASSWORD
value: "password"
## Source DB
- name: SOURCE_DB_HOST
value: "postgres-source"
- name: SOURCE_DB_NAME
value: "sourcedb"
- name: SOURCE_DB_USERNAME
value: "postgres"
# Please use secret!
- name: SOURCE_DB_PASSWORD
value: "password"
restartPolicy: Never
```


@@ -68,7 +68,7 @@ services:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=postgres
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
@@ -78,6 +78,49 @@ services:
networks:
web:
```
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries


@@ -6,7 +6,7 @@ nav_order: 2
# Configuration reference
Backup and restore targets, schedule and retention are configured using environment variables or flags.
Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
@@ -19,6 +19,7 @@ Backup and restore targets, schedule and retention are configured using environm
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| migrate | | Migrate database from one instance to another |
| --storage | -s | Storage. local or s3 (default: local) |
| --file | -f | File name for restoration |
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
@@ -34,27 +35,33 @@ Backup and restore targets, schedule and retention are configured using environm
## Environment variables
| Name | Requirement | Description |
|-------------------|--------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| Gmysql_PASSPHRASE | Optional, required to encrypt and restore backup | Gmysql passphrase |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| Name | Requirement | Description |
|------------------------|----------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| SOURCE_DB_HOST | Optional, required for database migration | Source database host |
| SOURCE_DB_PORT | Optional, required for database migration | Source database port |
| SOURCE_DB_NAME | Optional, required for database migration | Source database name |
| SOURCE_DB_USERNAME | Optional, required for database migration | Source database username |
| SOURCE_DB_PASSWORD | Optional, required for database migration | Source database password |
---
## Run in Scheduled mode


@@ -1,44 +1,50 @@
apiVersion: batch/v1
kind: CronJob
kind: Job
metadata:
name: bkup-job
name: backup
spec:
schedule: "0 1 * * *"
jobTemplate:
template:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: ACCESS_KEY
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: OnFailure
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never

pkg/backup.go

@@ -20,8 +20,6 @@ func StartBackup(cmd *cobra.Command) {
_, _ = cmd.Flags().GetString("operation")
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
utils.GetEnv(cmd, "port", "DB_PORT")
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
//Get flag value and set env
@@ -32,37 +30,38 @@ func StartBackup(cmd *cobra.Command) {
prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
executionMode, _ = cmd.Flags().GetString("mode")
dbName = os.Getenv("DB_NAME")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
dbConf = getDbConfig(cmd)
//
if gpqPassphrase != "" {
encryption = true
}
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
if disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
}
if executionMode == "default" {
switch storage {
case "s3":
s3Backup(backupFileName, disableCompression, prune, backupRetention, encryption)
s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "local":
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "ssh", "remote":
sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
case "ftp":
utils.Fatal("Not supported storage type: %s", storage)
default:
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
}
} else if executionMode == "scheduled" {
scheduledMode(storage)
scheduledMode(dbConf, storage)
} else {
utils.Fatal("Error, unknown execution mode!")
}
@@ -70,7 +69,7 @@ func StartBackup(cmd *cobra.Command) {
}
// Run in scheduled mode
func scheduledMode(storage string) {
func scheduledMode(db *dbConfig, storage string) {
fmt.Println()
fmt.Println("**********************************")
@@ -81,7 +80,7 @@ func scheduledMode(storage string) {
utils.Info("Storage type %s ", storage)
//Test database connexion
utils.TestDatabaseConnection()
testDatabaseConnection(db)
utils.Info("Creating backup job...")
CreateCrontabScript(disableCompression, storage)
@@ -117,12 +116,7 @@ func scheduledMode(storage string) {
}
// BackupDatabase backup database
func BackupDatabase(backupFileName string, disableCompression bool) {
dbHost = os.Getenv("DB_HOST")
dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
storagePath = os.Getenv("STORAGE_PATH")
err := utils.CheckEnvVars(dbHVars)
@@ -132,7 +126,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
}
utils.Info("Starting database backup...")
utils.TestDatabaseConnection()
testDatabaseConnection(db)
// Backup Database database
utils.Info("Backing up database...")
@@ -140,11 +134,11 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
if disableCompression {
// Execute mysqldump
cmd := exec.Command("mysqldump",
"-h", dbHost,
"-P", dbPort,
"-u", dbUserName,
"--password="+dbPassword,
dbName,
"-h", db.dbHost,
"-P", db.dbPort,
"-u", db.dbUserName,
"--password="+db.dbPassword,
db.dbName,
)
output, err := cmd.Output()
if err != nil {
@@ -166,7 +160,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
} else {
// Execute mysqldump
cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName)
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
@@ -189,9 +183,9 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
}
}
func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to local storage")
BackupDatabase(backupFileName, disableCompression)
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
@@ -207,12 +201,12 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
deleteTemp()
}
func s3Backup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
utils.Info("Backup database to s3 storage")
//Backup database
BackupDatabase(backupFileName, disableCompression)
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
@@ -243,10 +237,10 @@ func s3Backup(backupFileName string, disableCompression bool, prune bool, backup
//Delete temp
deleteTemp()
}
func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to Remote server")
//Backup database
BackupDatabase(backupFileName, disableCompression)
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)


@@ -1,4 +1,58 @@
package pkg
import (
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
)
type Config struct {
}
type dbConfig struct {
dbHost string
dbPort string
dbName string
dbUserName string
dbPassword string
}
type dbSourceConfig struct {
sourceDbHost string
sourceDbPort string
sourceDbUserName string
sourceDbPassword string
sourceDbName string
}
func getDbConfig(cmd *cobra.Command) *dbConfig {
//Set env
utils.GetEnv(cmd, "dbname", "DB_NAME")
dConf := dbConfig{}
dConf.dbHost = os.Getenv("DB_HOST")
dConf.dbPort = os.Getenv("DB_PORT")
dConf.dbName = os.Getenv("DB_NAME")
dConf.dbUserName = os.Getenv("DB_USERNAME")
dConf.dbPassword = os.Getenv("DB_PASSWORD")
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &dConf
}
func getSourceDbConfig() *dbSourceConfig {
sdbConfig := dbSourceConfig{}
sdbConfig.sourceDbHost = os.Getenv("SOURCE_DB_HOST")
sdbConfig.sourceDbPort = os.Getenv("SOURCE_DB_PORT")
sdbConfig.sourceDbName = os.Getenv("SOURCE_DB_NAME")
sdbConfig.sourceDbUserName = os.Getenv("SOURCE_DB_USERNAME")
sdbConfig.sourceDbPassword = os.Getenv("SOURCE_DB_PASSWORD")
err := utils.CheckEnvVars(sdbRVars)
if err != nil {
utils.Error("Please make sure all required environment variables for source database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &sdbConfig
}


@@ -9,11 +9,17 @@ import (
func Decrypt(inputFile string, passphrase string) error {
utils.Info("Decrypting backup file: " + inputFile + " ...")
//Create gpg home dir
err := utils.MakeDir(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
err = cmd.Run()
if err != nil {
return err
}
@@ -24,11 +30,17 @@ func Decrypt(inputFile string, passphrase string) error {
func Encrypt(inputFile string, passphrase string) error {
utils.Info("Encrypting backup...")
//Create gpg home dir
err := utils.MakeDir(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
err = cmd.Run()
if err != nil {
return err
}


@@ -1,9 +1,11 @@
package pkg
import (
"bytes"
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"os/exec"
"path/filepath"
"time"
)
@@ -96,3 +98,24 @@ func deleteTemp() {
utils.Info("Deleting %s ... done", tmpPath)
}
}
// TestDatabaseConnection tests the database connection
func testDatabaseConnection(db *dbConfig) {
utils.Info("Connecting to %s database ...", db.dbName)
cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err := cmd.Run()
if err != nil {
utils.Error("Error testing database connection: %v\nOutput: %s", err, out.String())
os.Exit(1)
}
utils.Info("Successfully connected to %s database", db.dbName)
}
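
For reference, this connectivity check is equivalent to invoking the `mysql` client directly; the placeholders below stand in for the `dbConfig` fields:

```shell
mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USERNAME" \
  --password="$DB_PASSWORD" "$DB_NAME" -e "quit"
```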

pkg/migrate.go (new file)

@@ -0,0 +1,31 @@
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"time"
)
func StartMigration(cmd *cobra.Command) {
utils.Info("Starting database migration...")
//Get DB config
dbConf = getDbConfig(cmd)
sDbConf = getSourceDbConfig()
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql", sDbConf.sourceDbName, time.Now().Format("20060102_150405"))
//Backup Source Database
newDbConfig := dbConfig{}
newDbConfig.dbHost = sDbConf.sourceDbHost
newDbConfig.dbPort = sDbConf.sourceDbPort
newDbConfig.dbName = sDbConf.sourceDbName
newDbConfig.dbUserName = sDbConf.sourceDbUserName
newDbConfig.dbPassword = sDbConf.sourceDbPassword
BackupDatabase(&newDbConfig, backupFileName, true)
//Restore source database into target database
utils.Info("Restoring [%s] database into [%s] database...", sDbConf.sourceDbName, dbConf.dbName)
RestoreDatabase(dbConf, backupFileName)
utils.Info("[%s] database has been restored into [%s] database", sDbConf.sourceDbName, dbConf.dbName)
utils.Info("Database migration completed!")
}

pkg/restore.go

@@ -13,8 +13,6 @@ func StartRestore(cmd *cobra.Command) {
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
utils.GetEnv(cmd, "port", "DB_PORT")
//Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
@@ -23,47 +21,45 @@ func StartRestore(cmd *cobra.Command) {
file = utils.GetEnv(cmd, "file", "FILE_NAME")
executionMode, _ = cmd.Flags().GetString("mode")
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
dbConf = getDbConfig(cmd)
switch storage {
case "s3":
restoreFromS3(file, bucket, s3Path)
restoreFromS3(dbConf, file, bucket, s3Path)
case "local":
utils.Info("Restore database from local")
copyToTmp(storagePath, file)
RestoreDatabase(file)
RestoreDatabase(dbConf, file)
case "ssh":
restoreFromRemote(file, remotePath)
restoreFromRemote(dbConf, file, remotePath)
case "ftp":
utils.Fatal("Restore from FTP is not yet supported")
default:
utils.Info("Restore database from local")
RestoreDatabase(file)
copyToTmp(storagePath, file)
RestoreDatabase(dbConf, file)
}
}
func restoreFromS3(file, bucket, s3Path string) {
func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil {
utils.Fatal("Error download file from s3 %s %v", file, err)
}
RestoreDatabase(file)
RestoreDatabase(db, file)
}
func restoreFromRemote(file, remotePath string) {
func restoreFromRemote(db *dbConfig, file, remotePath string) {
utils.Info("Restore database from remote server")
err := CopyFromRemote(file, remotePath)
if err != nil {
utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
}
RestoreDatabase(file)
RestoreDatabase(db, file)
}
// RestoreDatabase restore database
func RestoreDatabase(file string) {
dbHost = os.Getenv("DB_HOST")
dbPassword = os.Getenv("DB_PASSWORD")
dbUserName = os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
func RestoreDatabase(db *dbConfig, file string) {
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
if file == "" {
utils.Fatal("Error, file required")
@@ -93,7 +89,7 @@ func RestoreDatabase(file string) {
}
if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
utils.TestDatabaseConnection()
testDatabaseConnection(db)
utils.Info("Restoring database...")
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))


@@ -4,16 +4,12 @@ const cronLogFile = "/var/log/mysql-bkup.log"
const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
const algorithm = "aes256"
const gpgHome = "gnupg"
const gpgExtension = "gpg"
var (
storage = "local"
file = ""
dbPassword = ""
dbUserName = ""
dbName = ""
dbHost = ""
dbPort = "3306"
executionMode = "default"
storagePath = "/backup"
disableCompression = false
@@ -27,6 +23,16 @@ var dbHVars = []string{
"DB_USERNAME",
"DB_NAME",
}
var sdbRVars = []string{
"SOURCE_DB_HOST",
"SOURCE_DB_PORT",
"SOURCE_DB_NAME",
"SOURCE_DB_USERNAME",
"SOURCE_DB_PASSWORD",
}
var dbConf *dbConfig
var sDbConf *dbSourceConfig
// sshHVars Required environment variables for SSH remote server storage
var sshHVars = []string{


@@ -7,13 +7,11 @@ package utils
* @link https://github.com/jkaninda/mysql-bkup
**/
import (
"bytes"
"fmt"
"github.com/spf13/cobra"
"io"
"io/fs"
"os"
"os/exec"
)
func FileExists(filename string) bool {
@@ -90,34 +88,6 @@ func IsDirEmpty(name string) (bool, error) {
return true, nil
}
// TestDatabaseConnection tests the database connection
func TestDatabaseConnection() {
dbHost := os.Getenv("DB_HOST")
dbPassword := os.Getenv("DB_PASSWORD")
dbUserName := os.Getenv("DB_USERNAME")
dbName := os.Getenv("DB_NAME")
dbPort := os.Getenv("DB_PORT")
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
Fatal("Please make sure all required database environment variables are set")
} else {
Info("Connecting to database ...")
cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err := cmd.Run()
if err != nil {
Error("Error testing database connection: %v\nOutput: %s", err, out.String())
os.Exit(1)
}
Info("Successfully connected to database")
}
}
func GetEnv(cmd *cobra.Command, flagName, envName string) string {
value, _ := cmd.Flags().GetString(flagName)
if value != "" {
@@ -182,3 +152,21 @@ func CheckEnvVars(vars []string) error {
return nil
}
// MakeDir create directory
func MakeDir(dirPath string) error {
err := os.Mkdir(dirPath, 0700)
if err != nil {
return err
}
return nil
}
// MakeDirAll create directory
func MakeDirAll(dirPath string) error {
err := os.MkdirAll(dirPath, 0700)
if err != nil {
return err
}
return nil
}