Mirror of https://github.com/jkaninda/mysql-bkup.git (synced 2025-12-06 13:39:41 +01:00)

Compare commits (24 commits)
| SHA1 |
|---|
| f3c5585051 |
| 7163d030a5 |
| a2cec86e73 |
| 662b73579d |
| c9f8a32de1 |
| 8fb008151c |
| 113c84c885 |
| 58deb92953 |
| c41afb8b57 |
| 02e51a3933 |
| db4061b64b |
| 9467b157aa |
| c229ebdc9d |
| 7b701d1740 |
| ad6f190bad |
| de4dcaaeca |
| 17c0a99bda |
| b1c9abf931 |
| a70a893c11 |
| 243e25f4fb |
| cb0dcf4104 |
| d26d8d31c9 |
| 71d438ba76 |
| a3fc58af96 |
.gitignore (vendored, 1 change)
@@ -9,3 +9,4 @@ mysql-bkup
/.DS_Store
/.idea
bin
Makefile
Makefile (48 changes, file deleted)
@@ -1,48 +0,0 @@
BINARY_NAME=mysql-bkup
IMAGE_NAME=jkaninda/mysql-bkup

include .env
export
run:
    go run . backup

build:
    go build -o bin/${BINARY_NAME} .

compile:
    GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
    GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
    GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
    GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .

docker-build:
    docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .

docker-run: #docker-build
    docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --prune --keep-last 2
docker-restore: docker-build
    docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore -f ${FILE_NAME}

docker-run-scheduled: #docker-build
    docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --mode scheduled --period "* * * * *"

docker-run-scheduled-s3: docker-build
    docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"

docker-run-s3: docker-build
    docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --path /custom-path

docker-restore-s3: docker-build
    docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore --storage s3 -f ${FILE_NAME} --path /custom-path

docker-run-ssh: docker-build
    docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage ssh

docker-restore-ssh: docker-build
    docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" ${IMAGE_NAME} restore --storage ssh -f ${FILE_NAME}

run-docs:
    cd docs && bundle exec jekyll serve -H 0.0.0.0 -t
README.md (93 changes)
@@ -12,9 +12,11 @@ It also supports __encrypting__ your backups using GPG.




Successfully tested on:
- Docker
- Docker Swarm
- Docker in Swarm mode
- Kubernetes
- OpenShift

## Documentation is found at <https://jkaninda.github.io/mysql-bkup>
@@ -31,7 +33,7 @@ It also supports __encrypting__ your backups using GPG.
## Storage:
- Local
- AWS S3 or any S3 Alternatives for Object Storage
- SSH
- SSH remote server

## Quickstart
@@ -50,6 +52,12 @@ To run a one time backup, bind your local volume to `/backup` in the container a

Alternatively, pass a `--env-file` in order to use a full config as described below.

```shell
docker run --rm --network your_network_name \
  --env-file your-env-file \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup backup -d database_name
```

### Simple backup in docker compose file
@@ -66,8 +74,8 @@ services:
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=5432
      - DB_HOST=postgres
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=foo
      - DB_USERNAME=bar
      - DB_PASSWORD=password
@@ -79,57 +87,50 @@ networks:
```
## Deploy on Kubernetes

For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob.

### Simple Kubernetes CronJob usage:
### Simple Kubernetes backup Job :

```yaml
apiVersion: batch/v1
kind: CronJob
kind: Job
metadata:
name: bkup-job
name: backup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
ttlSecondsAfterFinished: 100
template:
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
containers:
- name: pg-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "5432"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries
@@ -21,6 +21,8 @@ var BackupCmd = &cobra.Command{

func init() {
    //Backup
    BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
    BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
    BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
    BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
    BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
cmd/migrate.go (21 changes, new file)
@@ -0,0 +1,21 @@
package cmd

import (
    "github.com/jkaninda/mysql-bkup/pkg"
    "github.com/jkaninda/mysql-bkup/utils"
    "github.com/spf13/cobra"
)

var MigrateCmd = &cobra.Command{
    Use:   "migrate",
    Short: "Migrate database from a source database to a target database",
    Run: func(cmd *cobra.Command, args []string) {
        if len(args) == 0 {
            pkg.StartMigration(cmd)
        } else {
            utils.Fatal("Error, no argument required")
        }
    },
}
@@ -24,5 +24,7 @@ var RestoreCmd = &cobra.Command{

func init() {
    //Restore
    RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
    RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
    RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
}
@@ -30,13 +30,11 @@ func Execute() {
}

func init() {
    rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
    rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
    rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
    rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
    rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")

    rootCmd.AddCommand(VersionCmd)
    rootCmd.AddCommand(BackupCmd)
    rootCmd.AddCommand(RestoreCmd)
    rootCmd.AddCommand(MigrateCmd)
}
@@ -14,7 +14,7 @@ ENV DB_HOST=""
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT="3306"
ENV DB_PORT=3306
ENV STORAGE=local
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
@@ -30,9 +30,15 @@ ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ENV SOURCE_DB_HOST=""
ENV SOURCE_DB_PORT=3306
ENV SOURCE_DB_NAME=""
ENV SOURCE_DB_USERNAME=""
ENV SOURCE_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.1"
ARG WORKDIR="/app"
ENV VERSION="v1.2.3"
ENV BACKUP_CRON_EXPRESSION=""
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
@@ -40,7 +46,6 @@ ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
LABEL author="Jonas Kaninda"

RUN apt-get update -qq
#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
RUN apt install mysql-client supervisor cron gnupg -y

# Clear cache
@@ -64,5 +69,15 @@ RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup

ADD docker/supervisord.conf /etc/supervisor/supervisord.conf

# Create backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
    chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
    chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
    chmod +x /usr/local/bin/migrate

WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
@@ -13,10 +13,11 @@
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
# You can create any custom variable you would like, and they will be accessible
# in the templates via {{ site.myvariable }}.
title: MySQL database backup
title: MySQL Backup Docker container image
email: hi@jonaskaninda.com
description: >- # this means to ignore newlines until "baseurl:"
  MySQL Backup and Restore Docker container image. Backup database to AWS S3 storage or SSH remote server.
  MySQL Backup is a Docker container image that can be used to backup and restore MySQL database.
  It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.

baseurl: "" # the subpath of your site, e.g. /blog
url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
@@ -7,7 +7,7 @@ nav_order: 3
# Backup to SSH remote server


As described for s3 backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage ssh` or `--storage remote`.
As described for s3 backup section, to change the storage of your backup and use SSH Remote server as storage. You need to add `--storage ssh` or `--storage remote`.
You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `SSH_REMOTE_PATH` environment variable.

{: .note }
@@ -10,7 +10,62 @@ nav_order: 8
To deploy MySQL Backup on Kubernetes, you can use a Job to backup or restore your database.
For recurring backups you can use a CronJob; you don't need to run it in scheduled mode, as described below.

## Backup Job
## Backup to S3 storage

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: backup
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - bkup
            - backup
            - --storage
            - s3
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Please use secret!
            - name: DB_PASSWORD
              value: ""
            - name: AWS_S3_ENDPOINT
              value: "https://s3.amazonaws.com"
            - name: AWS_S3_BUCKET_NAME
              value: "xxx"
            - name: AWS_REGION
              value: "us-west-2"
            - name: AWS_ACCESS_KEY
              value: "xxxx"
            - name: AWS_SECRET_KEY
              value: "xxxx"
            - name: AWS_DISABLE_SSL
              value: "false"
      restartPolicy: Never
```
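The note above mentions a CronJob for recurring backups, but the hunks on this page only show Jobs. A minimal sketch of the CronJob variant, reusing the container spec from the Job above (the name and schedule are illustrative, not from the source):

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-cronjob   # illustrative name
spec:
  schedule: "0 1 * * *"  # illustrative: every day at 01:00
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: mysql-bkup
              image: jkaninda/mysql-bkup
              command:
                - /bin/sh
                - -c
                - bkup
                - backup
                - --storage
                - s3
              # env: reuse the same DB_* and AWS_* variables as in the Job above
          restartPolicy: Never
```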
## Backup Job to SSH remote server

```yaml
apiVersion: batch/v1
@@ -29,6 +84,8 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
@@ -85,6 +142,8 @@ spec:
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- restore
- --storage
@@ -139,6 +198,8 @@ spec:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
@@ -178,6 +239,9 @@ spec:

## Kubernetes Rootless

This image also supports the Kubernetes security context, so you can run it in a rootless environment.
It has been tested on OpenShift and works well.
Deployment on OpenShift is supported; you need to remove the `securityContext` section from your YAML file.

```yaml
apiVersion: batch/v1
@@ -202,6 +266,8 @@ spec:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
@@ -11,7 +11,7 @@ The image supports encrypting backups using GPG out of the box. In case a `GPG_P
{: .warning }
To restore an encrypted backup, you need to provide the same GPG passphrase used during the backup process.

To decrypt manually, you need to install gnupg
To decrypt manually, you need to install `gnupg`

### Decrypt backup
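The decrypt command itself is not shown in this hunk. Based on the `Decrypt` function in `pkg/encrypt.go` further down this page, manual decryption looks roughly like this (the file name is illustrative):

```shell
# Decrypt backup.sql.gz.gpg into backup.sql.gz, using the same passphrase
# that was set in GPG_PASSPHRASE when the backup was created.
gpg --batch --passphrase "my-passphrase" \
    --output backup.sql.gz --decrypt backup.sql.gz.gpg
```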
docs/how-tos/migrate.md (123 changes, new file)
@@ -0,0 +1,123 @@
---
title: Migrate database
layout: default
parent: How Tos
nav_order: 9
---

# Migrate database

To migrate a database, use the `migrate` command.

{: .note }
MySQL Backup has another great feature: migrating your database from a source database to a target database.

Normally, moving a database from a source to a target instance takes two operations: back up the source database, then restore that backup into the target database. Instead, you can use the integrated `migrate` feature, which performs the migration in a single operation.


### Docker compose
```yml
services:
  mysql-bkup:
    # In production, it is advised to lock your image tag to a proper
    # release version instead of using `latest`.
    # Check https://github.com/jkaninda/mysql-bkup/releases
    # for a list of available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: migrate
    volumes:
      - ./backup:/backup
    environment:
      ## Target database
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Source database
      - SOURCE_DB_HOST=mysql2
      - SOURCE_DB_PORT=3306
      - SOURCE_DB_NAME=sourcedb
      - SOURCE_DB_USERNAME=jonas
      - SOURCE_DB_PASSWORD=password
    # mysql-bkup container must be connected to the same network as your database
    networks:
      - web
networks:
  web:
```

### Migrate database using Docker CLI


```
## Target database
DB_PORT=3306
DB_HOST=mysql
DB_NAME=targetdb
DB_USERNAME=targetuser
DB_PASSWORD=password

## Source database
SOURCE_DB_HOST=mysql2
SOURCE_DB_PORT=3306
SOURCE_DB_NAME=sourcedb
SOURCE_DB_USERNAME=sourceuser
SOURCE_DB_PASSWORD=password
```

```shell
docker run --rm --network your_network_name \
  --env-file your-env \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup migrate -d database_name
```

## Kubernetes

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: migrate-db
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - migrate -d targetdb
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            ## Target DB
            - name: DB_HOST
              value: "postgres-target"
            - name: DB_USERNAME
              value: "mysql"
            - name: DB_PASSWORD
              value: "password"
            ## Source DB
            - name: SOURCE_DB_HOST
              value: "postgres-source"
            - name: SOURCE_DB_NAME
              value: "sourcedb"
            - name: SOURCE_DB_USERNAME
              value: "postgres"
            # Please use secret!
            - name: SOURCE_DB_PASSWORD
              value: "password"
      restartPolicy: Never
```
@@ -45,6 +45,13 @@ To run a one time backup, bind your local volume to `/backup` in the container a

Alternatively, pass a `--env-file` in order to use a full config as described below.

```shell
docker run --rm --network your_network_name \
  --env-file your-env-file \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup backup -d database_name
```

### Simple backup in docker compose file

```yaml
@@ -61,7 +68,7 @@ services:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=postgres
      - DB_HOST=mysql
      - DB_NAME=foo
      - DB_USERNAME=bar
      - DB_PASSWORD=password
@@ -71,6 +78,49 @@ services:
networks:
  web:
```
## Kubernetes

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: backup-job
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, it is advised to lock your image tag to a proper
          # release version instead of using `latest`.
          # Check https://github.com/jkaninda/mysql-bkup/releases
          # for a list of available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup -d dbname
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_HOST
              value: "mysql"
            - name: DB_USERNAME
              value: "user"
            - name: DB_PASSWORD
              value: "password"
          volumeMounts:
            - mountPath: /backup
              name: backup
      volumes:
        - name: backup
          hostPath:
            path: /home/toto/backup # directory location on host
            type: Directory # this field is optional
      restartPolicy: Never
```

## Available image registries
@@ -6,7 +6,7 @@ nav_order: 2

# Configuration reference

Backup and restore targets, schedule and retention are configured using environment variables or flags.
Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
@@ -19,6 +19,7 @@ Backup and restore targets, schedule and retention are configured using environm
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| migrate | | Migrate database from one instance to another one |
| --storage | -s | Storage. local or s3 (default: local) |
| --file | -f | File name for restoration |
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
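As an illustration of how these flags combine, the Makefile's `docker-run` target earlier on this page invokes the CLI like this (network, credentials and the retention value are placeholders):

```shell
# One-off local backup, pruning everything but the two most recent backups.
docker run --rm --network web --name mysql-bkup \
  -v "$PWD/backup:/backup" \
  -e "DB_HOST=mysql" -e "DB_NAME=foo" \
  -e "DB_USERNAME=bar" -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup --prune --keep-last 2
```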
@@ -34,27 +35,33 @@ Backup and restore targets, schedule and retention are configured using environm

## Environment variables

The previous table:

| Name | Requirement | Description |
|-------------------|--------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| Gmysql_PASSPHRASE | Optional, required to encrypt and restore backup | Gmysql passphrase |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |

is replaced by:

| Name | Requirement | Description |
|------------------------|----------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| SOURCE_DB_HOST | Optional, required for database migration | Source database host |
| SOURCE_DB_PORT | Optional, required for database migration | Source database port |
| SOURCE_DB_NAME | Optional, required for database migration | Source database name |
| SOURCE_DB_USERNAME | Optional, required for database migration | Source database username |
| SOURCE_DB_PASSWORD | Optional, required for database migration | Source database password |

---
## Run in Scheduled mode
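The body of this section is cut off by the hunk boundary. For orientation, the Makefile target `docker-run-scheduled` earlier on this page shows what scheduled mode looks like in practice; a sketch with an illustrative cron expression:

```shell
# Keep the container running and back up on a cron schedule.
# --period (or BACKUP_CRON_EXPRESSION) takes a cron expression;
# "0 1 * * *" means every day at 01:00.
docker run --rm --network web --name mysql-bkup \
  -v "$PWD/backup:/backup" \
  -e "DB_HOST=mysql" -e "DB_NAME=foo" \
  -e "DB_USERNAME=bar" -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup --mode scheduled --period "0 1 * * *"
```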
@@ -1,44 +1,50 @@
piVersion: batch/v1
kind: CronJob
apiVersion: batch/v1
kind: Job
metadata:
name: bkup-job
name: backup
spec:
schedule: "0 1 * * *"
jobTemplate:
template:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: ACCESS_KEY
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: OnFailure
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
@@ -20,9 +20,7 @@ func StartBackup(cmd *cobra.Command) {
    _, _ = cmd.Flags().GetString("operation")
    //Set env
    utils.SetEnv("STORAGE_PATH", storagePath)
    utils.GetEnv(cmd, "dbname", "DB_NAME")
    utils.GetEnv(cmd, "port", "DB_PORT")
    utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
    utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")

    //Get flag value and set env
    remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
@@ -32,37 +30,38 @@ func StartBackup(cmd *cobra.Command) {
    prune, _ := cmd.Flags().GetBool("prune")
    disableCompression, _ = cmd.Flags().GetBool("disable-compression")
    executionMode, _ = cmd.Flags().GetString("mode")
    dbName = os.Getenv("DB_NAME")
    gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
    _ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")

    dbConf = getDbConfig(cmd)

    //
    if gpqPassphrase != "" {
        encryption = true
    }

    //Generate file name
    backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
    backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
    if disableCompression {
        backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
        backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
    }

    if executionMode == "default" {
        switch storage {
        case "s3":
            s3Backup(backupFileName, disableCompression, prune, backupRetention, encryption)
            s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
        case "local":
            localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
            localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
        case "ssh", "remote":
            sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
            sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
        case "ftp":
            utils.Fatal("Not supported storage type: %s", storage)
        default:
            localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
            localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
        }

    } else if executionMode == "scheduled" {
        scheduledMode(storage)
        scheduledMode(dbConf, storage)
    } else {
        utils.Fatal("Error, unknown execution mode!")
    }
@@ -70,18 +69,18 @@ func StartBackup(cmd *cobra.Command) {
}

// Run in scheduled mode
func scheduledMode(storage string) {
func scheduledMode(db *dbConfig, storage string) {

    fmt.Println()
    fmt.Println("**********************************")
    fmt.Println(" Starting MySQL Bkup... ")
    fmt.Println("***********************************")
    utils.Info("Running in Scheduled mode")
    utils.Info("Execution period %s", os.Getenv("SCHEDULE_PERIOD"))
    utils.Info("Execution period %s", os.Getenv("BACKUP_CRON_EXPRESSION"))
    utils.Info("Storage type %s ", storage)

    //Test database connexion
    utils.TestDatabaseConnection()
    testDatabaseConnection(db)

    utils.Info("Creating backup job...")
    CreateCrontabScript(disableCompression, storage)
@@ -117,12 +116,7 @@ func scheduledMode(storage string) {
}

// BackupDatabase backup database
func BackupDatabase(backupFileName string, disableCompression bool) {
    dbHost = os.Getenv("DB_HOST")
    dbPassword = os.Getenv("DB_PASSWORD")
    dbUserName = os.Getenv("DB_USERNAME")
    dbName = os.Getenv("DB_NAME")
    dbPort = os.Getenv("DB_PORT")
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
    storagePath = os.Getenv("STORAGE_PATH")

    err := utils.CheckEnvVars(dbHVars)
@@ -132,7 +126,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
    }

    utils.Info("Starting database backup...")
    utils.TestDatabaseConnection()
    testDatabaseConnection(db)

    // Backup Database database
    utils.Info("Backing up database...")
@@ -140,11 +134,11 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
    if disableCompression {
        // Execute mysqldump
        cmd := exec.Command("mysqldump",
            "-h", dbHost,
            "-P", dbPort,
            "-u", dbUserName,
            "--password="+dbPassword,
            dbName,
            "-h", db.dbHost,
            "-P", db.dbPort,
            "-u", db.dbUserName,
            "--password="+db.dbPassword,
            db.dbName,
        )
        output, err := cmd.Output()
        if err != nil {
@@ -166,7 +160,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {

    } else {
        // Execute mysqldump
        cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
        cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName)
        stdout, err := cmd.StdoutPipe()
        if err != nil {
            log.Fatal(err)
@@ -189,9 +183,9 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
    }

}
func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
    utils.Info("Backup database to local storage")
    BackupDatabase(backupFileName, disableCompression)
    BackupDatabase(db, backupFileName, disableCompression)
    finalFileName := backupFileName
    if encrypt {
        encryptBackup(backupFileName)
@@ -203,20 +197,22 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
    if prune {
        deleteOldBackup(backupRetention)
    }
    //Delete temp
    deleteTemp()
}

func s3Backup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
    bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
    s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
    utils.Info("Backup database to s3 storage")
    //Backup database
    BackupDatabase(backupFileName, disableCompression)
    BackupDatabase(db, backupFileName, disableCompression)
    finalFileName := backupFileName
    if encrypt {
        encryptBackup(backupFileName)
        finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
    }
    utils.Info("Uploading backup file to S3 storage...")
    utils.Info("Uploading backup archive to remote storage S3 ... ")
    utils.Info("Backup name is %s", finalFileName)
    err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
    if err != nil {
@@ -237,18 +233,20 @@ func s3Backup(backupFileName string, disableCompression bool, prune bool, backup
            utils.Fatal("Error deleting old backup from S3: %s ", err)
        }
    }
    utils.Done("Database has been backed up and uploaded to s3 ")
    utils.Done("Uploading backup archive to remote storage S3 ... done ")
    //Delete temp
    deleteTemp()
}
func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
    utils.Info("Backup database to Remote server")
    //Backup database
    BackupDatabase(backupFileName, disableCompression)
    BackupDatabase(db, backupFileName, disableCompression)
    finalFileName := backupFileName
    if encrypt {
        encryptBackup(backupFileName)
        finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
    }
    utils.Info("Uploading backup file to remote server...")
    utils.Info("Uploading backup archive to remote storage ... ")
    utils.Info("Backup name is %s", finalFileName)
    err := CopyToRemote(finalFileName, remotePath)
    if err != nil {
@@ -268,9 +266,10 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune

    }

    utils.Done("Database has been backed up and uploaded to remote server ")
    utils.Done("Uploading backup archive to remote storage ... done ")
    //Delete temp
    deleteTemp()
}

func encryptBackup(backupFileName string) {
    gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
    err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
@@ -1,4 +1,58 @@
package pkg

import (
    "github.com/jkaninda/mysql-bkup/utils"
    "github.com/spf13/cobra"
    "os"
)

type Config struct {
}

type dbConfig struct {
    dbHost     string
    dbPort     string
    dbName     string
    dbUserName string
    dbPassword string
}
type dbSourceConfig struct {
    sourceDbHost     string
    sourceDbPort     string
    sourceDbUserName string
    sourceDbPassword string
    sourceDbName     string
}

func getDbConfig(cmd *cobra.Command) *dbConfig {
    //Set env
    utils.GetEnv(cmd, "dbname", "DB_NAME")
    dConf := dbConfig{}
    dConf.dbHost = os.Getenv("DB_HOST")
    dConf.dbPort = os.Getenv("DB_PORT")
    dConf.dbName = os.Getenv("DB_NAME")
    dConf.dbUserName = os.Getenv("DB_USERNAME")
    dConf.dbPassword = os.Getenv("DB_PASSWORD")

    err := utils.CheckEnvVars(dbHVars)
    if err != nil {
        utils.Error("Please make sure all required environment variables for database are set")
        utils.Fatal("Error checking environment variables: %s", err)
    }
    return &dConf
}
func getSourceDbConfig() *dbSourceConfig {
    sdbConfig := dbSourceConfig{}
    sdbConfig.sourceDbHost = os.Getenv("SOURCE_DB_HOST")
    sdbConfig.sourceDbPort = os.Getenv("SOURCE_DB_PORT")
    sdbConfig.sourceDbName = os.Getenv("SOURCE_DB_NAME")
    sdbConfig.sourceDbUserName = os.Getenv("SOURCE_DB_USERNAME")
    sdbConfig.sourceDbPassword = os.Getenv("SOURCE_DB_PASSWORD")

    err := utils.CheckEnvVars(sdbRVars)
    if err != nil {
        utils.Error("Please make sure all required environment variables for source database are set")
        utils.Fatal("Error checking environment variables: %s", err)
    }
    return &sdbConfig
}
@@ -9,11 +9,17 @@ import (

func Decrypt(inputFile string, passphrase string) error {
    utils.Info("Decrypting backup file: " + inputFile + " ...")
    //Create gpg home dir
    err := utils.MakeDir(gpgHome)
    if err != nil {
        return err
    }
    utils.SetEnv("GNUPGHOME", gpgHome)
    cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    err := cmd.Run()
    err = cmd.Run()
    if err != nil {
        return err
    }
@@ -24,11 +30,17 @@ func Decrypt(inputFile string, passphrase string) error {

func Encrypt(inputFile string, passphrase string) error {
    utils.Info("Encrypting backup...")
    //Create gpg home dir
    err := utils.MakeDir(gpgHome)
    if err != nil {
        return err
    }
    utils.SetEnv("GNUPGHOME", gpgHome)
    cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    err := cmd.Run()
    err = cmd.Run()
    if err != nil {
        return err
    }
@@ -1,9 +1,11 @@
package pkg

import (
    "bytes"
    "fmt"
    "github.com/jkaninda/mysql-bkup/utils"
    "os"
    "os/exec"
    "path/filepath"
    "time"
)
@@ -71,4 +73,49 @@ func deleteOldBackup(retentionDays int) {
        utils.Fatal(fmt.Sprintf("Error: %s", err))
        return
    }
    utils.Done("Deleting old backups...done")

}
func deleteTemp() {
    utils.Info("Deleting %s ...", tmpPath)
    err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        // Check if the current item is a file
        if !info.IsDir() {
            // Delete the file
            err = os.Remove(path)
            if err != nil {
                return err
            }
        }
        return nil
    })
    if err != nil {
        utils.Error("Error deleting files: %v", err)
    } else {
        utils.Info("Deleting %s ... done", tmpPath)
    }
}

// testDatabaseConnection tests the database connection
func testDatabaseConnection(db *dbConfig) {

    utils.Info("Connecting to %s database ...", db.dbName)

    cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName, "-e", "quit")

    // Capture the output
    var out bytes.Buffer
    cmd.Stdout = &out
    cmd.Stderr = &out
    err := cmd.Run()
    if err != nil {
        utils.Error("Error testing database connection: %v\nOutput: %s", err, out.String())
        os.Exit(1)

    }
    utils.Info("Successfully connected to %s database", db.dbName)

}
pkg/migrate.go (31 changes, new file)
@@ -0,0 +1,31 @@
package pkg

import (
    "fmt"
    "github.com/jkaninda/mysql-bkup/utils"
    "github.com/spf13/cobra"
    "time"
)

func StartMigration(cmd *cobra.Command) {
    utils.Info("Starting database migration...")
    //Get DB config
    dbConf = getDbConfig(cmd)
    sDbConf = getSourceDbConfig()

    //Generate file name
    backupFileName := fmt.Sprintf("%s_%s.sql", sDbConf.sourceDbName, time.Now().Format("20060102_150405"))
    //Backup Source Database
    newDbConfig := dbConfig{}
    newDbConfig.dbHost = sDbConf.sourceDbHost
    newDbConfig.dbPort = sDbConf.sourceDbPort
    newDbConfig.dbName = sDbConf.sourceDbName
    newDbConfig.dbUserName = sDbConf.sourceDbUserName
    newDbConfig.dbPassword = sDbConf.sourceDbPassword
    BackupDatabase(&newDbConfig, backupFileName, true)
    //Restore source database into target database
    utils.Info("Restoring [%s] database into [%s] database...", sDbConf.sourceDbName, dbConf.dbName)
    RestoreDatabase(dbConf, backupFileName)
    utils.Info("[%s] database has been restored into [%s] database", sDbConf.sourceDbName, dbConf.dbName)
    utils.Info("Database migration completed!")
}
@@ -13,8 +13,6 @@ func StartRestore(cmd *cobra.Command) {

    //Set env
    utils.SetEnv("STORAGE_PATH", storagePath)
    utils.GetEnv(cmd, "dbname", "DB_NAME")
    utils.GetEnv(cmd, "port", "DB_PORT")

    //Get flag value and set env
    s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
@@ -23,47 +21,45 @@ func StartRestore(cmd *cobra.Command) {
    file = utils.GetEnv(cmd, "file", "FILE_NAME")
    executionMode, _ = cmd.Flags().GetString("mode")
    bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
    dbConf = getDbConfig(cmd)

    switch storage {
    case "s3":
        restoreFromS3(file, bucket, s3Path)
        restoreFromS3(dbConf, file, bucket, s3Path)
    case "local":
        utils.Info("Restore database from local")
        copyToTmp(storagePath, file)
        RestoreDatabase(file)
        RestoreDatabase(dbConf, file)
    case "ssh":
        restoreFromRemote(file, remotePath)
        restoreFromRemote(dbConf, file, remotePath)
    case "ftp":
        utils.Fatal("Restore from FTP is not yet supported")
    default:
        utils.Info("Restore database from local")
        RestoreDatabase(file)
        copyToTmp(storagePath, file)
        RestoreDatabase(dbConf, file)
    }
}

func restoreFromS3(file, bucket, s3Path string) {
func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
    utils.Info("Restore database from s3")
    err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
    if err != nil {
        utils.Fatal(fmt.Sprintf("Error download file from s3 %s %s", file, err))
        utils.Fatal("Error download file from s3 %s %v", file, err)
    }
    RestoreDatabase(file)
    RestoreDatabase(db, file)
}
func restoreFromRemote(file, remotePath string) {
func restoreFromRemote(db *dbConfig, file, remotePath string) {
    utils.Info("Restore database from remote server")
    err := CopyFromRemote(file, remotePath)
    if err != nil {
        utils.Fatal(fmt.Sprintf("Error download file from remote server: ", filepath.Join(remotePath, file), err))
        utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
    }
    RestoreDatabase(file)
    RestoreDatabase(db, file)
}

// RestoreDatabase restore database
func RestoreDatabase(file string) {
    dbHost = os.Getenv("DB_HOST")
    dbPassword = os.Getenv("DB_PASSWORD")
    dbUserName = os.Getenv("DB_USERNAME")
    dbName = os.Getenv("DB_NAME")
    dbPort = os.Getenv("DB_PORT")
func RestoreDatabase(db *dbConfig, file string) {
    gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
    if file == "" {
        utils.Fatal("Error, file required")
@@ -93,7 +89,8 @@ func RestoreDatabase(file string) {
    }

    if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
        utils.TestDatabaseConnection()
        testDatabaseConnection(db)
        utils.Info("Restoring database...")

        extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
        // Restore from compressed file / .sql.gz
@@ -103,7 +100,10 @@ func RestoreDatabase(file string) {
        if err != nil {
            utils.Fatal("Error, in restoring the database %v", err)
        }
        utils.Info("Restoring database... done")
        utils.Done("Database has been restored")
        //Delete temp
        deleteTemp()

    } else if extension == ".sql" {
        //Restore from sql file
@@ -112,7 +112,10 @@ func RestoreDatabase(file string) {
        if err != nil {
            utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
        }
        utils.Info("Restoring database... done")
        utils.Done("Database has been restored")
        //Delete temp
        deleteTemp()
    } else {
        utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
    }
@@ -54,7 +54,7 @@ set -e
}

    cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)

    if err := utils.WriteToFile(cronJob, cronContent); err != nil {
        utils.Fatal("Error writing to %s: %v\n", cronJob, err)
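For reference, with `BACKUP_CRON_EXPRESSION` set to `0 1 * * *`, the format string above renders a crontab entry along these lines (the log path is the `cronLogFile` constant from pkg/var.go below):

```
0 1 * * * root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> /var/log/mysql-bkup.log"
```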
pkg/var.go (16 changes)
@@ -4,16 +4,12 @@ const cronLogFile = "/var/log/mysql-bkup.log"
const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
const algorithm = "aes256"
const gpgHome = "gnupg"
const gpgExtension = "gpg"

var (
    storage = "local"
    file = ""
    dbPassword = ""
    dbUserName = ""
    dbName = ""
    dbHost = ""
    dbPort = "3306"
    executionMode = "default"
    storagePath = "/backup"
    disableCompression = false
@@ -27,6 +23,16 @@ var dbHVars = []string{
    "DB_USERNAME",
    "DB_NAME",
}
var sdbRVars = []string{
    "SOURCE_DB_HOST",
    "SOURCE_DB_PORT",
    "SOURCE_DB_NAME",
    "SOURCE_DB_USERNAME",
    "SOURCE_DB_PASSWORD",
}

var dbConf *dbConfig
var sDbConf *dbSourceConfig

// sshHVars Required environment variables for SSH remote server storage
var sshHVars = []string{
@@ -1,8 +0,0 @@
#!/bin/sh
DB_USERNAME='db_username'
DB_PASSWORD='password'
DB_HOST='db_hostname'
DB_NAME='db_name'
BACKUP_DIR="$PWD/backup"

docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:latest backup -d $DB_NAME
@@ -7,13 +7,11 @@ package utils
 * @link https://github.com/jkaninda/mysql-bkup
 **/
import (
    "bytes"
    "fmt"
    "github.com/spf13/cobra"
    "io"
    "io/fs"
    "os"
    "os/exec"
)

func FileExists(filename string) bool {
@@ -90,34 +88,6 @@ func IsDirEmpty(name string) (bool, error) {
    return true, nil
}

// TestDatabaseConnection tests the database connection
func TestDatabaseConnection() {
    dbHost := os.Getenv("DB_HOST")
    dbPassword := os.Getenv("DB_PASSWORD")
    dbUserName := os.Getenv("DB_USERNAME")
    dbName := os.Getenv("DB_NAME")
    dbPort := os.Getenv("DB_PORT")

    if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
        Fatal("Please make sure all required database environment variables are set")
    } else {
        Info("Connecting to database ...")

        cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")

        // Capture the output
        var out bytes.Buffer
        cmd.Stdout = &out
        cmd.Stderr = &out
        err := cmd.Run()
        if err != nil {
            Error("Error testing database connection: %v\nOutput: %s", err, out.String())
            os.Exit(1)

        }
        Info("Successfully connected to database")
    }
}
func GetEnv(cmd *cobra.Command, flagName, envName string) string {
    value, _ := cmd.Flags().GetString(flagName)
    if value != "" {
@@ -182,3 +152,21 @@ func CheckEnvVars(vars []string) error {

    return nil
}

// MakeDir create directory
func MakeDir(dirPath string) error {
    err := os.Mkdir(dirPath, 0700)
    if err != nil {
        return err
    }
    return nil
}

// MakeDirAll create directory
func MakeDirAll(dirPath string) error {
    err := os.MkdirAll(dirPath, 0700)
    if err != nil {
        return err
    }
    return nil
}