mirror of
https://github.com/jkaninda/mysql-bkup.git
synced 2025-12-06 05:29:41 +01:00
Compare commits
64 Commits
Commits: 1cbf65d686, 73d19913f8, b0224e43ef, fa0485bb5a, 65ef6d3e8f, a7b6abb101, 3b21c109bc, a50a1ef6f9, 76bbfa35c4, 599d93bef4, 247e90f73e, 7d544aca68, 1722ee0eeb, 726fd14831, fdc88e6064, 2ba1b516e9, 301594676b, d06f2f2d7e, 2f06bd1c3a, f383f5559d, 3725809d28, b1598ef7d0, e4a83b9851, 4b2527f416, e97fc7512a, 7912ce46ed, 050f5e81bc, b39e97b77d, cbb73ae89b, 29a58aa26d, 041e0a07e9, 9daac9c654, f6098769cd, 5cdfaa4d94, b205cd61ea, e1307250e8, 17ac951deb, 6e2e08224d, 570b775f48, e38e106983, 3040420a09, eac5f70408, 3476c6f529, 1a9c8483f8, f8722f7ae4, 421bf12910, 3da4a27baa, 0881f075ef, 066e73f8e4, 645243ff77, 9384998127, 390e7dad0c, 67ea22385f, cde82d8cfc, 4808f093e5, c7a03861fe, 36ec63d522, 0f07de1d83, ae55839996, a7f7e57a0d, b2ddaec93b, b3570d774c, 38f7e91c03, 07c2935925
3  .github/FUNDING.yml  vendored  (new file)
@@ -0,0 +1,3 @@
# These are supported funding model platforms

ko_fi: jkaninda
2  .github/workflows/release.yml  vendored
@@ -1,4 +1,4 @@
name: Release
name: CI
on:
  push:
    tags:
24  README.md
@@ -1,16 +1,17 @@
# MySQL Backup
MySQL Backup is a Docker container image that can be used to backup and restore a MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
MySQL Backup is a Docker container image that can be used to backup, restore and migrate a MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH compatible storage.
It also supports __encrypting__ your backups using GPG.

The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of a MySQL database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
It handles __recurring__ backups of a MySQL database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3, FTP or SSH compatible storage.

It also supports __encrypting__ your backups using GPG.
It also supports database __encryption__ using GPG.

[![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
[![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
![GitHub release (latest by date)](https://img.shields.io/github/v/release/jkaninda/mysql-bkup)
![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square)
<a href="https://ko-fi.com/jkaninda"><img src="https://uploads-ssl.webflow.com/5c14e387dab576fe667689cf/5cbed8a4ae2b88347c06c923_BuyMeACoffee_blue.png" height="20" alt="buy me a coffee"></a>

Successfully tested on:
- Docker
@@ -85,6 +86,19 @@ services:
networks:
  web:
```

### Docker recurring backup

```shell
docker run --rm --network network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=hostname" \
  -e "DB_USERNAME=user" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
```
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules

## Deploy on Kubernetes

For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob.
@@ -101,7 +115,7 @@ spec:
  template:
    spec:
      containers:
      - name: pg-bkup
      - name: mysql-bkup
        # In production, it is advised to lock your image tag to a proper
        # release version instead of using `latest`.
        # Check https://github.com/jkaninda/mysql-bkup/releases
@@ -153,7 +167,7 @@ While it may work against different implementations, there are no guarantees abo

We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:

- The original image is based on `ubuntu` and requires additional tools, making it heavy.
- The original image is based on `alpine` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
||||
@@ -1,3 +1,9 @@
|
||||
// Package cmd /
|
||||
/*****
|
||||
@author Jonas Kaninda
|
||||
@license MIT License <https://opensource.org/licenses/MIT>
|
||||
@Copyright © 2024 Jonas Kaninda
|
||||
**/
|
||||
package cmd
|
||||
|
||||
import (
|
||||
@@ -23,8 +29,9 @@ func init() {
|
||||
//Backup
|
||||
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
|
||||
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
|
||||
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
|
||||
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
|
||||
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. | Deprecated")
|
||||
BackupCmd.PersistentFlags().StringP("period", "", "", "Schedule period time | Deprecated")
|
||||
BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
|
||||
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
|
||||
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
|
||||
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
|
||||
|
||||
@@ -1,3 +1,9 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd

import (
11  cmd/root.go
@@ -1,7 +1,9 @@
// Package cmd /*
/*
Copyright © 2024 Jonas Kaninda
*/
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd

import (
@@ -31,7 +33,6 @@ func Execute() {

func init() {
    rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
    rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
    rootCmd.AddCommand(VersionCmd)
    rootCmd.AddCommand(BackupCmd)
    rootCmd.AddCommand(RestoreCmd)
@@ -1,9 +1,11 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd

/*
Copyright © 2024 Jonas Kaninda
*/

import (
    "fmt"
    "github.com/spf13/cobra"
@@ -9,7 +9,7 @@ RUN go mod download
# Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup

FROM ubuntu:24.04
FROM alpine:3.20.3
ENV DB_HOST=""
ENV DB_NAME=""
ENV DB_USERNAME=""
@@ -20,55 +20,48 @@ ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
ENV AWS_ACCESS_KEY=""
ENV AWS_SECRET_KEY=""
ENV AWS_REGION="us-west-2"
ENV AWS_S3_PATH=""
ENV AWS_REGION="us-west-2"
ENV AWS_DISABLE_SSL="false"
ENV GPG_PASSPHRASE=""
ENV SSH_USER=""
ENV SSH_REMOTE_PATH=""
ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ENV SOURCE_DB_HOST=""
ENV SOURCE_DB_PORT=3306
ENV SOURCE_DB_NAME=""
ENV SOURCE_DB_USERNAME=""
ENV SOURCE_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.3"
ENV SSH_PORT=22
ENV REMOTE_PATH=""
ENV FTP_HOST_NAME=""
ENV FTP_PORT=21
ENV FTP_USER=""
ENV FTP_PASSWORD=""
ENV TARGET_DB_HOST=""
ENV TARGET_DB_PORT=3306
ENV TARGET_DB_NAME=""
ENV TARGET_DB_USERNAME=""
ENV TARGET_DB_PASSWORD=""
ENV VERSION="v1.2.9"
ENV BACKUP_CRON_EXPRESSION=""
ENV TG_TOKEN=""
ENV TG_CHAT_ID=""
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
LABEL author="Jonas Kaninda"

RUN apt-get update -qq
RUN apt install mysql-client supervisor cron gnupg -y

# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*

RUN apk --update add mysql-client gnupg
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
    touch $BACKUP_CRON_SCRIPT && \
    chmod 777 $BACKUP_CRON && \
    chmod 777 $BACKUP_CRON_SCRIPT
RUN chmod 777 $WORKDIR

COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup

RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup

ADD docker/supervisord.conf /etc/supervisor/supervisord.conf

# Create backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
    chmod +x /usr/local/bin/backup
@@ -1,13 +0,0 @@
[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisor/supervisord.log
pidfile=/var/run/supervisord.pid

[program:cron]
command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
autostart=true
autorestart=true
user = root
stderr_logfile=/var/log/cron.err.log
stdout_logfile=/var/log/cron.out.log
@@ -1,12 +0,0 @@
FROM ruby:3.3.4

ENV LC_ALL C.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8

WORKDIR /usr/src/app

COPY . ./
RUN gem install bundler && bundle install

EXPOSE 4000
@@ -1,13 +0,0 @@
services:
  jekyll:
    build:
      context: ./
    ports:
      - 4000:4000
    environment:
      - JEKYLL_ENV=development
    volumes:
      - .:/usr/src/app
    stdin_open: true
    tty: true
    command: bundle exec jekyll serve -H 0.0.0.0 -t
BIN  docs/favicon.ico  (new file)
Binary file not shown. After: 4.2 KiB
44  docs/how-tos/backup-to-ftp.md  (new file)
@@ -0,0 +1,44 @@
---
title: Backup to FTP remote server
layout: default
parent: How Tos
nav_order: 4
---
# Backup to FTP remote server


As described in the SSH backup section, to switch your backup storage to a remote FTP server, you need to add `--storage ftp`.
You also need to set the full remote path, either with the `--path /home/jkaninda/backups` flag or the `REMOTE_PATH` environment variable.

{: .note }
These environment variables are required for FTP backup: `FTP_HOST_NAME`, `FTP_USER`, `FTP_PASSWORD`, `FTP_PORT`, and `REMOTE_PATH`.

```yml
services:
  mysql-bkup:
    # In production, it is advised to lock your image tag to a proper
    # release version instead of using `latest`.
    # Check https://github.com/jkaninda/mysql-bkup/releases
    # for a list of available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --storage ftp -d database
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## FTP config
      - FTP_HOST_NAME="hostname"
      - FTP_PORT=21
      - FTP_USER=user
      - FTP_PASSWORD=password
      - REMOTE_PATH=/home/jkaninda/backups

    # mysql-bkup container must be connected to the same network with your database
    networks:
      - web
networks:
  web:
```
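For reference, a minimal sketch of the upload step behind `--storage ftp`, using the `github.com/jlaffaye/ftp` dependency from the project's `go.mod`. The `uploadBackup` helper is illustrative only, not the image's actual `CopyToFTP` implementation:

```go
package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"time"

	"github.com/jlaffaye/ftp"
)

// uploadBackup connects with the documented FTP_* environment variables and
// streams a local backup archive to the configured remote path.
func uploadBackup(localFile, remotePath string) error {
	addr := fmt.Sprintf("%s:%s", os.Getenv("FTP_HOST_NAME"), os.Getenv("FTP_PORT"))
	conn, err := ftp.Dial(addr, ftp.DialWithTimeout(5*time.Second))
	if err != nil {
		return err
	}
	defer conn.Quit()

	if err := conn.Login(os.Getenv("FTP_USER"), os.Getenv("FTP_PASSWORD")); err != nil {
		return err
	}
	f, err := os.Open(localFile)
	if err != nil {
		return err
	}
	defer f.Close()

	// Stor uploads the file under REMOTE_PATH, keeping the local file name.
	return conn.Stor(path.Join(remotePath, filepath.Base(localFile)), f)
}
```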
@@ -48,7 +48,7 @@ networks:
### Recurring backups to S3

As explained above, you just need to add the AWS environment variables and specify the storage type `--storage s3`.
In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding the `--period "0 1 * * *"` flag as described below.
In case you need to use recurring backups, you can use the `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.

```yml
services:
@@ -59,7 +59,7 @@ services:
    # for a list of available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
    command: backup --storage s3 -d my-database --cron-expression "0 1 * * *"
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
@@ -72,6 +72,7 @@ services:
      - AWS_REGION="us-west-2"
      - AWS_ACCESS_KEY=xxxx
      - AWS_SECRET_KEY=xxxxx
      # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
      ## In case you are using an S3 alternative such as Minio and your Minio instance is not secured, change it to true
      - AWS_DISABLE_SSL="false"
      # mysql-bkup container must be connected to the same network with your database
@@ -104,7 +105,7 @@ spec:
      command:
      - /bin/sh
      - -c
      - mysql-bkup backup -s s3 --path /custom_path
      - backup -s s3 --path /custom_path
      env:
      - name: DB_PORT
        value: "3306"
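Internally the image uploads through its `UploadFileToS3` helper, which this page does not show. A rough sketch of such an upload with the AWS SDK for Go v1 (an assumption based on the `go-jmespath` dependency in `go.sum`; names and options here are illustrative):

```go
package main

import (
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// uploadToS3 streams a backup archive to the configured bucket and key,
// reading the same AWS_* environment variables documented above.
func uploadToS3(localFile, bucket, key string) error {
	sess, err := session.NewSession(&aws.Config{
		Region:   aws.String(os.Getenv("AWS_REGION")),
		Endpoint: aws.String(os.Getenv("AWS_S3_ENDPOINT")),
		Credentials: credentials.NewStaticCredentials(
			os.Getenv("AWS_ACCESS_KEY"), os.Getenv("AWS_SECRET_KEY"), ""),
		// Path-style addressing is usually needed for S3 alternatives such as Minio.
		S3ForcePathStyle: aws.Bool(true),
	})
	if err != nil {
		return err
	}
	f, err := os.Open(localFile)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = s3manager.NewUploader(sess).Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Body:   f,
	})
	return err
}
```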
@@ -8,7 +8,7 @@ nav_order: 3


As described in the S3 backup section, to switch your backup storage to a remote SSH server, you need to add `--storage ssh` or `--storage remote`.
You need to add the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `SSH_REMOTE_PATH` environment variable.
You need to add the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `REMOTE_PATH` environment variable.

{: .note }
These environment variables are required for SSH backup: `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT`, or `SSH_PASSWORD` if you don't use a private key to access your server.
@@ -36,7 +36,7 @@ services:
      - SSH_HOST_NAME="hostname"
      - SSH_PORT=22
      - SSH_USER=user
      - SSH_REMOTE_PATH=/home/jkaninda/backups
      - REMOTE_PATH=/home/jkaninda/backups
      - SSH_IDENTIFY_FILE=/tmp/id_ed25519
      ## We advise you to use a private key instead of a password
      #- SSH_PASSWORD=password
@@ -52,7 +52,7 @@ networks:
### Recurring backups to SSH remote server

As explained above, you just need to add the required environment variables and specify the storage type `--storage ssh`.
You can use `--mode scheduled` and specify the periodical backup time by adding the `--period "0 1 * * *"` flag as described below.
You can use `--cron-expression "* * * * *"` or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.

```yml
services:
@@ -63,7 +63,7 @@ services:
    # for a list of available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
    command: backup -d database --storage ssh --cron-expression "0 1 * * *"
    volumes:
      - ./id_ed25519:/tmp/id_ed25519
    environment:
@@ -76,8 +76,9 @@ services:
      - SSH_HOST_NAME="hostname"
      - SSH_PORT=22
      - SSH_USER=user
      - SSH_REMOTE_PATH=/home/jkaninda/backups
      - REMOTE_PATH=/home/jkaninda/backups
      - SSH_IDENTIFY_FILE=/tmp/id_ed25519
      # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
      ## We advise you to use a private key instead of a password
      #- SSH_PASSWORD=password
      # mysql-bkup container must be connected to the same network with your database
@@ -111,7 +112,7 @@ spec:
      command:
      - /bin/sh
      - -c
      - mysql-bkup backup -s ssh
      - backup -s ssh
      env:
      - name: DB_PORT
        value: "3306"
@@ -130,7 +131,7 @@ spec:
        value: "22"
      - name: SSH_USER
        value: "xxx"
      - name: SSH_REMOTE_PATH
      - name: REMOTE_PATH
        value: "/home/jkaninda/backups"
      - name: AWS_ACCESS_KEY
        value: "xxxx"
@@ -54,7 +54,7 @@ networks:
  jkaninda/mysql-bkup backup -d database_name
```

In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding the `--period "0 1 * * *"` flag as described below.
In case you need to use recurring backups, you can use the `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.

```yml
services:
@@ -65,7 +65,7 @@ services:
    # for a list of available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup -d database --mode scheduled --period "0 1 * * *"
    command: backup -d database --cron-expression "0 1 * * *"
    volumes:
      - ./backup:/backup
    environment:
@@ -74,6 +74,7 @@ services:
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      - BACKUP_CRON_EXPRESSION=0 1 * * *
      # mysql-bkup container must be connected to the same network with your database
    networks:
      - web
@@ -2,7 +2,7 @@
title: Deploy on Kubernetes
layout: default
parent: How Tos
nav_order: 8
nav_order: 9
---

## Deploy on Kubernetes
@@ -30,10 +30,7 @@ spec:
      command:
      - /bin/sh
      - -c
      - bkup
      - backup
      - --storage
      - s3
      - backup --storage s3
      resources:
        limits:
          memory: "128Mi"
@@ -2,15 +2,18 @@
title: Encrypt backups using GPG
layout: default
parent: How Tos
nav_order: 7
nav_order: 8
---
# Encrypt backup

The image supports encrypting backups using GPG out of the box. If a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.sql.gpg` file instead of `.sql`, or as `.sql.gz.gpg` instead of `.sql.gz`.

{: .warning }
To restore an encrypted backup, you need to provide the same GPG passphrase used during the backup process.
To restore an encrypted backup, you need to provide the same GPG passphrase or key used during the backup process.

- GPG home directory `/config/gnupg`
- Cipher algorithm `aes256`

To decrypt manually, you need to install `gnupg`

### Decrypt backup
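For illustration, the same manual decryption can be driven from Go by shelling out to `gpg`, mirroring what the image's `Decrypt` helper does. The exact flags below are an assumption, not the repository's verified invocation:

```go
package main

import (
	"os"
	"os/exec"
	"strings"
)

// decryptBackup turns file.sql.gz.gpg back into file.sql.gz using a passphrase.
// Assumes gnupg is installed and the archive was encrypted symmetrically
// (aes256), as the docs above describe.
func decryptBackup(inputFile, passphrase string) error {
	outputFile := strings.TrimSuffix(inputFile, ".gpg")
	cmd := exec.Command("gpg",
		"--batch", "--yes",
		"--pinentry-mode", "loopback",
		"--passphrase", passphrase,
		"--output", outputFile,
		"--decrypt", inputFile,
	)
	cmd.Stderr = os.Stderr // surface gpg's own error messages
	return cmd.Run()
}
```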
@@ -2,7 +2,7 @@
title: Migrate database
layout: default
parent: How Tos
nav_order: 9
nav_order: 10
---

# Migrate database
@@ -10,11 +10,13 @@ nav_order: 9
To migrate the database, you need to add the `migrate` command.

{: .note }
The Mysql backup has another great feature: migrating your database from a source database to another.
The Mysql backup has another great feature: migrating your database from a source database to a target.

Normally, moving a database from a source to a target requires two operations: backing up the source database, then restoring that backup to the target database. Instead of doing both yourself, you can use the integrated `migrate` feature, which performs the whole migration in a single operation, as sketched below.

{: .warning }
The `migrate` operation is irreversible; please back up your target database before running it.
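Conceptually, `migrate` chains the two operations into one run. A minimal sketch under that assumption; the `migrateSketch` and `RestoreDatabase` names are illustrative, not the repository's verified internals:

```go
// migrateSketch outlines the single-operation flow the docs describe:
// dump the source database, then replay that dump into the target.
// source and target mirror the SOURCE_DB_*/TARGET_DB_* environment variables.
func migrateSketch(source, target *dbConfig, backupFileName string) {
	// Step 1: back up the source database (same step as a normal backup).
	BackupDatabase(source, backupFileName, false)
	// Step 2: restore the freshly created dump into the target database.
	RestoreDatabase(target, backupFileName)
}
```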
### Docker compose
```yml
@@ -30,18 +32,18 @@ services:
    volumes:
      - ./backup:/backup
    environment:
      ## Target database
      ## Source database
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Source database
      - SOURCE_DB_HOST=mysql2
      - SOURCE_DB_PORT=3306
      - SOURCE_DB_NAME=sourcedb
      - SOURCE_DB_USERNAME=jonas
      - SOURCE_DB_PASSWORD=password
      ## Target database
      - TARGET_DB_HOST=target-mysql
      - TARGET_DB_PORT=3306
      - TARGET_DB_NAME=dbname
      - TARGET_DB_USERNAME=username
      - TARGET_DB_PASSWORD=password
      # mysql-bkup container must be connected to the same network with your database
    networks:
      - web
@@ -49,30 +51,31 @@ networks:
  web:
```


### Migrate database using Docker CLI


```
## Target database
DB_PORT=3306
## Source database
DB_HOST=mysql
DB_NAME=targetdb
DB_USERNAME=targetuser
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password

## Source database
SOURCE_DB_HOST=mysql2
SOURCE_DB_PORT=3306
SOURCE_DB_NAME=sourcedb
SOURCE_DB_USERNAME=sourceuser
SOURCE_DB_PASSWORD=password
## Target database
TARGET_DB_HOST=target-mysql
TARGET_DB_PORT=3306
TARGET_DB_NAME=dbname
TARGET_DB_USERNAME=username
TARGET_DB_PASSWORD=password
```

```shell
docker run --rm --network your_network_name \
  --env-file your-env \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup migrate -d database_name
  jkaninda/mysql-bkup migrate
```

## Kubernetes
@@ -96,28 +99,33 @@ spec:
      command:
      - /bin/sh
      - -c
      - migrate -d targetdb
      - migrate
      resources:
        limits:
          memory: "128Mi"
          cpu: "500m"
      env:
      ## Target DB
      ## Source Database
      - name: DB_HOST
        value: "postgres-target"
      - name: DB_USERNAME
        value: "mysql"
      - name: DB_PORT
        value: "3306"
      - name: DB_NAME
        value: "dbname"
      - name: DB_USERNAME
        value: "username"
      - name: DB_PASSWORD
        value: "password"
      ## Source DB
      - name: SOURCE_DB_HOST
        value: "postgres-source"
      - name: SOURCE_DB_NAME
        value: "sourcedb"
      - name: SOURCE_DB_USERNAME
        value: "postgres"
      # Please use secret!
      - name: SOURCE_DB_PASSWORD
      ## Target Database
      - name: TARGET_DB_HOST
        value: "target-mysql"
      - name: TARGET_DB_PORT
        value: "3306"
      - name: TARGET_DB_NAME
        value: "dbname"
      - name: TARGET_DB_USERNAME
        value: "username"
      - name: TARGET_DB_PASSWORD
        value: "password"
      restartPolicy: Never
```
@@ -2,7 +2,7 @@
title: Restore database from AWS S3
layout: default
parent: How Tos
nav_order: 5
nav_order: 6
---

# Restore database from S3 storage
@@ -65,7 +65,7 @@ spec:
      command:
      - /bin/sh
      - -c
      - bkup restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
      - restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
      env:
      - name: DB_PORT
        value: "3306"
@@ -2,7 +2,7 @@
title: Restore database from SSH
layout: default
parent: How Tos
nav_order: 6
nav_order: 7
---
# Restore database from SSH remote server

@@ -63,7 +63,7 @@ spec:
      command:
      - /bin/sh
      - -c
      - bkup restore -s ssh -f store_20231219_022941.sql.gz
      - restore -s ssh -f store_20231219_022941.sql.gz
      env:
      - name: DB_PORT
        value: "3306"
@@ -2,7 +2,7 @@
title: Restore database
layout: default
parent: How Tos
nav_order: 4
nav_order: 5
---

# Restore database
@@ -6,7 +6,7 @@ nav_order: 1

# About mysql-bkup
{:.no_toc}
MySQL Backup is a Docker container image that can be used to backup and restore a MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH remote storage.
MySQL Backup is a Docker container image that can be used to backup, restore and migrate a MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH remote storage.
It also supports __encrypting__ your backups using GPG.

We are open to receiving stars, PRs, and issues!
@@ -19,7 +19,8 @@ We are open to receiving stars, PRs, and issues!
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of a MySQL database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.

It also supports __encrypting__ your backups using GPG.
It also supports database __encryption__ using GPG.


{: .note }
Code and documentation for `v1` version on [this branch][v1-branch].
@@ -78,6 +79,18 @@ services:
networks:
  web:
```
### Docker recurring backup

```shell
docker run --rm --network network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=hostname" \
  -e "DB_USERNAME=user" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
```
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules

## Kubernetes

```yaml
@@ -143,7 +156,7 @@ While it may work against different implementations, there are no guarantees abo

We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:

- The original image is based on `ubuntu` and requires additional tools, making it heavy.
- The original image is based on `alpine` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
@@ -25,18 +25,17 @@ Backup, restore and migrate targets, schedule and retention are configured using
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
| --dbname | -d | Database name |
| --port | -p | Database port (default: 3306) |
| --mode | -m | Execution mode. default or scheduled (default: default) |
| --disable-compression | | Disable database backup compression |
| --prune | | Delete old backup, default disabled |
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |

## Environment variables

| Name | Requirement | Description |
|------------------------|---------------------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
@@ -49,25 +48,31 @@ Backup, restore and migrate targets, schedule and retention are configured using
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| SOURCE_DB_HOST | Optional, required for database migration | Source database host |
| SOURCE_DB_PORT | Optional, required for database migration | Source database port |
| SOURCE_DB_NAME | Optional, required for database migration | Source database name |
| SOURCE_DB_USERNAME | Optional, required for database migration | Source database username |
| SOURCE_DB_PASSWORD | Optional, required for database migration | Source database password |
| REMOTE_PATH | Optional, required for SSH or FTP storage | remote path (/home/toto/backup) |
| FTP_HOST_NAME | Optional, required for FTP storage | FTP host name |
| FTP_PORT | Optional, required for FTP storage | FTP server port number |
| FTP_USER | Optional, required for FTP storage | FTP user |
| FTP_PASSWORD | Optional, required for FTP storage | FTP user password |
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token (`BOT-ID:BOT-TOKEN`) |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |

---
## Run in Scheduled mode

This image can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes has CronJob resources.
For Docker, you need to run it in scheduled mode by adding the `--mode scheduled` flag and specifying the periodical backup time with the `--period "0 1 * * *"` flag.
For Docker, you need to run it in scheduled mode by adding the `--cron-expression "* * * * *"` flag or by defining the `BACKUP_CRON_EXPRESSION=0 1 * * *` environment variable.

## Syntax of crontab (field description)

@@ -110,3 +115,21 @@ Easy to remember format:
```conf
0 1 * * *
```
## Predefined schedules
You may use one of several pre-defined schedules in place of a cron expression.

| Entry | Description | Equivalent To |
|------------------------|--------------------------------------------|---------------|
| @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * |
| @monthly | Run once a month, midnight, first of month | 0 0 1 * * |
| @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 |
| @daily (or @midnight) | Run once a day, midnight | 0 0 * * * |
| @hourly | Run once an hour, beginning of hour | 0 * * * * |

### Intervals
You may also schedule a backup task at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this:

`@every <duration>`

where "duration" is a string accepted by Go's `time.ParseDuration`.

For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that.
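This schedule syntax is the one implemented by `github.com/robfig/cron/v3`, which the project depends on (see `go.mod`). A small, self-contained illustration of a predefined schedule and an `@every` interval:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()

	// Predefined schedule: run once a day at midnight (same as "0 0 * * *").
	_, _ = c.AddFunc("@daily", func() { fmt.Println("daily backup") })

	// Interval schedule: run every 1h30m10s after the job is added.
	_, _ = c.AddFunc("@every 1h30m10s", func() { fmt.Println("interval backup") })

	c.Start()
	defer c.Stop()

	// Keep the process alive so the scheduler can fire.
	time.Sleep(3 * time.Hour)
}
```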
@@ -5,7 +5,7 @@ services:
    # release version instead of using `latest`.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --dbname database_name --mode scheduled --period "0 1 * * *"
    command: backup --dbname database_name
    volumes:
      - ./backup:/backup
    environment:
@@ -13,3 +13,5 @@ services:
      - DB_HOST=mysql
      - DB_USERNAME=userName
      - DB_PASSWORD=${DB_PASSWORD}
      # See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
      - BACKUP_CRON_EXPRESSION=@daily #@every 5m | @weekly | @monthly | 0 1 * * *
|
||||
# for a list of available releases.
|
||||
image: jkaninda/mysql-bkup
|
||||
container_name: mysql-bkup
|
||||
command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
|
||||
command: backup --storage s3 -d my-database
|
||||
environment:
|
||||
- DB_PORT=3306
|
||||
- DB_HOST=mysql
|
||||
@@ -21,6 +21,8 @@ services:
|
||||
- AWS_SECRET_KEY=xxxxx
|
||||
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
||||
- AWS_DISABLE_SSL="false"
|
||||
# See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
|
||||
- BACKUP_CRON_EXPRESSION=@daily #@every 5m|@weekly | @monthly |0 1 * * *
|
||||
# mysql-bkup container must be connected to the same network with your database
|
||||
networks:
|
||||
- web
|
||||
|
||||
@@ -15,10 +15,7 @@ spec:
      command:
      - /bin/sh
      - -c
      - bkup
      - backup
      - --storage
      - s3
      - backup --storage s3
      resources:
        limits:
          memory: "128Mi"
4  go.mod
@@ -13,8 +13,12 @@ require (
)

require (
    github.com/hashicorp/errwrap v1.1.0 // indirect
    github.com/hashicorp/go-multierror v1.1.1 // indirect
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/jlaffaye/ftp v0.2.0 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/robfig/cron/v3 v3.0.1 // indirect
    golang.org/x/sys v0.22.0 // indirect
    gopkg.in/fsnotify.v1 v1.4.7 // indirect
    gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
9  go.sum
@@ -7,14 +7,23 @@ github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9Hu
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
13  main.go
@@ -1,12 +1,11 @@
// Package main /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package main

//main
/*****
* MySQL Backup & Restore
* @author Jonas Kaninda
* @license MIT License <https://opensource.org/licenses/MIT>
* @link https://github.com/jkaninda/mysql-bkup
**/
import "github.com/jkaninda/mysql-bkup/cmd"

func main() {
246  pkg/backup.go
@@ -1,13 +1,15 @@
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda
*/
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg

import (
    "fmt"
    "github.com/hpcloud/tail"
    "github.com/jkaninda/mysql-bkup/utils"
    "github.com/robfig/cron/v3"
    "github.com/spf13/cobra"
    "log"
    "os"
@@ -17,101 +19,70 @@ import (
)
func StartBackup(cmd *cobra.Command) {
    _, _ = cmd.Flags().GetString("operation")
    //Set env
    utils.SetEnv("STORAGE_PATH", storagePath)
    utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
    intro()
    dbConf = initDbConfig(cmd)
    //Initialize backup configs
    config := initBackupConfig(cmd)

    //Get flag value and set env
    remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
    storage = utils.GetEnv(cmd, "storage", "STORAGE")
    file = utils.GetEnv(cmd, "file", "FILE_NAME")
    backupRetention, _ := cmd.Flags().GetInt("keep-last")
    prune, _ := cmd.Flags().GetBool("prune")
    disableCompression, _ = cmd.Flags().GetBool("disable-compression")
    executionMode, _ = cmd.Flags().GetString("mode")
    gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
    _ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")

    dbConf = getDbConfig(cmd)

    //
    if gpqPassphrase != "" {
        encryption = true
    }

    //Generate file name
    backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
    if disableCompression {
        backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
    }

    if executionMode == "default" {
        switch storage {
        case "s3":
            s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
        case "local":
            localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
        case "ssh", "remote":
            sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
        case "ftp":
            utils.Fatal("Not supported storage type: %s", storage)
        default:
            localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
        }

    } else if executionMode == "scheduled" {
        scheduledMode(dbConf, storage)
    if config.cronExpression == "" {
        BackupTask(dbConf, config)
    } else {
        utils.Fatal("Error, unknown execution mode!")
        if utils.IsValidCronExpression(config.cronExpression) {
            scheduledMode(dbConf, config)
        } else {
            utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
        }
    }
}

// Run in scheduled mode
func scheduledMode(db *dbConfig, storage string) {

    fmt.Println()
    fmt.Println("**********************************")
    fmt.Println("     Starting MySQL Bkup...       ")
    fmt.Println("***********************************")
func scheduledMode(db *dbConfig, config *BackupConfig) {
    utils.Info("Running in Scheduled mode")
    utils.Info("Execution period %s", os.Getenv("BACKUP_CRON_EXPRESSION"))
    utils.Info("Storage type %s ", storage)
    utils.Info("Backup cron expression: %s", config.cronExpression)
    utils.Info("Storage type %s ", config.storage)

    //Test database connexion
    testDatabaseConnection(db)

    //Test backup
    utils.Info("Testing backup configurations...")
    BackupTask(db, config)
    utils.Info("Testing backup configurations...done")
    utils.Info("Creating backup job...")
    CreateCrontabScript(disableCompression, storage)
    // Create a new cron instance
    c := cron.New()

    supervisorConfig := "/etc/supervisor/supervisord.conf"

    // Start Supervisor
    cmd := exec.Command("supervisord", "-c", supervisorConfig)
    err := cmd.Start()
    _, err := c.AddFunc(config.cronExpression, func() {
        BackupTask(db, config)
    })
    if err != nil {
        utils.Fatal(fmt.Sprintf("Failed to start supervisord: %v", err))
        return
    }
    // Start the cron scheduler
    c.Start()
    utils.Info("Creating backup job...done")
    utils.Info("Backup job started")
    defer func() {
        if err := cmd.Process.Kill(); err != nil {
            utils.Info("Failed to kill supervisord process: %v", err)
        } else {
            utils.Info("Supervisor stopped.")
    defer c.Stop()
    select {}
}
func BackupTask(db *dbConfig, config *BackupConfig) {
    //Generate backup file name
    backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
    if config.disableCompression {
        backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
    }
    }()
    if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
        utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
    }
    t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
    if err != nil {
        utils.Fatal("Failed to tail file: %v", err)
    }

    // Read and print new lines from the log file
    for line := range t.Lines {
        fmt.Println(line.Text)
    config.backupFileName = backupFileName
    switch config.storage {
    case "local":
        localBackup(db, config)
    case "s3":
        s3Backup(db, config)
    case "ssh", "remote":
        sshBackup(db, config)
    case "ftp":
        ftpBackup(db, config)
    default:
        localBackup(db, config)
    }
}
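The new scheduling path validates the expression with `utils.IsValidCronExpression`, whose body this diff does not show. A plausible sketch using the same robfig/cron parser already imported above (an assumption, not the repository's verified code):

```go
package utils

import "github.com/robfig/cron/v3"

// IsValidCronExpression reports whether expr parses as a standard 5-field
// cron spec or a descriptor such as @daily or @every 1h30m.
func IsValidCronExpression(expr string) bool {
	_, err := cron.ParseStandard(expr)
	return err == nil
}
```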
|
||||
}
|
||||
|
||||
utils.Info("Starting database backup...")
|
||||
err = os.Setenv("MYSQL_PWD", db.dbPassword)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
testDatabaseConnection(db)
|
||||
|
||||
// Backup Database database
|
||||
@@ -137,7 +112,6 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
|
||||
"-h", db.dbHost,
|
||||
"-P", db.dbPort,
|
||||
"-u", db.dbUserName,
|
||||
"--password="+db.dbPassword,
|
||||
db.dbName,
|
||||
)
|
||||
output, err := cmd.Output()
|
||||
@@ -160,7 +134,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
|
||||
|
||||
} else {
|
||||
// Execute mysqldump
|
||||
cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName)
|
||||
cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName)
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -183,72 +157,78 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
|
||||
}
|
||||
|
||||
}
|
||||
func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func localBackup(db *dbConfig, config *BackupConfig) {
    utils.Info("Backup database to local storage")
    BackupDatabase(db, backupFileName, disableCompression)
    finalFileName := backupFileName
    if encrypt {
        encryptBackup(backupFileName)
        finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config.backupFileName, config.passphrase)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
    }
    utils.Info("Backup name is %s", finalFileName)
    moveToBackup(finalFileName, storagePath)
    //Send notification
    utils.NotifySuccess(finalFileName)
    //Delete old backup
    if prune {
        deleteOldBackup(backupRetention)
    if config.prune {
        deleteOldBackup(config.backupRetention)
    }
    //Delete temp
    deleteTemp()
}

func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
func s3Backup(db *dbConfig, config *BackupConfig) {
    bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
    s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
    utils.Info("Backup database to s3 storage")
    //Backup database
    BackupDatabase(db, backupFileName, disableCompression)
    finalFileName := backupFileName
    if encrypt {
        encryptBackup(backupFileName)
        finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config.backupFileName, config.passphrase)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    utils.Info("Uploading backup archive to remote storage S3 ... ")
    utils.Info("Backup name is %s", finalFileName)
    err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
    err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
    if err != nil {
        utils.Fatal("Error uploading file to S3: %s ", err)

    }

    //Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
    err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
    if err != nil {
        fmt.Println("Error deleting file: ", err)

    }
    // Delete old backup
    if prune {
        err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
    if config.prune {
        err := DeleteOldBackup(bucket, s3Path, config.backupRetention)
        if err != nil {
            utils.Fatal("Error deleting old backup from S3: %s ", err)
        }
    }
    utils.Done("Uploading backup archive to remote storage S3 ... done ")
    //Send notification
    utils.NotifySuccess(finalFileName)
    //Delete temp
    deleteTemp()
}
func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {

// sshBackup backup database to SSH remote server
func sshBackup(db *dbConfig, config *BackupConfig) {
    utils.Info("Backup database to Remote server")
    //Backup database
    BackupDatabase(db, backupFileName, disableCompression)
    finalFileName := backupFileName
    if encrypt {
        encryptBackup(backupFileName)
        finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config.backupFileName, config.passphrase)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    utils.Info("Uploading backup archive to remote storage ... ")
    utils.Info("Backup name is %s", finalFileName)
    err := CopyToRemote(finalFileName, remotePath)
    err := CopyToRemote(finalFileName, config.remotePath)
    if err != nil {
        utils.Fatal("Error uploading file to the remote server: %s ", err)

@@ -260,19 +240,57 @@ func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompressi
        fmt.Println("Error deleting file: ", err)

    }
    if prune {
    if config.prune {
        //TODO: Delete old backup from remote server
        utils.Info("Deleting old backup from a remote server is not implemented yet")

    }

    utils.Done("Uploading backup archive to remote storage ... done ")
    //Send notification
    utils.NotifySuccess(finalFileName)
    //Delete temp
    deleteTemp()
}
func encryptBackup(backupFileName string) {
    gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
    err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
func ftpBackup(db *dbConfig, config *BackupConfig) {
    utils.Info("Backup database to the remote FTP server")
    //Backup database
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config.backupFileName, config.passphrase)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    utils.Info("Uploading backup archive to the remote FTP server ... ")
    utils.Info("Backup name is %s", finalFileName)
    err := CopyToFTP(finalFileName, config.remotePath)
    if err != nil {
        utils.Fatal("Error uploading file to the remote FTP server: %s ", err)

    }

    //Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        utils.Error("Error deleting file: %v", err)

    }
    if config.prune {
        //TODO: Delete old backup from remote server
        utils.Info("Deleting old backup from a remote server is not implemented yet")

    }

    utils.Done("Uploading backup archive to the remote FTP server ... done ")
    //Send notification
    utils.NotifySuccess(finalFileName)
    //Delete temp
    deleteTemp()
}

// encryptBackup encrypt backup
func encryptBackup(backupFileName, passphrase string) {
    err := Encrypt(filepath.Join(tmpPath, backupFileName), passphrase)
    if err != nil {
        utils.Fatal("Error during encrypting backup %s", err)
    }
137
pkg/config.go
137
pkg/config.go
@@ -1,3 +1,9 @@
|
||||
// Package pkg /
|
||||
/*****
|
||||
@author Jonas Kaninda
|
||||
@license MIT License <https://opensource.org/licenses/MIT>
|
||||
@Copyright © 2024 Jonas Kaninda
|
||||
**/
|
||||
package pkg
|
||||
|
||||
import (
|
||||
@@ -16,15 +22,42 @@ type dbConfig struct {
|
||||
dbUserName string
|
||||
dbPassword string
|
||||
}
|
||||
type dbSourceConfig struct {
|
||||
sourceDbHost string
|
||||
sourceDbPort string
|
||||
sourceDbUserName string
|
||||
sourceDbPassword string
|
||||
sourceDbName string
|
||||
type targetDbConfig struct {
|
||||
targetDbHost string
|
||||
targetDbPort string
|
||||
targetDbUserName string
|
||||
targetDbPassword string
|
||||
targetDbName string
|
||||
}
|
||||
|
||||
func getDbConfig(cmd *cobra.Command) *dbConfig {
|
||||
type BackupConfig struct {
|
||||
backupFileName string
|
||||
backupRetention int
|
||||
disableCompression bool
|
||||
prune bool
|
||||
encryption bool
|
||||
remotePath string
|
||||
passphrase string
|
||||
storage string
|
||||
cronExpression string
|
||||
}
|
||||
type RestoreConfig struct {
|
||||
s3Path string
|
||||
remotePath string
|
||||
storage string
|
||||
file string
|
||||
bucket string
|
||||
gpqPassphrase string
|
||||
}
|
||||
type FTPConfig struct {
|
||||
host string
|
||||
user string
|
||||
password string
|
||||
port string
|
||||
remotePath string
|
||||
}
|
||||
|
||||
func initDbConfig(cmd *cobra.Command) *dbConfig {
|
||||
//Set env
|
||||
utils.GetEnv(cmd, "dbname", "DB_NAME")
|
||||
dConf := dbConfig{}
|
||||
@@ -41,18 +74,88 @@ func getDbConfig(cmd *cobra.Command) *dbConfig {
 	}
 	return &dConf
 }
-func getSourceDbConfig() *dbSourceConfig {
-	sdbConfig := dbSourceConfig{}
-	sdbConfig.sourceDbHost = os.Getenv("SOURCE_DB_HOST")
-	sdbConfig.sourceDbPort = os.Getenv("SOURCE_DB_PORT")
-	sdbConfig.sourceDbName = os.Getenv("SOURCE_DB_NAME")
-	sdbConfig.sourceDbUserName = os.Getenv("SOURCE_DB_USERNAME")
-	sdbConfig.sourceDbPassword = os.Getenv("SOURCE_DB_PASSWORD")
+func initBackupConfig(cmd *cobra.Command) *BackupConfig {
+	utils.SetEnv("STORAGE_PATH", storagePath)
+	utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
+	utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
 
-	err := utils.CheckEnvVars(sdbRVars)
+	//Get flag value and set env
+	storage = utils.GetEnv(cmd, "storage", "STORAGE")
+	backupRetention, _ := cmd.Flags().GetInt("keep-last")
+	prune, _ := cmd.Flags().GetBool("prune")
+	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
+	_, _ = cmd.Flags().GetString("mode")
+	passphrase := os.Getenv("GPG_PASSPHRASE")
+	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
+
+	if passphrase != "" {
+		encryption = true
+	}
+	//Initialize backup configs
+	config := BackupConfig{}
+	config.backupRetention = backupRetention
+	config.disableCompression = disableCompression
+	config.prune = prune
+	config.storage = storage
+	config.encryption = encryption
+	config.remotePath = remotePath
+	config.passphrase = passphrase
+	config.cronExpression = cronExpression
+	return &config
+}
+func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
+	utils.SetEnv("STORAGE_PATH", storagePath)
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
+
+	//Get flag value and set env
+	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	storage = utils.GetEnv(cmd, "storage", "STORAGE")
+	file = utils.GetEnv(cmd, "file", "FILE_NAME")
+	_, _ = cmd.Flags().GetString("mode")
+	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
+	//Initialize restore configs
+	rConfig := RestoreConfig{}
+	rConfig.s3Path = s3Path
+	rConfig.remotePath = remotePath
+	rConfig.storage = storage
+	rConfig.bucket = bucket
+	rConfig.file = file
+	rConfig.storage = storage
+	rConfig.gpqPassphrase = gpqPassphrase
+	return &rConfig
+}
+func initTargetDbConfig() *targetDbConfig {
+	tdbConfig := targetDbConfig{}
+	tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
+	tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
+	tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
+	tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
+	tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
 
+	err := utils.CheckEnvVars(tdbRVars)
 	if err != nil {
-		utils.Error("Please make sure all required environment variables for source database are set")
+		utils.Error("Please make sure all required environment variables for the target database are set")
+		utils.Fatal("Error checking target database environment variables: %s", err)
 	}
+	return &tdbConfig
+}
+func initFtpConfig() *FTPConfig {
+	//Initialize backup configs
+	fConfig := FTPConfig{}
+	fConfig.host = os.Getenv("FTP_HOST_NAME")
+	fConfig.user = os.Getenv("FTP_USER")
+	fConfig.password = os.Getenv("FTP_PASSWORD")
+	fConfig.port = os.Getenv("FTP_PORT")
+	fConfig.remotePath = os.Getenv("REMOTE_PATH")
+	err := utils.CheckEnvVars(ftpVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for FTP are set")
+		utils.Fatal("Error checking environment variables: %s", err)
+	}
-	return &sdbConfig
+	return &fConfig
 }
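A note on the config plumbing above: initBackupConfig flips encryption on implicitly whenever GPG_PASSPHRASE is present, so no extra flag is needed to get encrypted backups. A minimal standalone sketch of that toggle (the variable name is the real one; everything else is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Mirrors initBackupConfig: encryption is enabled implicitly
	// whenever GPG_PASSPHRASE is present in the environment.
	os.Setenv("GPG_PASSPHRASE", "my-secret-passphrase") // illustrative value
	encryption := os.Getenv("GPG_PASSPHRASE") != ""
	fmt.Println("encryption enabled:", encryption) // prints: encryption enabled: true
}
```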
@@ -1,3 +1,9 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg
 
 import (
@@ -10,7 +16,7 @@ import (
 func Decrypt(inputFile string, passphrase string) error {
 	utils.Info("Decrypting backup file: " + inputFile + " ...")
 	//Create gpg home dir
-	err := utils.MakeDir(gpgHome)
+	err := utils.MakeDirAll(gpgHome)
 	if err != nil {
 		return err
 	}
@@ -31,7 +37,7 @@ func Decrypt(inputFile string, passphrase string) error {
 func Encrypt(inputFile string, passphrase string) error {
 	utils.Info("Encrypting backup...")
 	//Create gpg home dir
-	err := utils.MakeDir(gpgHome)
+	err := utils.MakeDirAll(gpgHome)
 	if err != nil {
 		return err
 	}
81
pkg/ftp.go
Normal file
@@ -0,0 +1,81 @@
+package pkg
+
+import (
+	"fmt"
+	"github.com/jlaffaye/ftp"
+	"io"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+// initFtpClient initializes and authenticates an FTP client
+func initFtpClient() (*ftp.ServerConn, error) {
+	ftpConfig := initFtpConfig()
+	ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
+	if err != nil {
+		return nil, fmt.Errorf("failed to connect to FTP: %w", err)
+	}
+
+	err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
+	if err != nil {
+		return nil, fmt.Errorf("failed to log in to FTP: %w", err)
+	}
+
+	return ftpClient, nil
+}
+
+// CopyToFTP uploads a file to the remote FTP server
+func CopyToFTP(fileName, remotePath string) (err error) {
+	ftpConfig := initFtpConfig()
+	ftpClient, err := initFtpClient()
+	if err != nil {
+		return err
+	}
+	defer ftpClient.Quit()
+
+	filePath := filepath.Join(tmpPath, fileName)
+	file, err := os.Open(filePath)
+	if err != nil {
+		return fmt.Errorf("failed to open file %s: %w", fileName, err)
+	}
+	defer file.Close()
+
+	remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
+	err = ftpClient.Stor(remoteFilePath, file)
+	if err != nil {
+		return fmt.Errorf("failed to upload file %s: %w", fileName, err)
+	}
+
+	return nil
+}
+
+// CopyFromFTP downloads a file from the remote FTP server
+func CopyFromFTP(fileName, remotePath string) (err error) {
+	ftpClient, err := initFtpClient()
+	if err != nil {
+		return err
+	}
+	defer ftpClient.Quit()
+
+	remoteFilePath := filepath.Join(remotePath, fileName)
+	r, err := ftpClient.Retr(remoteFilePath)
+	if err != nil {
+		return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
+	}
+	defer r.Close()
+
+	localFilePath := filepath.Join(tmpPath, fileName)
+	outFile, err := os.Create(localFilePath)
+	if err != nil {
+		return fmt.Errorf("failed to create local file %s: %w", fileName, err)
+	}
+	defer outFile.Close()
+
+	_, err = io.Copy(outFile, r)
+	if err != nil {
+		return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
+	}
+
+	return nil
+}
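For context, a minimal sketch of how the new FTP helpers could be driven; initFtpConfig reads exactly the FTP_* variables listed in ftpVars, and the values below are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/jkaninda/mysql-bkup/pkg"
)

func main() {
	// Placeholder credentials; initFtpConfig reads these variables.
	os.Setenv("FTP_HOST_NAME", "ftp.example.com")
	os.Setenv("FTP_PORT", "21")
	os.Setenv("FTP_USER", "backup")
	os.Setenv("FTP_PASSWORD", "secret")
	os.Setenv("REMOTE_PATH", "/backups")

	// CopyToFTP reads the local file from tmpPath (/tmp/backup),
	// so /tmp/backup/db.sql.gz must exist before this call.
	if err := pkg.CopyToFTP("db.sql.gz", "/backups"); err != nil {
		log.Fatal(err)
	}
}
```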
@@ -1,3 +1,9 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg
 
 import (
@@ -101,21 +107,25 @@ func deleteTemp() {
 
 // TestDatabaseConnection tests the database connection
 func testDatabaseConnection(db *dbConfig) {
+
+	err := os.Setenv("MYSQL_PWD", db.dbPassword)
+	if err != nil {
+		return
+	}
 	utils.Info("Connecting to %s database ...", db.dbName)
 
-	cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName, "-e", "quit")
-
+	cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
 	// Capture the output
 	var out bytes.Buffer
 	cmd.Stdout = &out
 	cmd.Stderr = &out
-	err := cmd.Run()
+	err = cmd.Run()
 	if err != nil {
-		utils.Error("Error testing database connection: %v\nOutput: %s", err, out.String())
-		os.Exit(1)
+		utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
+
 	}
 	utils.Info("Successfully connected to %s database", db.dbName)
 
 }
 func intro() {
 	utils.Info("Starting MySQL Backup...")
 	utils.Info("Copyright (c) 2024 Jonas Kaninda ")
 }
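The change above moves the password out of the mysql argument list and into the MYSQL_PWD environment variable, so it no longer shows up in `ps` output. The same pattern in a standalone sketch, with placeholder connection values:

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// MYSQL_PWD is honored by the mysql client; the child process
	// inherits the parent's environment, so no --password flag is needed.
	os.Setenv("MYSQL_PWD", "secret") // placeholder password
	cmd := exec.Command("mysql", "-h", "127.0.0.1", "-P", "3306", "-u", "root", "mydb", "-e", "quit")
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("connection test failed: %v\n%s", err, out)
	}
	log.Println("connection OK")
}
```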
@@ -1,3 +1,9 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg
 
 import (
@@ -8,24 +14,27 @@ import (
 )
 
 func StartMigration(cmd *cobra.Command) {
 	intro()
 	utils.Info("Starting database migration...")
 	//Get DB config
-	dbConf = getDbConfig(cmd)
-	sDbConf = getSourceDbConfig()
+	dbConf = initDbConfig(cmd)
+	targetDbConf = initTargetDbConfig()
 
+	//Defining the target database variables
+	newDbConfig := dbConfig{}
+	newDbConfig.dbHost = targetDbConf.targetDbHost
+	newDbConfig.dbPort = targetDbConf.targetDbPort
+	newDbConfig.dbName = targetDbConf.targetDbName
+	newDbConfig.dbUserName = targetDbConf.targetDbUserName
+	newDbConfig.dbPassword = targetDbConf.targetDbPassword
+
 	//Generate file name
-	backupFileName := fmt.Sprintf("%s_%s.sql", sDbConf.sourceDbName, time.Now().Format("20060102_150405"))
-	//Backup Source Database
-	newDbConfig := dbConfig{}
-	newDbConfig.dbHost = sDbConf.sourceDbHost
-	newDbConfig.dbPort = sDbConf.sourceDbPort
-	newDbConfig.dbName = sDbConf.sourceDbName
-	newDbConfig.dbUserName = sDbConf.sourceDbUserName
-	newDbConfig.dbPassword = sDbConf.sourceDbPassword
-	BackupDatabase(&newDbConfig, backupFileName, true)
+	backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
+	//Backup source Database
+	BackupDatabase(dbConf, backupFileName, true)
 	//Restore source database into target database
-	utils.Info("Restoring [%s] database into [%s] database...", sDbConf.sourceDbName, dbConf.dbName)
-	RestoreDatabase(dbConf, backupFileName)
-	utils.Info("[%s] database has been restored into [%s] database", sDbConf.sourceDbName, dbConf.dbName)
-	utils.Info("Database migration completed!")
+	utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
+	RestoreDatabase(&newDbConfig, backupFileName)
+	utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
+	utils.Info("Database migration completed.")
 }
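As reworked above, migration reads the source database from the DB_* variables (via initDbConfig) and the target from the TARGET_DB_* variables (via initTargetDbConfig, validated against tdbRVars), then chains BackupDatabase and RestoreDatabase. A sketch of the environment wiring this expects; all values are placeholders:

```go
package main

import "os"

func main() {
	// Source database: read through initDbConfig (DB_* variables).
	os.Setenv("DB_HOST", "source-host")
	os.Setenv("DB_PORT", "3306")
	os.Setenv("DB_NAME", "appdb")
	os.Setenv("DB_USERNAME", "root")
	os.Setenv("DB_PASSWORD", "secret")

	// Target database: read through initTargetDbConfig (TARGET_DB_* variables).
	os.Setenv("TARGET_DB_HOST", "target-host")
	os.Setenv("TARGET_DB_PORT", "3306")
	os.Setenv("TARGET_DB_NAME", "appdb")
	os.Setenv("TARGET_DB_USERNAME", "root")
	os.Setenv("TARGET_DB_PASSWORD", "secret")

	// With both sets in place, the migration entry point (StartMigration)
	// dumps the source and restores it into the target in one pass.
}
```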
@@ -1,3 +1,9 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg
 
 import (
@@ -10,40 +16,31 @@ import (
 )
 
 func StartRestore(cmd *cobra.Command) {
 	intro()
+	dbConf = initDbConfig(cmd)
+	restoreConf := initRestoreConfig(cmd)
 
-	//Set env
-	utils.SetEnv("STORAGE_PATH", storagePath)
-
-	//Get flag value and set env
-	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
-	storage = utils.GetEnv(cmd, "storage", "STORAGE")
-	file = utils.GetEnv(cmd, "file", "FILE_NAME")
-	executionMode, _ = cmd.Flags().GetString("mode")
-	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
-	dbConf = getDbConfig(cmd)
 
-	switch storage {
+	switch restoreConf.storage {
 	case "s3":
-		restoreFromS3(dbConf, file, bucket, s3Path)
+		restoreFromS3(dbConf, restoreConf.file, restoreConf.bucket, restoreConf.s3Path)
 	case "local":
 		utils.Info("Restore database from local")
-		copyToTmp(storagePath, file)
-		RestoreDatabase(dbConf, file)
+		copyToTmp(storagePath, restoreConf.file)
+		RestoreDatabase(dbConf, restoreConf.file)
 	case "ssh":
-		restoreFromRemote(dbConf, file, remotePath)
+		restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath)
 	case "ftp":
-		utils.Fatal("Restore from FTP is not yet supported")
+		restoreFromFTP(dbConf, restoreConf.file, restoreConf.remotePath)
 	default:
 		utils.Info("Restore database from local")
-		copyToTmp(storagePath, file)
-		RestoreDatabase(dbConf, file)
+		copyToTmp(storagePath, restoreConf.file)
+		RestoreDatabase(dbConf, restoreConf.file)
 	}
 }
 
 func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
 	utils.Info("Restore database from s3")
-	err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
+	err := DownloadFile(tmpPath, file, bucket, s3Path)
 	if err != nil {
 		utils.Fatal("Error download file from s3 %s %v", file, err)
 	}
@@ -57,6 +54,14 @@ func restoreFromRemote(db *dbConfig, file, remotePath string) {
 	}
 	RestoreDatabase(db, file)
 }
+func restoreFromFTP(db *dbConfig, file, remotePath string) {
+	utils.Info("Restore database from FTP server")
+	err := CopyFromFTP(file, remotePath)
+	if err != nil {
+		utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(remotePath, file), err)
+	}
+	RestoreDatabase(db, file)
+}
 
 // RestoreDatabase restore database
 func RestoreDatabase(db *dbConfig, file string) {
@@ -89,14 +94,18 @@ func RestoreDatabase(db *dbConfig, file string) {
 	}
 
 	if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
+		err = os.Setenv("MYSQL_PWD", db.dbPassword)
+		if err != nil {
+			return
+		}
 		testDatabaseConnection(db)
 		utils.Info("Restoring database...")
 
 		extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
 		// Restore from compressed file / .sql.gz
 		if extension == ".gz" {
-			str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
-			_, err := exec.Command("bash", "-c", str).Output()
+			str := "zcat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
+			_, err := exec.Command("sh", "-c", str).Output()
 			if err != nil {
 				utils.Fatal("Error, in restoring the database %v", err)
 			}
@@ -107,20 +116,20 @@ func RestoreDatabase(db *dbConfig, file string) {
 
 	} else if extension == ".sql" {
 		//Restore from sql file
-		str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
-		_, err := exec.Command("bash", "-c", str).Output()
+		str := "cat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
+		_, err := exec.Command("sh", "-c", str).Output()
 		if err != nil {
-			utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
+			utils.Fatal("Error in restoring the database %v", err)
 		}
 		utils.Info("Restoring database... done")
 		utils.Done("Database has been restored")
 		//Delete temp
 		deleteTemp()
 	} else {
-		utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
+		utils.Fatal("Unknown file extension %s", extension)
 	}
 
 	} else {
-		utils.Fatal(fmt.Sprintf("File not found in %s", fmt.Sprintf("%s/%s", tmpPath, file)))
+		utils.Fatal("File not found in %s", filepath.Join(tmpPath, file))
 	}
 }
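The restore path above shells out to `sh -c` with a `zcat ... | mysql ...` (or `cat ... | mysql ...`) pipeline, relying on the MYSQL_PWD variable set earlier rather than a --password flag. For comparison, the same effect can be had natively by streaming a gzip reader into the mysql client's stdin; this is a sketch under placeholder connection values, not the project's actual implementation:

```go
package main

import (
	"compress/gzip"
	"log"
	"os"
	"os/exec"
)

func main() {
	f, err := os.Open("/tmp/backup/db.sql.gz") // placeholder dump path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	gz, err := gzip.NewReader(f) // decompress in-process instead of zcat
	if err != nil {
		log.Fatal(err)
	}
	defer gz.Close()

	os.Setenv("MYSQL_PWD", "secret") // placeholder, as in RestoreDatabase
	cmd := exec.Command("mysql", "-h", "127.0.0.1", "-P", "3306", "-u", "root", "mydb")
	cmd.Stdin = gz // stream the decompressed SQL straight into the client
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("restore failed: %v\n%s", err, out)
	}
}
```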
@@ -1,4 +1,10 @@
-package utils
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
+package pkg
 
 import (
 	"bytes"
@@ -8,6 +14,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+	"github.com/jkaninda/mysql-bkup/utils"
 	"log"
 	"net/http"
 	"os"
@@ -29,20 +36,20 @@ func CreateSession() (*session.Session, error) {
 		"AWS_REGION",
 	}
 
-	endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
-	accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
-	secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
-	_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	endPoint := utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
+	accessKey := utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
+	secretKey := utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
+	_ = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
 
 	region := os.Getenv("AWS_REGION")
 	awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
 	if err != nil {
-		Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
+		utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
 	}
 
-	err = CheckEnvVars(awsVars)
+	err = utils.CheckEnvVars(awsVars)
 	if err != nil {
-		Fatal("Error checking environment variables\n: %s", err)
+		utils.Fatal("Error checking environment variables\n: %s", err)
 	}
 	// S3 Config
 	s3Config := &aws.Config{
@@ -102,7 +109,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 	if err != nil {
 		return err
 	}
-	Info("Download backup from S3 storage...")
+	utils.Info("Download backup from S3 storage...")
 	file, err := os.Create(filepath.Join(destinationPath, key))
 	if err != nil {
 		fmt.Println("Failed to create file", err)
@@ -119,10 +126,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 		Key: aws.String(objectKey),
 	})
 	if err != nil {
-		fmt.Println("Failed to download file", err)
+		utils.Error("Failed to download file %s", key)
 		return err
 	}
-	Info(fmt.Sprintf("Backup downloaded: ", file.Name(), " bytes size ", numBytes))
+	utils.Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
 
 	return nil
 }
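For orientation, DownloadFile (now in pkg) joins the prefix and key into the object key and writes the object under destinationPath through an s3manager downloader. A hedged usage sketch; the environment variables are the ones CreateSession reads, and the bucket, endpoint, and credentials are placeholders:

```go
package main

import (
	"log"
	"os"

	"github.com/jkaninda/mysql-bkup/pkg"
)

func main() {
	// Read by CreateSession; values are placeholders.
	os.Setenv("AWS_S3_ENDPOINT", "https://s3.amazonaws.com")
	os.Setenv("AWS_ACCESS_KEY", "placeholder-access-key")
	os.Setenv("AWS_SECRET_KEY", "placeholder-secret-key")
	os.Setenv("AWS_REGION", "us-east-1")
	os.Setenv("AWS_DISABLE_SSL", "false")
	os.Setenv("AWS_S3_BUCKET_NAME", "my-backups")

	// Downloads s3://my-backups/custom-path/db.sql.gz into /tmp/backup.
	if err := pkg.DownloadFile("/tmp/backup", "db.sql.gz", "my-backups", "custom-path"); err != nil {
		log.Fatal(err)
	}
}
```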
@@ -1,3 +1,9 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg
 
 import (
@@ -1,69 +0,0 @@
-package pkg
-
-// Package pkg /*
-/*
-Copyright © 2024 Jonas Kaninda
-*/
-import (
-	"fmt"
-	"github.com/jkaninda/mysql-bkup/utils"
-	"os"
-	"os/exec"
-)
-
-func CreateCrontabScript(disableCompression bool, storage string) {
-	//task := "/usr/local/bin/backup_cron.sh"
-	touchCmd := exec.Command("touch", backupCronFile)
-	if err := touchCmd.Run(); err != nil {
-		utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
-	}
-	var disableC = ""
-	if disableCompression {
-		disableC = "--disable-compression"
-	}
-
-	scriptContent := fmt.Sprintf(`#!/usr/bin/env bash
-set -e
-/usr/local/bin/mysql-bkup backup --dbname %s --port %s --storage %s %v
-`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), storage, disableC)
-
-	if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
-		utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
-	}
-
-	chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
-	if err := chmodCmd.Run(); err != nil {
-		utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
-	}
-
-	lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
-	if err := lnCmd.Run(); err != nil {
-		utils.Fatal("Error creating symbolic link: %v\n", err)
-
-	}
-
-	touchLogCmd := exec.Command("touch", cronLogFile)
-	if err := touchLogCmd.Run(); err != nil {
-		utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
-	}
-
-	cronJob := "/etc/cron.d/backup_cron"
-	touchCronCmd := exec.Command("touch", cronJob)
-	if err := touchCronCmd.Run(); err != nil {
-		utils.Fatal("Error creating file %s: %v\n", cronJob, err)
-	}
-
-	cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
-`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)
-
-	if err := utils.WriteToFile(cronJob, cronContent); err != nil {
-		utils.Fatal("Error writing to %s: %v\n", cronJob, err)
-	}
-	utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
-
-	crontabCmd := exec.Command("crontab", "/etc/cron.d/backup_cron")
-	if err := crontabCmd.Run(); err != nil {
-		utils.Fatal("Error updating crontab: ", err)
-	}
-	utils.Info("Backup job created.")
-}
32
pkg/var.go
@@ -1,16 +1,20 @@
+// Package pkg /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package pkg
 
 const cronLogFile = "/var/log/mysql-bkup.log"
 const tmpPath = "/tmp/backup"
 const backupCronFile = "/usr/local/bin/backup_cron.sh"
 const algorithm = "aes256"
-const gpgHome = "gnupg"
+const gpgHome = "/config/gnupg"
 const gpgExtension = "gpg"
 
 var (
 	storage = "local"
 	file = ""
-	executionMode = "default"
 	storagePath = "/backup"
 	disableCompression = false
 	encryption = false
@@ -23,21 +27,27 @@ var dbHVars = []string{
 	"DB_USERNAME",
 	"DB_NAME",
 }
-var sdbRVars = []string{
-	"SOURCE_DB_HOST",
-	"SOURCE_DB_PORT",
-	"SOURCE_DB_NAME",
-	"SOURCE_DB_USERNAME",
-	"SOURCE_DB_PASSWORD",
+var tdbRVars = []string{
+	"TARGET_DB_HOST",
+	"TARGET_DB_PORT",
+	"TARGET_DB_NAME",
+	"TARGET_DB_USERNAME",
+	"TARGET_DB_PASSWORD",
 }
 
 var dbConf *dbConfig
-var sDbConf *dbSourceConfig
+var targetDbConf *targetDbConfig
 
 // sshHVars Required environment variables for SSH remote server storage
 var sshHVars = []string{
 	"SSH_USER",
-	"SSH_REMOTE_PATH",
+	"REMOTE_PATH",
 	"SSH_HOST_NAME",
 	"SSH_PORT",
 }
+var ftpVars = []string{
+	"FTP_HOST_NAME",
+	"FTP_USER",
+	"FTP_PASSWORD",
+	"FTP_PORT",
+}
@@ -1,10 +1,16 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils
 
 const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
-	"bkup restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
+	"restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
 const BackupExample = "mysql-bkup backup --dbname database --disable-compression\n" +
-	"mysql-bkup backup --dbname database --storage s3 --path /custom-path --disable-compression"
+	"backup --dbname database --storage s3 --path /custom-path --disable-compression"
 
 const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
-	"mysql-bkup backup --dbname database --storage s3 --path /custom-path\n" +
-	"mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz"
+	"backup --dbname database --storage s3 --path /custom-path\n" +
+	"restore --dbname database --file db_20231219_022941.sql.gz"
@@ -1,3 +1,9 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils
 
 import (
@@ -49,8 +55,13 @@ func Fatal(msg string, args ...any) {
 	formattedMessage := fmt.Sprintf(msg, args...)
 	if len(args) == 0 {
 		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
+		NotifyError(msg)
 	} else {
 		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
+		NotifyError(formattedMessage)
+
 	}
 
-	os.Exit(1)
+	os.Kill.Signal()
 }
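One behavioral note on the hunk above: os.Kill.Signal() does not terminate the process (Signal() is the empty marker method that lets os.Kill satisfy the os.Signal interface), so this Fatal now logs and notifies but returns to its caller. For comparison, a conventional log-notify-exit shape; a sketch, not this project's code:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// fatal logs, sends the notification, then terminates explicitly.
func fatal(msg string, args ...any) {
	formatted := fmt.Sprintf(msg, args...)
	fmt.Printf("%s ERROR: %s\n", time.Now().Format("2006/01/02 15:04:05"), formatted)
	// notifyError(formatted) // hypothetical hook, mirroring NotifyError
	os.Exit(1) // actually stops the process, unlike os.Kill.Signal()
}

func main() {
	fatal("backup failed: %v", os.ErrNotExist)
}
```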
@@ -1,17 +1,23 @@
+// Package utils /
+/*****
+@author Jonas Kaninda
+@license MIT License <https://opensource.org/licenses/MIT>
+@Copyright © 2024 Jonas Kaninda
+**/
 package utils
 
-/*****
-* MySQL Backup & Restore
-* @author Jonas Kaninda
-* @license MIT License <https://opensource.org/licenses/MIT>
-* @link https://github.com/jkaninda/mysql-bkup
-**/
 import (
 	"bytes"
+	"encoding/json"
+	"fmt"
+	"github.com/robfig/cron/v3"
 	"github.com/spf13/cobra"
 	"io"
 	"io/fs"
+	"io/ioutil"
+	"net/http"
 	"os"
+	"strconv"
 )
 
 func FileExists(filename string) bool {
@@ -170,3 +176,82 @@ func MakeDirAll(dirPath string) error {
 	}
 	return nil
 }
+func GetIntEnv(envName string) int {
+	val := os.Getenv(envName)
+	if val == "" {
+		return 0
+	}
+	ret, err := strconv.Atoi(val)
+	if err != nil {
+		Error("Error: %v", err)
+	}
+	return ret
+}
+func sendMessage(msg string) {
+
+	Info("Sending notification... ")
+	chatId := os.Getenv("TG_CHAT_ID")
+	body, _ := json.Marshal(map[string]string{
+		"chat_id": chatId,
+		"text": msg,
+	})
+	url := fmt.Sprintf("%s/sendMessage", getTgUrl())
+	// Create an HTTP post request
+	request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
+	if err != nil {
+		panic(err)
+	}
+	request.Header.Add("Content-Type", "application/json")
+	client := &http.Client{}
+	response, err := client.Do(request)
+	if err != nil {
+		panic(err)
+	}
+	code := response.StatusCode
+	if code == 200 {
+		Info("Notification has been sent")
+	} else {
+		body, _ := ioutil.ReadAll(response.Body)
+		Error("Message not sent, error: %s", string(body))
+	}
+
+}
+func NotifySuccess(fileName string) {
+	var vars = []string{
+		"TG_TOKEN",
+		"TG_CHAT_ID",
+	}
+
+	//Telegram notification
+	err := CheckEnvVars(vars)
+	if err == nil {
+		message := "[✅ MySQL Backup ]\n" +
+			"Database has been backed up \n" +
+			"Backup name is " + fileName
+		sendMessage(message)
+	}
+}
+func NotifyError(error string) {
+	var vars = []string{
+		"TG_TOKEN",
+		"TG_CHAT_ID",
+	}
+
+	//Telegram notification
+	err := CheckEnvVars(vars)
+	if err == nil {
+		message := "[🔴 MySQL Backup ]\n" +
+			"An error occurred during database backup \n" +
+			"Error: " + error
+		sendMessage(message)
+	}
+}
+
+func getTgUrl() string {
+	return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
+
+}
+func IsValidCronExpression(cronExpr string) bool {
+	_, err := cron.ParseStandard(cronExpr)
+	return err == nil
+}
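The notification helpers above fire only when both TG_TOKEN and TG_CHAT_ID are set (CheckEnvVars returning nil); sendMessage then POSTs JSON to the Telegram Bot API's sendMessage endpoint. A small usage sketch with placeholder credentials:

```go
package main

import (
	"os"

	"github.com/jkaninda/mysql-bkup/utils"
)

func main() {
	// Placeholders; with either variable missing, the notify helpers are no-ops.
	os.Setenv("TG_TOKEN", "123456:ABC-DEF")
	os.Setenv("TG_CHAT_ID", "987654321")

	utils.NotifySuccess("db_20240101_120000.sql.gz")
	utils.NotifyError("disk full") // sends the failure template instead
}
```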
Block a user