Mirror of https://github.com/jkaninda/mysql-bkup.git (synced 2025-12-06 21:49:40 +01:00)

Compare commits
21 Commits
| SHA1 |
|---|
| 01cf8a3392 |
| efea81833a |
| 1cbf65d686 |
| 73d19913f8 |
| b0224e43ef |
| fa0485bb5a |
| 65ef6d3e8f |
| a7b6abb101 |
| 3b21c109bc |
| a50a1ef6f9 |
| 76bbfa35c4 |
| 599d93bef4 |
| 247e90f73e |
| 7d544aca68 |
| 1722ee0eeb |
| 726fd14831 |
| fdc88e6064 |
| 2ba1b516e9 |
| 301594676b |
| d06f2f2d7e |
| 2f06bd1c3a |
README.md (21 changed lines)
@@ -1,9 +1,9 @@
 # MySQL Backup
-MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
+MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH compatible storage.
 It also supports __encrypting__ your backups using GPG.
 
 The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
-It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
+It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3, FTP or SSH compatible storage.
 
 It also supports database __encryption__ using GPG.
 
@@ -86,6 +86,19 @@ services:
   networks:
     web:
 ```
 
+### Docker recurring backup
+
+```shell
+docker run --rm --network network_name \
+ -v $PWD/backup:/backup/ \
+ -e "DB_HOST=hostname" \
+ -e "DB_USERNAME=user" \
+ -e "DB_PASSWORD=password" \
+ jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
+```
+See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+
 ## Deploy on Kubernetes
 
 For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob.
@@ -102,7 +115,7 @@ spec:
   template:
     spec:
       containers:
-        - name: pg-bkup
+        - name: mysql-bkup
           # In production, it is advised to lock your image tag to a proper
           # release version instead of using `latest`.
           # Check https://github.com/jkaninda/mysql-bkup/releases
@@ -154,7 +167,7 @@ While it may work against different implementations, there are no guarantees abo
 
 We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
 
-- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- The original image is based on `alpine` and requires additional tools, making it heavy.
- This image is written in Go.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
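The recurring-backup example added above hinges on the `--cron-expression` spec. A minimal sketch (not project code) of how a spec like `@every 1m` behaves under robfig/cron/v3, the scheduler this repository pulls in via go.mod; the printed message is purely illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()
	// "@every 1m" is a fixed-interval schedule, not a five-field crontab spec.
	if _, err := c.AddFunc("@every 1m", func() {
		fmt.Println("backup tick at", time.Now().Format(time.RFC3339))
	}); err != nil {
		panic(err)
	}
	c.Start()
	time.Sleep(3 * time.Minute) // let a few ticks fire before exiting
}
```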
Dockerfile

@@ -9,8 +9,8 @@ RUN go mod download
 # Build
 RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
 
-FROM ubuntu:24.04
+FROM alpine:3.20.3
-ENV DB_HOST="localhost"
+ENV DB_HOST=""
 ENV DB_NAME=""
 ENV DB_USERNAME=""
 ENV DB_PASSWORD=""
@@ -20,49 +20,42 @@ ENV AWS_S3_ENDPOINT=""
 ENV AWS_S3_BUCKET_NAME=""
 ENV AWS_ACCESS_KEY=""
 ENV AWS_SECRET_KEY=""
-ENV AWS_REGION="us-west-2"
 ENV AWS_S3_PATH=""
+ENV AWS_REGION="us-west-2"
 ENV AWS_DISABLE_SSL="false"
 ENV GPG_PASSPHRASE=""
 ENV SSH_USER=""
-ENV SSH_REMOTE_PATH=""
 ENV SSH_PASSWORD=""
 ENV SSH_HOST_NAME=""
 ENV SSH_IDENTIFY_FILE=""
-ENV SSH_PORT="22"
+ENV SSH_PORT=22
+ENV REMOTE_PATH=""
+ENV FTP_HOST_NAME=""
+ENV FTP_PORT=21
+ENV FTP_USER=""
+ENV FTP_PASSWORD=""
 ENV TARGET_DB_HOST=""
 ENV TARGET_DB_PORT=3306
-ENV TARGET_DB_NAME="localhost"
+ENV TARGET_DB_NAME=""
 ENV TARGET_DB_USERNAME=""
 ENV TARGET_DB_PASSWORD=""
-ARG DEBIAN_FRONTEND=noninteractive
-ENV VERSION="v1.2.8"
+ENV VERSION="v1.2.9"
 ENV BACKUP_CRON_EXPRESSION=""
 ENV TG_TOKEN=""
 ENV TG_CHAT_ID=""
 ARG WORKDIR="/config"
 ARG BACKUPDIR="/backup"
 ARG BACKUP_TMP_DIR="/tmp/backup"
-ARG BACKUP_CRON="/etc/cron.d/backup_cron"
-ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
 LABEL author="Jonas Kaninda"
 
-RUN apt-get update -qq
-RUN apt install mysql-client cron gnupg -y
-
-# Clear cache
-RUN apt-get clean && rm -rf /var/lib/apt/lists/*
+RUN apk add --no-cache mysql-client mariadb-connector-c gnupg
 
 RUN mkdir $WORKDIR
 RUN mkdir $BACKUPDIR
 RUN mkdir -p $BACKUP_TMP_DIR
 RUN chmod 777 $WORKDIR
 RUN chmod 777 $BACKUPDIR
 RUN chmod 777 $BACKUP_TMP_DIR
-RUN touch $BACKUP_CRON && \
-    touch $BACKUP_CRON_SCRIPT && \
-    chmod 777 $BACKUP_CRON && \
-    chmod 777 $BACKUP_CRON_SCRIPT
+RUN chmod 777 $WORKDIR
 
 COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
 RUN chmod +x /usr/local/bin/mysql-bkup
supervisord configuration (file deleted)

@@ -1,13 +0,0 @@
-[supervisord]
-nodaemon=true
-user=root
-logfile=/var/log/supervisor/supervisord.log
-pidfile=/var/run/supervisord.pid
-
-[program:cron]
-command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
-autostart=true
-autorestart=true
-user = root
-stderr_logfile=/var/log/cron.err.log
-stdout_logfile=/var/log/cron.out.log
docs/favicon.ico (new binary file, 4.2 KiB; not shown)
docs/how-tos/backup-to-ftp.md (new file, 44 lines)

@@ -0,0 +1,44 @@
+---
+title: Backup to FTP remote server
+layout: default
+parent: How Tos
+nav_order: 4
+---
+# Backup to FTP remote server
+
+As described for SSH backup section, to change the storage of your backup and use FTP Remote server as storage. You need to add `--storage ftp`.
+You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable.
+
+{: .note }
+These environment variables are required for SSH backup `FTP_HOST_NAME`, `FTP_USER`, `REMOTE_PATH`, `FTP_PORT` or `FTP_PASSWORD`.
+
+```yml
+services:
+  mysql-bkup:
+    # In production, it is advised to lock your image tag to a proper
+    # release version instead of using `latest`.
+    # Check https://github.com/jkaninda/mysql-bkup/releases
+    # for a list of available releases.
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: backup --storage ftp -d database
+    environment:
+      - DB_PORT=3306
+      - DB_HOST=postgres
+      - DB_NAME=database
+      - DB_USERNAME=username
+      - DB_PASSWORD=password
+      ## FTP config
+      - FTP_HOST_NAME="hostname"
+      - FTP_PORT=21
+      - FTP_USER=user
+      - FTP_PASSWORD=password
+      - REMOTE_PATH=/home/jkaninda/backups
+
+    # pg-bkup container must be connected to the same network with your database
+    networks:
+      - web
+networks:
+  web:
+```
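A hedged sketch, separate from the repository, that checks the variable names this new page documents, in the spirit of the tool's CheckEnvVars-style validation; `REMOTE_PATH` can alternatively come from the `--path` flag:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Names come from the FTP how-to above.
	required := []string{"FTP_HOST_NAME", "FTP_USER", "FTP_PASSWORD", "FTP_PORT", "REMOTE_PATH"}
	for _, v := range required {
		if os.Getenv(v) == "" {
			fmt.Fprintf(os.Stderr, "missing required variable: %s\n", v)
			os.Exit(1)
		}
	}
	fmt.Println("FTP configuration looks complete")
}
```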
docs: Backup to SSH remote server (how-to)

@@ -8,7 +8,7 @@ nav_order: 3
 
 As described for s3 backup section, to change the storage of your backup and use SSH Remote server as storage. You need to add `--storage ssh` or `--storage remote`.
-You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `SSH_REMOTE_PATH` environment variable.
+You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable.
 
 {: .note }
 These environment variables are required for SSH backup `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server.
@@ -36,7 +36,7 @@ services:
       - SSH_HOST_NAME="hostname"
       - SSH_PORT=22
       - SSH_USER=user
-      - SSH_REMOTE_PATH=/home/jkaninda/backups
+      - REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
       ## We advise you to use a private jey instead of password
       #- SSH_PASSWORD=password
@@ -76,7 +76,7 @@ services:
       - SSH_HOST_NAME="hostname"
       - SSH_PORT=22
       - SSH_USER=user
-      - SSH_REMOTE_PATH=/home/jkaninda/backups
+      - REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
       # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
       ## We advise you to use a private jey instead of password
@@ -131,7 +131,7 @@ spec:
             value: "22"
           - name: SSH_USER
             value: "xxx"
-          - name: SSH_REMOTE_PATH
+          - name: REMOTE_PATH
             value: "/home/jkaninda/backups"
           - name: AWS_ACCESS_KEY
             value: "xxxx"
docs: How Tos nav_order bumps (making room for the new FTP page)

Deploy on Kubernetes:
@@ -2,7 +2,7 @@
 title: Deploy on Kubernetes
 layout: default
 parent: How Tos
-nav_order: 8
+nav_order: 9
 ---
 
 ## Deploy on Kubernetes

Encrypt backups using GPG:
@@ -2,7 +2,7 @@
 title: Encrypt backups using GPG
 layout: default
 parent: How Tos
-nav_order: 7
+nav_order: 8
 ---
 # Encrypt backup
 

Migrate database:
@@ -2,7 +2,7 @@
 title: Migrate database
 layout: default
 parent: How Tos
-nav_order: 9
+nav_order: 10
 ---
 
 # Migrate database

Restore database from AWS S3:
@@ -2,7 +2,7 @@
 title: Restore database from AWS S3
 layout: default
 parent: How Tos
-nav_order: 5
+nav_order: 6
 ---
 
 # Restore database from S3 storage

Restore database from SSH:
@@ -2,7 +2,7 @@
 title: Restore database from SSH
 layout: default
 parent: How Tos
-nav_order: 6
+nav_order: 7
 ---
 # Restore database from SSH remote server
 

Restore database:
@@ -2,7 +2,7 @@
 title: Restore database
 layout: default
 parent: How Tos
-nav_order: 4
+nav_order: 5
 ---
 
 # Restore database
docs: index (About mysql-bkup)

@@ -6,7 +6,7 @@ nav_order: 1
 
 # About mysql-bkup
 {:.no_toc}
-MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH remote storage.
+MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH remote storage.
 It also supports __encrypting__ your backups using GPG.
 
 We are open to receiving stars, PRs, and issues!
@@ -79,6 +79,18 @@ services:
   networks:
     web:
 ```
+### Docker recurring backup
+
+```shell
+docker run --rm --network network_name \
+ -v $PWD/backup:/backup/ \
+ -e "DB_HOST=hostname" \
+ -e "DB_USERNAME=user" \
+ -e "DB_PASSWORD=password" \
+ jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
+```
+See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+
 ## Kubernetes
 
 ```yaml
@@ -144,7 +156,7 @@ While it may work against different implementations, there are no guarantees abo
 
 We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
 
-- The original image is based on `ubuntu` and requires additional tools, making it heavy.
+- The original image is based on `alpine` and requires additional tools, making it heavy.
 - This image is written in Go.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
docs: reference (environment variables and schedules)

@@ -35,7 +35,7 @@ Backup, restore and migrate targets, schedule and retention are configured using
 ## Environment variables
 
 | Name                   | Requirement                                                   | Description                                          |
-|------------------------|--------------------------------------------------------------|------------------------------------------------------|
+|------------------------|---------------------------------------------------------------|------------------------------------------------------|
 | DB_PORT                | Optional, default 3306                                        | Database port number                                 |
 | DB_HOST                | Required                                                      | Database host                                        |
 | DB_NAME                | Optional if it was provided from the -d flag                  | Database name                                        |
@@ -48,21 +48,26 @@ Backup, restore and migrate targets, schedule and retention are configured using
 | AWS_REGION             | Optional, required for S3 storage                             | AWS Region                                           |
 | AWS_DISABLE_SSL        | Optional, required for S3 storage                             | Disable SSL                                          |
 | FILE_NAME              | Optional if it was provided from the --file flag              | Database file to restore (extensions: .sql, .sql.gz) |
-| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --cron-expression flag   | Backup cron expression for docker in scheduled mode  |
 | GPG_PASSPHRASE         | Optional, required to encrypt and restore backup              | GPG passphrase                                       |
+| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode  |
 | SSH_HOST_NAME          | Optional, required for SSH storage                            | ssh remote hostname or ip                            |
 | SSH_USER               | Optional, required for SSH storage                            | ssh remote user                                      |
 | SSH_PASSWORD           | Optional, required for SSH storage                            | ssh remote user's password                           |
 | SSH_IDENTIFY_FILE      | Optional, required for SSH storage                            | ssh remote user's private key                        |
 | SSH_PORT               | Optional, required for SSH storage                            | ssh remote server port                               |
-| SSH_REMOTE_PATH        | Optional, required for SSH storage                            | ssh remote path (/home/toto/backup)                  |
+| REMOTE_PATH            | Optional, required for SSH or FTP storage                     | remote path (/home/toto/backup)                      |
+| FTP_HOST_NAME          | Optional, required for FTP storage                            | FTP host name                                        |
+| FTP_PORT               | Optional, required for FTP storage                            | FTP server port number                               |
+| FTP_USER               | Optional, required for FTP storage                            | FTP user                                             |
+| FTP_PASSWORD           | Optional, required for FTP storage                            | FTP user password                                    |
 | TARGET_DB_HOST         | Optional, required for database migration                     | Target database host                                 |
 | TARGET_DB_PORT         | Optional, required for database migration                     | Target database port                                 |
 | TARGET_DB_NAME         | Optional, required for database migration                     | Target database name                                 |
 | TARGET_DB_USERNAME     | Optional, required for database migration                     | Target database username                             |
 | TARGET_DB_PASSWORD     | Optional, required for database migration                     | Target database password                             |
-| TG_TOKEN               | Optional, required for Telegram notification                  | Telegram token                                       |
+| TG_TOKEN               | Optional, required for Telegram notification                  | Telegram token (`BOT-ID:BOT-TOKEN`)                  |
 | TG_CHAT_ID             | Optional, required for Telegram notification                  | Telegram Chat ID                                     |
 
 ---
 ## Run in Scheduled mode
 
@@ -122,7 +127,7 @@ You may use one of several pre-defined schedules in place of a cron expression.
 | @hourly | Run once an hour, beginning of hour | 0 * * * * |
 
 ### Intervals
-You may also schedule a job to execute at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this:
+You may also schedule backup task at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this:
 
 @every <duration>
 where "duration" is a string accepted by time.
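The Intervals paragraph ends with "a string accepted by time", i.e. Go's time.ParseDuration. A small illustration of the accepted duration strings:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Valid units include "ns", "us", "ms", "s", "m", "h"; combinations work too.
	for _, s := range []string{"30s", "5m", "1h30m"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("@every %s fires every %v\n", s, d)
	}
}
```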
docker-compose example (local backup, scheduled)

@@ -5,7 +5,7 @@ services:
     # release version instead of using `latest`.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command: backup --dbname database_name --mode scheduled --period "0 1 * * *"
+    command: backup --dbname database_name
     volumes:
       - ./backup:/backup
     environment:
@@ -13,3 +13,5 @@ services:
       - DB_HOST=mysql
       - DB_USERNAME=userName
       - DB_PASSWORD=${DB_PASSWORD}
+      # See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+      - BACKUP_CRON_EXPRESSION=@daily #@every 5m|@weekly | @monthly |0 1 * * *
@@ -6,7 +6,7 @@ services:
|
|||||||
# for a list of available releases.
|
# for a list of available releases.
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
container_name: mysql-bkup
|
container_name: mysql-bkup
|
||||||
command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
|
command: backup --storage s3 -d my-database
|
||||||
environment:
|
environment:
|
||||||
- DB_PORT=3306
|
- DB_PORT=3306
|
||||||
- DB_HOST=mysql
|
- DB_HOST=mysql
|
||||||
@@ -21,6 +21,8 @@ services:
|
|||||||
- AWS_SECRET_KEY=xxxxx
|
- AWS_SECRET_KEY=xxxxx
|
||||||
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
||||||
- AWS_DISABLE_SSL="false"
|
- AWS_DISABLE_SSL="false"
|
||||||
|
# See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
|
||||||
|
- BACKUP_CRON_EXPRESSION=@daily #@every 5m|@weekly | @monthly |0 1 * * *
|
||||||
# mysql-bkup container must be connected to the same network with your database
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
networks:
|
networks:
|
||||||
- web
|
- web
|
||||||
|
|||||||
go.mod (3 lines added)

@@ -13,7 +13,10 @@ require (
 )
 
 require (
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jlaffaye/ftp v0.2.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/robfig/cron/v3 v3.0.1 // indirect
 	golang.org/x/sys v0.22.0 // indirect
go.sum (7 lines added)

@@ -7,10 +7,17 @@ github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9Hu
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
+github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
pkg/backup.go (113 changed lines)

@@ -44,7 +44,10 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
 
 	//Test database connexion
 	testDatabaseConnection(db)
+	//Test backup
+	utils.Info("Testing backup configurations...")
+	BackupTask(db, config)
+	utils.Info("Testing backup configurations...done")
 	utils.Info("Creating backup job...")
 	// Create a new cron instance
 	c := cron.New()
@@ -64,22 +67,22 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
 }
 func BackupTask(db *dbConfig, config *BackupConfig) {
 	//Generate backup file name
-	backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20240102_150405"))
+	backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
 	if config.disableCompression {
-		backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20240102_150405"))
+		backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
 	}
 	config.backupFileName = backupFileName
 	switch config.storage {
-	case "s3":
-		s3Backup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
 	case "local":
-		localBackup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
+		localBackup(db, config)
+	case "s3":
+		s3Backup(db, config)
 	case "ssh", "remote":
-		sshBackup(db, config.backupFileName, config.remotePath, config.disableCompression, config.prune, config.backupRetention, config.encryption)
+		sshBackup(db, config)
 	case "ftp":
-		utils.Fatal("Not supported storage type: %s", config.storage)
+		ftpBackup(db, config)
 	default:
-		localBackup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
+		localBackup(db, config)
 	}
 }
 
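The layout-string change in BackupTask above is a genuine bug fix: Go renders times against the fixed reference time Mon Jan 2 15:04:05 MST 2006, so only the layout `20060102_150405` produces the current timestamp. A standalone demonstration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2024, 8, 30, 10, 4, 5, 0, time.UTC)
	fmt.Println(t.Format("20060102_150405")) // "20240830_100405": correct
	// "2024..." is not the reference time, so its digits are read as
	// day/minute/second layout tokens and the output is garbage, not the year.
	fmt.Println(t.Format("20240102_150405"))
}
```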
@@ -154,54 +157,54 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
 	}
 
 }
-func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func localBackup(db *dbConfig, config *BackupConfig) {
 	utils.Info("Backup database to local storage")
-	BackupDatabase(db, backupFileName, disableCompression)
+	BackupDatabase(db, config.backupFileName, disableCompression)
-	finalFileName := backupFileName
+	finalFileName := config.backupFileName
-	if encrypt {
+	if config.encryption {
-		encryptBackup(backupFileName)
+		encryptBackup(config.backupFileName, config.passphrase)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
 	}
 	utils.Info("Backup name is %s", finalFileName)
 	moveToBackup(finalFileName, storagePath)
 	//Send notification
 	utils.NotifySuccess(finalFileName)
 	//Delete old backup
-	if prune {
+	if config.prune {
-		deleteOldBackup(backupRetention)
+		deleteOldBackup(config.backupRetention)
 	}
 	//Delete temp
 	deleteTemp()
 }
 
-func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func s3Backup(db *dbConfig, config *BackupConfig) {
 	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
 	s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
 	utils.Info("Backup database to s3 storage")
 	//Backup database
-	BackupDatabase(db, backupFileName, disableCompression)
+	BackupDatabase(db, config.backupFileName, disableCompression)
-	finalFileName := backupFileName
+	finalFileName := config.backupFileName
-	if encrypt {
+	if config.encryption {
-		encryptBackup(backupFileName)
+		encryptBackup(config.backupFileName, config.passphrase)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
 	}
 	utils.Info("Uploading backup archive to remote storage S3 ... ")
 	utils.Info("Backup name is %s", finalFileName)
-	err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
+	err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
 	if err != nil {
 		utils.Fatal("Error uploading file to S3: %s ", err)
 
 	}
 
 	//Delete backup file from tmp folder
-	err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
+	err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
 	if err != nil {
 		fmt.Println("Error deleting file: ", err)
 
 	}
 	// Delete old backup
-	if prune {
+	if config.prune {
-		err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
+		err := DeleteOldBackup(bucket, s3Path, config.backupRetention)
 		if err != nil {
 			utils.Fatal("Error deleting old backup from S3: %s ", err)
 		}
@@ -214,18 +217,18 @@ func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prun
 }
 
 // sshBackup backup database to SSH remote server
-func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func sshBackup(db *dbConfig, config *BackupConfig) {
 	utils.Info("Backup database to Remote server")
 	//Backup database
-	BackupDatabase(db, backupFileName, disableCompression)
+	BackupDatabase(db, config.backupFileName, disableCompression)
-	finalFileName := backupFileName
+	finalFileName := config.backupFileName
-	if encrypt {
+	if config.encryption {
-		encryptBackup(backupFileName)
+		encryptBackup(config.backupFileName, config.passphrase)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
 	}
 	utils.Info("Uploading backup archive to remote storage ... ")
 	utils.Info("Backup name is %s", finalFileName)
-	err := CopyToRemote(finalFileName, remotePath)
+	err := CopyToRemote(finalFileName, config.remotePath)
 	if err != nil {
 		utils.Fatal("Error uploading file to the remote server: %s ", err)
 
@@ -237,7 +240,7 @@ func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompressi
 		fmt.Println("Error deleting file: ", err)
 
 	}
-	if prune {
+	if config.prune {
 		//TODO: Delete old backup from remote server
 		utils.Info("Deleting old backup from a remote server is not implemented yet")
 
@@ -249,11 +252,45 @@ func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompressi
 	//Delete temp
 	deleteTemp()
 }
+func ftpBackup(db *dbConfig, config *BackupConfig) {
+	utils.Info("Backup database to the remote FTP server")
+	//Backup database
+	BackupDatabase(db, config.backupFileName, disableCompression)
+	finalFileName := config.backupFileName
+	if config.encryption {
+		encryptBackup(config.backupFileName, config.passphrase)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
+	}
+	utils.Info("Uploading backup archive to the remote FTP server ... ")
+	utils.Info("Backup name is %s", finalFileName)
+	err := CopyToFTP(finalFileName, config.remotePath)
+	if err != nil {
+		utils.Fatal("Error uploading file to the remote FTP server: %s ", err)
+
+	}
+
+	//Delete backup file from tmp folder
+	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error deleting file: %v", err)
+
+	}
+	if config.prune {
+		//TODO: Delete old backup from remote server
+		utils.Info("Deleting old backup from a remote server is not implemented yet")
+
+	}
+
+	utils.Done("Uploading backup archive to the remote FTP server ... done ")
+	//Send notification
+	utils.NotifySuccess(finalFileName)
+	//Delete temp
+	deleteTemp()
+}
 
 // encryptBackup encrypt backup
-func encryptBackup(backupFileName string) {
+func encryptBackup(backupFileName, passphrase string) {
-	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
-	err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
+	err := Encrypt(filepath.Join(tmpPath, backupFileName), passphrase)
 	if err != nil {
 		utils.Fatal("Error during encrypting backup %s", err)
 	}
pkg: config types and initializers

@@ -37,7 +37,7 @@ type BackupConfig struct {
 	prune          bool
 	encryption     bool
 	remotePath     string
-	gpqPassphrase  string
+	passphrase     string
 	storage        string
 	cronExpression string
 }
@@ -49,6 +49,13 @@ type RestoreConfig struct {
 	bucket        string
 	gpqPassphrase string
 }
+type FTPConfig struct {
+	host       string
+	user       string
+	password   string
+	port       string
+	remotePath string
+}
 
 func initDbConfig(cmd *cobra.Command) *dbConfig {
 	//Set env
@@ -71,19 +78,20 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
 	utils.SetEnv("STORAGE_PATH", storagePath)
 	utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
 	utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
 
 	//Get flag value and set env
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	backupRetention, _ := cmd.Flags().GetInt("keep-last")
 	prune, _ := cmd.Flags().GetBool("prune")
 	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
 	_, _ = cmd.Flags().GetString("mode")
-	gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
+	passphrase := os.Getenv("GPG_PASSPHRASE")
 	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
 	cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
 
-	if gpqPassphrase != "" {
+	if passphrase != "" {
 		encryption = true
 	}
 	//Initialize backup configs
@@ -94,16 +102,17 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
 	config.storage = storage
 	config.encryption = encryption
 	config.remotePath = remotePath
-	config.gpqPassphrase = gpqPassphrase
+	config.passphrase = passphrase
 	config.cronExpression = cronExpression
 	return &config
 }
 func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
 	utils.SetEnv("STORAGE_PATH", storagePath)
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
 
 	//Get flag value and set env
 	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
 	_, _ = cmd.Flags().GetString("mode")
@@ -135,3 +144,18 @@ func initTargetDbConfig() *targetDbConfig {
 	}
 	return &tdbConfig
 }
+func initFtpConfig() *FTPConfig {
+	//Initialize backup configs
+	fConfig := FTPConfig{}
+	fConfig.host = os.Getenv("FTP_HOST_NAME")
+	fConfig.user = os.Getenv("FTP_USER")
+	fConfig.password = os.Getenv("FTP_PASSWORD")
+	fConfig.port = os.Getenv("FTP_PORT")
+	fConfig.remotePath = os.Getenv("REMOTE_PATH")
+	err := utils.CheckEnvVars(ftpVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for FTP are set")
+		utils.Fatal("Error checking environment variables: %s", err)
+	}
+	return &fConfig
+}
pkg/ftp.go (new file, 81 lines)

@@ -0,0 +1,81 @@
+package pkg
+
+import (
+	"fmt"
+	"github.com/jlaffaye/ftp"
+	"io"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+// initFtpClient initializes and authenticates an FTP client
+func initFtpClient() (*ftp.ServerConn, error) {
+	ftpConfig := initFtpConfig()
+	ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
+	if err != nil {
+		return nil, fmt.Errorf("failed to connect to FTP: %w", err)
+	}
+
+	err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
+	if err != nil {
+		return nil, fmt.Errorf("failed to log in to FTP: %w", err)
+	}
+
+	return ftpClient, nil
+}
+
+// CopyToFTP uploads a file to the remote FTP server
+func CopyToFTP(fileName, remotePath string) (err error) {
+	ftpConfig := initFtpConfig()
+	ftpClient, err := initFtpClient()
+	if err != nil {
+		return err
+	}
+	defer ftpClient.Quit()
+
+	filePath := filepath.Join(tmpPath, fileName)
+	file, err := os.Open(filePath)
+	if err != nil {
+		return fmt.Errorf("failed to open file %s: %w", fileName, err)
+	}
+	defer file.Close()
+
+	remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
+	err = ftpClient.Stor(remoteFilePath, file)
+	if err != nil {
+		return fmt.Errorf("failed to upload file %s: %w", fileName, err)
+	}
+
+	return nil
+}
+
+// CopyFromFTP downloads a file from the remote FTP server
+func CopyFromFTP(fileName, remotePath string) (err error) {
+	ftpClient, err := initFtpClient()
+	if err != nil {
+		return err
+	}
+	defer ftpClient.Quit()
+
+	remoteFilePath := filepath.Join(remotePath, fileName)
+	r, err := ftpClient.Retr(remoteFilePath)
+	if err != nil {
+		return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
+	}
+	defer r.Close()
+
+	localFilePath := filepath.Join(tmpPath, fileName)
+	outFile, err := os.Create(localFilePath)
+	if err != nil {
+		return fmt.Errorf("failed to create local file %s: %w", fileName, err)
+	}
+	defer outFile.Close()
+
+	_, err = io.Copy(outFile, r)
+	if err != nil {
+		return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
+	}
+
+	return nil
+}
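A hypothetical usage sketch of the two exported helpers above. The import path mirrors the one visible in the s3 section below; the file name, remote path, and the assumption that a dump already sits in the image's /tmp/backup directory are all placeholders:

```go
package main

import "github.com/jkaninda/mysql-bkup/pkg" // assumed module layout

func main() {
	// Upload a previously created dump, then fetch it back for a restore.
	if err := pkg.CopyToFTP("database_20240830_100405.sql.gz", "/home/jkaninda/backups"); err != nil {
		panic(err)
	}
	if err := pkg.CopyFromFTP("database_20240830_100405.sql.gz", "/home/jkaninda/backups"); err != nil {
		panic(err)
	}
}
```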
pkg: intro banner

@@ -127,5 +127,5 @@ func testDatabaseConnection(db *dbConfig) {
 }
 func intro() {
 	utils.Info("Starting MySQL Backup...")
-	utils.Info("Copyright © 2024 Jonas Kaninda ")
+	utils.Info("Copyright (c) 2024 Jonas Kaninda ")
 }
pkg: restore

@@ -30,7 +30,7 @@ func StartRestore(cmd *cobra.Command) {
 	case "ssh":
 		restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath)
 	case "ftp":
-		utils.Fatal("Restore from FTP is not yet supported")
+		restoreFromFTP(dbConf, restoreConf.file, restoreConf.remotePath)
 	default:
 		utils.Info("Restore database from local")
 		copyToTmp(storagePath, restoreConf.file)
@@ -40,7 +40,7 @@ func StartRestore(cmd *cobra.Command) {
 
 func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
 	utils.Info("Restore database from s3")
-	err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
+	err := DownloadFile(tmpPath, file, bucket, s3Path)
 	if err != nil {
 		utils.Fatal("Error download file from s3 %s %v", file, err)
 	}
@@ -54,6 +54,14 @@ func restoreFromRemote(db *dbConfig, file, remotePath string) {
 	}
 	RestoreDatabase(db, file)
 }
+func restoreFromFTP(db *dbConfig, file, remotePath string) {
+	utils.Info("Restore database from FTP server")
+	err := CopyFromFTP(file, remotePath)
+	if err != nil {
+		utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(remotePath, file), err)
+	}
+	RestoreDatabase(db, file)
+}
 
 // RestoreDatabase restore database
 func RestoreDatabase(db *dbConfig, file string) {
@@ -97,7 +105,7 @@ func RestoreDatabase(db *dbConfig, file string) {
 	// Restore from compressed file / .sql.gz
 	if extension == ".gz" {
 		str := "zcat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
-		_, err := exec.Command("bash", "-c", str).Output()
+		_, err := exec.Command("sh", "-c", str).Output()
 		if err != nil {
 			utils.Fatal("Error, in restoring the database %v", err)
 		}
@@ -109,7 +117,7 @@ func RestoreDatabase(db *dbConfig, file string) {
 	} else if extension == ".sql" {
 		//Restore from sql file
 		str := "cat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
-		_, err := exec.Command("bash", "-c", str).Output()
+		_, err := exec.Command("sh", "-c", str).Output()
 		if err != nil {
 			utils.Fatal("Error in restoring the database %v", err)
 		}
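The bash-to-sh switch above matters because the new alpine base ships BusyBox, which has no bash. For contrast, a shell-free alternative sketch (not the project's code) that streams a .sql.gz dump into the mysql client via compress/gzip:

```go
package main

import (
	"compress/gzip"
	"os"
	"os/exec"
)

// restoreGzip streams a decompressed dump into the mysql client without
// needing zcat or a shell. How the password reaches mysql is left to the
// client's usual mechanisms (e.g. the MYSQL_PWD environment variable); that
// is an assumption, since this diff does not show it.
func restoreGzip(path, host, port, user, db string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	zr, err := gzip.NewReader(f)
	if err != nil {
		return err
	}
	defer zr.Close()

	cmd := exec.Command("mysql", "-h", host, "-P", port, "-u", user, db)
	cmd.Stdin = zr // decompressed SQL flows straight into the client
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	return cmd.Run()
}

func main() {
	if err := restoreGzip("/tmp/backup/database.sql.gz", "mysql", "3306", "username", "database"); err != nil {
		panic(err)
	}
}
```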
pkg: s3 (moved from the utils package into pkg)

@@ -4,7 +4,7 @@
 @license MIT License <https://opensource.org/licenses/MIT>
 @Copyright © 2024 Jonas Kaninda
 **/
-package utils
+package pkg
 
 import (
 	"bytes"
@@ -14,6 +14,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+	"github.com/jkaninda/mysql-bkup/utils"
 	"log"
 	"net/http"
 	"os"
@@ -35,20 +36,20 @@ func CreateSession() (*session.Session, error) {
 		"AWS_REGION",
 	}
 
-	endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
+	endPoint := utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
-	accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
+	accessKey := utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
-	secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
+	secretKey := utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
-	_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	_ = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
 
 	region := os.Getenv("AWS_REGION")
 	awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
 	if err != nil {
-		Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
+		utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
 	}
 
-	err = CheckEnvVars(awsVars)
+	err = utils.CheckEnvVars(awsVars)
 	if err != nil {
-		Fatal("Error checking environment variables\n: %s", err)
+		utils.Fatal("Error checking environment variables\n: %s", err)
 	}
 	// S3 Config
 	s3Config := &aws.Config{
@@ -108,7 +109,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 	if err != nil {
 		return err
 	}
-	Info("Download backup from S3 storage...")
+	utils.Info("Download backup from S3 storage...")
 	file, err := os.Create(filepath.Join(destinationPath, key))
 	if err != nil {
 		fmt.Println("Failed to create file", err)
@@ -125,10 +126,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 		Key: aws.String(objectKey),
 	})
 	if err != nil {
-		fmt.Println("Failed to download file", err)
+		utils.Error("Failed to download file %s", key)
 		return err
 	}
-	Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
+	utils.Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
 
 	return nil
 }
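For context, the session the moved code builds has roughly this aws-sdk-go v1 shape: a custom endpoint plus DisableSSL for S3 alternatives such as MinIO. The endpoint, credentials, and the S3ForcePathStyle setting below are illustrative assumptions, not values taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess, err := session.NewSession(&aws.Config{
		Region:           aws.String("us-west-2"),
		Endpoint:         aws.String("http://minio:9000"), // placeholder endpoint
		Credentials:      credentials.NewStaticCredentials("ACCESS", "SECRET", ""),
		DisableSSL:       aws.Bool(true), // mirrors AWS_DISABLE_SSL
		S3ForcePathStyle: aws.Bool(true), // common for S3 alternatives; an assumption here
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("session region:", aws.StringValue(sess.Config.Region))
}
```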
pkg: constants and required-variable lists

@@ -8,7 +8,6 @@ package pkg
 
 const cronLogFile = "/var/log/mysql-bkup.log"
 const tmpPath = "/tmp/backup"
-const backupCronFile = "/usr/local/bin/backup_cron.sh"
 const algorithm = "aes256"
 const gpgHome = "/config/gnupg"
 const gpgExtension = "gpg"
@@ -42,7 +41,13 @@ var targetDbConf *targetDbConfig
 // sshHVars Required environment variables for SSH remote server storage
 var sshHVars = []string{
 	"SSH_USER",
-	"SSH_REMOTE_PATH",
+	"REMOTE_PATH",
 	"SSH_HOST_NAME",
 	"SSH_PORT",
 }
+var ftpVars = []string{
+	"FTP_HOST_NAME",
+	"FTP_USER",
+	"FTP_PASSWORD",
+	"FTP_PORT",
+}