Compare commits


94 Commits
v0.7 ... v1.2.7

Author SHA1 Message Date
Jonas Kaninda
041e0a07e9 Merge pull request #89 from jkaninda/develop
Develop
2024-09-28 03:42:39 +02:00
Jonas Kaninda
9daac9c654 fix: scheduled mode script, remove port number 2024-09-28 03:38:26 +02:00
Jonas Kaninda
f6098769cd fix: backup database in scheduled mode 2024-09-28 03:06:09 +02:00
Jonas Kaninda
5cdfaa4d94 chore: update version in Dockerfile 2024-09-28 02:31:07 +02:00
Jonas Kaninda
b205cd61ea Fix: Using a password on the command line interface can be insecure warning message 2024-09-28 02:25:42 +02:00
Jonas Kaninda
e1307250e8 Merge pull request #88 from jkaninda/jkaninda-patch-1
Update FUNDING.yml
2024-09-12 07:59:57 +02:00
Jonas Kaninda
17ac951deb Update FUNDING.yml 2024-09-12 07:59:46 +02:00
Jonas Kaninda
6e2e08224d Merge pull request #87 from jkaninda/jkaninda-patch-1
Create FUNDING.yml
2024-09-12 07:55:10 +02:00
Jonas Kaninda
570b775f48 Create FUNDING.yml 2024-09-12 07:54:51 +02:00
Jonas Kaninda
e38e106983 Merge pull request #86 from jkaninda/docs
chore: change notification title
2024-09-12 07:10:36 +02:00
Jonas Kaninda
3040420a09 chore: change notification title 2024-09-12 07:10:09 +02:00
Jonas Kaninda
eac5f70408 Merge pull request #85 from jkaninda/docs
Docs
2024-09-12 06:34:32 +02:00
Jonas Kaninda
3476c6f529 docs: update readme 2024-09-12 06:33:38 +02:00
Jonas Kaninda
1a9c8483f8 chore: add code comment 2024-09-12 06:23:57 +02:00
Jonas Kaninda
f8722f7ae4 Merge pull request #84 from jkaninda/docs
Update Intro
2024-09-12 06:18:09 +02:00
Jonas Kaninda
421bf12910 Update Intro 2024-09-12 06:17:46 +02:00
Jonas Kaninda
3da4a27baa Merge pull request #83 from jkaninda/docs
fix: add exit after database connection test failed
2024-09-11 08:03:44 +02:00
Jonas Kaninda
0881f075ef fix: add exit after database connection test failed 2024-09-11 08:03:16 +02:00
Jonas Kaninda
066e73f8e4 Merge pull request #82 from jkaninda/docs
clean up project
2024-09-11 04:55:01 +02:00
Jonas Kaninda
645243ff77 clean up project 2024-09-11 04:53:24 +02:00
Jonas Kaninda
9384998127 Merge pull request #81 from jkaninda/docs
refactor: add Telegram env in Dockerfile, move telegram notification …
2024-09-11 04:37:50 +02:00
Jonas Kaninda
390e7dad0c refactor: add Telegram env in Dockerfile, move telegram notification to utils 2024-09-11 04:37:02 +02:00
Jonas Kaninda
67ea22385f Merge pull request #80 from jkaninda/develop
remove operation old cmd
2024-09-10 23:15:38 +02:00
Jonas Kaninda
cde82d8cfc remove operation old cmd 2024-09-10 23:14:09 +02:00
Jonas Kaninda
4808f093e5 Merge pull request #79 from jkaninda/develop
Update version
2024-09-10 23:11:28 +02:00
Jonas Kaninda
c7a03861fe Update version 2024-09-10 23:10:24 +02:00
Jonas Kaninda
36ec63d522 Merge pull request #78 from jkaninda/develop
feat: Add Telegram notification
2024-09-10 23:04:12 +02:00
Jonas Kaninda
0f07de1d83 feat: Add Telegram notification 2024-09-10 23:01:26 +02:00
Jonas Kaninda
ae55839996 Merge pull request #77 from jkaninda/docs
docs: update Kubernetes deployment
2024-09-09 07:17:51 +02:00
Jonas Kaninda
a7f7e57a0d docs: update Kubernetes deployment 2024-09-09 07:17:15 +02:00
Jonas Kaninda
b2ddaec93b Merge pull request #76 from jkaninda/docs
docs: add buy me a coffee link
2024-09-05 22:43:09 +02:00
Jonas Kaninda
b3570d774c docs: add buy me a coffee link 2024-09-05 22:42:37 +02:00
Jonas Kaninda
38f7e91c03 Merge pull request #75 from jkaninda/develop
chore: rename environment variable for database migration operation
2024-09-03 07:06:24 +02:00
Jonas Kaninda
07c2935925 chore: rename environment variable for database migration operation 2024-09-03 06:49:26 +02:00
Jonas Kaninda
f3c5585051 Merge pull request #74 from jkaninda/docs
Docs
2024-08-30 21:24:50 +02:00
Jonas Kaninda
7163d030a5 chore: remove dbport from command flag 2024-08-30 21:22:18 +02:00
Jonas Kaninda
a2cec86e73 chore: remove dbport from command flag 2024-08-30 21:21:21 +02:00
Jonas Kaninda
662b73579d feat: add migrate database from a source to a target databse
fix: gpg encrypt permission warning message, update Kubernetes deployment example
2024-08-30 19:58:12 +02:00
c9f8a32de1 Merge pull request #73 from jkaninda/docs
docs: update Kubernetes deployment
2024-08-28 20:35:31 +02:00
8fb008151c docs: update Kubernetes deployment 2024-08-28 20:35:01 +02:00
113c84c885 Merge pull request #72 from jkaninda/docs
docs: update readme
2024-08-21 03:53:15 +02:00
58deb92953 docs: update readme 2024-08-21 03:52:49 +02:00
c41afb8b57 Merge pull request #71 from jkaninda/docs
docs: update readme
2024-08-21 03:51:25 +02:00
02e51a3933 docs: update readme 2024-08-21 03:50:59 +02:00
db4061b64b Merge pull request #70 from jkaninda/docs
docs: update readme
2024-08-21 03:49:58 +02:00
9467b157aa docs: update reamdme 2024-08-21 03:49:15 +02:00
c229ebdc9d Merge pull request #69 from jkaninda/docs
docs: fix grammar
2024-08-20 19:21:24 +02:00
7b701d1740 docs: fix grammar 2024-08-20 19:20:54 +02:00
ad6f190bad Merge pull request #68 from jkaninda/docs
docs: update readme
2024-08-15 06:06:26 +02:00
de4dcaaeca docs: update readme 2024-08-15 06:05:39 +02:00
17c0a99bda Merge pull request #67 from jkaninda/develop
Develop
2024-08-15 05:02:56 +02:00
b1c9abf931 Clean up 2024-08-14 22:28:16 +02:00
a70a893c11 Fix encryption permission issue on Openshift 2024-08-14 22:19:35 +02:00
243e25f4fb Fix encryption permission issue on Openshift 2024-08-14 22:19:02 +02:00
cb0dcf4104 Update docs 2024-08-11 09:49:41 +02:00
d26d8d31c9 Merge pull request #65 from jkaninda/docs
Merge Docs
2024-08-11 09:48:08 +02:00
71d438ba76 Merge branch 'main' of github.com:jkaninda/mysql-bkup into develop 2024-08-11 09:44:00 +02:00
a3fc58af96 Add delete /tmp directory after backup or restore and update docs 2024-08-11 09:38:31 +02:00
08ca6d4a39 Merge pull request #64 from jkaninda/develop
docs: update readme
2024-08-10 11:30:53 +02:00
27b9ab5f36 docs: update readme 2024-08-10 11:29:58 +02:00
6d6db7061b Merge pull request #63 from jkaninda/develop
Develop
2024-08-10 11:28:06 +02:00
d90647aae7 Update app version 2024-08-10 11:22:08 +02:00
5c2c05499f docs: update example 2024-08-10 11:12:43 +02:00
88ada6fefd docs: update example 2024-08-10 11:12:17 +02:00
e6c8b0923d Add Docker entrypont, update docs 2024-08-10 10:50:00 +02:00
59a136039c Merge pull request #62 from jkaninda/docs
docs: update stable version
2024-08-04 23:45:36 +02:00
db835e81c4 docs: update stable version 2024-08-04 23:44:49 +02:00
5b05bcbf0c Merge pull request #61 from jkaninda/docs
docs: add Kubernetes restore Job example
2024-08-04 13:38:10 +02:00
b8277c8464 docs: add Kubernetes restore Job example 2024-08-04 13:37:45 +02:00
70338b6ae6 Merge pull request #60 from jkaninda/develop
Develop
2024-08-04 13:12:50 +02:00
33b1acf7c0 docs: add Kubernetes restore example 2024-08-04 11:42:07 +02:00
9a4d02f648 fix: Fix AWS S3 and SSH backup in scheduled mode on Docker and Docker Swarm mode 2024-08-04 11:30:28 +02:00
1e06600c43 fix: Fix AWS S3 and SSH backup in scheduled mode on Docker and Docker Swarm mode 2024-08-04 11:30:03 +02:00
365ab8dfff Merge pull request #59 from jkaninda/develop
Develop
2024-08-04 01:43:49 +02:00
e4ca97b99e Fix log 2024-08-04 01:42:51 +02:00
ae7eb7a159 Fix log, add verification of required environment 2024-08-04 01:36:22 +02:00
204f66badf Merge pull request #58 from jkaninda/develop
fix: fix github action by getting tag name
2024-08-03 16:23:46 +02:00
e0b40ed433 fix: fix github action by getting tag name 2024-08-03 16:23:08 +02:00
07d717fad2 Merge pull request #57 from jkaninda/v1.0
docs: update readme
2024-08-03 16:15:08 +02:00
3bf4911dee docs: update readme 2024-08-03 16:14:34 +02:00
0b34a835f7 Merge pull request #56 from jkaninda/v1.0
docs: update readme
2024-08-03 16:09:33 +02:00
22bf95e6ca docs: update readme 2024-08-03 16:08:24 +02:00
445a104943 Merge pull request #55 from jkaninda/v1.0
Add SSH storage, add database backup encrypt and decrypt
2024-08-03 16:05:55 +02:00
caeba955c5 Add SSH storage, add database backup encrypt and decrypt 2024-08-03 16:03:17 +02:00
d906de6b54 Merge pull request #54 from jkaninda/develop
Add maintaining log in scheduled mode
2024-07-28 22:56:59 +02:00
c8e68af09f Add maintaining log in scheduled mode 2024-07-28 22:54:31 +02:00
082ef09500 Merge pull request #53 from jkaninda/develop
docs: add supported extensions to the doc
2024-02-25 21:35:33 +01:00
2e61054334 docs: add suported extensions to the doc 2024-02-25 21:35:05 +01:00
f394f28357 Merge pull request #52 from jkaninda/develop
refactor: clean up code
2024-02-25 14:39:03 +01:00
d8867a9baf refactor: clean up code 2024-02-25 14:38:32 +01:00
6ed9ff0a31 Merge pull request #51 from jkaninda/develop
refactor: refactoring of code
2024-02-20 07:58:08 +01:00
a4c37e1a4b refactor: refactoring of code 2024-02-20 07:57:21 +01:00
c6930a00ba Merge pull request #50 from jkaninda/develop
refactor: refactoring of code, improvement of deleteOldBackup function
2024-02-18 13:29:01 +01:00
00f2fca8e4 refactor: refactoring of code, improvement of deleteOldBackup function 2024-02-18 13:27:54 +01:00
57 changed files with 3700 additions and 877 deletions

3
.github/FUNDING.yml vendored Normal file

@@ -0,0 +1,3 @@
# These are supported funding model platforms
ko_fi: jkaninda


@@ -1,14 +1,7 @@
name: Build
on:
push:
branches: [ "main" ]
workflow_dispatch:
inputs:
docker_tag:
description: 'Docker tag'
required: true
default: 'latest'
type: string
branches: ['develop']
env:
BUILDKIT_IMAGE: jkaninda/mysql-bkup
jobs:
@@ -33,7 +26,7 @@ jobs:
with:
push: true
file: "./docker/Dockerfile"
platforms: linux/amd64,linux/arm64
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: |
"${{env.BUILDKIT_IMAGE}}:v0.7"
"${{env.BUILDKIT_IMAGE}}:latest"
"${{env.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"

55
.github/workflows/deploy-docs.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
name: Deploy Documentation site to GitHub Pages
on:
push:
branches: ['main']
paths:
- 'docs/**'
- '.github/workflows/deploy-docs.yml'
workflow_dispatch:
permissions:
contents: read
pages: write
id-token: write
concurrency:
group: 'pages'
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:
ruby-version: '3.2'
bundler-cache: true
cache-version: 0
working-directory: docs
- name: Setup Pages
id: pages
uses: actions/configure-pages@v2
- name: Build with Jekyll
working-directory: docs
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env:
JEKYLL_ENV: production
- name: Upload artifact
uses: actions/upload-pages-artifact@v1
with:
path: 'docs/_site/'
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: build
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v1

49
.github/workflows/release.yml vendored Normal file

@@ -0,0 +1,49 @@
name: CI
on:
push:
tags:
- v**
env:
BUILDKIT_IMAGE: jkaninda/mysql-bkup
jobs:
docker:
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
steps:
-
name: Set up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Log in to GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
-
name: Get the tag name
id: get_tag_name
run: echo "TAG_NAME=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
-
name: Build and push
uses: docker/build-push-action@v3
with:
push: true
file: "./docker/Dockerfile"
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: |
"${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
"${{env.BUILDKIT_IMAGE}}:latest"
"ghcr.io/${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
"ghcr.io/${{env.BUILDKIT_IMAGE}}:latest"

1
.gitignore vendored

@@ -9,3 +9,4 @@ mysql-bkup
/.DS_Store
/.idea
bin
Makefile


@@ -1,32 +0,0 @@
BINARY_NAME=mysql-bkup
include .env
export
run:
go run .
build:
go build -o bin/${BINARY_NAME} .
compile:
GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .
docker-build:
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
docker-run: docker-build
docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2
docker-run-scheduled: docker-build
docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -v "./backup:/backup" jkaninda/mysql-bkup bkup backup --prune --keep-last=2 --mode scheduled --period "* * * * *"
docker-run-scheduled-s3: docker-build
docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
docker-restore-s3: docker-build
docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "FILE_NAME=${FILE_NAME}" jkaninda/mysql-bkup bkup restore --storage s3 --path /custom-path

441
README.md

@@ -1,22 +1,29 @@
# MySQL Backup
MySQL Backup and Restoration tool. Backup database to AWS S3 storage or any S3 Alternatives for Object Storage.
MySQL Backup is a Docker container image that can be used to back up, restore and migrate MySQL databases. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
It also supports __encrypting__ your backups using GPG.
[![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/build.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/build.yml)
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of MySQL databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
It also supports database __encryption__ using GPG.
[![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
[![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square)
![Docker Pulls](https://img.shields.io/docker/pulls/jkaninda/mysql-bkup?style=flat-square)
<a href="https://ko-fi.com/jkaninda"><img src="https://uploads-ssl.webflow.com/5c14e387dab576fe667689cf/5cbed8a4ae2b88347c06c923_BuyMeACoffee_blue.png" height="20" alt="buy ma a coffee"></a>
<p align="center">
<a href="https://github.com/jkaninda/mysql-bkup">
<img src="https://www.mysql.com/common/logos/logo-mysql-170x115.png" alt="Logo">
</a>
</p>
> Runs on:
Successfully tested on:
- Docker
- Docker in Swarm mode
- Kubernetes
- OpenShift
## Documentation is found at <https://jkaninda.github.io/mysql-bkup>
## Links:
> Links:
- [Docker Hub](https://hub.docker.com/r/jkaninda/mysql-bkup)
- [Github](https://github.com/jkaninda/mysql-bkup)
@@ -24,357 +31,135 @@ MySQL Backup and Restoration tool. Backup database to AWS S3 storage or any S3 A
- [PostgreSQL](https://github.com/jkaninda/pg-bkup)
## Storage:
- local
- s3
- Object storage
- Local
- AWS S3 or any S3 Alternatives for Object Storage
- SSH remote server
## Volumes:
## Quickstart
- /s3mnt => S3 mounting path
- /backup => local storage mounting path
### Simple backup using Docker CLI
## Usage
To run a one-time backup, bind your local volume to `/backup` in the container and run the `backup` command:
| Options | Shorts | Usage |
|-----------------------|--------|-----------------------------------------------------------------------|
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| history | | Show the history of backup |
| --storage | -s | Set storage. local or s3 (default: local) |
| --file | -f | Set file name for restoration |
| --path | | Set s3 path without file name. eg: /custom_path |
| --dbname | -d | Set database name |
| --port | -p | Set database port (default: 3306) |
| --mode | -m | Set execution mode. default or scheduled (default: default) |
| --disable-compression | | Disable database backup compression |
| --prune | | Delete old backup |
| --keep-last | | keep all backup and delete within this time interval, default 7 days |
| --period | | Set crontab period for scheduled mode only. (default: "0 1 * * *") |
| --timeout | -t | Set timeout (default: 60s) |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
## Environment variables
| Name | Requirement | Description |
|-------------|--------------------------------------------------|----------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| S3_ENDPOINT | Optional, required for S3 storage | AWS S3 Endpoint |
| FILE_NAME | Optional if it was provided from the --file flag | File to restore |
## Note:
Creating a user for backup tasks who has read-only access is recommended!
> create read-only user
```sh
mysql -u root -p
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
```sql
CREATE USER read_only_user IDENTIFIED BY 'your_strong_password';
Alternatively, pass a `--env-file` in order to use a full config as described below.
```shell
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
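The contents of `your-env-file` are not shown here; a minimal sketch, assuming only the core connection variables from the environment variables table above (every value is a placeholder), could look like:
```shell
# your-env-file (hypothetical example; replace every value)
DB_HOST=dbhost
DB_PORT=3306
DB_NAME=database_name
DB_USERNAME=username
DB_PASSWORD=password
```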
### Simple backup in docker compose file
```
```sql
GRANT SELECT, SHOW VIEW ON *.* TO read_only_user;
```
```sql
FLUSH PRIVILEGES;
```
## Backup database :
Simple backup usage
```sh
mysql-bkup backup --dbname database_name
```
```sh
mysql-bkup backup -d database_name
```
### S3
```sh
mysql-bkup backup --storage s3 --dbname database_name
```
## Docker run:
```sh
docker run --rm --network your_network_name --name mysql-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/mysql-bkup:latest mysql-bkup backup -d database_name
```
## Docker compose file:
```yaml
version: '3'
services:
mariadb:
container_name: mariadb
image: mariadb
environment:
MYSQL_DATABASE: mariadb
MYSQL_USER: mariadb
MYSQL_PASSWORD: password
MYSQL_ROOT_PASSWORD: password
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -d database_name
command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mariadb
- DB_USERNAME=mariadb
- DB_PASSWORD=password
```
## Restore database :
Simple database restore operation usage
```sh
mysql-bkup restore --dbname database_name --file database_20231217_115621.sql
```
```sh
mysql-bkup restore -f database_20231217_115621.sql
```
### S3
```sh
mysql-bkup restore --storage s3 --file database_20231217_115621.sql
```
## Docker run:
```sh
docker run --rm --network your_network_name --name mysql-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/mysql-bkup mysql-bkup backup -d database_name -f db_20231219_022941.sql.gz
```
## Docker compose file:
```yaml
version: '3'
services:
mariadb:
container_name: mariadb
image: mariadb:latest
environment:
MYSQL_DATABASE: mariadb
MYSQL_USER: mariadb
MYSQL_PASSWORD: password
MYSQL_ROOT_PASSWORD: password
mysql-bkup:
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup restore --file database_20231217_115621.sql --dbname database_name
volumes:
- ./backup:/backup
environment:
#- FILE_NAME=mariadb_20231217_040238.sql # Optional if file name is set from command
- DB_PORT=3306
- DB_HOST=mariadb
- DB_NAME=mariadb
- DB_USERNAME=mariadb
- DB_PASSWORD=password
```
## Run
```sh
docker-compose up -d
```
## Backup to S3
```sh
docker run --rm --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=db_hostname" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" -e "ACCESS_KEY=your_access_key" -e "SECRET_KEY=your_secret_key" -e "BUCKETNAME=your_bucket_name" -e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" jkaninda/mysql-bkup mysql-bkup backup -s s3 -d database_name
```
> To change the S3 backup path, add the `--path /myPath` flag. The default path is `/mysql_bkup`.
Simple S3 backup usage
```sh
bkup backup --storage s3 --dbname mydatabase
```
```yaml
version: '3'
services:
mysql-bkup:
image: jkaninda/mysql-bkup
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- mysql-bkup restore --storage s3 -f database_20231217_115621.sql.gz
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=mariadb
- DB_USERNAME=mariadb
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKET_NAME=${BUCKET_NAME}
- S3_ENDPOINT=${S3_ENDPOINT}
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
## Run in Scheduled mode
## Deploy on Kubernetes
This tool can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes provides CronJob resources.
For Docker, you need to run it in scheduled mode by adding the `--mode scheduled` flag and specifying the backup schedule with the `--period "0 1 * * *"` flag.
For Kubernetes, you don't need to run it in scheduled mode; you can deploy it as a Job or CronJob.
Make an automated backup on Docker
## Syntax of crontab (field description)
The syntax is:
- 1: Minute (0-59)
- 2: Hours (0-23)
- 3: Day of month (1-31)
- 4: Month (1-12 [12 == December])
- 5: Day of the week (0-7 [7 or 0 == Sunday])
Easy to remember format:
```conf
* * * * * command to be executed
```
```conf
- - - - -
| | | | |
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
| | | ------- Month (1 - 12)
| | --------- Day of month (1 - 31)
| ----------- Hour (0 - 23)
------------- Minute (0 - 59)
```
> At every 30th minute
```conf
*/30 * * * *
```
> At minute 0 of every hour
```conf
0 * * * *
```
> At 01:00 every day
```conf
0 1 * * *
```
## Example of scheduled mode
> Docker run :
```sh
docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup mysql-bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *"
```
> With Docker compose
```yaml
version: "3"
services:
mysql-bkup:
image: jkaninda/mysql-bkup
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- mysql-bkup backup --storage s3 --path /mys3_custome_path --dbname database_name --mode scheduled --period "*/30 * * * *"
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKET_NAME=${BUCKET_NAME}
- S3_ENDPOINT=${S3_ENDPOINT}
```
## Kubernetes CronJob
For Kubernetes you don't need to run it in scheduled mode.
Simple Kubernetes CronJob usage:
### Simple Kubernetes backup Job :
```yaml
apiVersion: batch/v1
kind: CronJob
kind: Job
metadata:
name: mysql-bkup-job
name: backup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
ttlSecondsAfterFinished: 100
template:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
securityContext:
privileged: true
command:
containers:
- name: pg-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: "password"
- name: ACCESS_KEY
value: ""
- name: SECRET_KEY
value: ""
- name: BUCKET_NAME
value: ""
- name: S3_ENDPOINT
value: "https://s3.us-west-2.amazonaws.com"
restartPolicy: Never
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries
This Docker image is published to both Docker Hub and the GitHub container registry.
Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
```
docker pull jkaninda/mysql-bkup
docker pull ghcr.io/jkaninda/mysql-bkup
```
## Contributing
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
## Supported Engines
This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
While it may work against different implementations, there are no guarantees about support for non-Docker engines.
## References
We decided to publish this image as a simpler and more lightweight alternative, for the following reasons:
- The original image is based on `ubuntu` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
- Kubernetes is supported.
Contributions are welcome! If you encounter any issues or have suggestions for improvements, please create an issue or submit a pull request.
Make sure to follow the existing coding style and provide tests for your changes.
## License


@@ -1,3 +1,9 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd
import (
@@ -21,10 +27,12 @@ var BackupCmd = &cobra.Command{
func init() {
//Backup
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Set execution mode. default or scheduled")
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Set schedule period time")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Prune old backup")
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "keep all backup and delete within this time interval, default 7 days")
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
}


@@ -1,14 +0,0 @@
package cmd
import (
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)
var HistoryCmd = &cobra.Command{
Use: "history",
Short: "Show the history of backup",
Run: func(cmd *cobra.Command, args []string) {
utils.ShowHistory()
},
}

27
cmd/migrate.go Normal file

@@ -0,0 +1,27 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)
var MigrateCmd = &cobra.Command{
Use: "migrate",
Short: "Migrate database from a source database to a target database",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
pkg.StartMigration(cmd)
} else {
utils.Fatal("Error, no argument required")
}
},
}


@@ -24,5 +24,7 @@ var RestoreCmd = &cobra.Command{
func init() {
//Restore
RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
}


@@ -1,7 +1,9 @@
// Package cmd /*
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda@gmail.com>
*/
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd
import (
@@ -19,7 +21,6 @@ var rootCmd = &cobra.Command{
Version: appVersion,
}
var operation = ""
var s3Path = "/mysql-bkup"
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
@@ -31,16 +32,10 @@ func Execute() {
}
func init() {
rootCmd.PersistentFlags().StringP("storage", "s", "local", "Set storage. local or s3")
rootCmd.PersistentFlags().StringP("path", "P", s3Path, "Set s3 path, without file name. for S3 storage only")
rootCmd.PersistentFlags().StringP("dbname", "d", "", "Set database name")
rootCmd.PersistentFlags().IntP("timeout", "t", 30, "Set timeout")
rootCmd.PersistentFlags().IntP("port", "p", 3306, "Set database port")
rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
rootCmd.AddCommand(VersionCmd)
rootCmd.AddCommand(BackupCmd)
rootCmd.AddCommand(RestoreCmd)
rootCmd.AddCommand(S3MountCmd)
rootCmd.AddCommand(HistoryCmd)
rootCmd.AddCommand(MigrateCmd)
}


@@ -1,14 +0,0 @@
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/spf13/cobra"
)
var S3MountCmd = &cobra.Command{
Use: "s3mount",
Short: "Mount AWS S3 storage",
Run: func(cmd *cobra.Command, args []string) {
pkg.S3Mount()
},
}


@@ -1,9 +1,11 @@
// Package cmd /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package cmd
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda@gmail.com>
*/
import (
"fmt"
"github.com/spf13/cobra"


@@ -1,4 +1,4 @@
FROM golang:1.21.0 AS build
FROM golang:1.22.5 AS build
WORKDIR /app
# Copy the source code.
@@ -10,41 +10,76 @@ RUN go mod download
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
FROM ubuntu:24.04
ENV DB_HOST=""
ENV DB_HOST="localhost"
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT="3306"
ENV DB_PORT=3306
ENV STORAGE=local
ENV BUCKET_NAME=""
ENV ACCESS_KEY=""
ENV SECRET_KEY=""
ENV S3_ENDPOINT=https://s3.amazonaws.com
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
ENV AWS_ACCESS_KEY=""
ENV AWS_SECRET_KEY=""
ENV AWS_REGION="us-west-2"
ENV AWS_S3_PATH=""
ENV AWS_DISABLE_SSL="false"
ENV GPG_PASSPHRASE=""
ENV SSH_USER=""
ENV SSH_REMOTE_PATH=""
ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ENV TARGET_DB_HOST=""
ENV TARGET_DB_PORT=3306
ENV TARGET_DB_NAME="localhost"
ENV TARGET_DB_USERNAME=""
ENV TARGET_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v0.6"
LABEL authors="Jonas Kaninda"
ENV VERSION="v1.2.7"
ENV BACKUP_CRON_EXPRESSION=""
ENV TG_TOKEN=""
ENV TG_CHAT_ID=""
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
LABEL author="Jonas Kaninda"
RUN apt-get update -qq
#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
RUN apt install s3fs mysql-client supervisor cron -y
RUN apt install mysql-client supervisor cron gnupg -y
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir /s3mnt
RUN mkdir /tmp/s3cache
RUN chmod 777 /s3mnt
RUN chmod 777 /tmp/s3cache
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
touch $BACKUP_CRON_SCRIPT && \
chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/mysql_bkup
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
# Create backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
chmod +x /usr/local/bin/migrate
RUN mkdir /backup
WORKDIR /backup
WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]

3
docs/.gitignore vendored Normal file

@@ -0,0 +1,3 @@
_site
.sass-cache
.jekyll-metadata

24
docs/404.html Normal file

@@ -0,0 +1,24 @@
---
layout: default
---
<style type="text/css" media="screen">
.container {
margin: 10px auto;
max-width: 600px;
text-align: center;
}
h1 {
margin: 30px 0;
font-size: 4em;
line-height: 1;
letter-spacing: -1px;
}
</style>
<div class="container">
<h1>404</h1>
<p><strong>Page not found :(</strong></p>
<p>The requested page could not be found.</p>
</div>

43
docs/Gemfile Normal file

@@ -0,0 +1,43 @@
source "https://rubygems.org"
# Hello! This is where you manage which Jekyll version is used to run.
# When you want to use a different version, change it below, save the
# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
#
# bundle exec jekyll serve
#
# This will help ensure the proper Jekyll version is running.
# Happy Jekylling!
gem "jekyll", "~> 3.10.0"
# This is the default theme for new Jekyll sites. You may change this to anything you like.
gem "minima", "~> 2.0"
# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
# uncomment the line below. To upgrade, run `bundle update github-pages`.
# gem "github-pages", group: :jekyll_plugins
# If you have any plugins, put them here!
group :jekyll_plugins do
gem "jekyll-feed", "~> 0.6"
end
# Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem
# and associated library.
platforms :mingw, :x64_mingw, :mswin, :jruby do
gem "tzinfo", ">= 1", "< 3"
gem "tzinfo-data"
end
# Performance-booster for watching directories on Windows
gem "wdm", "~> 0.1.0", :install_if => Gem.win_platform?
# kramdown v2 ships without the gfm parser by default. If you're using
# kramdown v1, comment out this line.
gem "kramdown-parser-gfm"
# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
# do not have a Java counterpart.
gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]
gem "just-the-docs"

116
docs/Gemfile.lock Normal file

@@ -0,0 +1,116 @@
GEM
remote: https://rubygems.org/
specs:
addressable (2.8.7)
public_suffix (>= 2.0.2, < 7.0)
colorator (1.1.0)
concurrent-ruby (1.3.3)
csv (3.3.0)
em-websocket (0.5.3)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0)
eventmachine (1.2.7)
ffi (1.17.0)
ffi (1.17.0-aarch64-linux-gnu)
ffi (1.17.0-aarch64-linux-musl)
ffi (1.17.0-arm-linux-gnu)
ffi (1.17.0-arm-linux-musl)
ffi (1.17.0-arm64-darwin)
ffi (1.17.0-x86-linux-gnu)
ffi (1.17.0-x86-linux-musl)
ffi (1.17.0-x86_64-darwin)
ffi (1.17.0-x86_64-linux-gnu)
ffi (1.17.0-x86_64-linux-musl)
forwardable-extended (2.6.0)
http_parser.rb (0.8.0)
i18n (1.14.5)
concurrent-ruby (~> 1.0)
jekyll (3.10.0)
addressable (~> 2.4)
colorator (~> 1.0)
csv (~> 3.0)
em-websocket (~> 0.5)
i18n (>= 0.7, < 2)
jekyll-sass-converter (~> 1.0)
jekyll-watch (~> 2.0)
kramdown (>= 1.17, < 3)
liquid (~> 4.0)
mercenary (~> 0.3.3)
pathutil (~> 0.9)
rouge (>= 1.7, < 4)
safe_yaml (~> 1.0)
webrick (>= 1.0)
jekyll-feed (0.17.0)
jekyll (>= 3.7, < 5.0)
jekyll-include-cache (0.2.1)
jekyll (>= 3.7, < 5.0)
jekyll-sass-converter (1.5.2)
sass (~> 3.4)
jekyll-seo-tag (2.8.0)
jekyll (>= 3.8, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
just-the-docs (0.8.2)
jekyll (>= 3.8.5)
jekyll-include-cache
jekyll-seo-tag (>= 2.0)
rake (>= 12.3.1)
kramdown (2.4.0)
rexml
kramdown-parser-gfm (1.1.0)
kramdown (~> 2.0)
liquid (4.0.4)
listen (3.9.0)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
mercenary (0.3.6)
minima (2.5.1)
jekyll (>= 3.5, < 5.0)
jekyll-feed (~> 0.9)
jekyll-seo-tag (~> 2.1)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (6.0.1)
rake (13.2.1)
rb-fsevent (0.11.2)
rb-inotify (0.11.1)
ffi (~> 1.0)
rexml (3.3.2)
strscan
rouge (3.30.0)
safe_yaml (1.0.5)
sass (3.7.4)
sass-listen (~> 4.0.0)
sass-listen (4.0.0)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
strscan (3.1.0)
wdm (0.1.1)
webrick (1.8.1)
PLATFORMS
aarch64-linux-gnu
aarch64-linux-musl
arm-linux-gnu
arm-linux-musl
arm64-darwin
ruby
x86-linux-gnu
x86-linux-musl
x86_64-darwin
x86_64-linux-gnu
x86_64-linux-musl
DEPENDENCIES
http_parser.rb (~> 0.6.0)
jekyll (~> 3.10.0)
jekyll-feed (~> 0.6)
just-the-docs
kramdown-parser-gfm
minima (~> 2.0)
tzinfo (>= 1, < 3)
tzinfo-data
wdm (~> 0.1.0)
BUNDLED WITH
2.5.16

71
docs/_config.yml Normal file

@@ -0,0 +1,71 @@
# Welcome to Jekyll!
#
# This config file is meant for settings that affect your whole blog, values
# which you are expected to set up once and rarely edit after that. If you find
# yourself editing this file very often, consider using Jekyll's data files
# feature for the data you need to update frequently.
#
# For technical reasons, this file is *NOT* reloaded automatically when you use
# 'bundle exec jekyll serve'. If you change this file, please restart the server process.
# Site settings
# These are used to personalize your new site. If you look in the HTML files,
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
# You can create any custom variable you would like, and they will be accessible
# in the templates via {{ site.myvariable }}.
title: MySQL Backup Docker container image
email: hi@jonaskaninda.com
description: >- # this means to ignore newlines until "baseurl:"
MySQL Backup is a Docker container image that can be used to backup and restore MySQL database.
It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
baseurl: "" # the subpath of your site, e.g. /blog
url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
twitter_username: jonaskaninda
github_username: jkaninda
callouts_level: quiet
callouts:
highlight:
color: yellow
important:
title: Important
color: blue
new:
title: New
color: green
note:
title: Note
color: purple
warning:
title: Warning
color: red
# Build settings
markdown: kramdown
theme: just-the-docs
plugins:
- jekyll-feed
aux_links:
'GitHub Repository':
- https://github.com/jkaninda/mysql-bkup
nav_external_links:
- title: GitHub Repository
url: https://github.com/jkaninda/mysql-bkup
footer_content: >-
Copyright &copy; 2024 <a target="_blank" href="https://www.jonaskaninda.com">Jonas Kaninda</a>.
Distributed under the <a href="https://github.com/jkaninda/mysql-bkup/tree/main/LICENSE">MIT License.</a><br>
Something missing, unclear or not working? Open <a href="https://github.com/jkaninda/mysql-bkup/issues">an issue</a>.
# Exclude from processing.
# The following items will not be processed, by default. Create a custom list
# to override the default setting.
# exclude:
# - Gemfile
# - Gemfile2.lock
# - node_modules
# - vendor/bundle/
# - vendor/cache/
# - vendor/gems/
# - vendor/ruby/


@@ -0,0 +1,25 @@
---
layout: post
title: "Welcome to Jekyll!"
date: 2024-07-29 03:36:13 +0200
categories: jekyll update
---
You'll find this post in your `_posts` directory. Go ahead and edit it and re-build the site to see your changes. You can rebuild the site in many different ways, but the most common way is to run `jekyll serve`, which launches a web server and auto-regenerates your site when a file is updated.
To add new posts, simply add a file in the `_posts` directory that follows the convention `YYYY-MM-DD-name-of-post.ext` and includes the necessary front matter. Take a look at the source for this post to get an idea about how it works.
Jekyll also offers powerful support for code snippets:
{% highlight ruby %}
def print_hi(name)
puts "Hi, #{name}"
end
print_hi('Tom')
#=> prints 'Hi, Tom' to STDOUT.
{% endhighlight %}
Check out the [Jekyll docs][jekyll-docs] for more info on how to get the most out of Jekyll. File all bugs/feature requests at [Jekyll's GitHub repo][jekyll-gh]. If you have questions, you can ask them on [Jekyll Talk][jekyll-talk].
[jekyll-docs]: https://jekyllrb.com/docs/home
[jekyll-gh]: https://github.com/jekyll/jekyll
[jekyll-talk]: https://talk.jekyllrb.com/


@@ -0,0 +1,133 @@
---
title: Backup to AWS S3
layout: default
parent: How Tos
nav_order: 2
---
# Backup to AWS S3
{: .note }
As described in the local backup section, to change the storage of your backup and use S3 as storage, you need to add `--storage s3` (`-s s3`).
You can also specify the folder where you want to save your data by adding the `--path /my-custom-path` flag.
## Backup to S3
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage s3 -d database --path /my-custom-path
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## In case you are using an S3 alternative such as Minio and your Minio instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
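The same one-off S3 backup can also be run directly from the Docker CLI. This is a minimal sketch using the variables shown in the Compose file above; the network name, credentials, bucket and endpoint are placeholders:
```shell
# One-off S3 backup (sketch; all values are placeholders)
docker run --rm --network your_network_name \
  -e "DB_HOST=mysql" \
  -e "DB_NAME=database" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  -e "AWS_S3_ENDPOINT=https://s3.amazonaws.com" \
  -e "AWS_S3_BUCKET_NAME=backup" \
  -e "AWS_REGION=us-west-2" \
  -e "AWS_ACCESS_KEY=xxxx" \
  -e "AWS_SECRET_KEY=xxxxx" \
  jkaninda/mysql-bkup backup --storage s3 -d database --path /my-custom-path
```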
### Recurring backups to S3
As explained above, you just need to add the AWS environment variables and specify the storage type `--storage s3`.
In case you need recurring backups, you can use `--mode scheduled` and specify the backup schedule by adding the `--period "0 1 * * *"` flag, as described below.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## In case you are using an S3 alternative such as Minio and your Minio instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
## Deploy on Kubernetes
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
### Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: OnFailure
```


@@ -0,0 +1,140 @@
---
title: Backup to SSH
layout: default
parent: How Tos
nav_order: 3
---
# Backup to SSH remote server
As described in the S3 backup section, to change the storage of your backup and use an SSH remote server as storage, you need to add `--storage ssh` or `--storage remote`.
You need to set the full remote path by adding the `--path /home/jkaninda/backups` flag or using the `SSH_REMOTE_PATH` environment variable.
{: .note }
These environment variables are required for SSH backup: `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT`, or `SSH_PASSWORD` if you don't use a private key to access your server.
Accessing the remote server with a password is not recommended; use a private key instead.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage remote -d database
volumes:
- ./id_ed25519:/tmp/id_ed25519
environment:
- DB_PORT=3306
- DB_HOST=mysql
#- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH config
- SSH_HOST_NAME="hostname"
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
### Recurring backups to SSH remote server
As explained above, you just need to add the required environment variables and specify the storage type `--storage ssh`.
You can use `--mode scheduled` and specify the backup schedule by adding the `--period "0 1 * * *"` flag, as described below.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database --storage ssh --mode scheduled --period "0 1 * * *"
volumes:
- ./id_ed25519:/tmp/id_ed25519
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH config
- SSH_HOST_NAME="hostname"
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
## Deploy on Kubernetes
For Kubernetes, you don't need to run it in scheduled mode.
You can deploy it as CronJob.
Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -s ssh
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: ""
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backups"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: SSH_IDENTIFY_FILE
value: "/tmp/id_ed25519"
restartPolicy: Never
```

83
docs/how-tos/backup.md Normal file

@@ -0,0 +1,83 @@
---
title: Backup
layout: default
parent: How Tos
nav_order: 1
---
# Backup database
To back up the database, you need to add the `backup` command.
{: .note }
The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The __`--disable-compression`__ flag can be used when you need to disable backup compression.
{: .warning }
Creating a user with read-only access for backup tasks is recommended!
The backup process can be run in scheduled mode for recurring backups.
It handles __recurring__ backups of a MySQL database on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
### Backup using Docker CLI
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
In case you need recurring backups, you can use `--mode scheduled` and specify the backup schedule by adding the `--period "0 1 * * *"` flag, as described below.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database --mode scheduled --period "0 1 * * *"
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```


@@ -0,0 +1,303 @@
---
title: Deploy on Kubernetes
layout: default
parent: How Tos
nav_order: 8
---
## Deploy on Kubernetes
To deploy MySQL Backup on Kubernetes, you can use a Job to back up or restore your database.
For recurring backups you can use a CronJob; you don't need to run it in scheduled mode, as described below.
## Backup to S3 storage
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
```
## Backup Job to SSH remote server
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- ssh
- --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_PASSWORD
value: "xxxx"
- name: SSH_REMOTE_PATH
value: "/home/toto/backup"
# Optional, required if you want to encrypt your backup
- name: GPG_PASSPHRASE
value: "xxxx"
restartPolicy: Never
```
## Restore Job
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: restore-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- restore
- --storage
- ssh
- --file store_20231219_022941.sql.gz
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_PASSWORD
value: "xxxx"
- name: SSH_REMOTE_PATH
value: "/home/xxxx/backup"
# Optional, required if your backup was encrypted
#- name: GPG_PASSPHRASE
# value: "xxxx"
restartPolicy: Never
```
## Recurring backup
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-job
spec:
schedule: "* * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- ssh
- --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "username"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "xxx"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backup"
- name: SSH_PASSWORD
value: "password"
# Optional, required if you want to encrypt your backup
#- name: GPG_PASSPHRASE
# value: "xxx"
restartPolicy: Never
```
## Kubernetes Rootless
This image also supports the Kubernetes security context, so you can run it in a rootless environment.
It has been tested on OpenShift and works well.
Deployment on OpenShift is supported; you need to remove the `securityContext` section from your YAML file.
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-job
spec:
schedule: "* * * * *"
jobTemplate:
spec:
template:
spec:
securityContext:
runAsUser: 1000
runAsGroup: 3000
fsGroup: 2000
containers:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- bkup
- backup
- --storage
- ssh
- --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "xxx"
- name: DB_USERNAME
value: "xxx"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backup"
- name: SSH_PASSWORD
value: "password"
# Optional, required if you want to encrypt your backup
#- name: GPG_PASSPHRASE
# value: "xxx"
restartPolicy: OnFailure
```


@@ -0,0 +1,51 @@
---
title: Encrypt backups using GPG
layout: default
parent: How Tos
nav_order: 7
---
# Encrypt backup
The image supports encrypting backups using GPG out of the box. If the `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted with the given passphrase and saved as a `.sql.gpg` file (or `.sql.gz.gpg` for compressed backups).
{: .warning }
To restore an encrypted backup, you need to provide the same GPG passphrase that was used during the backup process.
To decrypt a backup manually, you need to install `gnupg`:
### Decrypt backup
```shell
gpg --batch --passphrase "my-passphrase" \
--output database_20240730_044201.sql.gz \
--decrypt database_20240730_044201.sql.gz.gpg
```
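As an additional illustration (not from the original docs), restoring an encrypted backup works like a normal restore as long as the same `GPG_PASSPHRASE` is provided; the file name, network and credentials below are placeholders:
```shell
docker run --rm --network your_network_name \
 -v $PWD/backup:/backup/ \
 -e "DB_HOST=mysql" \
 -e "DB_NAME=database" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 -e "GPG_PASSPHRASE=my-passphrase" \
 jkaninda/mysql-bkup restore -f database_20240730_044201.sql.gz.gpg
```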
### Backup
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Required to encrypt backup
- GPG_PASSPHRASE=my-secure-passphrase
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```

8
docs/how-tos/index.md Normal file

@@ -0,0 +1,8 @@
---
title: How Tos
layout: default
nav_order: 3
has_children: true
---
## How Tos

131
docs/how-tos/migrate.md Normal file

@@ -0,0 +1,131 @@
---
title: Migrate database
layout: default
parent: How Tos
nav_order: 9
---
# Migrate database
To migrate a database, you need to use the `migrate` command.
{: .note }
The MySQL backup image has another great feature: migrating your database from a source database to a target database.
Normally, moving a database from a source to a target requires two operations: backing up the source database and then restoring that backup into the target database.
With the integrated `migrate` command, the whole migration is done in a single operation.
{: .warning }
The `migrate` operation is irreversible; please back up your target database before running it.
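For example, a minimal sketch (values are placeholders) of taking a one-off backup of the target database with the Docker CLI before migrating:
```shell
docker run --rm --network your_network_name \
 -v $PWD/backup:/backup/ \
 -e "DB_HOST=target-mysql" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 jkaninda/mysql-bkup backup -d dbname
```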
### Docker compose
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: migrate
volumes:
- ./backup:/backup
environment:
## Source database
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Target database
- TARGET_DB_HOST=target-mysql
- TARGET_DB_PORT=3306
- TARGET_DB_NAME=dbname
- TARGET_DB_USERNAME=username
- TARGET_DB_PASSWORD=password
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
### Migrate database using Docker CLI
```
## Source database
DB_HOST=mysql
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password
## Target database
TARGET_DB_HOST=target-mysql
TARGET_DB_PORT=3306
TARGET_DB_NAME=dbname
TARGET_DB_USERNAME=username
TARGET_DB_PASSWORD=password
```
```shell
docker run --rm --network your_network_name \
--env-file your-env \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup migrate
```
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: migrate-db
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- migrate
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
## Source Database
- name: DB_HOST
value: "mysql"
- name: DB_PORT
value: "3306"
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
- name: DB_PASSWORD
value: "password"
## Target Database
- name: TARGET_DB_HOST
value: "target-mysql"
- name: TARGET_DB_PORT
value: "3306"
- name: TARGET_DB_NAME
value: "dbname"
- name: TARGET_DB_USERNAME
value: "username"
- name: TARGET_DB_PASSWORD
value: "password"
restartPolicy: Never
```


@@ -0,0 +1,95 @@
---
title: Restore database from AWS S3
layout: default
parent: How Tos
nav_order: 5
---
# Restore database from S3 storage
To restore the database, you need to use the `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
{: .note }
It supports __.sql__ and compressed __.sql.gz__ files.
### Restore
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
## Restore on Kubernetes
Simple Kubernetes restore Job:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: restore-db
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
backoffLimit: 4
```


@@ -0,0 +1,93 @@
---
title: Restore database from SSH
layout: default
parent: How Tos
nav_order: 6
---
# Restore database from SSH remote server
To restore the database from your remote server, you need to use the `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
{: .note }
It supports __.sql__ and compressed __.sql.gz__ files.
### Restore
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
volumes:
- ./backup:/backup
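## If you authenticate with SSH_IDENTIFY_FILE, also mount the private key into
## the container (illustrative host path, adjust to your setup):
#- ./id_ed25519:/tmp/id_ed25519:ro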
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH config
- SSH_HOST_NAME="hostname"
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
## Restore on Kubernetes
Simple Kubernetes restore Job:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: restore-db
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- restore -s ssh -f store_20231219_022941.sql.gz
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: ""
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backups"
- name: SSH_IDENTIFY_FILE
value: "/tmp/id_ed25519"
restartPolicy: Never
backoffLimit: 4
```

40
docs/how-tos/restore.md Normal file

@@ -0,0 +1,40 @@
---
title: Restore database
layout: default
parent: How Tos
nav_order: 4
---
# Restore database
To restore the database, you need to use the `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
{: .note }
It supports __.sql__ and compressed __.sql.gz__ files.
### Restore
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: restore -d database -f store_20231219_022941.sql.gz
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```

151
docs/index.md Normal file

@@ -0,0 +1,151 @@
---
title: Overview
layout: home
nav_order: 1
---
# About mysql-bkup
{:.no_toc}
MySQL Backup is a Docker container image that can be used to back up, restore and migrate a MySQL database. It supports local storage, AWS S3 (or any S3-compatible object storage), and SSH remote storage.
It also supports __encrypting__ your backups using GPG.
We are open to receiving stars, PRs, and issues!
{: .fs-6 .fw-300 }
---
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of a MySQL database on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH storage.
It also supports backup __encryption__ using GPG.
{: .note }
Code and documentation for the `v1` version are on [this branch][v1-branch].
[v1-branch]: https://github.com/jkaninda/mysql-bkup
---
## Quickstart
### Simple backup using Docker CLI
To run a one-time backup, bind your local volume to `/backup` in the container and run the `backup` command:
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
Alternatively, pass a `--env-file` in order to use a full config as described below.
```shell
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
### Simple backup in docker compose file
```yaml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries
This Docker image is published to both Docker Hub and the GitHub container registry.
Depending on your preferences and needs, you can use either `jkaninda/mysql-bkup` or `ghcr.io/jkaninda/mysql-bkup`:
```
docker pull jkaninda/mysql-bkup
docker pull ghcr.io/jkaninda/mysql-bkup
```
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
## Supported Engines
This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
While it may work against different implementations, there are no guarantees about support for non-Docker engines.
## References
We decided to publish this image as a simpler and more lightweight alternative for the following reasons:
- The original image is based on `ubuntu` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
- Kubernetes is supported.

358
docs/old-version/index.md Normal file

@@ -0,0 +1,358 @@
---
layout: page
title: Old version
permalink: /old-version/
---
This is the documentation of mysql-bkup for all old versions below `v1.0`.
In the old versions, S3 storage was mounted using s3fs; we have since migrated to the official AWS SDK.
## Storage:
- local
- s3
- Object storage
## Volumes:
- /s3mnt => S3 mounting path
- /backup => local storage mounting path
### Usage
| Options | Shorts | Usage |
|-----------------------|--------|------------------------------------------------------------------------|
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| history | | Show the history of backup |
| --storage | -s | Storage. local or s3 (default: local) |
| --file | -f | File name to restore |
| --path | | S3 path without file name. eg: /custom_path |
| --dbname | -d | Database name |
| --port | -p | Database port (default: 3306) |
| --mode | -m | Execution mode. default or scheduled (default: default) |
| --disable-compression | | Disable database backup compression |
| --prune | | Delete old backup, default disabled |
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
## Environment variables
| Name | Requirement | Description |
|-------------|--------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| S3_ENDPOINT | Optional, required for S3 storage | AWS S3 Endpoint |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
## Note:
Creating a dedicated user with read-only access for backup tasks is recommended!
> Create a read-only user
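For example, a hedged sketch of creating such a user with the `mysql` client (host, account name and password are placeholders; note that `mysqldump` typically needs at least SELECT, SHOW VIEW, TRIGGER and LOCK TABLES):
```shell
mysql -h mysql -u root -p -e "
CREATE USER 'backup_user'@'%' IDENTIFIED BY 'strong-password';
GRANT SELECT, SHOW VIEW, TRIGGER, LOCK TABLES ON *.* TO 'backup_user'@'%';
FLUSH PRIVILEGES;"
```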
## Backup database
Simple backup usage
```sh
bkup backup
```
### S3
```sh
mysql-bkup backup --storage s3
```
## Docker run:
```sh
docker run --rm --network your_network_name \
--name mysql-bkup -v $PWD/backup:/backup/ \
-e "DB_HOST=database_host_name" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" jkaninda/mysql-bkup:v0.7 mysql-bkup backup -d database_name
```
## Docker compose file:
```yaml
version: '3'
services:
mysql:
image: mysql:8.0
container_name: mysql
restart: unless-stopped
volumes:
- ./mysql:/var/lib/mysql
environment:
MYSQL_DATABASE: bkup
MYSQL_USER: bkup
MYSQL_PASSWORD: password
MYSQL_ROOT_PASSWORD: password
mysql-bkup:
image: jkaninda/mysql-bkup:v0.7
container_name: mysql-bkup
depends_on:
- mysql
command:
- /bin/sh
- -c
- mysql-bkup backup -d bkup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=bkup
- DB_USERNAME=bkup
- DB_PASSWORD=password
```
## Restore database
Simple database restore operation usage
```sh
mysql-bkup restore --file database_20231217_115621.sql --dbname database_name
```
```sh
mysql-bkup restore -f database_20231217_115621.sql -d database_name
```
### S3
```sh
mysql-bkup restore --storage s3 --file database_20231217_115621.sql --dbname database_name
```
## Docker run:
```sh
docker run --rm --network your_network_name \
--name mysql-bkup \
-v $PWD/backup:/backup/ \
-e "DB_HOST=database_host_name" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup:v0.7 mysql-bkup restore -d database_name -f store_20231219_022941.sql.gz
```
## Docker compose file:
```yaml
version: '3'
services:
mysql-bkup:
image: jkaninda/mysql-bkup:v0.7
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup restore --file database_20231217_115621.sql -d database_name
volumes:
- ./backup:/backup
environment:
#- FILE_NAME=database_20231217_040238.sql.gz # Optional if file name is set from command
- DB_PORT=3306
- DB_HOST=mysql
- DB_USERNAME=user_name
- DB_PASSWORD=password
```
## Run
```sh
docker-compose up -d
```
## Backup to S3
```sh
docker run --rm --privileged \
--device /dev/fuse --name mysql-bkup \
-e "DB_HOST=db_hostname" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
-e "ACCESS_KEY=your_access_key" \
-e "SECRET_KEY=your_secret_key" \
-e "BUCKETNAME=your_bucket_name" \
-e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" \
jkaninda/mysql-bkup:v0.7 mysql-bkup backup -s s3 -d database_name
```
> To change the S3 backup path, add the `--path /my_customPath` flag. The default path is `/mysql-bkup`.
Simple S3 backup usage
```sh
mysql-bkup backup --storage s3 --dbname mydatabase
```
```yaml
mysql-bkup:
image: jkaninda/mysql-bkup:v0.7
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- mysql-bkup restore --storage s3 -f database_20231217_115621.sql.gz --dbname database_name
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_USERNAME=user_name
- DB_PASSWORD=password
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKET_NAME=${BUCKET_NAME}
- S3_ENDPOINT=${S3_ENDPOINT}
```
## Run in Scheduled mode
This tool can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes provides CronJob resources.
For Docker, you need to run it in scheduled mode by adding the `--mode scheduled` flag and specifying the backup schedule with the `--period "0 1 * * *"` flag.
Below is how to make an automated backup on Docker.
## Syntax of crontab (field description)
The syntax is:
- 1: Minute (0-59)
- 2: Hours (0-23)
- 3: Day of the month (1-31)
- 4: Month (1-12 [12 == December])
- 5: Day of the week (0-7 [7 or 0 == Sunday])
Easy to remember format:
```conf
* * * * * command to be executed
```
```conf
- - - - -
| | | | |
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
| | | ------- Month (1 - 12)
| | --------- Day of month (1 - 31)
| ----------- Hour (0 - 23)
------------- Minute (0 - 59)
```
> At every 30th minute
```conf
*/30 * * * *
```
> At minute 0 of every hour
```conf
0 * * * *
```
> At 01:00 every day
```conf
0 1 * * *
```
## Example of scheduled mode
> Docker run :
```sh
docker run --rm --name mysql-bkup \
-v $BACKUP_DIR:/backup/ \
-e "DB_HOST=$DB_HOST" \
-e "DB_USERNAME=$DB_USERNAME" \
-e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:v0.7 mysql-bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *"
```
> With Docker compose
```yaml
version: "3"
services:
mysql-bkup:
image: jkaninda/mysql-bkup:v0.7
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "*/30 * * * *"
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKET_NAME=${BUCKET_NAME}
- S3_ENDPOINT=${S3_ENDPOINT}
```
## Kubernetes CronJob
For Kubernetes, you don't need to run it in scheduled mode.
Simple Kubernetes CronJob usage:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: bkup-job
spec:
schedule: "0 1 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup:v0.7
securityContext:
privileged: true
command:
- /bin/sh
- -c
- mysql-bkup backup -s s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: ACCESS_KEY
value: ""
- name: SECRET_KEY
value: ""
- name: BUCKET_NAME
value: ""
- name: S3_ENDPOINT
value: "https://s3.us-west-2.amazonaws.com"
restartPolicy: Never
```
## Authors
**Jonas Kaninda**
- <https://github.com/jkaninda>

113
docs/reference/index.md Normal file

@@ -0,0 +1,113 @@
---
title: Configuration Reference
layout: default
nav_order: 2
---
# Configuration reference
Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
### CLI utility Usage
| Options | Shorts | Usage |
|-----------------------|--------|----------------------------------------------------------------------------------------|
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| migrate | | Migrate database from one instance to another one |
| --storage             | -s     | Storage. local, s3 or ssh (default: local)                                               |
| --file | -f | File name for restoration |
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
| --dbname | -d | Database name |
| --port | -p | Database port (default: 3306) |
| --mode | -m | Execution mode. default or scheduled (default: default) |
| --disable-compression | | Disable database backup compression |
| --prune | | Delete old backup, default disabled |
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
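For illustration only, a few example invocations combining these flags (database, path and schedule are placeholders):
```shell
# One-off backup to S3, pruning backups older than 7 days
bkup backup --storage s3 --path /custom_path --prune --keep-last 7

# Scheduled local backup every day at 01:00, without compression
bkup backup -d dbname --mode scheduled --period "0 1 * * *" --disable-compression
```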
## Environment variables
| Name | Requirement | Description |
|------------------------|----------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_S3_BUCKET_NAME     | Optional, required for S3 storage                  | AWS S3 Bucket Name                                    |
| AWS_S3_ENDPOINT        | Optional, required for S3 storage                  | AWS S3 Endpoint                                       |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
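As an illustration only, the variables above can be collected in an environment file and passed to the container with `--env-file`; all values below are placeholders:
```shell
# Create an example environment file (values are placeholders)
cat > backup.env <<'EOF'
DB_HOST=mysql
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password
AWS_S3_ENDPOINT=https://s3.amazonaws.com
AWS_S3_BUCKET_NAME=backup
AWS_REGION=us-west-2
AWS_ACCESS_KEY=xxxx
AWS_SECRET_KEY=xxxx
EOF

# Run a one-off backup to S3 using the environment file
docker run --rm --network your_network_name \
 --env-file backup.env \
 -v $PWD/backup:/backup/ \
 jkaninda/mysql-bkup backup --storage s3
```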
---
## Run in Scheduled mode
This image can be run as a CronJob in Kubernetes for regular backups, which makes deployment on Kubernetes easy since Kubernetes provides CronJob resources.
For Docker, you need to run it in scheduled mode by adding the `--mode scheduled` flag and specifying the backup schedule with the `--period "0 1 * * *"` flag.
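For example, a minimal sketch of a scheduled backup on Docker (network name and credentials are placeholders):
```shell
docker run --rm --network your_network_name \
 -v $PWD/backup:/backup/ \
 -e "DB_HOST=mysql" \
 -e "DB_NAME=dbname" \
 -e "DB_USERNAME=username" \
 -e "DB_PASSWORD=password" \
 jkaninda/mysql-bkup backup --mode scheduled --period "0 1 * * *"
```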
## Syntax of crontab (field description)
The syntax is:
- 1: Minute (0-59)
- 2: Hours (0-23)
- 3: Day of the month (1-31)
- 4: Month (1-12 [12 == December])
- 5: Day of the week (0-7 [7 or 0 == Sunday])
Easy to remember format:
```conf
* * * * * command to be executed
```
```conf
- - - - -
| | | | |
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
| | | ------- Month (1 - 12)
| | --------- Day of month (1 - 31)
| ----------- Hour (0 - 23)
------------- Minute (0 - 59)
```
> At every 30th minute
```conf
*/30 * * * *
```
> At minute 0 of every hour
```conf
0 * * * *
```
> At 01:00 every day
```conf
0 1 * * *
```


@@ -1,21 +1,28 @@
version: "3"
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name
command: backup --storage s3 -d my-database
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKET_NAME=${BUCKET_NAME}
- S3_ENDPOINT=https://s3.us-west-2.amazonaws.com
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:


@@ -1,16 +1,15 @@
version: "3"
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup --dbname database_name --mode scheduled --period "0 1 * * *"
command: backup --dbname database_name --mode scheduled --period "0 1 * * *"
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_HOST=mysql
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}


@@ -1,21 +1,28 @@
version: "3"
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
privileged: true
devices:
- "/dev/fuse"
command:
- /bin/sh
- -c
- mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "0 1 * * *"
command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
- ACCESS_KEY=${ACCESS_KEY}
- SECRET_KEY=${SECRET_KEY}
- BUCKET_NAME=${BUCKET_NAME}
- S3_ENDPOINT=https://s3.us-west-2.amazonaws.com
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as MinIO and your instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:


@@ -3,14 +3,11 @@ services:
mysql-bkup:
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup --dbname database_name
command: backup --dbname database_name
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysqlhost
- DB_HOST=mysql
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}


@@ -1,40 +1,47 @@
apiVersion: batch/v1
kind: CronJob
kind: Job
metadata:
name: db-bkup-job
name: backup
spec:
schedule: "0 1 * * *"
jobTemplate:
template:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
securityContext:
privileged: true
command:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- mysql-bkup backup --storage s3 --path /custom_path
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: "password"
- name: ACCESS_KEY
value: ""
- name: SECRET_KEY
value: ""
- name: BUCKETNAME
value: ""
- name: S3_ENDPOINT
value: "https://s3.us-west-2.amazonaws.com"
restartPolicy: Never
- backup --storage s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never

17
go.mod

@@ -1,10 +1,21 @@
module github.com/jkaninda/mysql-bkup
go 1.21.0
go 1.22.5
require github.com/spf13/pflag v1.0.5
require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/aws/aws-sdk-go v1.55.3
github.com/bramvdbogaerde/go-scp v1.5.0
github.com/hpcloud/tail v1.0.0
github.com/spf13/cobra v1.8.0
golang.org/x/crypto v0.18.0
)
require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
golang.org/x/sys v0.22.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
)

28
go.sum

@@ -1,10 +1,38 @@
github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

13
main.go

@@ -1,12 +1,11 @@
// Package main /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package main
//main
/*****
* MySQL Backup & Restore
* @author Jonas Kaninda
* @license MIT License <https://opensource.org/licenses/MIT>
* @link https://github.com/jkaninda/mysql-bkup
**/
import "github.com/jkaninda/mysql-bkup/cmd"
func main() {


@@ -1,11 +1,14 @@
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda.gmail.com>
*/
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"fmt"
"github.com/hpcloud/tail"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"log"
@@ -16,32 +19,51 @@ import (
)
func StartBackup(cmd *cobra.Command) {
intro()
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
utils.GetEnv(cmd, "port", "DB_PORT")
utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
//Get flag value and set env
s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
keepLast, _ := cmd.Flags().GetInt("keep-last")
backupRetention, _ := cmd.Flags().GetInt("keep-last")
prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
executionMode, _ = cmd.Flags().GetString("mode")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
dbConf = getDbConfig(cmd)
//
if gpqPassphrase != "" {
encryption = true
}
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
if disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
}
if executionMode == "default" {
if storage == "s3" {
utils.Info("Backup database to s3 storage")
s3Backup(disableCompression, s3Path, prune, keepLast)
} else {
utils.Info("Backup database to local storage")
BackupDatabase(disableCompression, prune, keepLast)
switch storage {
case "s3":
s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "local":
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
case "ssh", "remote":
sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
case "ftp":
utils.Fatal("Not supported storage type: %s", storage)
default:
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
}
} else if executionMode == "scheduled" {
scheduledMode()
scheduledMode(dbConf, storage)
} else {
utils.Fatal("Error, unknown execution mode!")
}
@@ -49,150 +71,229 @@ func StartBackup(cmd *cobra.Command) {
}
// Run in scheduled mode
func scheduledMode() {
func scheduledMode(db *dbConfig, storage string) {
fmt.Println()
fmt.Println("**********************************")
fmt.Println(" Starting MySQL Bkup... ")
fmt.Println("***********************************")
utils.Info("Running in Scheduled mode")
utils.Info("Log file in /var/log/mysql-bkup.log")
utils.Info("Execution period ", os.Getenv("SCHEDULE_PERIOD"))
utils.Info("Execution period %s", os.Getenv("BACKUP_CRON_EXPRESSION"))
utils.Info("Storage type %s ", storage)
//Test database connexion
utils.TestDatabaseConnection()
testDatabaseConnection(db)
utils.Info("Creating backup job...")
CreateCrontabScript(disableCompression, storage)
//Start Supervisor
supervisordCmd := exec.Command("supervisord", "-c", "/etc/supervisor/supervisord.conf")
if err := supervisordCmd.Run(); err != nil {
utils.Fatalf("Error starting supervisord: %v\n", err)
supervisorConfig := "/etc/supervisor/supervisord.conf"
// Start Supervisor
cmd := exec.Command("supervisord", "-c", supervisorConfig)
err := cmd.Start()
if err != nil {
utils.Fatal(fmt.Sprintf("Failed to start supervisord: %v", err))
}
utils.Info("Backup job started")
defer func() {
if err := cmd.Process.Kill(); err != nil {
utils.Info("Failed to kill supervisord process: %v", err)
} else {
utils.Info("Supervisor stopped.")
}
}()
if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
}
t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
if err != nil {
utils.Fatal("Failed to tail file: %v", err)
}
// Read and print new lines from the log file
for line := range t.Lines {
fmt.Println(line.Text)
}
}
func intro() {
utils.Info("Starting MySQL Backup...")
utils.Info("Copyright © 2024 Jonas Kaninda ")
}
// BackupDatabase backup database
func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
dbHost = os.Getenv("DB_HOST")
dbPassword := os.Getenv("DB_PASSWORD")
dbUserName := os.Getenv("DB_USERNAME")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
storagePath = os.Getenv("STORAGE_PATH")
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
utils.Fatal("Please make sure all required environment variables for database are set")
} else {
utils.TestDatabaseConnection()
// Backup Database database
utils.Info("Backing up database...")
//Generate file name
bkFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
// Verify is compression is disabled
if disableCompression {
//Generate file name
bkFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
// Execute mysqldump
cmd := exec.Command("mysqldump",
"-h", dbHost,
"-P", dbPort,
"-u", dbUserName,
"--password="+dbPassword,
dbName,
)
output, err := cmd.Output()
if err != nil {
log.Fatal(err)
}
// save output
file, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
if err != nil {
log.Fatal(err)
}
defer file.Close()
_, err = file.Write(output)
if err != nil {
log.Fatal(err)
}
utils.Done("Database has been backed up")
} else {
// Execute mysqldump
cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
}
gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
gzipCmd.Start()
if err != nil {
log.Fatal(err)
}
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
if err := gzipCmd.Wait(); err != nil {
log.Fatal(err)
}
utils.Done("Database has been backed up")
//Delete old backup
if prune {
deleteOldBackup(keepLast)
}
}
historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", storagePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Fatal(err)
}
defer historyFile.Close()
if _, err := historyFile.WriteString(bkFileName + "\n"); err != nil {
log.Fatal(err)
}
}
}
func s3Backup(disableCompression bool, s3Path string, prune bool, keepLast int) {
// Backup Database to S3 storage
MountS3Storage(s3Path)
BackupDatabase(disableCompression, prune, keepLast)
}
func deleteOldBackup(keepLast int) {
utils.Info("Deleting old backups...")
storagePath = os.Getenv("STORAGE_PATH")
// Define the directory path
backupDir := storagePath + "/"
// Get current time
currentTime := time.Now()
// Walk through files in the directory
err := filepath.Walk(backupDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if the file is older than defined day days
if info.Mode().IsRegular() && info.ModTime().Before(currentTime.AddDate(0, 0, -keepLast)) {
// Remove the file
err := os.Remove(path)
if err != nil {
utils.Fatal("Error removing file ", path, err)
} else {
utils.Done("Removed file: ", path)
}
}
return nil
})
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Fatal("Error walking through directory: ", err)
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
utils.Info("Starting database backup...")
err = os.Setenv("MYSQL_PWD", db.dbPassword)
if err != nil {
return
}
testDatabaseConnection(db)
// Backup Database database
utils.Info("Backing up database...")
if disableCompression {
// Execute mysqldump
cmd := exec.Command("mysqldump",
"-h", db.dbHost,
"-P", db.dbPort,
"-u", db.dbUserName,
db.dbName,
)
output, err := cmd.Output()
if err != nil {
log.Fatal(err)
}
// save output
file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
if err != nil {
log.Fatal(err)
}
defer file.Close()
_, err = file.Write(output)
if err != nil {
log.Fatal(err)
}
utils.Done("Database has been backed up")
} else {
// Execute mysqldump
cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName)
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
}
gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
gzipCmd.Start()
if err != nil {
log.Fatal(err)
}
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
if err := gzipCmd.Wait(); err != nil {
log.Fatal(err)
}
utils.Done("Database has been backed up")
}
}
func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to local storage")
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
}
utils.Info("Backup name is %s", finalFileName)
moveToBackup(finalFileName, storagePath)
//Send notification
utils.NotifySuccess(finalFileName)
//Delete old backup
if prune {
deleteOldBackup(backupRetention)
}
//Delete temp
deleteTemp()
}
func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
utils.Info("Backup database to s3 storage")
//Backup database
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage S3 ... ")
utils.Info("Backup name is %s", finalFileName)
err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
if err != nil {
utils.Fatal("Error uploading file to S3: %s ", err)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
}
// Delete old backup
if prune {
err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from S3: %s ", err)
}
}
utils.Done("Uploading backup archive to remote storage S3 ... done ")
//Send notification
utils.NotifySuccess(finalFileName)
//Delete temp
deleteTemp()
}
// sshBackup backup database to SSH remote server
func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to Remote server")
//Backup database
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName)
err := CopyToRemote(finalFileName, remotePath)
if err != nil {
utils.Fatal("Error uploading file to the remote server: %s ", err)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
}
if prune {
//TODO: Delete old backup from remote server
utils.Info("Deleting old backup from a remote server is not implemented yet")
}
utils.Done("Uploading backup archive to remote storage ... done ")
//Send notification
utils.NotifySuccess(finalFileName)
//Delete temp
deleteTemp()
}
// encryptBackup encrypt backup
func encryptBackup(backupFileName string) {
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
if err != nil {
utils.Fatal("Error during encrypting backup %s", err)
}
}

64
pkg/config.go Normal file

@@ -0,0 +1,64 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
)
type Config struct {
}
type dbConfig struct {
dbHost string
dbPort string
dbName string
dbUserName string
dbPassword string
}
type targetDbConfig struct {
targetDbHost string
targetDbPort string
targetDbUserName string
targetDbPassword string
targetDbName string
}
func getDbConfig(cmd *cobra.Command) *dbConfig {
//Set env
utils.GetEnv(cmd, "dbname", "DB_NAME")
dConf := dbConfig{}
dConf.dbHost = os.Getenv("DB_HOST")
dConf.dbPort = os.Getenv("DB_PORT")
dConf.dbName = os.Getenv("DB_NAME")
dConf.dbUserName = os.Getenv("DB_USERNAME")
dConf.dbPassword = os.Getenv("DB_PASSWORD")
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &dConf
}
func getTargetDbConfig() *targetDbConfig {
tdbConfig := targetDbConfig{}
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
err := utils.CheckEnvVars(tdbRVars)
if err != nil {
utils.Error("Please make sure all required environment variables for the target database are set")
utils.Fatal("Error checking target database environment variables: %s", err)
}
return &tdbConfig
}

63
pkg/encrypt.go Normal file

@@ -0,0 +1,63 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"github.com/jkaninda/mysql-bkup/utils"
"os"
"os/exec"
"strings"
)
func Decrypt(inputFile string, passphrase string) error {
utils.Info("Decrypting backup file: " + inputFile + " ...")
//Create gpg home dir
err := utils.MakeDir(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
return err
}
utils.Info("Backup file decrypted successful!")
return nil
}
func Encrypt(inputFile string, passphrase string) error {
utils.Info("Encrypting backup...")
//Create gpg home dir
err := utils.MakeDir(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
return err
}
utils.Info("Backup file encrypted successful!")
return nil
}
func RemoveLastExtension(filename string) string {
if idx := strings.LastIndex(filename, "."); idx != -1 {
return filename[:idx]
}
return filename
}

127
pkg/helper.go Normal file

@@ -0,0 +1,127 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"bytes"
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"os/exec"
"path/filepath"
"time"
)
func copyToTmp(sourcePath string, backupFileName string) {
//Copy backup from storage to /tmp
err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
if err != nil {
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
}
}
func moveToBackup(backupFileName string, destinationPath string) {
//Copy backup from tmp folder to storage destination
err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
if err != nil {
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
fmt.Println("Error deleting file:", err)
}
utils.Done("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
}
func deleteOldBackup(retentionDays int) {
utils.Info("Deleting old backups...")
storagePath = os.Getenv("STORAGE_PATH")
// Define the directory path
backupDir := storagePath + "/"
// Get current time
currentTime := time.Now()
// Delete file
deleteFile := func(filePath string) error {
err := os.Remove(filePath)
if err != nil {
utils.Fatal(fmt.Sprintf("Error: %s", err))
} else {
utils.Done("File %s has been deleted successfully", filePath)
}
return err
}
// Walk through the directory and delete files modified more than specified days ago
err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if it's a regular file and if it was modified more than specified days ago
if fileInfo.Mode().IsRegular() {
timeDiff := currentTime.Sub(fileInfo.ModTime())
if timeDiff.Hours() > 24*float64(retentionDays) {
err := deleteFile(filePath)
if err != nil {
return err
}
}
}
return nil
})
if err != nil {
utils.Fatal(fmt.Sprintf("Error: %s", err))
return
}
utils.Done("Deleting old backups...done")
}
func deleteTemp() {
utils.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if the current item is a file
if !info.IsDir() {
// Delete the file
err = os.Remove(path)
if err != nil {
return err
}
}
return nil
})
if err != nil {
utils.Error("Error deleting files: %v", err)
} else {
utils.Info("Deleting %s ... done", tmpPath)
}
}
// TestDatabaseConnection tests the database connection
func testDatabaseConnection(db *dbConfig) {
err := os.Setenv("MYSQL_PWD", db.dbPassword)
if err != nil {
return
}
utils.Info("Connecting to %s database ...", db.dbName)
cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err = cmd.Run()
if err != nil {
utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
}
utils.Info("Successfully connected to %s database", db.dbName)
}

40
pkg/migrate.go Normal file

@@ -0,0 +1,40 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"time"
)
func StartMigration(cmd *cobra.Command) {
intro()
utils.Info("Starting database migration...")
//Get DB config
dbConf = getDbConfig(cmd)
targetDbConf = getTargetDbConfig()
//Defining the target database variables
newDbConfig := dbConfig{}
newDbConfig.dbHost = targetDbConf.targetDbHost
newDbConfig.dbPort = targetDbConf.targetDbPort
newDbConfig.dbName = targetDbConf.targetDbName
newDbConfig.dbUserName = targetDbConf.targetDbUserName
newDbConfig.dbPassword = targetDbConf.targetDbPassword
//Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
//Backup source Database
BackupDatabase(dbConf, backupFileName, true)
//Restore source database into target database
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
RestoreDatabase(&newDbConfig, backupFileName)
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
utils.Info("Database migration completed.")
}


@@ -1,3 +1,9 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
@@ -10,77 +16,121 @@ import (
)
func StartRestore(cmd *cobra.Command) {
intro()
//Set env
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "dbname", "DB_NAME")
utils.GetEnv(cmd, "port", "DB_PORT")
//Get flag value and set env
s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
executionMode, _ = cmd.Flags().GetString("mode")
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
dbConf = getDbConfig(cmd)
if storage == "s3" {
utils.Info("Restore database from s3")
s3Restore(file, s3Path)
} else {
switch storage {
case "s3":
restoreFromS3(dbConf, file, bucket, s3Path)
case "local":
utils.Info("Restore database from local")
RestoreDatabase(file)
copyToTmp(storagePath, file)
RestoreDatabase(dbConf, file)
case "ssh":
restoreFromRemote(dbConf, file, remotePath)
case "ftp":
utils.Fatal("Restore from FTP is not yet supported")
default:
utils.Info("Restore database from local")
copyToTmp(storagePath, file)
RestoreDatabase(dbConf, file)
}
}
func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil {
utils.Fatal("Error download file from s3 %s %v", file, err)
}
RestoreDatabase(db, file)
}
func restoreFromRemote(db *dbConfig, file, remotePath string) {
utils.Info("Restore database from remote server")
err := CopyFromRemote(file, remotePath)
if err != nil {
utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
}
RestoreDatabase(db, file)
}
// RestoreDatabase restore database
func RestoreDatabase(file string) {
dbHost = os.Getenv("DB_HOST")
dbName = os.Getenv("DB_NAME")
dbPort = os.Getenv("DB_PORT")
storagePath = os.Getenv("STORAGE_PATH")
func RestoreDatabase(db *dbConfig, file string) {
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
if file == "" {
utils.Fatal("Error, file required")
}
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" || file == "" {
utils.Fatal("Please make sure all required environment variables are set")
} else {
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
if utils.FileExists(fmt.Sprintf("%s/%s", storagePath, file)) {
utils.TestDatabaseConnection()
extension := filepath.Ext(fmt.Sprintf("%s/%s", storagePath, file))
// Restore from compressed file / .sql.gz
if extension == ".gz" {
str := "zcat " + fmt.Sprintf("%s/%s", storagePath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
_, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database")
}
utils.Done("Database has been restored")
} else if extension == ".sql" {
//Restore from sql file
str := "cat " + fmt.Sprintf("%s/%s", storagePath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
_, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database", err)
}
utils.Done("Database has been restored")
} else {
utils.Fatal("Unknown file extension ", extension)
}
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
if extension == ".gpg" {
if gpgPassphrase == "" {
utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.")
} else {
utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", storagePath, file))
//Decrypt file
err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
if err != nil {
utils.Fatal("Error decrypting file %s %v", file, err)
}
//Update file name
file = RemoveLastExtension(file)
}
}
}
func s3Restore(file, s3Path string) {
// Restore database from S3
MountS3Storage(s3Path)
RestoreDatabase(file)
if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
err = os.Setenv("MYSQL_PWD", db.dbPassword)
if err != nil {
utils.Error("Error setting MYSQL_PWD environment variable: %v", err)
return
}
testDatabaseConnection(db)
utils.Info("Restoring database...")
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
// Restore from compressed file / .sql.gz
if extension == ".gz" {
str := "zcat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
_, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database %v", err)
}
utils.Info("Restoring database... done")
utils.Done("Database has been restored")
//Delete temp
deleteTemp()
} else if extension == ".sql" {
//Restore from sql file
str := "cat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
_, err := exec.Command("bash", "-c", str).Output()
if err != nil {
utils.Fatal("Error in restoring the database %v", err)
}
utils.Info("Restoring database... done")
utils.Done("Database has been restored")
//Delete temp
deleteTemp()
} else {
utils.Fatal("Unknown file extension %s", extension)
}
} else {
utils.Fatal("File not found in %s", filepath.Join(tmpPath, file))
}
}
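A standalone sketch of the restore pipeline used above: the dump is streamed into the mysql client through bash, MYSQL_PWD is assumed to be set by the caller, and the file name and connection details are placeholders:

	package main

	import (
		"fmt"
		"os"
		"os/exec"
		"path/filepath"
	)

	func main() {
		tmp := "/tmp/backup"
		file := "db_20240928_031500.sql.gz" // placeholder dump name
		var pipeline string
		switch filepath.Ext(file) {
		case ".gz":
			pipeline = "zcat " + filepath.Join(tmp, file) + " | mysql -h db-host -P 3306 -u db-user mydb"
		case ".sql":
			pipeline = "cat " + filepath.Join(tmp, file) + " | mysql -h db-host -P 3306 -u db-user mydb"
		default:
			fmt.Println("unsupported file extension")
			os.Exit(1)
		}
		if out, err := exec.Command("bash", "-c", pipeline).CombinedOutput(); err != nil {
			fmt.Printf("restore failed: %v\n%s\n", err, out)
			os.Exit(1)
		}
		fmt.Println("restore finished")
	}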

View File

@@ -1,80 +0,0 @@
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda.gmail.com>
*/
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"os/exec"
)
var (
accessKey = ""
secretKey = ""
bucketName = ""
s3Endpoint = ""
)
func S3Mount() {
MountS3Storage(s3Path)
}
// MountS3Storage Mount s3 storage using s3fs
func MountS3Storage(s3Path string) {
accessKey = os.Getenv("ACCESS_KEY")
secretKey = os.Getenv("SECRET_KEY")
bucketName = os.Getenv("BUCKET_NAME")
if bucketName == "" {
bucketName = os.Getenv("BUCKETNAME")
}
s3Endpoint = os.Getenv("S3_ENDPOINT")
if accessKey == "" || secretKey == "" || bucketName == "" {
utils.Fatal("Please make sure all environment variables are set for S3")
} else {
storagePath := fmt.Sprintf("%s%s", s3MountPath, s3Path)
err := os.Setenv("STORAGE_PATH", storagePath)
if err != nil {
return
}
//Write file
err = utils.WriteToFile(s3fsPasswdFile, fmt.Sprintf("%s:%s", accessKey, secretKey))
if err != nil {
utils.Fatal("Error creating file")
}
//Change file permission
utils.ChangePermission(s3fsPasswdFile, 0600)
//Mount object storage
utils.Info("Mounting Object storage in ", s3MountPath)
if isEmpty, _ := utils.IsDirEmpty(s3MountPath); isEmpty {
cmd := exec.Command("s3fs", bucketName, s3MountPath,
"-o", "passwd_file="+s3fsPasswdFile,
"-o", "use_cache=/tmp/s3cache",
"-o", "allow_other",
"-o", "url="+s3Endpoint,
"-o", "use_path_request_style",
)
if err := cmd.Run(); err != nil {
utils.Fatal("Error mounting Object storage:", err)
}
if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
utils.Fatalf("Error creating directory %v %v", storagePath, err)
}
} else {
utils.Info("Object storage already mounted in " + s3MountPath)
if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
utils.Fatal("Error creating directory "+storagePath, err)
}
}
}
}

121
pkg/scp.go Normal file

@@ -0,0 +1,121 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"context"
"errors"
"fmt"
"github.com/bramvdbogaerde/go-scp"
"github.com/bramvdbogaerde/go-scp/auth"
"github.com/jkaninda/mysql-bkup/utils"
"golang.org/x/crypto/ssh"
"os"
"path/filepath"
)
func CopyToRemote(fileName, remotePath string) error {
sshUser := os.Getenv("SSH_USER")
sshPassword := os.Getenv("SSH_PASSWORD")
sshHostName := os.Getenv("SSH_HOST_NAME")
sshPort := os.Getenv("SSH_PORT")
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
err := utils.CheckEnvVars(sshHVars)
if err != nil {
utils.Error("Error checking environment variables: %s", err)
os.Exit(1)
}
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
} else {
if sshPassword == "" {
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
}
utils.Warn("Accessing the remote server using password, password is not recommended")
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
}
// Create a new SCP client
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
// Connect to the remote server
err = client.Connect()
if err != nil {
return errors.New("Couldn't establish a connection to the remote server")
}
// Open the local file to copy
file, err := os.Open(filepath.Join(tmpPath, fileName))
if err != nil {
client.Close()
return err
}
// Close client connection after the file has been copied
defer client.Close()
// Close the file after it has been copied
defer file.Close()
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
if err != nil {
fmt.Println("Error while copying file ")
return err
}
return nil
}
func CopyFromRemote(fileName, remotePath string) error {
sshUser := os.Getenv("SSH_USER")
sshPassword := os.Getenv("SSH_PASSWORD")
sshHostName := os.Getenv("SSH_HOST_NAME")
sshPort := os.Getenv("SSH_PORT")
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
err := utils.CheckEnvVars(sshHVars)
if err != nil {
utils.Error("Error checking environment variables\n: %s", err)
os.Exit(1)
}
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
} else {
if sshPassword == "" {
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
}
utils.Warn("Accessing the remote server using password, password is not recommended")
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
}
// Create a new SCP client
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
// Connect to the remote server
err = client.Connect()
if err != nil {
return errors.New("Couldn't establish a connection to the remote server\n")
}
// Close client connection after the file has been copied
defer client.Close()
file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
fmt.Println("Couldn't open the output file")
}
defer file.Close()
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
if err != nil {
fmt.Println("Error while copying file ", err)
return err
}
return nil
}
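Hedged usage sketch of the SCP helpers: key-based authentication via SSH_IDENTIFY_FILE is preferred, with SSH_PASSWORD as the fallback. The import path mirrors the one used for the utils package elsewhere in the repository and is assumed here; the file name is a placeholder:

	package main

	import (
		"fmt"
		"os"

		"github.com/jkaninda/mysql-bkup/pkg"
	)

	func main() {
		// CopyToRemote reads SSH_USER, SSH_HOST_NAME, SSH_PORT and, optionally,
		// SSH_IDENTIFY_FILE or SSH_PASSWORD from the environment.
		for _, v := range []string{"SSH_USER", "SSH_HOST_NAME", "SSH_PORT", "SSH_REMOTE_PATH"} {
			if os.Getenv(v) == "" {
				fmt.Println("missing environment variable:", v)
				os.Exit(1)
			}
		}
		if err := pkg.CopyToRemote("db_20240928_031500.sql.gz", os.Getenv("SSH_REMOTE_PATH")); err != nil {
			fmt.Println("copy to remote server failed:", err)
			os.Exit(1)
		}
		fmt.Println("backup copied to remote server")
	}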

View File

@@ -1,9 +1,11 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
// Package pkg /*
/*
Copyright © 2024 Jonas Kaninda <jonaskaninda.gmail.com>
*/
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
@@ -11,60 +13,53 @@ import (
"os/exec"
)
const cronLogFile = "/var/log/mysql-bkup.log"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
func CreateCrontabScript(disableCompression bool, storage string) {
//task := "/usr/local/bin/backup_cron.sh"
touchCmd := exec.Command("touch", backupCronFile)
if err := touchCmd.Run(); err != nil {
utils.Fatalf("Error creating file %s: %v\n", backupCronFile, err)
utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
}
var disableC = ""
if disableCompression {
disableC = "--disable-compression"
}
var scriptContent string
if storage == "s3" {
scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
scriptContent := fmt.Sprintf(`#!/usr/bin/env bash
set -e
bkup backup --dbname %s --port %s --storage s3 --path %s %v
`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), os.Getenv("S3_PATH"), disableC)
} else {
scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
set -e
bkup backup --dbname %s --port %s %v
`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), disableC)
}
/usr/local/bin/mysql-bkup backup --dbname %s --storage %s %v
`, os.Getenv("DB_NAME"), storage, disableC)
if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
utils.Fatalf("Error writing to %s: %v\n", backupCronFile, err)
utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
}
chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
if err := chmodCmd.Run(); err != nil {
utils.Fatalf("Error changing permissions of %s: %v\n", backupCronFile, err)
utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
}
lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
if err := lnCmd.Run(); err != nil {
utils.Fatalf("Error creating symbolic link: %v\n", err)
utils.Fatal("Error creating symbolic link: %v\n", err)
}
touchLogCmd := exec.Command("touch", cronLogFile)
if err := touchLogCmd.Run(); err != nil {
utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
}
cronJob := "/etc/cron.d/backup_cron"
touchCronCmd := exec.Command("touch", cronJob)
if err := touchCronCmd.Run(); err != nil {
utils.Fatalf("Error creating file %s: %v\n", cronJob, err)
utils.Fatal("Error creating file %s: %v\n", cronJob, err)
}
cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)
if err := utils.WriteToFile(cronJob, cronContent); err != nil {
utils.Fatalf("Error writing to %s: %v\n", cronJob, err)
utils.Fatal("Error writing to %s: %v\n", cronJob, err)
}
utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
@@ -72,5 +67,5 @@ bkup backup --dbname %s --port %s %v
if err := crontabCmd.Run(); err != nil {
utils.Fatal("Error updating crontab: ", err)
}
utils.Info("Starting backup in scheduled mode")
utils.Info("Backup job created.")
}
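A small sketch of the crontab entry the scheduled mode writes: it is built from BACKUP_CRON_EXPRESSION (which replaces the older SCHEDULE_PERIOD variable), sources the supervisord environment, and appends output to the log file. The expression value below is a placeholder:

	package main

	import (
		"fmt"
		"os"
	)

	func main() {
		// Placeholder schedule: every day at 01:00.
		_ = os.Setenv("BACKUP_CRON_EXPRESSION", "0 1 * * *")
		cronLogFile := "/var/log/mysql-bkup.log"
		cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
	`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)
		fmt.Print(cronContent)
	}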

View File

@@ -1,16 +1,49 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
const s3MountPath string = "/s3mnt"
const s3fsPasswdFile string = "/etc/passwd-s3fs"
const cronLogFile = "/var/log/mysql-bkup.log"
const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
const algorithm = "aes256"
const gpgHome = "gnupg"
const gpgExtension = "gpg"
var (
storage = "local"
file = ""
s3Path = "/mysql-bkup"
dbName = ""
dbHost = ""
dbPort = "3306"
executionMode = "default"
storagePath = "/backup"
disableCompression = false
encryption = false
)
// dbHVars Required environment variables for database
var dbHVars = []string{
"DB_HOST",
"DB_PASSWORD",
"DB_USERNAME",
"DB_NAME",
}
var tdbRVars = []string{
"TARGET_DB_HOST",
"TARGET_DB_PORT",
"TARGET_DB_NAME",
"TARGET_DB_USERNAME",
"TARGET_DB_PASSWORD",
}
var dbConf *dbConfig
var targetDbConf *targetDbConfig
// sshHVars Required environment variables for SSH remote server storage
var sshHVars = []string{
"SSH_USER",
"SSH_REMOTE_PATH",
"SSH_HOST_NAME",
"SSH_PORT",
}

View File

@@ -1,8 +0,0 @@
#!/bin/sh
DB_USERNAME='db_username'
DB_PASSWORD='password'
DB_HOST='db_hostname'
DB_NAME='db_name'
BACKUP_DIR="$PWD/backup"
docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:latest backup -d $DB_NAME

View File

@@ -1,3 +1,9 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +

67
utils/logger.go Normal file

@@ -0,0 +1,67 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
import (
"fmt"
"os"
"time"
)
// currentTime returns the timestamp prefix for log lines; computing it per
// call keeps timestamps accurate in long-running processes.
func currentTime() string {
return time.Now().Format("2006/01/02 15:04:05")
}
// Info logs an informational message
func Info(msg string, args ...any) {
if len(args) == 0 {
fmt.Printf("%s INFO: %s\n", currentTime(), msg)
} else {
fmt.Printf("%s INFO: %s\n", currentTime(), fmt.Sprintf(msg, args...))
}
}
// Warn logs a warning message
func Warn(msg string, args ...any) {
if len(args) == 0 {
fmt.Printf("%s WARN: %s\n", currentTime(), msg)
} else {
fmt.Printf("%s WARN: %s\n", currentTime(), fmt.Sprintf(msg, args...))
}
}
// Error logs an error message
func Error(msg string, args ...any) {
if len(args) == 0 {
fmt.Printf("%s ERROR: %s\n", currentTime(), msg)
} else {
fmt.Printf("%s ERROR: %s\n", currentTime(), fmt.Sprintf(msg, args...))
}
}
// Done logs a success message
func Done(msg string, args ...any) {
if len(args) == 0 {
fmt.Printf("%s INFO: %s\n", currentTime(), msg)
} else {
fmt.Printf("%s INFO: %s\n", currentTime(), fmt.Sprintf(msg, args...))
}
}
// Fatal logs an error message, sends an error notification, and exits the program
func Fatal(msg string, args ...any) {
if len(args) == 0 {
fmt.Printf("%s ERROR: %s\n", currentTime(), msg)
NotifyError(msg)
} else {
formattedMessage := fmt.Sprintf(msg, args...)
fmt.Printf("%s ERROR: %s\n", currentTime(), formattedMessage)
NotifyError(formattedMessage)
}
os.Exit(1)
}
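Hedged usage sketch of the logging helpers above; the import path is the one used elsewhere in the repository and the messages are placeholders:

	package main

	import "github.com/jkaninda/mysql-bkup/utils"

	func main() {
		utils.Info("Backup %s created", "db_20240928_031500.sql.gz")
		utils.Warn("STORAGE not set, falling back to %s", "local")
		// utils.Fatal logs the message, sends a Telegram notification when
		// TG_TOKEN and TG_CHAT_ID are set, and then exits with status 1.
	}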

175
utils/s3.go Normal file

@@ -0,0 +1,175 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"log"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
)
// CreateSession creates a new AWS session
func CreateSession() (*session.Session, error) {
// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{
"AWS_S3_ENDPOINT",
"AWS_S3_BUCKET_NAME",
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY",
"AWS_REGION",
"AWS_REGION",
"AWS_REGION",
}
endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
region := os.Getenv("AWS_REGION")
// AWS_DISABLE_SSL is optional and defaults to false when unset
awsDisableSsl := false
if value := os.Getenv("AWS_DISABLE_SSL"); value != "" {
parsed, err := strconv.ParseBool(value)
if err != nil {
Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
}
awsDisableSsl = parsed
}
if err := CheckEnvVars(awsVars); err != nil {
Fatal("Error checking environment variables: %s", err)
}
// S3 Config
s3Config := &aws.Config{
Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
Endpoint: aws.String(endPoint),
Region: aws.String(region),
DisableSSL: aws.Bool(awsDisableSsl),
S3ForcePathStyle: aws.Bool(true),
}
return session.NewSession(s3Config)
}
// UploadFileToS3 uploads a file to S3 with a given prefix
func UploadFileToS3(filePath, key, bucket, prefix string) error {
sess, err := CreateSession()
if err != nil {
return err
}
svc := s3.New(sess)
file, err := os.Open(filepath.Join(filePath, key))
if err != nil {
return err
}
defer file.Close()
fileInfo, err := file.Stat()
if err != nil {
return err
}
objectKey := filepath.Join(prefix, key)
buffer := make([]byte, fileInfo.Size())
if _, err := io.ReadFull(file, buffer); err != nil {
return err
}
fileBytes := bytes.NewReader(buffer)
fileType := http.DetectContentType(buffer)
_, err = svc.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
Body: fileBytes,
ContentLength: aws.Int64(fileInfo.Size()),
ContentType: aws.String(fileType),
})
if err != nil {
return err
}
return nil
}
func DownloadFile(destinationPath, key, bucket, prefix string) error {
sess, err := CreateSession()
if err != nil {
return err
}
Info("Download backup from S3 storage...")
file, err := os.Create(filepath.Join(destinationPath, key))
if err != nil {
fmt.Println("Failed to create file", err)
return err
}
defer file.Close()
objectKey := filepath.Join(prefix, key)
downloader := s3manager.NewDownloader(sess)
numBytes, err := downloader.Download(file,
&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
})
if err != nil {
fmt.Println("Failed to download file", err)
return err
}
Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
return nil
}
func DeleteOldBackup(bucket, prefix string, retention int) error {
sess, err := CreateSession()
if err != nil {
return err
}
svc := s3.New(sess)
// Get the current time and the time threshold for 7 days ago
now := time.Now()
backupRetentionDays := now.AddDate(0, 0, -retention)
// List objects in the bucket
listObjectsInput := &s3.ListObjectsV2Input{
Bucket: aws.String(bucket),
Prefix: aws.String(prefix),
}
err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, object := range page.Contents {
if object.LastModified.Before(backupRetentionDays) {
// Object is older than retention days, delete it
_, err := svc.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(bucket),
Key: object.Key,
})
if err != nil {
log.Printf("Failed to delete object %s: %v", *object.Key, err)
} else {
fmt.Printf("Deleted object %s\n", *object.Key)
}
}
}
return !lastPage
})
if err != nil {
return fmt.Errorf("failed to list objects: %v", err)
}
fmt.Println("Finished deleting old files.")
return nil
}
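A minimal sketch of the retention cutoff DeleteOldBackup applies: an object is deleted when its LastModified timestamp falls before now minus the retention window. The retention value and object age below are placeholders:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		retentionDays := 7 // placeholder retention window
		cutoff := time.Now().AddDate(0, 0, -retentionDays)
		lastModified := time.Now().AddDate(0, 0, -10) // example object aged 10 days
		if lastModified.Before(cutoff) {
			fmt.Println("object is older than the retention window and would be deleted")
		} else {
			fmt.Println("object is within the retention window and would be kept")
		}
	}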

View File

@@ -1,34 +1,24 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
/*****
* MySQL Backup & Restore
* @author Jonas Kaninda
* @license MIT License <https://opensource.org/licenses/MIT>
* @link https://github.com/jkaninda/mysql-bkup
**/
import (
"bytes"
"encoding/json"
"fmt"
"github.com/spf13/cobra"
"io"
"io/fs"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strconv"
)
func Info(v ...any) {
fmt.Println("⒤ ", fmt.Sprint(v...))
}
func Done(v ...any) {
fmt.Println("✔ ", fmt.Sprint(v...))
}
func Fatal(v ...any) {
fmt.Println("✘ ", fmt.Sprint(v...))
os.Exit(1)
}
func Fatalf(msg string, v ...any) {
fmt.Printf("✘ "+msg, v...)
os.Exit(1)
}
func FileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
@@ -47,9 +37,45 @@ func WriteToFile(filePath, content string) error {
_, err = file.WriteString(content)
return err
}
func DeleteFile(filePath string) error {
err := os.Remove(filePath)
if err != nil {
return fmt.Errorf("failed to delete file: %v", err)
}
return nil
}
func CopyFile(src, dst string) error {
// Open the source file for reading
sourceFile, err := os.Open(src)
if err != nil {
return fmt.Errorf("failed to open source file: %v", err)
}
defer sourceFile.Close()
// Create the destination file
destinationFile, err := os.Create(dst)
if err != nil {
return fmt.Errorf("failed to create destination file: %v", err)
}
defer destinationFile.Close()
// Copy the content from source to destination
_, err = io.Copy(destinationFile, sourceFile)
if err != nil {
return fmt.Errorf("failed to copy file: %v", err)
}
// Flush the buffer to ensure all data is written
err = destinationFile.Sync()
if err != nil {
return fmt.Errorf("failed to sync destination file: %v", err)
}
return nil
}
func ChangePermission(filePath string, mod int) {
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
Fatalf("Error changing permissions of %s: %v\n", filePath, err)
Fatal("Error changing permissions of %s: %v\n", filePath, err)
}
}
@@ -67,18 +93,6 @@ func IsDirEmpty(name string) (bool, error) {
return true, nil
}
// TestDatabaseConnection tests the database connection
func TestDatabaseConnection() {
Info("Testing database connection...")
// Test database connection
cmd := exec.Command("mysql", "-h", os.Getenv("DB_HOST"), "-P", os.Getenv("DB_PORT"), "-u", os.Getenv("DB_USERNAME"), "--password="+os.Getenv("DB_PASSWORD"), os.Getenv("DB_NAME"), "-e", "quit")
err := cmd.Run()
if err != nil {
Fatal("Error testing database connection:", err)
}
}
func GetEnv(cmd *cobra.Command, flagName, envName string) string {
value, _ := cmd.Flags().GetString(flagName)
if value != "" {
@@ -109,6 +123,130 @@ func SetEnv(key, value string) {
return
}
}
func GetEnvVariable(envName, oldEnvName string) string {
value := os.Getenv(envName)
if value == "" {
value = os.Getenv(oldEnvName)
if value != "" {
err := os.Setenv(envName, value)
if err != nil {
return value
}
Warn("%s is deprecated, please use %s instead!", oldEnvName, envName)
}
}
return value
}
func ShowHistory() {
}
// CheckEnvVars checks if all the specified environment variables are set
func CheckEnvVars(vars []string) error {
missingVars := []string{}
for _, v := range vars {
if os.Getenv(v) == "" {
missingVars = append(missingVars, v)
}
}
if len(missingVars) > 0 {
return fmt.Errorf("missing environment variables: %v", missingVars)
}
return nil
}
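Hedged usage sketch of CheckEnvVars: the check runs in one pass and the returned error names every missing variable at once, assuming the utils import path used elsewhere in the repository:

	package main

	import (
		"fmt"

		"github.com/jkaninda/mysql-bkup/utils"
	)

	func main() {
		required := []string{"DB_HOST", "DB_USERNAME", "DB_PASSWORD", "DB_NAME"}
		if err := utils.CheckEnvVars(required); err != nil {
			fmt.Println(err) // e.g. missing environment variables: [DB_HOST DB_NAME]
			return
		}
		fmt.Println("all required database variables are set")
	}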
// MakeDir create directory
func MakeDir(dirPath string) error {
err := os.Mkdir(dirPath, 0700)
if err != nil {
return err
}
return nil
}
// MakeDirAll create directory
func MakeDirAll(dirPath string) error {
err := os.MkdirAll(dirPath, 0700)
if err != nil {
return err
}
return nil
}
func GetIntEnv(envName string) int {
val := os.Getenv(envName)
if val == "" {
return 0
}
ret, err := strconv.Atoi(val)
if err != nil {
Error("Error: %v", err)
}
return ret
}
func sendMessage(msg string) {
Info("Sending notification... ")
chatId := os.Getenv("TG_CHAT_ID")
body, _ := json.Marshal(map[string]string{
"chat_id": chatId,
"text": msg,
})
url := fmt.Sprintf("%s/sendMessage", getTgUrl())
// Create an HTTP post request
request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
if err != nil {
Error("Error creating notification request: %v", err)
return
}
request.Header.Add("Content-Type", "application/json")
client := &http.Client{}
response, err := client.Do(request)
if err != nil {
Error("Error sending notification: %v", err)
return
}
defer response.Body.Close()
code := response.StatusCode
if code == 200 {
Info("Notification has been sent")
} else {
body, _ := ioutil.ReadAll(response.Body)
Error("Message not sent, error: %s", string(body))
}
}
func NotifySuccess(fileName string) {
var vars = []string{
"TG_TOKEN",
"TG_CHAT_ID",
}
//Telegram notification
err := CheckEnvVars(vars)
if err == nil {
message := "MySQL Backup \n" +
"Database has been backed up \n" +
"Backup name is " + fileName
sendMessage(message)
}
}
func NotifyError(error string) {
var vars = []string{
"TG_TOKEN",
"TG_CHAT_ID",
}
//Telegram notification
err := CheckEnvVars(vars)
if err == nil {
message := "MySQL Backup \n" +
"An error occurred during database backup \n" +
"Error: " + error
sendMessage(message)
}
}
func getTgUrl() string {
return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
}
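A standalone sketch of the Telegram call the notification helpers make: a JSON body posted to the Bot API sendMessage endpoint, with TG_TOKEN and TG_CHAT_ID read from the environment and a placeholder message text:

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
		"net/http"
		"os"
	)

	func main() {
		body, _ := json.Marshal(map[string]string{
			"chat_id": os.Getenv("TG_CHAT_ID"),
			"text":    "MySQL Backup\nDatabase has been backed up",
		})
		url := fmt.Sprintf("https://api.telegram.org/bot%s/sendMessage", os.Getenv("TG_TOKEN"))
		response, err := http.Post(url, "application/json", bytes.NewBuffer(body))
		if err != nil {
			fmt.Println("request failed:", err)
			os.Exit(1)
		}
		defer response.Body.Close()
		fmt.Println("status:", response.StatusCode)
	}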