mirror of
https://github.com/jkaninda/mysql-bkup.git
synced 2025-12-06 13:39:41 +01:00
Compare commits
18 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 204f66badf | |||
| e0b40ed433 | |||
| 07d717fad2 | |||
| 3bf4911dee | |||
| 0b34a835f7 | |||
| 22bf95e6ca | |||
| 445a104943 | |||
| caeba955c5 | |||
| d906de6b54 | |||
| c8e68af09f | |||
| 082ef09500 | |||
| 2e61054334 | |||
| f394f28357 | |||
| d8867a9baf | |||
| 6ed9ff0a31 | |||
| a4c37e1a4b | |||
| c6930a00ba | |||
| 00f2fca8e4 |
39
.github/workflows/build.yml
vendored
39
.github/workflows/build.yml
vendored
@@ -1,39 +0,0 @@
|
|||||||
name: Build
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: [ "main" ]
|
|
||||||
workflow_dispatch:
|
|
||||||
inputs:
|
|
||||||
docker_tag:
|
|
||||||
description: 'Docker tag'
|
|
||||||
required: true
|
|
||||||
default: 'latest'
|
|
||||||
type: string
|
|
||||||
env:
|
|
||||||
BUILDKIT_IMAGE: jkaninda/mysql-bkup
|
|
||||||
jobs:
|
|
||||||
docker:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
-
|
|
||||||
name: Login to DockerHub
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
-
|
|
||||||
name: Build and push
|
|
||||||
uses: docker/build-push-action@v3
|
|
||||||
with:
|
|
||||||
push: true
|
|
||||||
file: "./docker/Dockerfile"
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
tags: |
|
|
||||||
"${{env.BUILDKIT_IMAGE}}:v0.7"
|
|
||||||
"${{env.BUILDKIT_IMAGE}}:latest"
|
|
||||||
55
.github/workflows/deploy-docs.yml
vendored
Normal file
55
.github/workflows/deploy-docs.yml
vendored
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
name: Deploy Documenation site to GitHub Pages
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: ['main']
|
||||||
|
paths:
|
||||||
|
- 'docs/**'
|
||||||
|
- '.github/workflows/deploy-docs.yml'
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
pages: write
|
||||||
|
id-token: write
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: 'pages'
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Setup Ruby
|
||||||
|
uses: ruby/setup-ruby@v1
|
||||||
|
with:
|
||||||
|
ruby-version: '3.2'
|
||||||
|
bundler-cache: true
|
||||||
|
cache-version: 0
|
||||||
|
working-directory: docs
|
||||||
|
- name: Setup Pages
|
||||||
|
id: pages
|
||||||
|
uses: actions/configure-pages@v2
|
||||||
|
- name: Build with Jekyll
|
||||||
|
working-directory: docs
|
||||||
|
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
|
||||||
|
env:
|
||||||
|
JEKYLL_ENV: production
|
||||||
|
- name: Upload artifact
|
||||||
|
uses: actions/upload-pages-artifact@v1
|
||||||
|
with:
|
||||||
|
path: 'docs/_site/'
|
||||||
|
|
||||||
|
deploy:
|
||||||
|
environment:
|
||||||
|
name: github-pages
|
||||||
|
url: ${{ steps.deployment.outputs.page_url }}
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
steps:
|
||||||
|
- name: Deploy to GitHub Pages
|
||||||
|
id: deployment
|
||||||
|
uses: actions/deploy-pages@v1
|
||||||
49
.github/workflows/release.yml
vendored
Normal file
49
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
name: Release
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- v**
|
||||||
|
env:
|
||||||
|
BUILDKIT_IMAGE: jkaninda/mysql-bkup
|
||||||
|
jobs:
|
||||||
|
docker:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
packages: write
|
||||||
|
contents: read
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
-
|
||||||
|
name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
-
|
||||||
|
name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
- name: Log in to GHCR
|
||||||
|
uses: docker/login-action@v2
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
-
|
||||||
|
name: Get the tag name
|
||||||
|
id: get_tag_name
|
||||||
|
run: echo "TAG_NAME=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||||
|
-
|
||||||
|
name: Build and push
|
||||||
|
uses: docker/build-push-action@v3
|
||||||
|
with:
|
||||||
|
push: true
|
||||||
|
file: "./docker/Dockerfile"
|
||||||
|
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||||
|
tags: |
|
||||||
|
"${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
|
||||||
|
"${{env.BUILDKIT_IMAGE}}:latest"
|
||||||
|
"ghcr.io/${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
|
||||||
|
"ghcr.io/${{env.BUILDKIT_IMAGE}}:latest"
|
||||||
|
|
||||||
24
Makefile
24
Makefile
@@ -2,7 +2,7 @@ BINARY_NAME=mysql-bkup
|
|||||||
include .env
|
include .env
|
||||||
export
|
export
|
||||||
run:
|
run:
|
||||||
go run .
|
go run . backup
|
||||||
|
|
||||||
build:
|
build:
|
||||||
go build -o bin/${BINARY_NAME} .
|
go build -o bin/${BINARY_NAME} .
|
||||||
@@ -17,16 +17,30 @@ docker-build:
|
|||||||
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
|
docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
|
||||||
|
|
||||||
docker-run: docker-build
|
docker-run: docker-build
|
||||||
docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2
|
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --prune --keep-last 2
|
||||||
|
docker-restore: docker-build
|
||||||
|
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore -f ${FILE_NAME}
|
||||||
|
|
||||||
|
|
||||||
docker-run-scheduled: docker-build
|
docker-run-scheduled: docker-build
|
||||||
docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -v "./backup:/backup" jkaninda/mysql-bkup bkup backup --prune --keep-last=2 --mode scheduled --period "* * * * *"
|
docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --mode scheduled --period "* * * * *"
|
||||||
|
|
||||||
|
|
||||||
docker-run-scheduled-s3: docker-build
|
docker-run-scheduled-s3: docker-build
|
||||||
docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
|
docker run --rm --network web --user 1000:1000 --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
|
||||||
|
|
||||||
|
docker-run-s3: docker-build
|
||||||
|
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage s3 --path /custom-path
|
||||||
|
|
||||||
|
|
||||||
docker-restore-s3: docker-build
|
docker-restore-s3: docker-build
|
||||||
docker run --rm --network internal --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "FILE_NAME=${FILE_NAME}" jkaninda/mysql-bkup bkup restore --storage s3 --path /custom-path
|
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup restore --storage s3 -f ${FILE_NAME} --path /custom-path
|
||||||
|
|
||||||
|
docker-run-ssh: docker-build
|
||||||
|
docker run --rm --network web -v "${SSH_IDENTIFY_FILE_LOCAL}:" --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/mysql-bkup bkup backup --storage ssh
|
||||||
|
|
||||||
|
docker-restore-ssh: docker-build
|
||||||
|
docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/mysql-bkup bkup restore --storage ssh -f ${FILE_NAME}
|
||||||
|
|
||||||
|
run-docs:
|
||||||
|
cd docs && bundle exec jekyll serve -H 0.0.0.0 -t
|
||||||
387
README.md
387
README.md
@@ -1,22 +1,25 @@
|
|||||||
# MySQL Backup
|
# MySQL Backup
|
||||||
MySQL Backup and Restoration tool. Backup database to AWS S3 storage or any S3 Alternatives for Object Storage.
|
mysql-bkup is a Docker container image that can be used to backup and restore Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
|
||||||
|
It also supports __encrypting__ your backups using GPG.
|
||||||
|
|
||||||
[](https://github.com/jkaninda/mysql-bkup/actions/workflows/build.yml)
|
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
|
||||||
|
It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
|
||||||
|
|
||||||
|
It also supports __encrypting__ your backups using GPG.
|
||||||
|
|
||||||
|
[](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
|
||||||
[](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
|
[](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
|
||||||

|

|
||||||

|

|
||||||
|
|
||||||
<p align="center">
|
|
||||||
<a href="https://github.com/jkaninda/mysql-bkup">
|
|
||||||
<img src="https://www.mysql.com/common/logos/logo-mysql-170x115.png" alt="Logo">
|
|
||||||
</a>
|
|
||||||
</p>
|
|
||||||
|
|
||||||
> Runs on:
|
|
||||||
- Docker
|
- Docker
|
||||||
- Kubernetes
|
- Kubernetes
|
||||||
|
|
||||||
> Links:
|
## Documentation is found at <https://jkaninda.github.io/mysql-bkup>
|
||||||
|
|
||||||
|
|
||||||
|
## Links:
|
||||||
|
|
||||||
- [Docker Hub](https://hub.docker.com/r/jkaninda/mysql-bkup)
|
- [Docker Hub](https://hub.docker.com/r/jkaninda/mysql-bkup)
|
||||||
- [Github](https://github.com/jkaninda/mysql-bkup)
|
- [Github](https://github.com/jkaninda/mysql-bkup)
|
||||||
|
|
||||||
@@ -24,315 +27,69 @@ MySQL Backup and Restoration tool. Backup database to AWS S3 storage or any S3 A
|
|||||||
|
|
||||||
- [PostgreSQL](https://github.com/jkaninda/pg-bkup)
|
- [PostgreSQL](https://github.com/jkaninda/pg-bkup)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Storage:
|
## Storage:
|
||||||
- local
|
- Local
|
||||||
- s3
|
- AWS S3 or any S3 Alternatives for Object Storage
|
||||||
- Object storage
|
- SSH
|
||||||
|
|
||||||
## Volumes:
|
## Quickstart
|
||||||
|
|
||||||
- /s3mnt => S3 mounting path
|
### Simple backup using Docker CLI
|
||||||
- /backup => local storage mounting path
|
|
||||||
|
|
||||||
## Usage
|
To run a one time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
|
||||||
|
|
||||||
| Options | Shorts | Usage |
|
```shell
|
||||||
|-----------------------|--------|-----------------------------------------------------------------------|
|
docker run --rm --network your_network_name \
|
||||||
| mysql-bkup | bkup | CLI utility |
|
-v $PWD/backup:/backup/ \
|
||||||
| backup | | Backup database operation |
|
-e "DB_HOST=dbhost" \
|
||||||
| restore | | Restore database operation |
|
-e "DB_USERNAME=username" \
|
||||||
| history | | Show the history of backup |
|
-e "DB_PASSWORD=password" \
|
||||||
| --storage | -s | Set storage. local or s3 (default: local) |
|
jkaninda/mysql-bkup mysql-bkup backup -d database_name
|
||||||
| --file | -f | Set file name for restoration |
|
|
||||||
| --path | | Set s3 path without file name. eg: /custom_path |
|
|
||||||
| --dbname | -d | Set database name |
|
|
||||||
| --port | -p | Set database port (default: 3306) |
|
|
||||||
| --mode | -m | Set execution mode. default or scheduled (default: default) |
|
|
||||||
| --disable-compression | | Disable database backup compression |
|
|
||||||
| --prune | | Delete old backup |
|
|
||||||
| --keep-last | | keep all backup and delete within this time interval, default 7 days |
|
|
||||||
| --period | | Set crontab period for scheduled mode only. (default: "0 1 * * *") |
|
|
||||||
| --timeout | -t | Set timeout (default: 60s) |
|
|
||||||
| --help | -h | Print this help message and exit |
|
|
||||||
| --version | -V | Print version information and exit |
|
|
||||||
|
|
||||||
|
|
||||||
## Environment variables
|
|
||||||
|
|
||||||
| Name | Requirement | Description |
|
|
||||||
|-------------|--------------------------------------------------|----------------------|
|
|
||||||
| DB_PORT | Optional, default 3306 | Database port number |
|
|
||||||
| DB_HOST | Required | Database host |
|
|
||||||
| DB_NAME | Optional if it was provided from the -d flag | Database name |
|
|
||||||
| DB_USERNAME | Required | Database user name |
|
|
||||||
| DB_PASSWORD | Required | Database password |
|
|
||||||
| ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
|
|
||||||
| SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
|
|
||||||
| BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
|
|
||||||
| S3_ENDPOINT | Optional, required for S3 storage | AWS S3 Endpoint |
|
|
||||||
| FILE_NAME | Optional if it was provided from the --file flag | File to restore |
|
|
||||||
|
|
||||||
## Note:
|
|
||||||
|
|
||||||
Creating a user for backup tasks who has read-only access is recommended!
|
|
||||||
|
|
||||||
> create read-only user
|
|
||||||
|
|
||||||
```sh
|
|
||||||
mysql -u root -p
|
|
||||||
```
|
```
|
||||||
|
|
||||||
```sql
|
Alternatively, pass a `--env-file` in order to use a full config as described below.
|
||||||
CREATE USER read_only_user IDENTIFIED BY 'your_strong_password';
|
|
||||||
|
|
||||||
```
|
|
||||||
```sql
|
|
||||||
GRANT SELECT, SHOW VIEW ON *.* TO read_only_user;
|
|
||||||
```
|
|
||||||
```sql
|
|
||||||
FLUSH PRIVILEGES;
|
|
||||||
|
|
||||||
```
|
### Simple backup in docker compose file
|
||||||
|
|
||||||
## Backup database :
|
|
||||||
|
|
||||||
Simple backup usage
|
|
||||||
|
|
||||||
```sh
|
|
||||||
mysql-bkup backup --dbname database_name
|
|
||||||
```
|
|
||||||
```sh
|
|
||||||
mysql-bkup backup -d database_name
|
|
||||||
```
|
|
||||||
### S3
|
|
||||||
|
|
||||||
```sh
|
|
||||||
mysql-bkup backup --storage s3 --dbname database_name
|
|
||||||
```
|
|
||||||
## Docker run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker run --rm --network your_network_name --name mysql-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/mysql-bkup:latest mysql-bkup backup -d database_name
|
|
||||||
```
|
|
||||||
|
|
||||||
## Docker compose file:
|
|
||||||
```yaml
|
```yaml
|
||||||
version: '3'
|
|
||||||
services:
|
services:
|
||||||
mariadb:
|
|
||||||
container_name: mariadb
|
|
||||||
image: mariadb
|
|
||||||
environment:
|
|
||||||
MYSQL_DATABASE: mariadb
|
|
||||||
MYSQL_USER: mariadb
|
|
||||||
MYSQL_PASSWORD: password
|
|
||||||
MYSQL_ROOT_PASSWORD: password
|
|
||||||
mysql-bkup:
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
container_name: mysql-bkup
|
container_name: mysql-bkup
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- -c
|
||||||
- mysql-bkup backup -d database_name
|
- mysql-bkup backup
|
||||||
volumes:
|
volumes:
|
||||||
- ./backup:/backup
|
- ./backup:/backup
|
||||||
environment:
|
environment:
|
||||||
- DB_PORT=3306
|
- DB_PORT=5432
|
||||||
- DB_HOST=mariadb
|
- DB_HOST=postgres
|
||||||
- DB_USERNAME=mariadb
|
- DB_NAME=foo
|
||||||
|
- DB_USERNAME=bar
|
||||||
- DB_PASSWORD=password
|
- DB_PASSWORD=password
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
```
|
```
|
||||||
## Restore database :
|
## Deploy on Kubernetes
|
||||||
|
|
||||||
Simple database restore operation usage
|
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
|
||||||
|
|
||||||
```sh
|
### Simple Kubernetes CronJob usage:
|
||||||
mysql-bkup restore --dbname database_name --file database_20231217_115621.sql
|
|
||||||
```
|
|
||||||
|
|
||||||
```sh
|
|
||||||
mysql-bkup restore -f database_20231217_115621.sql
|
|
||||||
```
|
|
||||||
### S3
|
|
||||||
|
|
||||||
```sh
|
|
||||||
mysql-bkup restore --storage s3 --file database_20231217_115621.sql
|
|
||||||
```
|
|
||||||
|
|
||||||
## Docker run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker run --rm --network your_network_name --name mysql-bkup -v $PWD/backup:/backup/ -e "DB_HOST=database_host_name" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" jkaninda/mysql-bkup mysql-bkup backup -d database_name -f db_20231219_022941.sql.gz
|
|
||||||
```
|
|
||||||
|
|
||||||
## Docker compose file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
version: '3'
|
|
||||||
services:
|
|
||||||
mariadb:
|
|
||||||
container_name: mariadb
|
|
||||||
image: mariadb:latest
|
|
||||||
environment:
|
|
||||||
MYSQL_DATABASE: mariadb
|
|
||||||
MYSQL_USER: mariadb
|
|
||||||
MYSQL_PASSWORD: password
|
|
||||||
MYSQL_ROOT_PASSWORD: password
|
|
||||||
mysql-bkup:
|
|
||||||
image: jkaninda/mysql-bkup
|
|
||||||
container_name: mysql-bkup
|
|
||||||
command:
|
|
||||||
- /bin/sh
|
|
||||||
- -c
|
|
||||||
- mysql-bkup restore --file database_20231217_115621.sql --dbname database_name
|
|
||||||
volumes:
|
|
||||||
- ./backup:/backup
|
|
||||||
environment:
|
|
||||||
#- FILE_NAME=mariadb_20231217_040238.sql # Optional if file name is set from command
|
|
||||||
- DB_PORT=3306
|
|
||||||
- DB_HOST=mariadb
|
|
||||||
- DB_NAME=mariadb
|
|
||||||
- DB_USERNAME=mariadb
|
|
||||||
- DB_PASSWORD=password
|
|
||||||
```
|
|
||||||
## Run
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker-compose up -d
|
|
||||||
```
|
|
||||||
## Backup to S3
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker run --rm --privileged --device /dev/fuse --name mysql-bkup -e "DB_HOST=db_hostname" -e "DB_USERNAME=username" -e "DB_PASSWORD=password" -e "ACCESS_KEY=your_access_key" -e "SECRET_KEY=your_secret_key" -e "BUCKETNAME=your_bucket_name" -e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" jkaninda/mysql-bkup mysql-bkup backup -s s3 -d database_name
|
|
||||||
```
|
|
||||||
> To change s3 backup path add this flag : --path /myPath . default path is /mysql_bkup
|
|
||||||
|
|
||||||
Simple S3 backup usage
|
|
||||||
|
|
||||||
```sh
|
|
||||||
bkup backup --storage s3 --dbname mydatabase
|
|
||||||
```
|
|
||||||
```yaml
|
|
||||||
version: '3'
|
|
||||||
services:
|
|
||||||
mysql-bkup:
|
|
||||||
image: jkaninda/mysql-bkup
|
|
||||||
container_name: mysql-bkup
|
|
||||||
privileged: true
|
|
||||||
devices:
|
|
||||||
- "/dev/fuse"
|
|
||||||
command:
|
|
||||||
- /bin/sh
|
|
||||||
- -c
|
|
||||||
- mysql-bkup restore --storage s3 -f database_20231217_115621.sql.gz
|
|
||||||
environment:
|
|
||||||
- DB_PORT=3306
|
|
||||||
- DB_HOST=mysql
|
|
||||||
- DB_NAME=mariadb
|
|
||||||
- DB_USERNAME=mariadb
|
|
||||||
- DB_PASSWORD=password
|
|
||||||
- ACCESS_KEY=${ACCESS_KEY}
|
|
||||||
- SECRET_KEY=${SECRET_KEY}
|
|
||||||
- BUCKET_NAME=${BUCKET_NAME}
|
|
||||||
- S3_ENDPOINT=${S3_ENDPOINT}
|
|
||||||
|
|
||||||
```
|
|
||||||
## Run in Scheduled mode
|
|
||||||
|
|
||||||
This tool can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources.
|
|
||||||
For Docker, you need to run it in scheduled mode by adding `--mode scheduled` flag and specify the periodical backup time by adding `--period "0 1 * * *"` flag.
|
|
||||||
|
|
||||||
Make an automated backup on Docker
|
|
||||||
|
|
||||||
## Syntax of crontab (field description)
|
|
||||||
|
|
||||||
The syntax is:
|
|
||||||
|
|
||||||
- 1: Minute (0-59)
|
|
||||||
- 2: Hours (0-23)
|
|
||||||
- 3: Day (0-31)
|
|
||||||
- 4: Month (0-12 [12 == December])
|
|
||||||
- 5: Day of the week(0-7 [7 or 0 == sunday])
|
|
||||||
|
|
||||||
Easy to remember format:
|
|
||||||
|
|
||||||
```conf
|
|
||||||
* * * * * command to be executed
|
|
||||||
```
|
|
||||||
|
|
||||||
```conf
|
|
||||||
- - - - -
|
|
||||||
| | | | |
|
|
||||||
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
|
|
||||||
| | | ------- Month (1 - 12)
|
|
||||||
| | --------- Day of month (1 - 31)
|
|
||||||
| ----------- Hour (0 - 23)
|
|
||||||
------------- Minute (0 - 59)
|
|
||||||
```
|
|
||||||
|
|
||||||
> At every 30th minute
|
|
||||||
|
|
||||||
```conf
|
|
||||||
*/30 * * * *
|
|
||||||
```
|
|
||||||
> “At minute 0.” every hour
|
|
||||||
```conf
|
|
||||||
0 * * * *
|
|
||||||
```
|
|
||||||
|
|
||||||
> “At 01:00.” every day
|
|
||||||
|
|
||||||
```conf
|
|
||||||
0 1 * * *
|
|
||||||
```
|
|
||||||
|
|
||||||
## Example of scheduled mode
|
|
||||||
|
|
||||||
> Docker run :
|
|
||||||
|
|
||||||
```sh
|
|
||||||
docker run --rm --name mysql-bkup -v $BACKUP_DIR:/backup/ -e "DB_HOST=$DB_HOST" -e "DB_USERNAME=$DB_USERNAME" -e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup mysql-bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *"
|
|
||||||
```
|
|
||||||
|
|
||||||
> With Docker compose
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
version: "3"
|
|
||||||
services:
|
|
||||||
mysql-bkup:
|
|
||||||
image: jkaninda/mysql-bkup
|
|
||||||
container_name: mysql-bkup
|
|
||||||
privileged: true
|
|
||||||
devices:
|
|
||||||
- "/dev/fuse"
|
|
||||||
command:
|
|
||||||
- /bin/sh
|
|
||||||
- -c
|
|
||||||
- mysql-bkup backup --storage s3 --path /mys3_custome_path --dbname database_name --mode scheduled --period "*/30 * * * *"
|
|
||||||
environment:
|
|
||||||
- DB_PORT=3306
|
|
||||||
- DB_HOST=mysqlhost
|
|
||||||
- DB_USERNAME=userName
|
|
||||||
- DB_PASSWORD=${DB_PASSWORD}
|
|
||||||
- ACCESS_KEY=${ACCESS_KEY}
|
|
||||||
- SECRET_KEY=${SECRET_KEY}
|
|
||||||
- BUCKET_NAME=${BUCKET_NAME}
|
|
||||||
- S3_ENDPOINT=${S3_ENDPOINT}
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Kubernetes CronJob
|
|
||||||
For Kubernetes you don't need to run it in scheduled mode.
|
|
||||||
|
|
||||||
Simple Kubernetes CronJob usage:
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: batch/v1
|
apiVersion: batch/v1
|
||||||
kind: CronJob
|
kind: CronJob
|
||||||
metadata:
|
metadata:
|
||||||
name: mysql-bkup-job
|
name: bkup-job
|
||||||
spec:
|
spec:
|
||||||
schedule: "0 1 * * *"
|
schedule: "0 1 * * *"
|
||||||
jobTemplate:
|
jobTemplate:
|
||||||
@@ -342,15 +99,13 @@ spec:
|
|||||||
containers:
|
containers:
|
||||||
- name: mysql-bkup
|
- name: mysql-bkup
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
securityContext:
|
|
||||||
privileged: true
|
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- -c
|
||||||
- mysql-bkup backup -s s3 --path /custom_path
|
- mysql-bkup backup -s s3 --path /custom_path
|
||||||
env:
|
env:
|
||||||
- name: DB_PORT
|
- name: DB_PORT
|
||||||
value: "3306"
|
value: "5432"
|
||||||
- name: DB_HOST
|
- name: DB_HOST
|
||||||
value: ""
|
value: ""
|
||||||
- name: DB_NAME
|
- name: DB_NAME
|
||||||
@@ -359,22 +114,48 @@ spec:
|
|||||||
value: ""
|
value: ""
|
||||||
# Please use secret!
|
# Please use secret!
|
||||||
- name: DB_PASSWORD
|
- name: DB_PASSWORD
|
||||||
value: "password"
|
|
||||||
- name: ACCESS_KEY
|
|
||||||
value: ""
|
value: ""
|
||||||
- name: SECRET_KEY
|
- name: AWS_S3_ENDPOINT
|
||||||
value: ""
|
value: "https://s3.amazonaws.com"
|
||||||
- name: BUCKET_NAME
|
- name: AWS_S3_BUCKET_NAME
|
||||||
value: ""
|
value: "xxx"
|
||||||
- name: S3_ENDPOINT
|
- name: AWS_REGION
|
||||||
value: "https://s3.us-west-2.amazonaws.com"
|
value: "us-west-2"
|
||||||
|
- name: AWS_ACCESS_KEY
|
||||||
|
value: "xxxx"
|
||||||
|
- name: AWS_SECRET_KEY
|
||||||
|
value: "xxxx"
|
||||||
|
- name: AWS_DISABLE_SSL
|
||||||
|
value: "false"
|
||||||
restartPolicy: Never
|
restartPolicy: Never
|
||||||
```
|
```
|
||||||
|
## Available image registries
|
||||||
|
|
||||||
## Contributing
|
This Docker image is published to both Docker Hub and the GitHub container registry.
|
||||||
|
Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker pull jkaninda/mysql-bkup:v1.0
|
||||||
|
docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
|
||||||
|
```
|
||||||
|
|
||||||
|
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
|
||||||
|
|
||||||
|
## Supported Engines
|
||||||
|
|
||||||
|
This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
|
||||||
|
While it may work against different implementations, there are no guarantees about support for non-Docker engines.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
|
||||||
|
|
||||||
|
- The original image is based on `ubuntu` and requires additional tools, making it heavy.
|
||||||
|
- This image is written in Go.
|
||||||
|
- `arm64` and `arm/v7` architectures are supported.
|
||||||
|
- Docker in Swarm mode is supported.
|
||||||
|
- Kubernetes is supported.
|
||||||
|
|
||||||
Contributions are welcome! If you encounter any issues or have suggestions for improvements, please create an issue or submit a pull request.
|
|
||||||
Make sure to follow the existing coding style and provide tests for your changes.
|
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
|
|||||||
@@ -21,10 +21,10 @@ var BackupCmd = &cobra.Command{
|
|||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
//Backup
|
//Backup
|
||||||
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Set execution mode. default or scheduled")
|
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
|
||||||
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Set schedule period time")
|
BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
|
||||||
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Prune old backup")
|
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
|
||||||
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "keep all backup and delete within this time interval, default 7 days")
|
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
|
||||||
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
|
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,14 +0,0 @@
|
|||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
)
|
|
||||||
|
|
||||||
var HistoryCmd = &cobra.Command{
|
|
||||||
Use: "history",
|
|
||||||
Short: "Show the history of backup",
|
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
|
||||||
utils.ShowHistory()
|
|
||||||
},
|
|
||||||
}
|
|
||||||
14
cmd/root.go
14
cmd/root.go
@@ -1,6 +1,6 @@
|
|||||||
// Package cmd /*
|
// Package cmd /*
|
||||||
/*
|
/*
|
||||||
Copyright © 2024 Jonas Kaninda <jonaskaninda@gmail.com>
|
Copyright © 2024 Jonas Kaninda
|
||||||
*/
|
*/
|
||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
@@ -19,7 +19,6 @@ var rootCmd = &cobra.Command{
|
|||||||
Version: appVersion,
|
Version: appVersion,
|
||||||
}
|
}
|
||||||
var operation = ""
|
var operation = ""
|
||||||
var s3Path = "/mysql-bkup"
|
|
||||||
|
|
||||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||||
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||||
@@ -31,16 +30,13 @@ func Execute() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
rootCmd.PersistentFlags().StringP("storage", "s", "local", "Set storage. local or s3")
|
rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
|
||||||
rootCmd.PersistentFlags().StringP("path", "P", s3Path, "Set s3 path, without file name. for S3 storage only")
|
rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
|
||||||
rootCmd.PersistentFlags().StringP("dbname", "d", "", "Set database name")
|
rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
|
||||||
rootCmd.PersistentFlags().IntP("timeout", "t", 30, "Set timeout")
|
rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port")
|
||||||
rootCmd.PersistentFlags().IntP("port", "p", 3306, "Set database port")
|
|
||||||
rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
|
rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
|
||||||
|
|
||||||
rootCmd.AddCommand(VersionCmd)
|
rootCmd.AddCommand(VersionCmd)
|
||||||
rootCmd.AddCommand(BackupCmd)
|
rootCmd.AddCommand(BackupCmd)
|
||||||
rootCmd.AddCommand(RestoreCmd)
|
rootCmd.AddCommand(RestoreCmd)
|
||||||
rootCmd.AddCommand(S3MountCmd)
|
|
||||||
rootCmd.AddCommand(HistoryCmd)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,14 +0,0 @@
|
|||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/jkaninda/mysql-bkup/pkg"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
)
|
|
||||||
|
|
||||||
var S3MountCmd = &cobra.Command{
|
|
||||||
Use: "s3mount",
|
|
||||||
Short: "Mount AWS S3 storage",
|
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
|
||||||
pkg.S3Mount()
|
|
||||||
},
|
|
||||||
}
|
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright © 2024 Jonas Kaninda <jonaskaninda@gmail.com>
|
Copyright © 2024 Jonas Kaninda
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM golang:1.21.0 AS build
|
FROM golang:1.22.5 AS build
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
# Copy the source code.
|
# Copy the source code.
|
||||||
@@ -16,35 +16,51 @@ ENV DB_USERNAME=""
|
|||||||
ENV DB_PASSWORD=""
|
ENV DB_PASSWORD=""
|
||||||
ENV DB_PORT="3306"
|
ENV DB_PORT="3306"
|
||||||
ENV STORAGE=local
|
ENV STORAGE=local
|
||||||
ENV BUCKET_NAME=""
|
ENV AWS_S3_ENDPOINT=""
|
||||||
ENV ACCESS_KEY=""
|
ENV AWS_S3_BUCKET_NAME=""
|
||||||
ENV SECRET_KEY=""
|
ENV AWS_ACCESS_KEY=""
|
||||||
ENV S3_ENDPOINT=https://s3.amazonaws.com
|
ENV AWS_SECRET_KEY=""
|
||||||
|
ENV AWS_REGION="us-west-2"
|
||||||
|
ENV AWS_DISABLE_SSL="false"
|
||||||
|
ENV GPG_PASSPHRASE=""
|
||||||
|
ENV SSH_USER=""
|
||||||
|
ENV SSH_REMOTE_PATH=""
|
||||||
|
ENV SSH_PASSWORD=""
|
||||||
|
ENV SSH_HOST_NAME=""
|
||||||
|
ENV SSH_IDENTIFY_FILE=""
|
||||||
|
ENV SSH_PORT="22"
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
ENV VERSION="v0.6"
|
ENV VERSION="v1.0"
|
||||||
LABEL authors="Jonas Kaninda"
|
ARG WORKDIR="/app"
|
||||||
|
ARG BACKUPDIR="/backup"
|
||||||
|
ARG BACKUP_TMP_DIR="/tmp/backup"
|
||||||
|
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
|
||||||
|
ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
|
||||||
|
LABEL author="Jonas Kaninda"
|
||||||
|
|
||||||
RUN apt-get update -qq
|
RUN apt-get update -qq
|
||||||
#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
|
#RUN apt-get install build-essential libcurl4-openssl-dev libxml2-dev mime-support -y
|
||||||
RUN apt install s3fs mysql-client supervisor cron -y
|
RUN apt install mysql-client supervisor cron gnupg -y
|
||||||
|
|
||||||
# Clear cache
|
# Clear cache
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
RUN mkdir /s3mnt
|
RUN mkdir $WORKDIR
|
||||||
RUN mkdir /tmp/s3cache
|
RUN mkdir $BACKUPDIR
|
||||||
RUN chmod 777 /s3mnt
|
RUN mkdir -p $BACKUP_TMP_DIR
|
||||||
RUN chmod 777 /tmp/s3cache
|
RUN chmod 777 $WORKDIR
|
||||||
|
RUN chmod 777 $BACKUPDIR
|
||||||
|
RUN chmod 777 $BACKUP_TMP_DIR
|
||||||
|
RUN touch $BACKUP_CRON && \
|
||||||
|
touch $BACKUP_CRON_SCRIPT && \
|
||||||
|
chmod 777 $BACKUP_CRON && \
|
||||||
|
chmod 777 $BACKUP_CRON_SCRIPT
|
||||||
|
|
||||||
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
|
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
|
||||||
RUN chmod +x /usr/local/bin/mysql-bkup
|
RUN chmod +x /usr/local/bin/mysql-bkup
|
||||||
|
|
||||||
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
|
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
|
||||||
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/mysql_bkup
|
|
||||||
|
|
||||||
|
|
||||||
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
|
ADD docker/supervisord.conf /etc/supervisor/supervisord.conf
|
||||||
|
|
||||||
|
WORKDIR $WORKDIR
|
||||||
RUN mkdir /backup
|
|
||||||
WORKDIR /backup
|
|
||||||
3
docs/.gitignore
vendored
Normal file
3
docs/.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
_site
|
||||||
|
.sass-cache
|
||||||
|
.jekyll-metadata
|
||||||
24
docs/404.html
Normal file
24
docs/404.html
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
---
|
||||||
|
layout: default
|
||||||
|
---
|
||||||
|
|
||||||
|
<style type="text/css" media="screen">
|
||||||
|
.container {
|
||||||
|
margin: 10px auto;
|
||||||
|
max-width: 600px;
|
||||||
|
text-align: center;
|
||||||
|
}
|
||||||
|
h1 {
|
||||||
|
margin: 30px 0;
|
||||||
|
font-size: 4em;
|
||||||
|
line-height: 1;
|
||||||
|
letter-spacing: -1px;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
|
||||||
|
<div class="container">
|
||||||
|
<h1>404</h1>
|
||||||
|
|
||||||
|
<p><strong>Page not found :(</strong></p>
|
||||||
|
<p>The requested page could not be found.</p>
|
||||||
|
</div>
|
||||||
12
docs/Dockerfile
Normal file
12
docs/Dockerfile
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
FROM ruby:3.3.4
|
||||||
|
|
||||||
|
ENV LC_ALL C.UTF-8
|
||||||
|
ENV LANG en_US.UTF-8
|
||||||
|
ENV LANGUAGE en_US.UTF-8
|
||||||
|
|
||||||
|
WORKDIR /usr/src/app
|
||||||
|
|
||||||
|
COPY . ./
|
||||||
|
RUN gem install bundler && bundle install
|
||||||
|
|
||||||
|
EXPOSE 4000
|
||||||
43
docs/Gemfile
Normal file
43
docs/Gemfile
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
source "https://rubygems.org"
|
||||||
|
|
||||||
|
# Hello! This is where you manage which Jekyll version is used to run.
|
||||||
|
# When you want to use a different version, change it below, save the
|
||||||
|
# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
|
||||||
|
#
|
||||||
|
# bundle exec jekyll serve
|
||||||
|
#
|
||||||
|
# This will help ensure the proper Jekyll version is running.
|
||||||
|
# Happy Jekylling!
|
||||||
|
gem "jekyll", "~> 3.10.0"
|
||||||
|
|
||||||
|
# This is the default theme for new Jekyll sites. You may change this to anything you like.
|
||||||
|
gem "minima", "~> 2.0"
|
||||||
|
|
||||||
|
# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
|
||||||
|
# uncomment the line below. To umysqlrade, run `bundle update github-pages`.
|
||||||
|
# gem "github-pages", group: :jekyll_plugins
|
||||||
|
|
||||||
|
# If you have any plugins, put them here!
|
||||||
|
group :jekyll_plugins do
|
||||||
|
gem "jekyll-feed", "~> 0.6"
|
||||||
|
end
|
||||||
|
|
||||||
|
# Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem
|
||||||
|
# and associated library.
|
||||||
|
platforms :mingw, :x64_mingw, :mswin, :jruby do
|
||||||
|
gem "tzinfo", ">= 1", "< 3"
|
||||||
|
gem "tzinfo-data"
|
||||||
|
end
|
||||||
|
|
||||||
|
# Performance-booster for watching directories on Windows
|
||||||
|
gem "wdm", "~> 0.1.0", :install_if => Gem.win_platform?
|
||||||
|
|
||||||
|
# kramdown v2 ships without the gfm parser by default. If you're using
|
||||||
|
# kramdown v1, comment out this line.
|
||||||
|
gem "kramdown-parser-gfm"
|
||||||
|
|
||||||
|
# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
|
||||||
|
# do not have a Java counterpart.
|
||||||
|
gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]
|
||||||
|
gem "just-the-docs"
|
||||||
|
|
||||||
116
docs/Gemfile.lock
Normal file
116
docs/Gemfile.lock
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
GEM
|
||||||
|
remote: https://rubygems.org/
|
||||||
|
specs:
|
||||||
|
addressable (2.8.7)
|
||||||
|
public_suffix (>= 2.0.2, < 7.0)
|
||||||
|
colorator (1.1.0)
|
||||||
|
concurrent-ruby (1.3.3)
|
||||||
|
csv (3.3.0)
|
||||||
|
em-websocket (0.5.3)
|
||||||
|
eventmachine (>= 0.12.9)
|
||||||
|
http_parser.rb (~> 0)
|
||||||
|
eventmachine (1.2.7)
|
||||||
|
ffi (1.17.0)
|
||||||
|
ffi (1.17.0-aarch64-linux-gnu)
|
||||||
|
ffi (1.17.0-aarch64-linux-musl)
|
||||||
|
ffi (1.17.0-arm-linux-gnu)
|
||||||
|
ffi (1.17.0-arm-linux-musl)
|
||||||
|
ffi (1.17.0-arm64-darwin)
|
||||||
|
ffi (1.17.0-x86-linux-gnu)
|
||||||
|
ffi (1.17.0-x86-linux-musl)
|
||||||
|
ffi (1.17.0-x86_64-darwin)
|
||||||
|
ffi (1.17.0-x86_64-linux-gnu)
|
||||||
|
ffi (1.17.0-x86_64-linux-musl)
|
||||||
|
forwardable-extended (2.6.0)
|
||||||
|
http_parser.rb (0.8.0)
|
||||||
|
i18n (1.14.5)
|
||||||
|
concurrent-ruby (~> 1.0)
|
||||||
|
jekyll (3.10.0)
|
||||||
|
addressable (~> 2.4)
|
||||||
|
colorator (~> 1.0)
|
||||||
|
csv (~> 3.0)
|
||||||
|
em-websocket (~> 0.5)
|
||||||
|
i18n (>= 0.7, < 2)
|
||||||
|
jekyll-sass-converter (~> 1.0)
|
||||||
|
jekyll-watch (~> 2.0)
|
||||||
|
kramdown (>= 1.17, < 3)
|
||||||
|
liquid (~> 4.0)
|
||||||
|
mercenary (~> 0.3.3)
|
||||||
|
pathutil (~> 0.9)
|
||||||
|
rouge (>= 1.7, < 4)
|
||||||
|
safe_yaml (~> 1.0)
|
||||||
|
webrick (>= 1.0)
|
||||||
|
jekyll-feed (0.17.0)
|
||||||
|
jekyll (>= 3.7, < 5.0)
|
||||||
|
jekyll-include-cache (0.2.1)
|
||||||
|
jekyll (>= 3.7, < 5.0)
|
||||||
|
jekyll-sass-converter (1.5.2)
|
||||||
|
sass (~> 3.4)
|
||||||
|
jekyll-seo-tag (2.8.0)
|
||||||
|
jekyll (>= 3.8, < 5.0)
|
||||||
|
jekyll-watch (2.2.1)
|
||||||
|
listen (~> 3.0)
|
||||||
|
just-the-docs (0.8.2)
|
||||||
|
jekyll (>= 3.8.5)
|
||||||
|
jekyll-include-cache
|
||||||
|
jekyll-seo-tag (>= 2.0)
|
||||||
|
rake (>= 12.3.1)
|
||||||
|
kramdown (2.4.0)
|
||||||
|
rexml
|
||||||
|
kramdown-parser-gfm (1.1.0)
|
||||||
|
kramdown (~> 2.0)
|
||||||
|
liquid (4.0.4)
|
||||||
|
listen (3.9.0)
|
||||||
|
rb-fsevent (~> 0.10, >= 0.10.3)
|
||||||
|
rb-inotify (~> 0.9, >= 0.9.10)
|
||||||
|
mercenary (0.3.6)
|
||||||
|
minima (2.5.1)
|
||||||
|
jekyll (>= 3.5, < 5.0)
|
||||||
|
jekyll-feed (~> 0.9)
|
||||||
|
jekyll-seo-tag (~> 2.1)
|
||||||
|
pathutil (0.16.2)
|
||||||
|
forwardable-extended (~> 2.6)
|
||||||
|
public_suffix (6.0.1)
|
||||||
|
rake (13.2.1)
|
||||||
|
rb-fsevent (0.11.2)
|
||||||
|
rb-inotify (0.11.1)
|
||||||
|
ffi (~> 1.0)
|
||||||
|
rexml (3.3.2)
|
||||||
|
strscan
|
||||||
|
rouge (3.30.0)
|
||||||
|
safe_yaml (1.0.5)
|
||||||
|
sass (3.7.4)
|
||||||
|
sass-listen (~> 4.0.0)
|
||||||
|
sass-listen (4.0.0)
|
||||||
|
rb-fsevent (~> 0.9, >= 0.9.4)
|
||||||
|
rb-inotify (~> 0.9, >= 0.9.7)
|
||||||
|
strscan (3.1.0)
|
||||||
|
wdm (0.1.1)
|
||||||
|
webrick (1.8.1)
|
||||||
|
|
||||||
|
PLATFORMS
|
||||||
|
aarch64-linux-gnu
|
||||||
|
aarch64-linux-musl
|
||||||
|
arm-linux-gnu
|
||||||
|
arm-linux-musl
|
||||||
|
arm64-darwin
|
||||||
|
ruby
|
||||||
|
x86-linux-gnu
|
||||||
|
x86-linux-musl
|
||||||
|
x86_64-darwin
|
||||||
|
x86_64-linux-gnu
|
||||||
|
x86_64-linux-musl
|
||||||
|
|
||||||
|
DEPENDENCIES
|
||||||
|
http_parser.rb (~> 0.6.0)
|
||||||
|
jekyll (~> 3.10.0)
|
||||||
|
jekyll-feed (~> 0.6)
|
||||||
|
just-the-docs
|
||||||
|
kramdown-parser-gfm
|
||||||
|
minima (~> 2.0)
|
||||||
|
tzinfo (>= 1, < 3)
|
||||||
|
tzinfo-data
|
||||||
|
wdm (~> 0.1.0)
|
||||||
|
|
||||||
|
BUNDLED WITH
|
||||||
|
2.5.16
|
||||||
70
docs/_config.yml
Normal file
70
docs/_config.yml
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
# Welcome to Jekyll!
|
||||||
|
#
|
||||||
|
# This config file is meant for settings that affect your whole blog, values
|
||||||
|
# which you are expected to set up once and rarely edit after that. If you find
|
||||||
|
# yourself editing this file very often, consider using Jekyll's data files
|
||||||
|
# feature for the data you need to update frequently.
|
||||||
|
#
|
||||||
|
# For technical reasons, this file is *NOT* reloaded automatically when you use
|
||||||
|
# 'bundle exec jekyll serve'. If you change this file, please restart the server process.
|
||||||
|
|
||||||
|
# Site settings
|
||||||
|
# These are used to personalize your new site. If you look in the HTML files,
|
||||||
|
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
|
||||||
|
# You can create any custom variable you would like, and they will be accessible
|
||||||
|
# in the templates via {{ site.myvariable }}.
|
||||||
|
title: MySQL database backup
|
||||||
|
email: hi@jonaskaninda.com
|
||||||
|
description: >- # this means to ignore newlines until "baseurl:"
|
||||||
|
MySQL Backup and Restore Docker container image. Backup database to AWS S3 storage or SSH remote server.
|
||||||
|
|
||||||
|
baseurl: "" # the subpath of your site, e.g. /blog
|
||||||
|
url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
|
||||||
|
twitter_username: jonaskaninda
|
||||||
|
github_username: jkaninda
|
||||||
|
|
||||||
|
callouts_level: quiet
|
||||||
|
callouts:
|
||||||
|
highlight:
|
||||||
|
color: yellow
|
||||||
|
important:
|
||||||
|
title: Important
|
||||||
|
color: blue
|
||||||
|
new:
|
||||||
|
title: New
|
||||||
|
color: green
|
||||||
|
note:
|
||||||
|
title: Note
|
||||||
|
color: purple
|
||||||
|
warning:
|
||||||
|
title: Warning
|
||||||
|
color: red
|
||||||
|
# Build settings
|
||||||
|
markdown: kramdown
|
||||||
|
theme: just-the-docs
|
||||||
|
plugins:
|
||||||
|
- jekyll-feed
|
||||||
|
aux_links:
|
||||||
|
'GitHub Repository':
|
||||||
|
- https://github.com/jkaninda/mysql-bkup
|
||||||
|
|
||||||
|
nav_external_links:
|
||||||
|
- title: GitHub Repository
|
||||||
|
url: https://github.com/jkaninda/mysql-bkup
|
||||||
|
|
||||||
|
footer_content: >-
|
||||||
|
Copyright © 2024 <a target="_blank" href="https://www.jonaskaninda.com">Jonas Kaninda</a>.
|
||||||
|
Distributed under the <a href="https://github.com/jkaninda/mysql-bkup/tree/main/LICENSE">MIT License.</a><br>
|
||||||
|
Something missing, unclear or not working? Open <a href="https://github.com/jkaninda/mysql-bkup/issues">an issue</a>.
|
||||||
|
|
||||||
|
# Exclude from processing.
|
||||||
|
# The following items will not be processed, by default. Create a custom list
|
||||||
|
# to override the default setting.
|
||||||
|
# exclude:
|
||||||
|
# - Gemfile
|
||||||
|
# - Gemfile2.lock
|
||||||
|
# - node_modules
|
||||||
|
# - vendor/bundle/
|
||||||
|
# - vendor/cache/
|
||||||
|
# - vendor/gems/
|
||||||
|
# - vendor/ruby/
|
||||||
25
docs/_posts/2024-07-29-welcome-to-jekyll.markdown
Normal file
25
docs/_posts/2024-07-29-welcome-to-jekyll.markdown
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
---
|
||||||
|
layout: post
|
||||||
|
title: "Welcome to Jekyll!"
|
||||||
|
date: 2024-07-29 03:36:13 +0200
|
||||||
|
categories: jekyll update
|
||||||
|
---
|
||||||
|
You’ll find this post in your `_posts` directory. Go ahead and edit it and re-build the site to see your changes. You can rebuild the site in many different ways, but the most common way is to run `jekyll serve`, which launches a web server and auto-regenerates your site when a file is updated.
|
||||||
|
|
||||||
|
To add new posts, simply add a file in the `_posts` directory that follows the convention `YYYY-MM-DD-name-of-post.ext` and includes the necessary front matter. Take a look at the source for this post to get an idea about how it works.
|
||||||
|
|
||||||
|
Jekyll also offers powerful support for code snippets:
|
||||||
|
|
||||||
|
{% highlight ruby %}
|
||||||
|
def print_hi(name)
|
||||||
|
puts "Hi, #{name}"
|
||||||
|
end
|
||||||
|
print_hi('Tom')
|
||||||
|
#=> prints 'Hi, Tom' to STDOUT.
|
||||||
|
{% endhighlight %}
|
||||||
|
|
||||||
|
Check out the [Jekyll docs][jekyll-docs] for more info on how to get the most out of Jekyll. File all bugs/feature requests at [Jekyll’s GitHub repo][jekyll-gh]. If you have questions, you can ask them on [Jekyll Talk][jekyll-talk].
|
||||||
|
|
||||||
|
[jekyll-docs]: https://jekyllrb.com/docs/home
|
||||||
|
[jekyll-gh]: https://github.com/jekyll/jekyll
|
||||||
|
[jekyll-talk]: https://talk.jekyllrb.com/
|
||||||
13
docs/docker-compose.yml
Normal file
13
docs/docker-compose.yml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
services:
|
||||||
|
jekyll:
|
||||||
|
build:
|
||||||
|
context: ./
|
||||||
|
ports:
|
||||||
|
- 4000:4000
|
||||||
|
environment:
|
||||||
|
- JEKYLL_ENV=development
|
||||||
|
volumes:
|
||||||
|
- .:/usr/src/app
|
||||||
|
stdin_open: true
|
||||||
|
tty: true
|
||||||
|
command: bundle exec jekyll serve -H 0.0.0.0 -t
|
||||||
139
docs/how-tos/backup-to-s3.md
Normal file
139
docs/how-tos/backup-to-s3.md
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
---
|
||||||
|
title: Backup to AWS S3
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 2
|
||||||
|
---
|
||||||
|
# Backup to AWS S3
|
||||||
|
|
||||||
|
{: .note }
|
||||||
|
As described on local backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage s3` (-s s3).
|
||||||
|
You can also specify a specify folder where you want to save you data by adding `--path /my-custom-path` flag.
|
||||||
|
|
||||||
|
|
||||||
|
## Backup to S3
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup --storage s3 -d database --path /my-custom-path
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
## AWS configurations
|
||||||
|
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
|
||||||
|
- AWS_S3_BUCKET_NAME=backup
|
||||||
|
- AWS_REGION="us-west-2"
|
||||||
|
- AWS_ACCESS_KEY=xxxx
|
||||||
|
- AWS_SECRET_KEY=xxxxx
|
||||||
|
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
||||||
|
- AWS_DISABLE_SSL="false"
|
||||||
|
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
|
|
||||||
|
### Recurring backups to S3
|
||||||
|
|
||||||
|
As explained above, you need just to add AWS environment variables and specify the storage type `--storage s3`.
|
||||||
|
In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
## AWS configurations
|
||||||
|
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
|
||||||
|
- AWS_S3_BUCKET_NAME=backup
|
||||||
|
- AWS_REGION="us-west-2"
|
||||||
|
- AWS_ACCESS_KEY=xxxx
|
||||||
|
- AWS_SECRET_KEY=xxxxx
|
||||||
|
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
||||||
|
- AWS_DISABLE_SSL="false"
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deploy on Kubernetes
|
||||||
|
|
||||||
|
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
|
||||||
|
|
||||||
|
### Simple Kubernetes CronJob usage:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: batch/v1
|
||||||
|
kind: CronJob
|
||||||
|
metadata:
|
||||||
|
name: bkup-job
|
||||||
|
spec:
|
||||||
|
schedule: "0 1 * * *"
|
||||||
|
jobTemplate:
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: mysql-bkup
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup -s s3 --path /custom_path
|
||||||
|
env:
|
||||||
|
- name: DB_PORT
|
||||||
|
value: "3306"
|
||||||
|
- name: DB_HOST
|
||||||
|
value: ""
|
||||||
|
- name: DB_NAME
|
||||||
|
value: ""
|
||||||
|
- name: DB_USERNAME
|
||||||
|
value: ""
|
||||||
|
# Please use secret!
|
||||||
|
- name: DB_PASSWORD
|
||||||
|
value: ""
|
||||||
|
- name: AWS_S3_ENDPOINT
|
||||||
|
value: "https://s3.amazonaws.com"
|
||||||
|
- name: AWS_S3_BUCKET_NAME
|
||||||
|
value: "xxx"
|
||||||
|
- name: AWS_REGION
|
||||||
|
value: "us-west-2"
|
||||||
|
- name: AWS_ACCESS_KEY
|
||||||
|
value: "xxxx"
|
||||||
|
- name: AWS_SECRET_KEY
|
||||||
|
value: "xxxx"
|
||||||
|
- name: AWS_DISABLE_SSL
|
||||||
|
value: "false"
|
||||||
|
restartPolicy: OnFailure
|
||||||
|
```
|
||||||
146
docs/how-tos/backup-to-ssh.md
Normal file
146
docs/how-tos/backup-to-ssh.md
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
---
|
||||||
|
title: Backup to SSH
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 3
|
||||||
|
---
|
||||||
|
# Backup to SSH remote server
|
||||||
|
|
||||||
|
|
||||||
|
As described for s3 backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage ssh` or `--storage remote`.
|
||||||
|
You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `SSH_REMOTE_PATH` environment variable.
|
||||||
|
|
||||||
|
{: .note }
|
||||||
|
These environment variables are required for SSH backup `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server.
|
||||||
|
Accessing the remote server using password is not recommended, use private key instead.
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup --storage remote -d database
|
||||||
|
volumes:
|
||||||
|
- ./id_ed25519:/tmp/id_ed25519"
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
## SSH config
|
||||||
|
- SSH_HOST_NAME="hostname"
|
||||||
|
- SSH_PORT=22
|
||||||
|
- SSH_USER=user
|
||||||
|
- SSH_REMOTE_PATH=/home/jkaninda/backups
|
||||||
|
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
|
||||||
|
## We advise you to use a private jey instead of password
|
||||||
|
#- SSH_PASSWORD=password
|
||||||
|
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Recurring backups to SSH remote server
|
||||||
|
|
||||||
|
As explained above, you need just to add required environment variables and specify the storage type `--storage ssh`.
|
||||||
|
You can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup -d database --storage s3 --mode scheduled --period "0 1 * * *"
|
||||||
|
volumes:
|
||||||
|
- ./id_ed25519:/tmp/id_ed25519"
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
## SSH config
|
||||||
|
- SSH_HOST_NAME="hostname"
|
||||||
|
- SSH_PORT=22
|
||||||
|
- SSH_USER=user
|
||||||
|
- SSH_REMOTE_PATH=/home/jkaninda/backups
|
||||||
|
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
|
||||||
|
## We advise you to use a private jey instead of password
|
||||||
|
#- SSH_PASSWORD=password
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deploy on Kubernetes
|
||||||
|
|
||||||
|
For Kubernetes, you don't need to run it in scheduled mode.
|
||||||
|
You can deploy it as CronJob.
|
||||||
|
|
||||||
|
Simple Kubernetes CronJob usage:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: batch/v1
|
||||||
|
kind: CronJob
|
||||||
|
metadata:
|
||||||
|
name: bkup-job
|
||||||
|
spec:
|
||||||
|
schedule: "0 1 * * *"
|
||||||
|
jobTemplate:
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: mysql-bkup
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup -s s3 --path /custom_path
|
||||||
|
env:
|
||||||
|
- name: DB_PORT
|
||||||
|
value: "3306"
|
||||||
|
- name: DB_HOST
|
||||||
|
value: ""
|
||||||
|
- name: DB_NAME
|
||||||
|
value: ""
|
||||||
|
- name: DB_USERNAME
|
||||||
|
value: ""
|
||||||
|
# Please use secret!
|
||||||
|
- name: DB_PASSWORD
|
||||||
|
value: ""
|
||||||
|
- name: SSH_HOST_NAME
|
||||||
|
value: ""
|
||||||
|
- name: SSH_PORT
|
||||||
|
value: "22"
|
||||||
|
- name: SSH_USER
|
||||||
|
value: "xxx"
|
||||||
|
- name: SSH_REMOTE_PATH
|
||||||
|
value: "/home/jkaninda/backups"
|
||||||
|
- name: AWS_ACCESS_KEY
|
||||||
|
value: "xxxx"
|
||||||
|
- name: SSH_IDENTIFY_FILE
|
||||||
|
value: "/home/jkaninda/backups"
|
||||||
|
restartPolicy: OnFailure
|
||||||
|
```
|
||||||
89
docs/how-tos/backup.md
Normal file
89
docs/how-tos/backup.md
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
---
|
||||||
|
title: Backup
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 1
|
||||||
|
---
|
||||||
|
|
||||||
|
# Backup database
|
||||||
|
|
||||||
|
To backup the database, you need to add `backup` subcommand to `mysql-bkup` or `bkup`.
|
||||||
|
|
||||||
|
{: .note }
|
||||||
|
The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression.
|
||||||
|
|
||||||
|
{: .warning }
|
||||||
|
Creating a user for backup tasks who has read-only access is recommended!
|
||||||
|
|
||||||
|
The backup process can be run in scheduled mode for the recurring backups.
|
||||||
|
It handles __recurring__ backups of mysql database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup -d database
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
|
|
||||||
|
### Backup using Docker CLI
|
||||||
|
|
||||||
|
```shell
|
||||||
|
docker run --rm --network your_network_name \
|
||||||
|
-v $PWD/backup:/backup/ \
|
||||||
|
-e "DB_HOST=dbhost" \
|
||||||
|
-e "DB_USERNAME=username" \
|
||||||
|
-e "DB_PASSWORD=password" \
|
||||||
|
jkaninda/mysql-bkup mysql-bkup backup -d database_name
|
||||||
|
```
|
||||||
|
|
||||||
|
In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below.
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup -d database --mode scheduled --period "0 1 * * *"
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
|
|
||||||
54
docs/how-tos/encrypt-backup.md
Normal file
54
docs/how-tos/encrypt-backup.md
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
---
|
||||||
|
title: Encrypt backups using GPG
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 7
|
||||||
|
---
|
||||||
|
# Encrypt backup
|
||||||
|
|
||||||
|
The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given passphrase and saved as a `.sql.gpg` or `.sql.gz.gpg` file instead.
|
||||||
|
|
||||||
|
{: .warning }
|
||||||
|
To restore an encrypted backup, you need to provide the same GPG passphrase used during backup process.
|
||||||
|
|
||||||
|
To decrypt manually, you need to install gnupg
|
||||||
|
|
||||||
|
### Decrypt backup
|
||||||
|
|
||||||
|
```shell
|
||||||
|
gpg --batch --passphrase "my-passphrase" \
|
||||||
|
--output database_20240730_044201.sql.gz \
|
||||||
|
--decrypt database_20240730_044201.sql.gz.gpg
|
||||||
|
```
|
||||||
|
|
||||||
|
### Backup
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup -d database
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
## Required to encrypt backup
|
||||||
|
- GPG_PASSPHRASE=my-secure-passphrase
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
8
docs/how-tos/index.md
Normal file
8
docs/how-tos/index.md
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
---
|
||||||
|
title: How Tos
|
||||||
|
layout: default
|
||||||
|
nav_order: 3
|
||||||
|
has_children: true
|
||||||
|
---
|
||||||
|
|
||||||
|
## How Tos
|
||||||
51
docs/how-tos/restore-from-s3.md
Normal file
51
docs/how-tos/restore-from-s3.md
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
---
|
||||||
|
title: Restore database from AWS S3
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 5
|
||||||
|
---
|
||||||
|
|
||||||
|
# Restore database from S3 storage
|
||||||
|
|
||||||
|
To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
|
||||||
|
|
||||||
|
{: .note }
|
||||||
|
It supports __.sql__ and __.sql.gz__ compressed files.
|
||||||
|
|
||||||
|
### Restore
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
## AWS configurations
|
||||||
|
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
|
||||||
|
- AWS_S3_BUCKET_NAME=backup
|
||||||
|
- AWS_REGION="us-west-2"
|
||||||
|
- AWS_ACCESS_KEY=xxxx
|
||||||
|
- AWS_SECRET_KEY=xxxxx
|
||||||
|
## In case you are using an S3 alternative such as Minio and your Minio instance is not secured, you can change it to true
|
||||||
|
- AWS_DISABLE_SSL="false"
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
50
docs/how-tos/restore-from-ssh.md
Normal file
50
docs/how-tos/restore-from-ssh.md
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
---
|
||||||
|
title: Restore database from SSH
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 6
|
||||||
|
---
|
||||||
|
# Restore database from SSH remote server
|
||||||
|
|
||||||
|
To restore the database from your remote server, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
|
||||||
|
|
||||||
|
{: .note }
|
||||||
|
It supports __.sql__ and __.sql.gz__ compressed files.
|
||||||
|
|
||||||
|
### Restore
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=postgres
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
## SSH config
|
||||||
|
- SSH_HOST_NAME="hostname"
|
||||||
|
- SSH_PORT=22
|
||||||
|
- SSH_USER=user
|
||||||
|
- SSH_REMOTE_PATH=/home/jkaninda/backups
|
||||||
|
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
|
||||||
|
## We advise you to use a private key instead of a password
|
||||||
|
#- SSH_PASSWORD=password
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
43
docs/how-tos/restore.md
Normal file
43
docs/how-tos/restore.md
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
---
|
||||||
|
title: Restore database
|
||||||
|
layout: default
|
||||||
|
parent: How Tos
|
||||||
|
nav_order: 4
|
||||||
|
---
|
||||||
|
|
||||||
|
# Restore database
|
||||||
|
|
||||||
|
To restore the database, you need to add `restore` subcommand to `mysql-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
|
||||||
|
|
||||||
|
{: .note }
|
||||||
|
It supports __.sql__ and __.sql.gz__ compressed files.
|
||||||
|
|
||||||
|
### Restore
|
||||||
|
|
||||||
|
```yml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup restore -d database -f store_20231219_022941.sql.gz
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=mysql
|
||||||
|
- DB_NAME=database
|
||||||
|
- DB_USERNAME=username
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
103
docs/index.md
Normal file
103
docs/index.md
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
---
|
||||||
|
title: Overview
|
||||||
|
layout: home
|
||||||
|
nav_order: 1
|
||||||
|
---
|
||||||
|
|
||||||
|
# About mysql-bkup
|
||||||
|
{:.no_toc}
|
||||||
|
mysql-bkup is a Docker container image that can be used to backup and restore MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
|
||||||
|
It also supports __encrypting__ your backups using GPG.
|
||||||
|
|
||||||
|
We are open to receiving stars, PRs, and issues!
|
||||||
|
|
||||||
|
|
||||||
|
{: .fs-6 .fw-300 }
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
|
||||||
|
It handles __recurring__ backups of MySQL databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
|
||||||
|
|
||||||
|
It also supports __encrypting__ your backups using GPG.
|
||||||
|
|
||||||
|
{: .note }
|
||||||
|
Code and documentation for `v1` version on [this branch][v1-branch].
|
||||||
|
|
||||||
|
[v1-branch]: https://github.com/jkaninda/mysql-bkup
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quickstart
|
||||||
|
|
||||||
|
### Simple backup using Docker CLI
|
||||||
|
|
||||||
|
To run a one time backup, bind your local volume to `/backup` in the container and run the `mysql-bkup backup` command:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
docker run --rm --network your_network_name \
|
||||||
|
-v $PWD/backup:/backup/ \
|
||||||
|
-e "DB_HOST=dbhost" \
|
||||||
|
-e "DB_USERNAME=username" \
|
||||||
|
-e "DB_PASSWORD=password" \
|
||||||
|
jkaninda/mysql-bkup mysql-bkup backup -d database_name
|
||||||
|
```
|
||||||
|
|
||||||
|
Alternatively, pass a `--env-file` in order to use a full config as described below.
|
||||||
|
|
||||||
|
### Simple backup in docker compose file
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
|
image: jkaninda/mysql-bkup
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=postgres
|
||||||
|
- DB_NAME=foo
|
||||||
|
- DB_USERNAME=bar
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
|
```
|
||||||
|
|
||||||
|
## Available image registries
|
||||||
|
|
||||||
|
This Docker image is published to both Docker Hub and the GitHub container registry.
|
||||||
|
Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker pull jkaninda/mysql-bkup:v1.0
|
||||||
|
docker pull ghcr.io/jkaninda/mysql-bkup:v1.0
|
||||||
|
```
|
||||||
|
|
||||||
|
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
|
||||||
|
|
||||||
|
## Supported Engines
|
||||||
|
|
||||||
|
This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
|
||||||
|
While it may work against different implementations, there are no guarantees about support for non-Docker engines.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
|
||||||
|
|
||||||
|
- The original image is based on `ubuntu` and requires additional tools, making it heavy.
|
||||||
|
- This image is written in Go.
|
||||||
|
- `arm64` and `arm/v7` architectures are supported.
|
||||||
|
- Docker in Swarm mode is supported.
|
||||||
|
- Kubernetes is supported.
|
||||||
358
docs/old-version/index.md
Normal file
358
docs/old-version/index.md
Normal file
@@ -0,0 +1,358 @@
|
|||||||
|
---
|
||||||
|
layout: page
|
||||||
|
title: Old version
|
||||||
|
permalink: /old-version/
|
||||||
|
---
|
||||||
|
|
||||||
|
This is the documentation of mysql-bkup for all old versions below `v1.0`.
|
||||||
|
In the old version, S3 storage was mounted using s3fs, so we decided to migrate to the official AWS SDK.
|
||||||
|
|
||||||
|
## Storage:
|
||||||
|
- local
|
||||||
|
- s3
|
||||||
|
- Object storage
|
||||||
|
|
||||||
|
## Volumes:
|
||||||
|
|
||||||
|
- /s3mnt => S3 mounting path
|
||||||
|
- /backup => local storage mounting path
|
||||||
|
|
||||||
|
### Usage
|
||||||
|
|
||||||
|
| Options | Shorts | Usage |
|
||||||
|
|-----------------------|--------|------------------------------------------------------------------------|
|
||||||
|
| mysql-bkup | bkup | CLI utility |
|
||||||
|
| backup | | Backup database operation |
|
||||||
|
| restore | | Restore database operation |
|
||||||
|
| history | | Show the history of backup |
|
||||||
|
| --storage | -s | Storage. local or s3 (default: local) |
|
||||||
|
| --file | -f | File name to restore |
|
||||||
|
| --path | | S3 path without file name. eg: /custom_path |
|
||||||
|
| --dbname | -d | Database name |
|
||||||
|
| --port | -p | Database port (default: 3306) |
|
||||||
|
| --mode | -m | Execution mode. default or scheduled (default: default) |
|
||||||
|
| --disable-compression | | Disable database backup compression |
|
||||||
|
| --prune | | Delete old backup, default disabled |
|
||||||
|
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
|
||||||
|
| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
|
||||||
|
| --help | -h | Print this help message and exit |
|
||||||
|
| --version | -V | Print version information and exit |
|
||||||
|
|
||||||
|
|
||||||
|
## Environment variables
|
||||||
|
|
||||||
|
| Name | Requirement | Description |
|
||||||
|
|-------------|--------------------------------------------------|------------------------------------------------------|
|
||||||
|
| DB_PORT | Optional, default 3306 | Database port number |
|
||||||
|
| DB_HOST | Required | Database host |
|
||||||
|
| DB_NAME | Optional if it was provided from the -d flag | Database name |
|
||||||
|
| DB_USERNAME | Required | Database user name |
|
||||||
|
| DB_PASSWORD | Required | Database password |
|
||||||
|
| ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
|
||||||
|
| SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
|
||||||
|
| BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
|
||||||
|
| S3_ENDPOINT | Optional, required for S3 storage | AWS S3 Endpoint |
|
||||||
|
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
|
||||||
|
|
||||||
|
|
||||||
|
## Note:
|
||||||
|
|
||||||
|
Creating a user for backup tasks who has read-only access is recommended!
|
||||||
|
|
||||||
|
> create read-only user
|
||||||
|
|
||||||
|
|
||||||
|
## Backup database :
|
||||||
|
|
||||||
|
Simple backup usage
|
||||||
|
|
||||||
|
```sh
|
||||||
|
bkup backup
|
||||||
|
```
|
||||||
|
|
||||||
|
### S3
|
||||||
|
|
||||||
|
```sh
|
||||||
|
mysql-bkup backup --storage s3
|
||||||
|
```
|
||||||
|
## Docker run:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
docker run --rm --network your_network_name \
|
||||||
|
--name mysql-bkup -v $PWD/backup:/backup/ \
|
||||||
|
-e "DB_HOST=database_host_name" \
|
||||||
|
-e "DB_USERNAME=username" \
|
||||||
|
-e "DB_PASSWORD=password" jkaninda/mysql-bkup:v0.7 mysql-bkup backup -d database_name
|
||||||
|
```
|
||||||
|
|
||||||
|
## Docker compose file:
|
||||||
|
```yaml
|
||||||
|
version: '3'
|
||||||
|
services:
|
||||||
|
postgres:
|
||||||
|
image: postgres:14.5
|
||||||
|
container_name: postgres
|
||||||
|
restart: unless-stopped
|
||||||
|
volumes:
|
||||||
|
- ./postgres:/var/lib/postgresql/data
|
||||||
|
environment:
|
||||||
|
POSTGRES_DB: bkup
|
||||||
|
POSTGRES_PASSWORD: password
|
||||||
|
POSTGRES_USER: bkup
|
||||||
|
mysql-bkup:
|
||||||
|
image: jkaninda/mysql-bkup:v0.7
|
||||||
|
container_name: mysql-bkup
|
||||||
|
depends_on:
|
||||||
|
- postgres
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup -d bkup
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=postgres
|
||||||
|
- DB_NAME=bkup
|
||||||
|
- DB_USERNAME=bkup
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
```
|
||||||
|
## Restore database :
|
||||||
|
|
||||||
|
Simple database restore operation usage
|
||||||
|
|
||||||
|
```sh
|
||||||
|
mysql-bkup restore --file database_20231217_115621.sql --dbname database_name
|
||||||
|
```
|
||||||
|
|
||||||
|
```sh
|
||||||
|
mysql-bkup restore -f database_20231217_115621.sql -d database_name
|
||||||
|
```
|
||||||
|
### S3
|
||||||
|
|
||||||
|
```sh
|
||||||
|
mysql-bkup restore --storage s3 --file database_20231217_115621.sql --dbname database_name
|
||||||
|
```
|
||||||
|
|
||||||
|
## Docker run:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
docker run --rm --network your_network_name \
|
||||||
|
--name mysql-bkup \
|
||||||
|
-v $PWD/backup:/backup/ \
|
||||||
|
-e "DB_HOST=database_host_name" \
|
||||||
|
-e "DB_USERNAME=username" \
|
||||||
|
-e "DB_PASSWORD=password" \
|
||||||
|
jkaninda/mysql-bkup:v0.7 mysql-bkup restore -d database_name -f store_20231219_022941.sql.gz
|
||||||
|
```
|
||||||
|
|
||||||
|
## Docker compose file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
version: '3'
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
image: jkaninda/mysql-bkup:v0.7
|
||||||
|
container_name: mysql-bkup
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup restore --file database_20231217_115621.sql -d database_name
|
||||||
|
volumes:
|
||||||
|
- ./backup:/backup
|
||||||
|
environment:
|
||||||
|
#- FILE_NAME=database_20231217_040238.sql.gz # Optional if file name is set from command
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=postgres
|
||||||
|
- DB_USERNAME=user_name
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
```
|
||||||
|
## Run
|
||||||
|
|
||||||
|
```sh
|
||||||
|
docker-compose up -d
|
||||||
|
```
|
||||||
|
## Backup to S3
|
||||||
|
|
||||||
|
```sh
|
||||||
|
docker run --rm --privileged \
|
||||||
|
--device /dev/fuse --name mysql-bkup \
|
||||||
|
-e "DB_HOST=db_hostname" \
|
||||||
|
-e "DB_USERNAME=username" \
|
||||||
|
-e "DB_PASSWORD=password" \
|
||||||
|
-e "ACCESS_KEY=your_access_key" \
|
||||||
|
-e "SECRET_KEY=your_secret_key" \
|
||||||
|
-e "BUCKETNAME=your_bucket_name" \
|
||||||
|
-e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" \
|
||||||
|
jkaninda/mysql-bkup:v0.7 mysql-bkup backup -s s3 -d database_name
|
||||||
|
```
|
||||||
|
> To change s3 backup path add this flag : --path /my_customPath . default path is /mysql-bkup
|
||||||
|
|
||||||
|
Simple S3 backup usage
|
||||||
|
|
||||||
|
```sh
|
||||||
|
mysql-bkup backup --storage s3 --dbname mydatabase
|
||||||
|
```
|
||||||
|
```yaml
|
||||||
|
mysql-bkup:
|
||||||
|
image: jkaninda/mysql-bkup:v0.7
|
||||||
|
container_name: mysql-bkup
|
||||||
|
privileged: true
|
||||||
|
devices:
|
||||||
|
- "/dev/fuse"
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup restore --storage s3 -f database_20231217_115621.sql.gz --dbname database_name
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=postgres
|
||||||
|
- DB_USERNAME=user_name
|
||||||
|
- DB_PASSWORD=password
|
||||||
|
- ACCESS_KEY=${ACCESS_KEY}
|
||||||
|
- SECRET_KEY=${SECRET_KEY}
|
||||||
|
- BUCKET_NAME=${BUCKET_NAME}
|
||||||
|
- S3_ENDPOINT=${S3_ENDPOINT}
|
||||||
|
|
||||||
|
```
|
||||||
|
## Run in Scheduled mode
|
||||||
|
|
||||||
|
This tool can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources.
|
||||||
|
For Docker, you need to run it in scheduled mode by adding `--mode scheduled` flag and specify the periodical backup time by adding `--period "0 1 * * *"` flag.
|
||||||
|
|
||||||
|
Make an automated backup on Docker
|
||||||
|
|
||||||
|
## Syntax of crontab (field description)
|
||||||
|
|
||||||
|
The syntax is:
|
||||||
|
|
||||||
|
- 1: Minute (0-59)
|
||||||
|
- 2: Hours (0-23)
|
||||||
|
- 3: Day (0-31)
|
||||||
|
- 4: Month (0-12 [12 == December])
|
||||||
|
- 5: Day of the week(0-7 [7 or 0 == sunday])
|
||||||
|
|
||||||
|
Easy to remember format:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
* * * * * command to be executed
|
||||||
|
```
|
||||||
|
|
||||||
|
```conf
|
||||||
|
- - - - -
|
||||||
|
| | | | |
|
||||||
|
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
|
||||||
|
| | | ------- Month (1 - 12)
|
||||||
|
| | --------- Day of month (1 - 31)
|
||||||
|
| ----------- Hour (0 - 23)
|
||||||
|
------------- Minute (0 - 59)
|
||||||
|
```
|
||||||
|
|
||||||
|
> At every 30th minute
|
||||||
|
|
||||||
|
```conf
|
||||||
|
*/30 * * * *
|
||||||
|
```
|
||||||
|
> “At minute 0.” every hour
|
||||||
|
```conf
|
||||||
|
0 * * * *
|
||||||
|
```
|
||||||
|
|
||||||
|
> “At 01:00.” every day
|
||||||
|
|
||||||
|
```conf
|
||||||
|
0 1 * * *
|
||||||
|
```
|
||||||
|
|
||||||
|
## Example of scheduled mode
|
||||||
|
|
||||||
|
> Docker run :
|
||||||
|
|
||||||
|
```sh
|
||||||
|
docker run --rm --name mysql-bkup \
|
||||||
|
-v $BACKUP_DIR:/backup/ \
|
||||||
|
-e "DB_HOST=$DB_HOST" \
|
||||||
|
-e "DB_USERNAME=$DB_USERNAME" \
|
||||||
|
-e "DB_PASSWORD=$DB_PASSWORD" jkaninda/mysql-bkup:v0.7 mysql-bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *"
|
||||||
|
```
|
||||||
|
|
||||||
|
> With Docker compose
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
version: "3"
|
||||||
|
services:
|
||||||
|
mysql-bkup:
|
||||||
|
image: jkaninda/mysql-bkup:v0.7
|
||||||
|
container_name: mysql-bkup
|
||||||
|
privileged: true
|
||||||
|
devices:
|
||||||
|
- "/dev/fuse"
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "*/30 * * * *"
|
||||||
|
environment:
|
||||||
|
- DB_PORT=3306
|
||||||
|
- DB_HOST=postgreshost
|
||||||
|
- DB_USERNAME=userName
|
||||||
|
- DB_PASSWORD=${DB_PASSWORD}
|
||||||
|
- ACCESS_KEY=${ACCESS_KEY}
|
||||||
|
- SECRET_KEY=${SECRET_KEY}
|
||||||
|
- BUCKET_NAME=${BUCKET_NAME}
|
||||||
|
- S3_ENDPOINT=${S3_ENDPOINT}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Kubernetes CronJob
|
||||||
|
|
||||||
|
For Kubernetes, you don't need to run it in scheduled mode.
|
||||||
|
|
||||||
|
Simple Kubernetes CronJob usage:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: batch/v1
|
||||||
|
kind: CronJob
|
||||||
|
metadata:
|
||||||
|
name: bkup-job
|
||||||
|
spec:
|
||||||
|
schedule: "0 1 * * *"
|
||||||
|
jobTemplate:
|
||||||
|
spec:
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: mysql-bkup
|
||||||
|
image: jkaninda/mysql-bkup:v0.7
|
||||||
|
securityContext:
|
||||||
|
privileged: true
|
||||||
|
command:
|
||||||
|
- /bin/sh
|
||||||
|
- -c
|
||||||
|
- mysql-bkup backup -s s3 --path /custom_path
|
||||||
|
env:
|
||||||
|
- name: DB_PORT
|
||||||
|
value: "3306"
|
||||||
|
- name: DB_HOST
|
||||||
|
value: ""
|
||||||
|
- name: DB_NAME
|
||||||
|
value: ""
|
||||||
|
- name: DB_USERNAME
|
||||||
|
value: ""
|
||||||
|
# Please use secret!
|
||||||
|
- name: DB_PASSWORD
|
||||||
|
value: ""
|
||||||
|
- name: ACCESS_KEY
|
||||||
|
value: ""
|
||||||
|
- name: SECRET_KEY
|
||||||
|
value: ""
|
||||||
|
- name: BUCKET_NAME
|
||||||
|
value: ""
|
||||||
|
- name: S3_ENDPOINT
|
||||||
|
value: "https://s3.us-west-2.amazonaws.com"
|
||||||
|
restartPolicy: Never
|
||||||
|
```
|
||||||
|
|
||||||
|
## Authors
|
||||||
|
|
||||||
|
**Jonas Kaninda**
|
||||||
|
- <https://github.com/jkaninda>
|
||||||
|
|
||||||
105
docs/reference/index.md
Normal file
105
docs/reference/index.md
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
---
|
||||||
|
title: Configuration Reference
|
||||||
|
layout: default
|
||||||
|
nav_order: 2
|
||||||
|
---
|
||||||
|
|
||||||
|
# Configuration reference
|
||||||
|
|
||||||
|
Backup and restore targets, schedule and retention are configured using environment variables or flags.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### CLI utility Usage
|
||||||
|
|
||||||
|
| Options | Shorts | Usage |
|
||||||
|
|-----------------------|--------|----------------------------------------------------------------------------------------|
|
||||||
|
| mysql-bkup | bkup | CLI utility |
|
||||||
|
| backup | | Backup database operation |
|
||||||
|
| restore | | Restore database operation |
|
||||||
|
| --storage | -s | Storage. local or s3 (default: local) |
|
||||||
|
| --file | -f | File name for restoration |
|
||||||
|
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
|
||||||
|
| --dbname | -d | Database name |
|
||||||
|
| --port | -p | Database port (default: 3306) |
|
||||||
|
| --mode | -m | Execution mode. default or scheduled (default: default) |
|
||||||
|
| --disable-compression | | Disable database backup compression |
|
||||||
|
| --prune | | Delete old backup, default disabled |
|
||||||
|
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
|
||||||
|
| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") |
|
||||||
|
| --help | -h | Print this help message and exit |
|
||||||
|
| --version | -V | Print version information and exit |
|
||||||
|
|
||||||
|
## Environment variables
|
||||||
|
|
||||||
|
| Name | Requirement | Description |
|
||||||
|
|-------------------|--------------------------------------------------|------------------------------------------------------|
|
||||||
|
| DB_PORT | Optional, default 3306 | Database port number |
|
||||||
|
| DB_HOST | Required | Database host |
|
||||||
|
| DB_NAME | Optional if it was provided from the -d flag | Database name |
|
||||||
|
| DB_USERNAME | Required | Database user name |
|
||||||
|
| DB_PASSWORD | Required | Database password |
|
||||||
|
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
|
||||||
|
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
|
||||||
|
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
|
||||||
|
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
|
||||||
|
| AWS_REGION | Optional, required for S3 storage | AWS Region |
|
||||||
|
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
|
||||||
|
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
|
||||||
|
| GPG_PASSPHRASE    | Optional, required to encrypt and restore backup | GPG passphrase                                        |
|
||||||
|
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
|
||||||
|
| SSH_USER | Optional, required for SSH storage | ssh remote user |
|
||||||
|
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
|
||||||
|
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
|
||||||
|
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
|
||||||
|
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
|
||||||
|
|
||||||
|
---
|
||||||
|
## Run in Scheduled mode
|
||||||
|
|
||||||
|
This image can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources.
|
||||||
|
For Docker, you need to run it in scheduled mode by adding `--mode scheduled` flag and specify the periodical backup time by adding `--period "0 1 * * *"` flag.
|
||||||
|
|
||||||
|
## Syntax of crontab (field description)
|
||||||
|
|
||||||
|
The syntax is:
|
||||||
|
|
||||||
|
- 1: Minute (0-59)
|
||||||
|
- 2: Hours (0-23)
|
||||||
|
- 3: Day (0-31)
|
||||||
|
- 4: Month (0-12 [12 == December])
|
||||||
|
- 5: Day of the week(0-7 [7 or 0 == sunday])
|
||||||
|
|
||||||
|
Easy to remember format:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
* * * * * command to be executed
|
||||||
|
```
|
||||||
|
|
||||||
|
```conf
|
||||||
|
- - - - -
|
||||||
|
| | | | |
|
||||||
|
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
|
||||||
|
| | | ------- Month (1 - 12)
|
||||||
|
| | --------- Day of month (1 - 31)
|
||||||
|
| ----------- Hour (0 - 23)
|
||||||
|
------------- Minute (0 - 59)
|
||||||
|
```
|
||||||
|
|
||||||
|
> At every 30th minute
|
||||||
|
|
||||||
|
```conf
|
||||||
|
*/30 * * * *
|
||||||
|
```
|
||||||
|
> “At minute 0.” every hour
|
||||||
|
```conf
|
||||||
|
0 * * * *
|
||||||
|
```
|
||||||
|
|
||||||
|
> “At 01:00.” every day
|
||||||
|
|
||||||
|
```conf
|
||||||
|
0 1 * * *
|
||||||
|
```
|
||||||
@@ -1,21 +1,31 @@
|
|||||||
version: "3"
|
|
||||||
services:
|
services:
|
||||||
mysql-bkup:
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
container_name: mysql-bkup
|
container_name: mysql-bkup
|
||||||
privileged: true
|
|
||||||
devices:
|
|
||||||
- "/dev/fuse"
|
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- -c
|
||||||
- mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name
|
- mysql-bkup backup --storage s3 -d my-database
|
||||||
environment:
|
environment:
|
||||||
- DB_PORT=3306
|
- DB_PORT=3306
|
||||||
- DB_HOST=mysqlhost
|
- DB_HOST=mysql
|
||||||
- DB_USERNAME=userName
|
- DB_NAME=database
|
||||||
- DB_PASSWORD=${DB_PASSWORD}
|
- DB_USERNAME=username
|
||||||
- ACCESS_KEY=${ACCESS_KEY}
|
- DB_PASSWORD=password
|
||||||
- SECRET_KEY=${SECRET_KEY}
|
## AWS configurations
|
||||||
- BUCKET_NAME=${BUCKET_NAME}
|
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
|
||||||
- S3_ENDPOINT=https://s3.us-west-2.amazonaws.com
|
- AWS_S3_BUCKET_NAME=backup
|
||||||
|
- AWS_REGION="us-west-2"
|
||||||
|
- AWS_ACCESS_KEY=xxxx
|
||||||
|
- AWS_SECRET_KEY=xxxxx
|
||||||
|
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
||||||
|
- AWS_DISABLE_SSL="false"
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
@@ -11,6 +11,6 @@ services:
|
|||||||
- ./backup:/backup
|
- ./backup:/backup
|
||||||
environment:
|
environment:
|
||||||
- DB_PORT=3306
|
- DB_PORT=3306
|
||||||
- DB_HOST=mysqlhost
|
- DB_HOST=mysql
|
||||||
- DB_USERNAME=userName
|
- DB_USERNAME=userName
|
||||||
- DB_PASSWORD=${DB_PASSWORD}
|
- DB_PASSWORD=${DB_PASSWORD}
|
||||||
@@ -1,21 +1,31 @@
|
|||||||
version: "3"
|
|
||||||
services:
|
services:
|
||||||
mysql-bkup:
|
mysql-bkup:
|
||||||
|
# In production, it is advised to lock your image tag to a proper
|
||||||
|
# release version instead of using `latest`.
|
||||||
|
# Check https://github.com/jkaninda/mysql-bkup/releases
|
||||||
|
# for a list of available releases.
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
container_name: mysql-bkup
|
container_name: mysql-bkup
|
||||||
privileged: true
|
|
||||||
devices:
|
|
||||||
- "/dev/fuse"
|
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- -c
|
||||||
- mysql-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "0 1 * * *"
|
- mysql-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
|
||||||
environment:
|
environment:
|
||||||
- DB_PORT=3306
|
- DB_PORT=3306
|
||||||
- DB_HOST=mysqlhost
|
- DB_HOST=mysql
|
||||||
- DB_USERNAME=userName
|
- DB_NAME=database
|
||||||
- DB_PASSWORD=${DB_PASSWORD}
|
- DB_USERNAME=username
|
||||||
- ACCESS_KEY=${ACCESS_KEY}
|
- DB_PASSWORD=password
|
||||||
- SECRET_KEY=${SECRET_KEY}
|
## AWS configurations
|
||||||
- BUCKET_NAME=${BUCKET_NAME}
|
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
|
||||||
- S3_ENDPOINT=https://s3.us-west-2.amazonaws.com
|
- AWS_S3_BUCKET_NAME=backup
|
||||||
|
- AWS_REGION="us-west-2"
|
||||||
|
- AWS_ACCESS_KEY=xxxx
|
||||||
|
- AWS_SECRET_KEY=xxxxx
|
||||||
|
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
|
||||||
|
- AWS_DISABLE_SSL="false"
|
||||||
|
# mysql-bkup container must be connected to the same network with your database
|
||||||
|
networks:
|
||||||
|
- web
|
||||||
|
networks:
|
||||||
|
web:
|
||||||
@@ -11,6 +11,6 @@ services:
|
|||||||
- ./backup:/backup
|
- ./backup:/backup
|
||||||
environment:
|
environment:
|
||||||
- DB_PORT=3306
|
- DB_PORT=3306
|
||||||
- DB_HOST=mysqlhost
|
- DB_HOST=mysql
|
||||||
- DB_USERNAME=userName
|
- DB_USERNAME=userName
|
||||||
- DB_PASSWORD=${DB_PASSWORD}
|
- DB_PASSWORD=${DB_PASSWORD}
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
apiVersion: batch/v1
|
piVersion: batch/v1
|
||||||
kind: CronJob
|
kind: CronJob
|
||||||
metadata:
|
metadata:
|
||||||
name: db-bkup-job
|
name: bkup-job
|
||||||
spec:
|
spec:
|
||||||
schedule: "0 1 * * *"
|
schedule: "0 1 * * *"
|
||||||
jobTemplate:
|
jobTemplate:
|
||||||
@@ -11,12 +11,10 @@ spec:
|
|||||||
containers:
|
containers:
|
||||||
- name: mysql-bkup
|
- name: mysql-bkup
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
securityContext:
|
|
||||||
privileged: true
|
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- -c
|
||||||
- mysql-bkup backup --storage s3 --path /custom_path
|
- mysql-bkup backup -s s3 --path /custom_path
|
||||||
env:
|
env:
|
||||||
- name: DB_PORT
|
- name: DB_PORT
|
||||||
value: "3306"
|
value: "3306"
|
||||||
@@ -28,13 +26,19 @@ spec:
|
|||||||
value: ""
|
value: ""
|
||||||
# Please use secret!
|
# Please use secret!
|
||||||
- name: DB_PASSWORD
|
- name: DB_PASSWORD
|
||||||
value: "password"
|
value: ""
|
||||||
- name: ACCESS_KEY
|
- name: ACCESS_KEY
|
||||||
value: ""
|
value: ""
|
||||||
- name: SECRET_KEY
|
- name: AWS_S3_ENDPOINT
|
||||||
value: ""
|
value: "https://s3.amazonaws.com"
|
||||||
- name: BUCKETNAME
|
- name: AWS_S3_BUCKET_NAME
|
||||||
value: ""
|
value: "xxx"
|
||||||
- name: S3_ENDPOINT
|
- name: AWS_REGION
|
||||||
value: "https://s3.us-west-2.amazonaws.com"
|
value: "us-west-2"
|
||||||
restartPolicy: Never
|
- name: AWS_ACCESS_KEY
|
||||||
|
value: "xxxx"
|
||||||
|
- name: AWS_SECRET_KEY
|
||||||
|
value: "xxxx"
|
||||||
|
- name: AWS_DISABLE_SSL
|
||||||
|
value: "false"
|
||||||
|
restartPolicy: OnFailure
|
||||||
18
go.mod
18
go.mod
@@ -1,10 +1,22 @@
|
|||||||
module github.com/jkaninda/mysql-bkup
|
module github.com/jkaninda/mysql-bkup
|
||||||
|
|
||||||
go 1.21.0
|
go 1.22.5
|
||||||
|
|
||||||
require github.com/spf13/pflag v1.0.5
|
require github.com/spf13/pflag v1.0.5
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/aws/aws-sdk-go v1.55.3
|
||||||
github.com/spf13/cobra v1.8.0 // indirect
|
github.com/bramvdbogaerde/go-scp v1.5.0
|
||||||
|
github.com/hpcloud/tail v1.0.0
|
||||||
|
github.com/spf13/cobra v1.8.0
|
||||||
|
golang.org/x/crypto v0.18.0
|
||||||
|
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
|
||||||
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
|
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||||
|
golang.org/x/sys v0.22.0 // indirect
|
||||||
|
gopkg.in/fsnotify.v1 v1.4.7 // indirect
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
28
go.sum
28
go.sum
@@ -1,10 +1,38 @@
|
|||||||
|
github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
|
||||||
|
github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||||
|
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||||
|
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||||
|
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
|
||||||
|
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||||
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
|
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||||
|
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||||
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||||
|
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||||
|
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
|
||||||
|
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||||
|
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||||
|
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||||
|
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||||
|
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||||
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|||||||
223
pkg/backup.go
223
pkg/backup.go
@@ -1,11 +1,12 @@
|
|||||||
// Package pkg /*
|
// Package pkg /*
|
||||||
/*
|
/*
|
||||||
Copyright © 2024 Jonas Kaninda <jonaskaninda.gmail.com>
|
Copyright © 2024 Jonas Kaninda
|
||||||
*/
|
*/
|
||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/hpcloud/tail"
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"log"
|
"log"
|
||||||
@@ -16,6 +17,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func StartBackup(cmd *cobra.Command) {
|
func StartBackup(cmd *cobra.Command) {
|
||||||
|
_, _ = cmd.Flags().GetString("operation")
|
||||||
//Set env
|
//Set env
|
||||||
utils.SetEnv("STORAGE_PATH", storagePath)
|
utils.SetEnv("STORAGE_PATH", storagePath)
|
||||||
utils.GetEnv(cmd, "dbname", "DB_NAME")
|
utils.GetEnv(cmd, "dbname", "DB_NAME")
|
||||||
@@ -23,23 +25,41 @@ func StartBackup(cmd *cobra.Command) {
|
|||||||
utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
|
utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
|
||||||
|
|
||||||
//Get flag value and set env
|
//Get flag value and set env
|
||||||
s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
|
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
|
||||||
|
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
|
||||||
storage = utils.GetEnv(cmd, "storage", "STORAGE")
|
storage = utils.GetEnv(cmd, "storage", "STORAGE")
|
||||||
file = utils.GetEnv(cmd, "file", "FILE_NAME")
|
file = utils.GetEnv(cmd, "file", "FILE_NAME")
|
||||||
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
|
backupRetention, _ := cmd.Flags().GetInt("keep-last")
|
||||||
keepLast, _ := cmd.Flags().GetInt("keep-last")
|
|
||||||
prune, _ := cmd.Flags().GetBool("prune")
|
prune, _ := cmd.Flags().GetBool("prune")
|
||||||
|
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
|
||||||
executionMode, _ = cmd.Flags().GetString("mode")
|
executionMode, _ = cmd.Flags().GetString("mode")
|
||||||
|
dbName = os.Getenv("DB_NAME")
|
||||||
|
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
|
||||||
|
//
|
||||||
|
if gpqPassphrase != "" {
|
||||||
|
encryption = true
|
||||||
|
}
|
||||||
|
|
||||||
|
//Generate file name
|
||||||
|
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
|
||||||
|
if disableCompression {
|
||||||
|
backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
|
||||||
|
}
|
||||||
|
|
||||||
if executionMode == "default" {
|
if executionMode == "default" {
|
||||||
if storage == "s3" {
|
switch storage {
|
||||||
utils.Info("Backup database to s3 storage")
|
case "s3":
|
||||||
s3Backup(disableCompression, s3Path, prune, keepLast)
|
s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
|
||||||
} else {
|
case "local":
|
||||||
utils.Info("Backup database to local storage")
|
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
|
||||||
BackupDatabase(disableCompression, prune, keepLast)
|
case "ssh", "remote":
|
||||||
|
sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
|
||||||
|
case "ftp":
|
||||||
|
utils.Fatal("Not supported storage type: %s", storage)
|
||||||
|
default:
|
||||||
|
localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
|
||||||
}
|
}
|
||||||
|
|
||||||
} else if executionMode == "scheduled" {
|
} else if executionMode == "scheduled" {
|
||||||
scheduledMode()
|
scheduledMode()
|
||||||
} else {
|
} else {
|
||||||
@@ -56,7 +76,6 @@ func scheduledMode() {
|
|||||||
fmt.Println(" Starting MySQL Bkup... ")
|
fmt.Println(" Starting MySQL Bkup... ")
|
||||||
fmt.Println("***********************************")
|
fmt.Println("***********************************")
|
||||||
utils.Info("Running in Scheduled mode")
|
utils.Info("Running in Scheduled mode")
|
||||||
utils.Info("Log file in /var/log/mysql-bkup.log")
|
|
||||||
utils.Info("Execution period ", os.Getenv("SCHEDULE_PERIOD"))
|
utils.Info("Execution period ", os.Getenv("SCHEDULE_PERIOD"))
|
||||||
|
|
||||||
//Test database connexion
|
//Test database connexion
|
||||||
@@ -65,35 +84,65 @@ func scheduledMode() {
|
|||||||
utils.Info("Creating backup job...")
|
utils.Info("Creating backup job...")
|
||||||
CreateCrontabScript(disableCompression, storage)
|
CreateCrontabScript(disableCompression, storage)
|
||||||
|
|
||||||
|
supervisorConfig := "/etc/supervisor/supervisord.conf"
|
||||||
|
|
||||||
// Start Supervisor
|
// Start Supervisor
|
||||||
supervisordCmd := exec.Command("supervisord", "-c", "/etc/supervisor/supervisord.conf")
|
cmd := exec.Command("supervisord", "-c", supervisorConfig)
|
||||||
if err := supervisordCmd.Run(); err != nil {
|
err := cmd.Start()
|
||||||
utils.Fatalf("Error starting supervisord: %v\n", err)
|
if err != nil {
|
||||||
|
utils.Fatal(fmt.Sprintf("Failed to start supervisord: %v", err))
|
||||||
|
}
|
||||||
|
utils.Info("Backup job started")
|
||||||
|
defer func() {
|
||||||
|
if err := cmd.Process.Kill(); err != nil {
|
||||||
|
utils.Info("Failed to kill supervisord process: %v", err)
|
||||||
|
} else {
|
||||||
|
utils.Info("Supervisor stopped.")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
|
||||||
|
utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
|
||||||
|
}
|
||||||
|
t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Failed to tail file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read and print new lines from the log file
|
||||||
|
for line := range t.Lines {
|
||||||
|
fmt.Println(line.Text)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// BackupDatabase backup database
|
// BackupDatabase backup database
|
||||||
func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
|
func BackupDatabase(backupFileName string, disableCompression bool) {
|
||||||
dbHost = os.Getenv("DB_HOST")
|
dbHost = os.Getenv("DB_HOST")
|
||||||
dbPassword := os.Getenv("DB_PASSWORD")
|
dbPassword = os.Getenv("DB_PASSWORD")
|
||||||
dbUserName := os.Getenv("DB_USERNAME")
|
dbUserName = os.Getenv("DB_USERNAME")
|
||||||
dbName = os.Getenv("DB_NAME")
|
dbName = os.Getenv("DB_NAME")
|
||||||
dbPort = os.Getenv("DB_PORT")
|
dbPort = os.Getenv("DB_PORT")
|
||||||
storagePath = os.Getenv("STORAGE_PATH")
|
storagePath = os.Getenv("STORAGE_PATH")
|
||||||
|
|
||||||
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
|
// dbHVars Required environment variables for database
|
||||||
utils.Fatal("Please make sure all required environment variables for database are set")
|
var dbHVars = []string{
|
||||||
} else {
|
"DB_HOST",
|
||||||
|
"DB_PASSWORD",
|
||||||
|
"DB_USERNAME",
|
||||||
|
"DB_NAME",
|
||||||
|
}
|
||||||
|
err := utils.CheckEnvVars(dbHVars)
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Please make sure all required environment variables for database are set")
|
||||||
|
utils.Fatal("Error checking environment variables: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
utils.Info("Starting database backup...")
|
||||||
utils.TestDatabaseConnection()
|
utils.TestDatabaseConnection()
|
||||||
|
|
||||||
// Backup Database database
|
// Backup Database database
|
||||||
utils.Info("Backing up database...")
|
utils.Info("Backing up database...")
|
||||||
//Generate file name
|
|
||||||
bkFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
|
|
||||||
|
|
||||||
// Verify is compression is disabled
|
|
||||||
if disableCompression {
|
if disableCompression {
|
||||||
//Generate file name
|
|
||||||
bkFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
|
|
||||||
// Execute mysqldump
|
// Execute mysqldump
|
||||||
cmd := exec.Command("mysqldump",
|
cmd := exec.Command("mysqldump",
|
||||||
"-h", dbHost,
|
"-h", dbHost,
|
||||||
@@ -108,7 +157,7 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// save output
|
// save output
|
||||||
file, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
|
file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -129,7 +178,7 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
|
|||||||
}
|
}
|
||||||
gzipCmd := exec.Command("gzip")
|
gzipCmd := exec.Command("gzip")
|
||||||
gzipCmd.Stdin = stdout
|
gzipCmd.Stdin = stdout
|
||||||
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
|
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
|
||||||
gzipCmd.Start()
|
gzipCmd.Start()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
@@ -141,58 +190,96 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) {
|
|||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
utils.Done("Database has been backed up")
|
utils.Done("Database has been backed up")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
|
||||||
|
utils.Info("Backup database to local storage")
|
||||||
|
BackupDatabase(backupFileName, disableCompression)
|
||||||
|
finalFileName := backupFileName
|
||||||
|
if encrypt {
|
||||||
|
encryptBackup(backupFileName)
|
||||||
|
finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
|
||||||
|
}
|
||||||
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
|
moveToBackup(finalFileName, storagePath)
|
||||||
//Delete old backup
|
//Delete old backup
|
||||||
if prune {
|
if prune {
|
||||||
deleteOldBackup(keepLast)
|
deleteOldBackup(backupRetention)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
|
||||||
|
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
|
||||||
|
utils.Info("Backup database to s3 storage")
|
||||||
|
//Backup database
|
||||||
|
BackupDatabase(backupFileName, disableCompression)
|
||||||
|
finalFileName := backupFileName
|
||||||
|
if encrypt {
|
||||||
|
encryptBackup(backupFileName)
|
||||||
|
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
|
||||||
}
|
}
|
||||||
|
utils.Info("Uploading backup file to S3 storage...")
|
||||||
historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", storagePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
|
err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
utils.Fatal("Error uploading file to S3: %s ", err)
|
||||||
}
|
|
||||||
defer historyFile.Close()
|
|
||||||
if _, err := historyFile.WriteString(bkFileName + "\n"); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func s3Backup(disableCompression bool, s3Path string, prune bool, keepLast int) {
|
//Delete backup file from tmp folder
|
||||||
// Backup Database to S3 storage
|
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
|
||||||
MountS3Storage(s3Path)
|
|
||||||
BackupDatabase(disableCompression, prune, keepLast)
|
|
||||||
}
|
|
||||||
|
|
||||||
func deleteOldBackup(keepLast int) {
|
|
||||||
utils.Info("Deleting old backups...")
|
|
||||||
storagePath = os.Getenv("STORAGE_PATH")
|
|
||||||
// Define the directory path
|
|
||||||
backupDir := storagePath + "/"
|
|
||||||
// Get current time
|
|
||||||
currentTime := time.Now()
|
|
||||||
// Walk through files in the directory
|
|
||||||
err := filepath.Walk(backupDir, func(path string, info os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
fmt.Println("Error deleting file: ", err)
|
||||||
}
|
|
||||||
// Check if the file is older than defined day days
|
|
||||||
if info.Mode().IsRegular() && info.ModTime().Before(currentTime.AddDate(0, 0, -keepLast)) {
|
|
||||||
// Remove the file
|
|
||||||
err := os.Remove(path)
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatal("Error removing file ", path, err)
|
|
||||||
} else {
|
|
||||||
utils.Done("Removed file: ", path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
|
}
|
||||||
|
// Delete old backup
|
||||||
|
if prune {
|
||||||
|
err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error walking through directory: ", err)
|
utils.Fatal("Error deleting old backup from S3: %s ", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
utils.Done("Database has been backed up and uploaded to s3 ")
|
||||||
|
}
|
||||||
|
func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
|
||||||
|
utils.Info("Backup database to Remote server")
|
||||||
|
//Backup database
|
||||||
|
BackupDatabase(backupFileName, disableCompression)
|
||||||
|
finalFileName := backupFileName
|
||||||
|
if encrypt {
|
||||||
|
encryptBackup(backupFileName)
|
||||||
|
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
|
||||||
|
}
|
||||||
|
utils.Info("Uploading backup file to remote server...")
|
||||||
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
|
err := CopyToRemote(finalFileName, remotePath)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error uploading file to the remote server: %s ", err)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
//Delete backup file from tmp folder
|
||||||
|
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error deleting file: ", err)
|
||||||
|
|
||||||
|
}
|
||||||
|
if prune {
|
||||||
|
//TODO: Delete old backup from remote server
|
||||||
|
utils.Info("Deleting old backup from a remote server is not implemented yet")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
utils.Done("Database has been backed up and uploaded to remote server ")
|
||||||
|
}
|
||||||
|
|
||||||
|
func encryptBackup(backupFileName string) {
|
||||||
|
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
|
||||||
|
err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error during encrypting backup %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
4
pkg/config.go
Normal file
4
pkg/config.go
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
}
|
||||||
48
pkg/encrypt.go
Normal file
48
pkg/encrypt.go
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Decrypt(inputFile string, passphrase string) error {
|
||||||
|
utils.Info("Decrypting backup file: " + inputFile + " ...")
|
||||||
|
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
|
||||||
|
cmd.Stdout = os.Stdout
|
||||||
|
cmd.Stderr = os.Stderr
|
||||||
|
|
||||||
|
err := cmd.Run()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
utils.Info("Backup file decrypted successful!")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func Encrypt(inputFile string, passphrase string) error {
|
||||||
|
utils.Info("Encrypting backup...")
|
||||||
|
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
|
||||||
|
cmd.Stdout = os.Stdout
|
||||||
|
cmd.Stderr = os.Stderr
|
||||||
|
|
||||||
|
err := cmd.Run()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
utils.Info("Backup file encrypted successful!")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func RemoveLastExtension(filename string) string {
|
||||||
|
if idx := strings.LastIndex(filename, "."); idx != -1 {
|
||||||
|
return filename[:idx]
|
||||||
|
}
|
||||||
|
return filename
|
||||||
|
}
|
||||||
74
pkg/helper.go
Normal file
74
pkg/helper.go
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func copyToTmp(sourcePath string, backupFileName string) {
|
||||||
|
//Copy backup from storage to /tmp
|
||||||
|
err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func moveToBackup(backupFileName string, destinationPath string) {
|
||||||
|
//Copy backup from tmp folder to storage destination
|
||||||
|
err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
|
||||||
|
|
||||||
|
}
|
||||||
|
//Delete backup file from tmp folder
|
||||||
|
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error deleting file:", err)
|
||||||
|
|
||||||
|
}
|
||||||
|
utils.Done("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
|
||||||
|
}
|
||||||
|
func deleteOldBackup(retentionDays int) {
|
||||||
|
utils.Info("Deleting old backups...")
|
||||||
|
storagePath = os.Getenv("STORAGE_PATH")
|
||||||
|
// Define the directory path
|
||||||
|
backupDir := storagePath + "/"
|
||||||
|
// Get current time
|
||||||
|
currentTime := time.Now()
|
||||||
|
// Delete file
|
||||||
|
deleteFile := func(filePath string) error {
|
||||||
|
err := os.Remove(filePath)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal(fmt.Sprintf("Error: %s", err))
|
||||||
|
} else {
|
||||||
|
utils.Done("File %s has been deleted successfully", filePath)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk through the directory and delete files modified more than specified days ago
|
||||||
|
err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Check if it's a regular file and if it was modified more than specified days ago
|
||||||
|
if fileInfo.Mode().IsRegular() {
|
||||||
|
timeDiff := currentTime.Sub(fileInfo.ModTime())
|
||||||
|
if timeDiff.Hours() > 24*float64(retentionDays) {
|
||||||
|
err := deleteFile(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal(fmt.Sprintf("Error: %s", err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
116
pkg/restore.go
116
pkg/restore.go
@@ -17,70 +17,118 @@ func StartRestore(cmd *cobra.Command) {
|
|||||||
utils.GetEnv(cmd, "port", "DB_PORT")
|
utils.GetEnv(cmd, "port", "DB_PORT")
|
||||||
|
|
||||||
//Get flag value and set env
|
//Get flag value and set env
|
||||||
s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
|
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
|
||||||
|
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
|
||||||
storage = utils.GetEnv(cmd, "storage", "STORAGE")
|
storage = utils.GetEnv(cmd, "storage", "STORAGE")
|
||||||
file = utils.GetEnv(cmd, "file", "FILE_NAME")
|
file = utils.GetEnv(cmd, "file", "FILE_NAME")
|
||||||
executionMode, _ = cmd.Flags().GetString("mode")
|
executionMode, _ = cmd.Flags().GetString("mode")
|
||||||
|
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
|
||||||
if storage == "s3" {
|
switch storage {
|
||||||
utils.Info("Restore database from s3")
|
case "s3":
|
||||||
s3Restore(file, s3Path)
|
restoreFromS3(file, bucket, s3Path)
|
||||||
} else {
|
case "local":
|
||||||
|
utils.Info("Restore database from local")
|
||||||
|
copyToTmp(storagePath, file)
|
||||||
|
RestoreDatabase(file)
|
||||||
|
case "ssh":
|
||||||
|
restoreFromRemote(file, remotePath)
|
||||||
|
case "ftp":
|
||||||
|
utils.Fatal("Restore from FTP is not yet supported")
|
||||||
|
default:
|
||||||
utils.Info("Restore database from local")
|
utils.Info("Restore database from local")
|
||||||
RestoreDatabase(file)
|
RestoreDatabase(file)
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func restoreFromS3(file, bucket, s3Path string) {
|
||||||
|
utils.Info("Restore database from s3")
|
||||||
|
err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal(fmt.Sprintf("Error download file from s3 %s %s", file, err))
|
||||||
|
}
|
||||||
|
RestoreDatabase(file)
|
||||||
|
}
|
||||||
|
func restoreFromRemote(file, remotePath string) {
|
||||||
|
utils.Info("Restore database from remote server")
|
||||||
|
err := CopyFromRemote(file, remotePath)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal(fmt.Sprintf("Error download file from remote server: ", filepath.Join(remotePath, file), err))
|
||||||
|
}
|
||||||
|
RestoreDatabase(file)
|
||||||
|
}
|
||||||
|
|
||||||
// RestoreDatabase restore database
|
// RestoreDatabase restore database
|
||||||
func RestoreDatabase(file string) {
|
func RestoreDatabase(file string) {
|
||||||
dbHost = os.Getenv("DB_HOST")
|
dbHost = os.Getenv("DB_HOST")
|
||||||
|
dbPassword = os.Getenv("DB_PASSWORD")
|
||||||
|
dbUserName = os.Getenv("DB_USERNAME")
|
||||||
dbName = os.Getenv("DB_NAME")
|
dbName = os.Getenv("DB_NAME")
|
||||||
dbPort = os.Getenv("DB_PORT")
|
dbPort = os.Getenv("DB_PORT")
|
||||||
storagePath = os.Getenv("STORAGE_PATH")
|
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
|
||||||
if file == "" {
|
if file == "" {
|
||||||
utils.Fatal("Error, file required")
|
utils.Fatal("Error, file required")
|
||||||
}
|
}
|
||||||
|
// dbHVars Required environment variables for database
|
||||||
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" || file == "" {
|
var dbHVars = []string{
|
||||||
utils.Fatal("Please make sure all required environment variables are set")
|
"DB_HOST",
|
||||||
} else {
|
"DB_PASSWORD",
|
||||||
|
"DB_USERNAME",
|
||||||
if utils.FileExists(fmt.Sprintf("%s/%s", storagePath, file)) {
|
"DB_NAME",
|
||||||
utils.TestDatabaseConnection()
|
}
|
||||||
|
err := utils.CheckEnvVars(dbHVars)
|
||||||
extension := filepath.Ext(fmt.Sprintf("%s/%s", storagePath, file))
|
|
||||||
// Restore from compressed file / .sql.gz
|
|
||||||
if extension == ".gz" {
|
|
||||||
str := "zcat " + fmt.Sprintf("%s/%s", storagePath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
|
|
||||||
_, err := exec.Command("bash", "-c", str).Output()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error, in restoring the database")
|
utils.Error("Please make sure all required environment variables for database are set")
|
||||||
|
utils.Fatal("Error checking environment variables: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
|
||||||
|
if extension == ".gpg" {
|
||||||
|
if gpgPassphrase == "" {
|
||||||
|
utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.")
|
||||||
|
|
||||||
|
} else {
|
||||||
|
//Decrypt file
|
||||||
|
err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error decrypting file ", file, err)
|
||||||
|
}
|
||||||
|
//Update file name
|
||||||
|
file = RemoveLastExtension(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
|
||||||
|
|
||||||
|
err := os.Setenv("mysqlPASSWORD", dbPassword)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
utils.TestDatabaseConnection()
|
||||||
|
|
||||||
|
extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
|
||||||
|
// Restore from compressed file / .sql.gz
|
||||||
|
if extension == ".gz" {
|
||||||
|
str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
|
||||||
|
_, err := exec.Command("bash", "-c", str).Output()
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal(fmt.Sprintf("Error, in restoring the database %s", err))
|
||||||
|
}
|
||||||
utils.Done("Database has been restored")
|
utils.Done("Database has been restored")
|
||||||
|
|
||||||
} else if extension == ".sql" {
|
} else if extension == ".sql" {
|
||||||
//Restore from sql file
|
//Restore from sql file
|
||||||
str := "cat " + fmt.Sprintf("%s/%s", storagePath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
|
str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + os.Getenv("DB_HOST") + " -P " + os.Getenv("DB_PORT") + " -u " + os.Getenv("DB_USERNAME") + " --password=" + os.Getenv("DB_PASSWORD") + " " + os.Getenv("DB_NAME")
|
||||||
_, err := exec.Command("bash", "-c", str).Output()
|
_, err := exec.Command("bash", "-c", str).Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error, in restoring the database", err)
|
utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
utils.Done("Database has been restored")
|
utils.Done("Database has been restored")
|
||||||
} else {
|
} else {
|
||||||
utils.Fatal("Unknown file extension ", extension)
|
utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
|
||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", storagePath, file))
|
utils.Fatal(fmt.Sprintf("File not found in %s", fmt.Sprintf("%s/%s", tmpPath, file)))
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
func s3Restore(file, s3Path string) {
|
|
||||||
// Restore database from S3
|
|
||||||
MountS3Storage(s3Path)
|
|
||||||
RestoreDatabase(file)
|
|
||||||
}
|
|
||||||
|
|||||||
80
pkg/s3fs.go
80
pkg/s3fs.go
@@ -1,80 +0,0 @@
|
|||||||
// Package pkg /*
|
|
||||||
/*
|
|
||||||
Copyright © 2024 Jonas Kaninda <jonaskaninda.gmail.com>
|
|
||||||
*/
|
|
||||||
package pkg
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
accessKey = ""
|
|
||||||
secretKey = ""
|
|
||||||
bucketName = ""
|
|
||||||
s3Endpoint = ""
|
|
||||||
)
|
|
||||||
|
|
||||||
func S3Mount() {
|
|
||||||
MountS3Storage(s3Path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MountS3Storage Mount s3 storage using s3fs
|
|
||||||
func MountS3Storage(s3Path string) {
|
|
||||||
accessKey = os.Getenv("ACCESS_KEY")
|
|
||||||
secretKey = os.Getenv("SECRET_KEY")
|
|
||||||
bucketName = os.Getenv("BUCKET_NAME")
|
|
||||||
if bucketName == "" {
|
|
||||||
bucketName = os.Getenv("BUCKETNAME")
|
|
||||||
}
|
|
||||||
s3Endpoint = os.Getenv("S3_ENDPOINT")
|
|
||||||
|
|
||||||
if accessKey == "" || secretKey == "" || bucketName == "" {
|
|
||||||
utils.Fatal("Please make sure all environment variables are set for S3")
|
|
||||||
} else {
|
|
||||||
storagePath := fmt.Sprintf("%s%s", s3MountPath, s3Path)
|
|
||||||
err := os.Setenv("STORAGE_PATH", storagePath)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
//Write file
|
|
||||||
err = utils.WriteToFile(s3fsPasswdFile, fmt.Sprintf("%s:%s", accessKey, secretKey))
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatal("Error creating file")
|
|
||||||
}
|
|
||||||
//Change file permission
|
|
||||||
utils.ChangePermission(s3fsPasswdFile, 0600)
|
|
||||||
|
|
||||||
//Mount object storage
|
|
||||||
utils.Info("Mounting Object storage in ", s3MountPath)
|
|
||||||
if isEmpty, _ := utils.IsDirEmpty(s3MountPath); isEmpty {
|
|
||||||
cmd := exec.Command("s3fs", bucketName, s3MountPath,
|
|
||||||
"-o", "passwd_file="+s3fsPasswdFile,
|
|
||||||
"-o", "use_cache=/tmp/s3cache",
|
|
||||||
"-o", "allow_other",
|
|
||||||
"-o", "url="+s3Endpoint,
|
|
||||||
"-o", "use_path_request_style",
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
utils.Fatal("Error mounting Object storage:", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
|
|
||||||
utils.Fatalf("Error creating directory %v %v", storagePath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
utils.Info("Object storage already mounted in " + s3MountPath)
|
|
||||||
if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
|
|
||||||
utils.Fatal("Error creating directory "+storagePath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
117
pkg/scp.go
Normal file
117
pkg/scp.go
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/bramvdbogaerde/go-scp"
|
||||||
|
"github.com/bramvdbogaerde/go-scp/auth"
|
||||||
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
|
"golang.org/x/crypto/ssh"
|
||||||
|
"golang.org/x/exp/slog"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
func CopyToRemote(fileName, remotePath string) error {
|
||||||
|
sshUser := os.Getenv("SSH_USER")
|
||||||
|
sshPassword := os.Getenv("SSH_PASSWORD")
|
||||||
|
sshHostName := os.Getenv("SSH_HOST_NAME")
|
||||||
|
sshPort := os.Getenv("SSH_PORT")
|
||||||
|
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
|
||||||
|
|
||||||
|
// SSSHVars Required environment variables for SSH remote server storage
|
||||||
|
var sshHVars = []string{
|
||||||
|
"SSH_USER",
|
||||||
|
"SSH_REMOTE_PATH",
|
||||||
|
"SSH_HOST_NAME",
|
||||||
|
"SSH_PORT",
|
||||||
|
}
|
||||||
|
err := utils.CheckEnvVars(sshHVars)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error(fmt.Sprintf("Error checking environment variables\n: %s", err))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
|
||||||
|
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
|
||||||
|
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
|
||||||
|
|
||||||
|
} else {
|
||||||
|
if sshPassword == "" {
|
||||||
|
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
|
||||||
|
}
|
||||||
|
slog.Warn("Accessing the remote server using password, password is not recommended\n")
|
||||||
|
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
|
||||||
|
|
||||||
|
}
|
||||||
|
// Create a new SCP client
|
||||||
|
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
|
||||||
|
|
||||||
|
// Connect to the remote server
|
||||||
|
err = client.Connect()
|
||||||
|
if err != nil {
|
||||||
|
return errors.New("Couldn't establish a connection to the remote server\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open a file
|
||||||
|
file, _ := os.Open(filepath.Join(tmpPath, fileName))
|
||||||
|
|
||||||
|
// Close client connection after the file has been copied
|
||||||
|
defer client.Close()
|
||||||
|
// Close the file after it has been copied
|
||||||
|
defer file.Close()
|
||||||
|
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
|
||||||
|
err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error while copying file ")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func CopyFromRemote(fileName, remotePath string) error {
|
||||||
|
sshUser := os.Getenv("SSH_USER")
|
||||||
|
sshPassword := os.Getenv("SSH_PASSWORD")
|
||||||
|
sshHostName := os.Getenv("SSH_HOST_NAME")
|
||||||
|
sshPort := os.Getenv("SSH_PORT")
|
||||||
|
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
|
||||||
|
|
||||||
|
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
|
||||||
|
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
|
||||||
|
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
|
||||||
|
|
||||||
|
} else {
|
||||||
|
if sshPassword == "" {
|
||||||
|
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
|
||||||
|
}
|
||||||
|
slog.Warn("Accessing the remote server using password, password is not recommended\n")
|
||||||
|
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
|
||||||
|
|
||||||
|
}
|
||||||
|
// Create a new SCP client
|
||||||
|
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
|
||||||
|
|
||||||
|
// Connect to the remote server
|
||||||
|
err := client.Connect()
|
||||||
|
if err != nil {
|
||||||
|
return errors.New("Couldn't establish a connection to the remote server\n")
|
||||||
|
}
|
||||||
|
// Close client connection after the file has been copied
|
||||||
|
defer client.Close()
|
||||||
|
file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Couldn't open the output file")
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
|
||||||
|
err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error while copying file ", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
@@ -2,7 +2,7 @@ package pkg
|
|||||||
|
|
||||||
// Package pkg /*
|
// Package pkg /*
|
||||||
/*
|
/*
|
||||||
Copyright © 2024 Jonas Kaninda <jonaskaninda.gmail.com>
|
Copyright © 2024 Jonas Kaninda
|
||||||
*/
|
*/
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
@@ -11,14 +11,11 @@ import (
|
|||||||
"os/exec"
|
"os/exec"
|
||||||
)
|
)
|
||||||
|
|
||||||
const cronLogFile = "/var/log/mysql-bkup.log"
|
|
||||||
const backupCronFile = "/usr/local/bin/backup_cron.sh"
|
|
||||||
|
|
||||||
func CreateCrontabScript(disableCompression bool, storage string) {
|
func CreateCrontabScript(disableCompression bool, storage string) {
|
||||||
//task := "/usr/local/bin/backup_cron.sh"
|
//task := "/usr/local/bin/backup_cron.sh"
|
||||||
touchCmd := exec.Command("touch", backupCronFile)
|
touchCmd := exec.Command("touch", backupCronFile)
|
||||||
if err := touchCmd.Run(); err != nil {
|
if err := touchCmd.Run(); err != nil {
|
||||||
utils.Fatalf("Error creating file %s: %v\n", backupCronFile, err)
|
utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
|
||||||
}
|
}
|
||||||
var disableC = ""
|
var disableC = ""
|
||||||
if disableCompression {
|
if disableCompression {
|
||||||
@@ -40,31 +37,36 @@ bkup backup --dbname %s --port %s %v
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
|
if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
|
||||||
utils.Fatalf("Error writing to %s: %v\n", backupCronFile, err)
|
utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
|
chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
|
||||||
if err := chmodCmd.Run(); err != nil {
|
if err := chmodCmd.Run(); err != nil {
|
||||||
utils.Fatalf("Error changing permissions of %s: %v\n", backupCronFile, err)
|
utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
|
lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
|
||||||
if err := lnCmd.Run(); err != nil {
|
if err := lnCmd.Run(); err != nil {
|
||||||
utils.Fatalf("Error creating symbolic link: %v\n", err)
|
utils.Fatal("Error creating symbolic link: %v\n", err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
touchLogCmd := exec.Command("touch", cronLogFile)
|
||||||
|
if err := touchLogCmd.Run(); err != nil {
|
||||||
|
utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
|
||||||
|
}
|
||||||
|
|
||||||
cronJob := "/etc/cron.d/backup_cron"
|
cronJob := "/etc/cron.d/backup_cron"
|
||||||
touchCronCmd := exec.Command("touch", cronJob)
|
touchCronCmd := exec.Command("touch", cronJob)
|
||||||
if err := touchCronCmd.Run(); err != nil {
|
if err := touchCronCmd.Run(); err != nil {
|
||||||
utils.Fatalf("Error creating file %s: %v\n", cronJob, err)
|
utils.Fatal("Error creating file %s: %v\n", cronJob, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
|
cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
|
||||||
`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
|
`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
|
||||||
|
|
||||||
if err := utils.WriteToFile(cronJob, cronContent); err != nil {
|
if err := utils.WriteToFile(cronJob, cronContent); err != nil {
|
||||||
utils.Fatalf("Error writing to %s: %v\n", cronJob, err)
|
utils.Fatal("Error writing to %s: %v\n", cronJob, err)
|
||||||
}
|
}
|
||||||
utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
|
utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
|
||||||
|
|
||||||
@@ -72,5 +74,5 @@ bkup backup --dbname %s --port %s %v
|
|||||||
if err := crontabCmd.Run(); err != nil {
|
if err := crontabCmd.Run(); err != nil {
|
||||||
utils.Fatal("Error updating crontab: ", err)
|
utils.Fatal("Error updating crontab: ", err)
|
||||||
}
|
}
|
||||||
utils.Info("Starting backup in scheduled mode")
|
utils.Info("Backup job created.")
|
||||||
}
|
}
|
||||||
|
|||||||
11
pkg/var.go
11
pkg/var.go
@@ -1,16 +1,21 @@
|
|||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
const s3MountPath string = "/s3mnt"
|
const cronLogFile = "/var/log/mysql-bkup.log"
|
||||||
const s3fsPasswdFile string = "/etc/passwd-s3fs"
|
const tmpPath = "/tmp/backup"
|
||||||
|
const backupCronFile = "/usr/local/bin/backup_cron.sh"
|
||||||
|
const algorithm = "aes256"
|
||||||
|
const gpgExtension = "gpg"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
storage = "local"
|
storage = "local"
|
||||||
file = ""
|
file = ""
|
||||||
s3Path = "/mysql-bkup"
|
dbPassword = ""
|
||||||
|
dbUserName = ""
|
||||||
dbName = ""
|
dbName = ""
|
||||||
dbHost = ""
|
dbHost = ""
|
||||||
dbPort = "3306"
|
dbPort = "3306"
|
||||||
executionMode = "default"
|
executionMode = "default"
|
||||||
storagePath = "/backup"
|
storagePath = "/backup"
|
||||||
disableCompression = false
|
disableCompression = false
|
||||||
|
encryption = false
|
||||||
)
|
)
|
||||||
|
|||||||
171
utils/s3.go
Normal file
171
utils/s3.go
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
|
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||||
|
"golang.org/x/exp/slog"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CreateSession creates a new AWS session
|
||||||
|
func CreateSession() (*session.Session, error) {
|
||||||
|
// AwsVars Required environment variables for AWS S3 storage
|
||||||
|
var awsVars = []string{
|
||||||
|
"AWS_S3_ENDPOINT",
|
||||||
|
"AWS_S3_BUCKET_NAME",
|
||||||
|
"AWS_ACCESS_KEY",
|
||||||
|
"AWS_SECRET_KEY",
|
||||||
|
"AWS_REGION",
|
||||||
|
"AWS_REGION",
|
||||||
|
"AWS_REGION",
|
||||||
|
}
|
||||||
|
|
||||||
|
endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
|
||||||
|
accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
|
||||||
|
secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
|
||||||
|
_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
|
||||||
|
|
||||||
|
region := os.Getenv("AWS_REGION")
|
||||||
|
awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
|
||||||
|
if err != nil {
|
||||||
|
Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = CheckEnvVars(awsVars)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error(fmt.Sprintf("Error checking environment variables\n: %s", err))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
// S3 Config
|
||||||
|
s3Config := &aws.Config{
|
||||||
|
Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
|
||||||
|
Endpoint: aws.String(endPoint),
|
||||||
|
Region: aws.String(region),
|
||||||
|
DisableSSL: aws.Bool(awsDisableSsl),
|
||||||
|
S3ForcePathStyle: aws.Bool(true),
|
||||||
|
}
|
||||||
|
return session.NewSession(s3Config)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFileToS3 uploads a file to S3 with a given prefix
|
||||||
|
func UploadFileToS3(filePath, key, bucket, prefix string) error {
|
||||||
|
sess, err := CreateSession()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
svc := s3.New(sess)
|
||||||
|
|
||||||
|
file, err := os.Open(filepath.Join(filePath, key))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
fileInfo, err := file.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
objectKey := filepath.Join(prefix, key)
|
||||||
|
|
||||||
|
buffer := make([]byte, fileInfo.Size())
|
||||||
|
file.Read(buffer)
|
||||||
|
fileBytes := bytes.NewReader(buffer)
|
||||||
|
fileType := http.DetectContentType(buffer)
|
||||||
|
|
||||||
|
_, err = svc.PutObject(&s3.PutObjectInput{
|
||||||
|
Bucket: aws.String(bucket),
|
||||||
|
Key: aws.String(objectKey),
|
||||||
|
Body: fileBytes,
|
||||||
|
ContentLength: aws.Int64(fileInfo.Size()),
|
||||||
|
ContentType: aws.String(fileType),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func DownloadFile(destinationPath, key, bucket, prefix string) error {
|
||||||
|
|
||||||
|
sess, err := CreateSession()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
Info("Download backup from S3 storage...")
|
||||||
|
file, err := os.Create(filepath.Join(destinationPath, key))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Failed to create file", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
objectKey := filepath.Join(prefix, key)
|
||||||
|
|
||||||
|
downloader := s3manager.NewDownloader(sess)
|
||||||
|
numBytes, err := downloader.Download(file,
|
||||||
|
&s3.GetObjectInput{
|
||||||
|
Bucket: aws.String(bucket),
|
||||||
|
Key: aws.String(objectKey),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Failed to download file", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
Info(fmt.Sprintf("Backup downloaded: ", file.Name(), " bytes size ", numBytes))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func DeleteOldBackup(bucket, prefix string, retention int) error {
|
||||||
|
sess, err := CreateSession()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
svc := s3.New(sess)
|
||||||
|
|
||||||
|
// Get the current time and the time threshold for 7 days ago
|
||||||
|
now := time.Now()
|
||||||
|
backupRetentionDays := now.AddDate(0, 0, -retention)
|
||||||
|
|
||||||
|
// List objects in the bucket
|
||||||
|
listObjectsInput := &s3.ListObjectsV2Input{
|
||||||
|
Bucket: aws.String(bucket),
|
||||||
|
Prefix: aws.String(prefix),
|
||||||
|
}
|
||||||
|
err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
|
||||||
|
for _, object := range page.Contents {
|
||||||
|
if object.LastModified.Before(backupRetentionDays) {
|
||||||
|
// Object is older than retention days, delete it
|
||||||
|
_, err := svc.DeleteObject(&s3.DeleteObjectInput{
|
||||||
|
Bucket: aws.String(bucket),
|
||||||
|
Key: object.Key,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Failed to delete object %s: %v", *object.Key, err)
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Deleted object %s\n", *object.Key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return !lastPage
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to list objects: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("Finished deleting old files.")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
139
utils/utils.go
139
utils/utils.go
@@ -7,25 +7,51 @@ package utils
|
|||||||
* @link https://github.com/jkaninda/mysql-bkup
|
* @link https://github.com/jkaninda/mysql-bkup
|
||||||
**/
|
**/
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"golang.org/x/exp/slog"
|
||||||
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Info(v ...any) {
|
func Info(msg string, args ...any) {
|
||||||
fmt.Println("⒤ ", fmt.Sprint(v...))
|
if len(args) == 0 {
|
||||||
|
slog.Info(msg)
|
||||||
|
} else {
|
||||||
|
slog.Info(fmt.Sprintf(msg, args...))
|
||||||
}
|
}
|
||||||
func Done(v ...any) {
|
|
||||||
fmt.Println("✔ ", fmt.Sprint(v...))
|
|
||||||
}
|
}
|
||||||
func Fatal(v ...any) {
|
func Worn(msg string, args ...any) {
|
||||||
fmt.Println("✘ ", fmt.Sprint(v...))
|
if len(args) == 0 {
|
||||||
os.Exit(1)
|
slog.Warn(msg)
|
||||||
|
} else {
|
||||||
|
slog.Warn(fmt.Sprintf(msg, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func Error(msg string, args ...any) {
|
||||||
|
if len(args) == 0 {
|
||||||
|
slog.Error(msg)
|
||||||
|
} else {
|
||||||
|
slog.Error(fmt.Sprintf(msg, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func Done(msg string, args ...any) {
|
||||||
|
if len(args) == 0 {
|
||||||
|
slog.Info(msg)
|
||||||
|
} else {
|
||||||
|
slog.Info(fmt.Sprintf(msg, args...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func Fatal(msg string, args ...any) {
|
||||||
|
// Fatal logs an error message and exits the program.
|
||||||
|
if len(args) == 0 {
|
||||||
|
slog.Error(msg)
|
||||||
|
} else {
|
||||||
|
slog.Error(fmt.Sprintf(msg, args...))
|
||||||
}
|
}
|
||||||
func Fatalf(msg string, v ...any) {
|
|
||||||
fmt.Printf("✘ "+msg, v...)
|
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -47,9 +73,45 @@ func WriteToFile(filePath, content string) error {
|
|||||||
_, err = file.WriteString(content)
|
_, err = file.WriteString(content)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
func DeleteFile(filePath string) error {
|
||||||
|
err := os.Remove(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to delete file: %v", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func CopyFile(src, dst string) error {
|
||||||
|
// Open the source file for reading
|
||||||
|
sourceFile, err := os.Open(src)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open source file: %v", err)
|
||||||
|
}
|
||||||
|
defer sourceFile.Close()
|
||||||
|
|
||||||
|
// Create the destination file
|
||||||
|
destinationFile, err := os.Create(dst)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create destination file: %v", err)
|
||||||
|
}
|
||||||
|
defer destinationFile.Close()
|
||||||
|
|
||||||
|
// Copy the content from source to destination
|
||||||
|
_, err = io.Copy(destinationFile, sourceFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to copy file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush the buffer to ensure all data is written
|
||||||
|
err = destinationFile.Sync()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to sync destination file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
func ChangePermission(filePath string, mod int) {
|
func ChangePermission(filePath string, mod int) {
|
||||||
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
|
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
|
||||||
Fatalf("Error changing permissions of %s: %v\n", filePath, err)
|
Fatal("Error changing permissions of %s: %v\n", filePath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -69,15 +131,31 @@ func IsDirEmpty(name string) (bool, error) {
|
|||||||
|
|
||||||
// TestDatabaseConnection tests the database connection
|
// TestDatabaseConnection tests the database connection
|
||||||
func TestDatabaseConnection() {
|
func TestDatabaseConnection() {
|
||||||
Info("Testing database connection...")
|
dbHost := os.Getenv("DB_HOST")
|
||||||
// Test database connection
|
dbPassword := os.Getenv("DB_PASSWORD")
|
||||||
cmd := exec.Command("mysql", "-h", os.Getenv("DB_HOST"), "-P", os.Getenv("DB_PORT"), "-u", os.Getenv("DB_USERNAME"), "--password="+os.Getenv("DB_PASSWORD"), os.Getenv("DB_NAME"), "-e", "quit")
|
dbUserName := os.Getenv("DB_USERNAME")
|
||||||
|
dbName := os.Getenv("DB_NAME")
|
||||||
|
dbPort := os.Getenv("DB_PORT")
|
||||||
|
|
||||||
|
if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
|
||||||
|
Fatal("Please make sure all required database environment variables are set")
|
||||||
|
} else {
|
||||||
|
Info("Connecting to database ...")
|
||||||
|
|
||||||
|
cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")
|
||||||
|
|
||||||
|
// Capture the output
|
||||||
|
var out bytes.Buffer
|
||||||
|
cmd.Stdout = &out
|
||||||
|
cmd.Stderr = &out
|
||||||
err := cmd.Run()
|
err := cmd.Run()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Fatal("Error testing database connection:", err)
|
slog.Error(fmt.Sprintf("Error testing database connection: %v\nOutput: %s\n", err, out.String()))
|
||||||
|
os.Exit(1)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
Info("Successfully connected to database")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func GetEnv(cmd *cobra.Command, flagName, envName string) string {
|
func GetEnv(cmd *cobra.Command, flagName, envName string) string {
|
||||||
value, _ := cmd.Flags().GetString(flagName)
|
value, _ := cmd.Flags().GetString(flagName)
|
||||||
@@ -109,6 +187,37 @@ func SetEnv(key, value string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func GetEnvVariable(envName, oldEnvName string) string {
|
||||||
|
value := os.Getenv(envName)
|
||||||
|
if value == "" {
|
||||||
|
value = os.Getenv(oldEnvName)
|
||||||
|
if value != "" {
|
||||||
|
err := os.Setenv(envName, value)
|
||||||
|
if err != nil {
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
Worn("%s is deprecated, please use %s instead!\n", oldEnvName, envName)
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
func ShowHistory() {
|
func ShowHistory() {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CheckEnvVars checks if all the specified environment variables are set
|
||||||
|
func CheckEnvVars(vars []string) error {
|
||||||
|
missingVars := []string{}
|
||||||
|
|
||||||
|
for _, v := range vars {
|
||||||
|
if os.Getenv(v) == "" {
|
||||||
|
missingVars = append(missingVars, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(missingVars) > 0 {
|
||||||
|
return fmt.Errorf("missing environment variables: %v", missingVars)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user