Mirror of https://github.com/jkaninda/mysql-bkup.git (synced 2025-12-06 13:39:41 +01:00)
Compare commits
256 Commits
Commits in this compare, by abbreviated SHA1 (the author and date columns were empty in the source table):

e666466d27, d9d44c2798, 300a592508, be82e841e7, a73a365ebf, 75e965c0c5, fc60ddb308, 573ef15ef3,
b1776d3689, 376d47f738, eb6268f8ec, 731e2d789d, 6300a8f2dd, cd827a9277, 71cf3fae85, 528282bbd4,
002c93a796, b6192f4c42, d5061453b0, 0bc7497512, 489dfdf842, 907e70d552, 696477fe5c, 56a8b51660,
c76a00139c, 0f43871765, 9ba6abe3f4, 764583d88f, dbf4dc596a, 06c89a9b78, ec8bdd806c, 828b11c6dd,
1d01e13909, bd65db2418, 75b809511e, fc028a2c55, 7fa0c6a118, 661702a97e, dd5f33f17d, b7cdfebd9c,
4b93becdf2, 748cccec58, 3e8bfabc44, 777b59fd7c, 2b25f39c0a, e5ba397bb4, 3a1bfc512d, b7b09ad6fd,
1206140a67, 24573a96ad, fff0b55722, 68322e6b9f, 0f28772659, b95ccf3905, a06872834f, 393168c6c5,
5b9ec8a224, 2c3f2f4a46, 0df14f37b4, 1b60ca6fd2, d880f40108, c845b36797, 63d615f838, 6f31d35df2,
f36d01cc96, 07b7f54a75, 7ff9a32f08, 95a81cb6b7, 057d5277b0, 8e58d7a4c3, 4bd7d9fa72, 156f22f1e5,
fd444293b4, 1940ceba9a, 07d580a8a9, 9a261b22ec, e7a58f0569, 1b529725d7, d8c73560b8, d5a0adc981,
6df3bae9e2, f7d624fd15, 1e9e1ed951, 917ba8947f, 94a1dcdff7, f70e549b16, 607478fcc6, 2862e504f5,
29420ee13e, f53272ccf0, c360441445, f6916231f7, afd4afc83b, 9016a9ec7a, 4ecd96e75c, 8a88e4a727,
62f86adea9, eb414d818c, 6721cc430d, 8e20e9595f, 02e3267237, 448ef4d988, 70ac78c2cd, 72f5ef4839,
6a51f591a5, d55ade3c21, cdbd6dcd6a, 307e18d9ff, 8d366f0302, 05e32c3cc1, edd13907d0, 7cb1c50927,
f545704b02, 90f5391b24, ca241b4fef, 3911296921, 8d04d276ba, 221079e0ea, 590b2d8bc6, d2aeb55ebc,
431be36210, ef2c5c80cd, 3a0137d6ea, 8afb5ace40, 5569258a71, f3ec395e37, ba432997c8, dc20ea9635,
40557af437, 1dcb9586a6, 2c6336e84a, c16ee3a492, 3f7d28ea49, cea1ef9c3b, 56c271bc29, 45c30dca5f,
b0ae212578, 6e2d3a9f21, dd314aa4cb, 24ccdaa671, 45e3452376, 3527b4cdcd, dc6fe2f4b9, f0afc0f4e0,
7d7c813bb0, 6b8491cdc0, a1dd6e3f58, 86ba3530c9, e1f3b15003, 1577e92a66, 7b67f88769, 043233dabe,
d6652cfb75, 140ed608ab, 98211a27b8, 4e4d45e555, 01e41acb5c, 3dce2017f8, ed2f1b8d9c, b64875df21,
fc90507b3f, df0efd24d3, e5dd7e76ce, 12fbb67a09, df490af7b6, d930c3e2f6, e4258cb12e, 4c44166921,
554df819ab, ca5633882e, c5cca82841, bbd5422089, d72156f890, 909a50dbe7, 94ceb71da2, fe05fe5110,
dabba2050a, 47e1ac407b, 28f6ed3a82, 504926c7cd, 737f473f92, 300d2a8205, a4ad0502cf, f344867edf,
d774584f64, 96927cd57e, ceacfa1d9d, 9380a18b45, d186071df9, 71429b0e1a, 0bed86ded4, e891801125,
01cf8a3392, efea81833a, 1cbf65d686, 73d19913f8, b0224e43ef, fa0485bb5a, 65ef6d3e8f, a7b6abb101,
3b21c109bc, a50a1ef6f9, 76bbfa35c4, 599d93bef4, 247e90f73e, 7d544aca68, 1722ee0eeb, 726fd14831,
fdc88e6064, 2ba1b516e9, 301594676b, d06f2f2d7e, 2f06bd1c3a, f383f5559d, 3725809d28, b1598ef7d0,
e4a83b9851, 4b2527f416, e97fc7512a, 7912ce46ed, 050f5e81bc, b39e97b77d, cbb73ae89b, 29a58aa26d,
041e0a07e9, 9daac9c654, f6098769cd, 5cdfaa4d94, b205cd61ea, e1307250e8, 17ac951deb, 6e2e08224d,
570b775f48, e38e106983, 3040420a09, eac5f70408, 3476c6f529, 1a9c8483f8, f8722f7ae4, 421bf12910,
3da4a27baa, 0881f075ef, 066e73f8e4, 645243ff77, 9384998127, 390e7dad0c, 67ea22385f, cde82d8cfc,
4808f093e5, c7a03861fe, 36ec63d522, 0f07de1d83, ae55839996, a7f7e57a0d, b2ddaec93b, b3570d774c
81 .env.example (new file)
@@ -0,0 +1,81 @@
### Database
DB_HOST=
DB_PORT=3306
DB_USERNAME=
DB_PASSWORD=
DB_NAME=
TZ=Europe/Paris

### Database Migration
#TARGET_DB_HOST=
#TARGET_DB_PORT=3306
#TARGET_DB_NAME=
#TARGET_DB_USERNAME=
#TARGET_DB_PASSWORD=

### Backup restoration
#FILE_NAME=

### AWS S3 Storage
#ACCESS_KEY=
#SECRET_KEY=
#AWS_S3_BUCKET_NAME=
#AWS_S3_ENDPOINT=
#AWS_REGION=
#AWS_S3_PATH=
#AWS_DISABLE_SSL=false
#AWS_FORCE_PATH_STYLE=true

### Backup Cron Expression
#BACKUP_CRON_EXPRESSION=@midnight
##Delete old backup created more than specified days ago
#BACKUP_RETENTION_DAYS=7

####SSH Storage
#SSH_HOST_NAME=
#SSH_PORT=22
#SSH_USER=
#SSH_PASSWORD=
#SSH_IDENTIFY_FILE=/tmp/id_ed25519

####FTP Storage
#FTP_PASSWORD=
#FTP_HOST_NAME=
#FTP_USER=
#FTP_PORT=21
#REMOTE_PATH=

## Azure Blob storage
AZURE_STORAGE_CONTAINER_NAME=
AZURE_STORAGE_ACCOUNT_NAME=
AZURE_STORAGE_ACCOUNT_KEY=

#### Backup encryption
#GPG_PUBLIC_KEY=/config/public_key.asc
#GPG_PRIVATE_KEY=/config/private_key.asc
#GPG_PASSPHRASE=Your strong passphrase

## For multiple database backup on Docker or Docker in Swarm mode
#BACKUP_CONFIG_FILE=/config/config.yaml

### Database restoration
#FILE_NAME=

### Notification
#BACKUP_REFERENCE=K8s/Paris cluster

## Telegram
#TG_TOKEN=
#TG_CHAT_ID=

### Email
#MAIL_HOST=
#MAIL_PORT=
#MAIL_USERNAME=
#MAIL_PASSWORD=
#MAIL_FROM=Backup Jobs <backup-jobs@example.com>
#MAIL_TO=backup@example.com,me@example.com,team@example.com
#MAIL_SKIP_TLS=false
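A quick way to exercise this file: copy it to `.env`, fill in the database block, and hand it to the container with Docker's `--env-file` flag, which the README below mentions as the alternative to individual `-e` flags. The network and database values are placeholders; this is a sketch, not part of the diff's committed files.

```shell
# Sketch: drive a one-off backup entirely from the env file above.
cp .env.example .env   # then fill in DB_HOST, DB_USERNAME, DB_PASSWORD, DB_NAME
docker run --rm --network your_network_name \
  --env-file .env \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup backup
```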
3 .github/FUNDING.yml (new file)
@@ -0,0 +1,3 @@
# These are supported funding model platforms

ko_fi: jkaninda
10 .github/dependabot.yml (new file)
@@ -0,0 +1,10 @@
version: 2
updates:
  - package-ecosystem: docker
    directory: /
    schedule:
      interval: weekly
  - package-ecosystem: gomod
    directory: /
    schedule:
      interval: weekly
8 .github/workflows/build.yml (changed)
@@ -1,7 +1,7 @@
 name: Build
 on:
   push:
-    branches: ['develop']
+    branches: ['nightly']
 env:
   BUILDKIT_IMAGE: jkaninda/mysql-bkup
 jobs:
@@ -25,8 +25,10 @@ jobs:
         uses: docker/build-push-action@v3
         with:
           push: true
-          file: "./docker/Dockerfile"
+          file: "./Dockerfile"
           platforms: linux/amd64,linux/arm64,linux/arm/v7
+          build-args: |
+            appVersion=nightly
           tags: |
-            "${{env.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"
+            "${{vars.BUILDKIT_IMAGE}}:nightly"
6 .github/workflows/deploy-docs.yml (changed)
@@ -32,14 +32,14 @@ jobs:
         working-directory: docs
       - name: Setup Pages
         id: pages
-        uses: actions/configure-pages@v2
+        uses: actions/configure-pages@v5
       - name: Build with Jekyll
         working-directory: docs
         run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:
           JEKYLL_ENV: production
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v1
+        uses: actions/upload-pages-artifact@v3
         with:
           path: 'docs/_site/'
 
@@ -52,4 +52,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v1
+        uses: actions/deploy-pages@v4
23 .github/workflows/lint.yml (new file)
@@ -0,0 +1,23 @@
name: Lint

on:
  push:
  pull_request:

jobs:
  lint:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: '~1.23'

      - name: Run linter
        uses: golangci/golangci-lint-action@v6
        with:
          version: v1.61
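The workflow pins golangci-lint to v1.61, so the same check can be reproduced locally before pushing; a sketch, assuming a working Go toolchain (the `--timeout` value mirrors the `.golangci.yml` added further down):

```shell
# Sketch: run the CI lint step locally with the same pinned major version.
go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0
golangci-lint run --timeout 5m
```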
12 .github/workflows/release.yml (changed)
@@ -39,11 +39,13 @@ jobs:
         uses: docker/build-push-action@v3
         with:
           push: true
-          file: "./docker/Dockerfile"
+          file: "./Dockerfile"
           platforms: linux/amd64,linux/arm64,linux/arm/v7
+          build-args: |
+            appVersion=${{ env.TAG_NAME }}
           tags: |
-            "${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
-            "${{env.BUILDKIT_IMAGE}}:latest"
-            "ghcr.io/${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
-            "ghcr.io/${{env.BUILDKIT_IMAGE}}:latest"
+            "${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
+            "${{vars.BUILDKIT_IMAGE}}:latest"
+            "ghcr.io/${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
+            "ghcr.io/${{vars.BUILDKIT_IMAGE}}:latest"
289 .github/workflows/tests.yml (new file)
@@ -0,0 +1,289 @@
name: Tests

on:
  push:
    branches:
      - main
      - nightly
  pull_request:
    branches:
      - main
env:
  IMAGE_NAME: mysql-bkup

jobs:
  test:
    runs-on: ubuntu-latest
    services:
      mysql:
        image: mysql:9
        env:
          MYSQL_ROOT_PASSWORD: password
          MYSQL_DATABASE: testdb
          MYSQL_USER: user
          MYSQL_PASSWORD: password
        ports:
          - 3306:3306
        options: >-
          --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5
      mysql8:
        image: mysql:8
        env:
          MYSQL_ROOT_PASSWORD: password
          MYSQL_DATABASE: testdb
          MYSQL_USER: user
          MYSQL_PASSWORD: password
        ports:
          - 3308:3306
        options: >-
          --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5
      mysql5:
        image: mysql:5
        env:
          MYSQL_ROOT_PASSWORD: password
          MYSQL_DATABASE: testdb
          MYSQL_USER: user
          MYSQL_PASSWORD: password
        ports:
          - 3305:3306
        options: >-
          --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Create Minio container
        run: |
          docker run -d --rm --name minio \
            --network host \
            -p 9000:9000 \
            -e MINIO_ACCESS_KEY=minioadmin \
            -e MINIO_SECRET_KEY=minioadmin \
            -e MINIO_REGION_NAME="eu" \
            minio/minio server /data
          echo "Create Minio container completed"
      - name: Install MinIO Client (mc)
        run: |
          curl -O https://dl.min.io/client/mc/release/linux-amd64/mc
          chmod +x mc
          sudo mv mc /usr/local/bin/

      - name: Wait for MinIO to be ready
        run: sleep 5

      - name: Configure MinIO Client
        run: |
          mc alias set local http://localhost:9000 minioadmin minioadmin
          mc alias list

      - name: Create MinIO Bucket
        run: |
          mc mb local/backups
          echo "Bucket backups created successfully."
      # Build the Docker image
      - name: Build Docker Image
        run: |
          docker buildx build --build-arg appVersion=test -t ${{ env.IMAGE_NAME }}:latest --load .

      - name: Verify Docker images
        run: |
          docker images

      - name: Wait for MySQL to be ready
        run: |
          docker run --rm --network host mysql:9 mysqladmin ping -h 127.0.0.1 -uuser -ppassword --wait
      - name: Test restore
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest restore -f init.sql
          echo "Database restore completed"
      - name: Test restore Mysql8
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_PORT=3308 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest restore -f init.sql
          echo "Test restore Mysql8 completed"
      - name: Test restore Mysql5
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_PORT=3305 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest restore -f init.sql
          echo "Test restore Mysql5 completed"
      - name: Test backup
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup
          echo "Database backup completed"
      - name: Test backup Mysql8
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_PORT=3308 \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup
          echo "Test backup Mysql8 completed"
      - name: Test backup Mysql5
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_PORT=3305 \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup
          echo "Test backup Mysql5 completed"
      - name: Test encrypted backup
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e GPG_PASSPHRASE=password \
            ${{ env.IMAGE_NAME }}:latest backup -d testdb --disable-compression --custom-name encrypted-bkup
          echo "Database encrypted backup completed"
      - name: Test restore encrypted backup | testdb -> testdb2
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e GPG_PASSPHRASE=password \
            -e DB_NAME=testdb2 \
            ${{ env.IMAGE_NAME }}:latest restore -f /backup/encrypted-bkup.sql.gpg
          echo "Test restore encrypted backup completed"
      - name: Test migrate database testdb -> testdb3
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e GPG_PASSPHRASE=password \
            -e DB_NAME=testdb \
            -e TARGET_DB_HOST=127.0.0.1 \
            -e TARGET_DB_PORT=3306 \
            -e TARGET_DB_NAME=testdb3 \
            -e TARGET_DB_USERNAME=root \
            -e TARGET_DB_PASSWORD=password \
            ${{ env.IMAGE_NAME }}:latest migrate
          echo "Test migrate database testdb -> testdb3 completed"
      - name: Test backup all databases
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup --all-databases
          echo "Database backup completed"
      - name: Test multiple backup
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e TESTDB2_DB_USERNAME=root \
            -e TESTDB2_DB_PASSWORD=password \
            -e TESTDB2_DB_HOST=127.0.0.1 \
            ${{ env.IMAGE_NAME }}:latest backup -c /backup/test_config.yaml
          echo "Database backup completed"
      - name: Test backup Minio (s3)
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            -e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \
            -e AWS_S3_BUCKET_NAME=backups \
            -e AWS_ACCESS_KEY=minioadmin \
            -e AWS_SECRET_KEY=minioadmin \
            -e AWS_DISABLE_SSL="true" \
            -e AWS_REGION="eu" \
            -e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest backup -s s3 --custom-name minio-backup
          echo "Test backup Minio (s3) completed"
      - name: Test restore Minio (s3)
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            -e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \
            -e AWS_S3_BUCKET_NAME=backups \
            -e AWS_ACCESS_KEY=minioadmin \
            -e AWS_SECRET_KEY=minioadmin \
            -e AWS_DISABLE_SSL="true" \
            -e AWS_REGION="eu" \
            -e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest restore -s s3 -f minio-backup.sql.gz
          echo "Test backup Minio (s3) completed"
      - name: Test scheduled backup
        run: |
          docker run -d --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup -e "@every 10s"

          echo "Waiting for backup to be done..."
          sleep 25
          docker logs ${{ env.IMAGE_NAME }}
          echo "Test scheduled backup completed"
      # Cleanup: Stop and remove containers
      - name: Clean up
        run: |
          docker stop ${{ env.IMAGE_NAME }} || true
          docker rm ${{ env.IMAGE_NAME }} || true
44 .golangci.yml (new file)
@@ -0,0 +1,44 @@
run:
  timeout: 5m
  allow-parallel-runners: true

issues:
  # don't skip warning about doc comments
  # don't exclude the default set of lint
  exclude-use-default: false
  # restore some of the defaults
  # (fill in the rest as needed)
  exclude-rules:
    - path: "internal/*"
      linters:
        - dupl
        - lll
        - goimports
linters:
  disable-all: true
  enable:
    - dupl
    - errcheck
    - copyloopvar
    - ginkgolinter
    - goconst
    - gocyclo
    - gofmt
    - gosimple
    - govet
    - ineffassign
    # - lll
    - misspell
    - nakedret
    - prealloc
    - revive
    - staticcheck
    - typecheck
    - unconvert
    - unparam
    - unused

linters-settings:
  revive:
    rules:
      - name: comment-spacings
44 Dockerfile (new file)
@@ -0,0 +1,44 @@
FROM golang:1.24.1 AS build
WORKDIR /app
ARG appVersion=""

# Copy the source code.
COPY . .
# Installs Go dependencies
RUN go mod download

# Build
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-X 'github.com/jkaninda/mysql-bkup/utils.Version=${appVersion}'" -o /app/mysql-bkup

FROM alpine:3.21.3
ENV TZ=UTC
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG TEMPLATES_DIR="/config/templates"
ARG appVersion=""
ENV VERSION=${appVersion}
LABEL author="Jonas Kaninda"
LABEL version=${appVersion}
LABEL github="github.com/jkaninda/mysql-bkup"

RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata ca-certificates
RUN mkdir -p $WORKDIR $BACKUPDIR $TEMPLATES_DIR $BACKUP_TMP_DIR && \
    chmod a+rw $WORKDIR $BACKUPDIR $BACKUP_TMP_DIR
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
COPY ./templates/* $TEMPLATES_DIR/
RUN chmod +x /usr/local/bin/mysql-bkup && \
    ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup

# Create backup script and make it executable
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
    chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
    chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
    chmod +x /usr/local/bin/migrate

WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
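The three `printf` lines mean the image ships `backup`, `restore`, and `migrate` as thin shell wrappers around the single Go binary, which is why compose files later in this diff can use `command: backup ...` directly. A sketch of how to verify that after a local build (the `mysql-bkup:dev` tag is a placeholder):

```shell
# Sketch: build the image and inspect one generated wrapper script.
docker build -t mysql-bkup:dev --build-arg appVersion=dev .
docker run --rm --entrypoint cat mysql-bkup:dev /usr/local/bin/backup
# Expected output:
#   #!/bin/sh
#   /usr/local/bin/mysql-bkup backup "$@"
```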
94 README.md (changed)
@@ -1,16 +1,45 @@
-# MySQL Backup
-MySQL Backup is a Docker container image that can be used to backup and restore MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
-It also supports __encrypting__ your backups using GPG.
+# MYSQL-BKUP
 
-The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
-It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
-It also supports __encrypting__ your backups using GPG.
+**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
+It supports a variety of storage options and ensures data security through GPG encryption.
 
+[](https://github.com/jkaninda/mysql-bkup/actions/workflows/tests.yml)
 [](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
 [](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
 
 
+<a href="https://ko-fi.com/jkaninda"><img src="https://uploads-ssl.webflow.com/5c14e387dab576fe667689cf/5cbed8a4ae2b88347c06c923_BuyMeACoffee_blue.png" height="20" alt="buy ma a coffee"></a>
+
+## Features
+
+- **Storage Options:**
+  - Local storage
+  - AWS S3 or any S3-compatible object storage
+  - FTP
+  - SSH-compatible storage
+  - Azure Blob storage
+
+- **Data Security:**
+  - Backups can be encrypted using **GPG** to ensure confidentiality.
+
+- **Deployment Flexibility:**
+  - Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
+  - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
+  - Supports recurring backups of MySQL databases when deployed:
+    - On Docker for automated backup schedules.
+    - As a **Job** or **CronJob** on Kubernetes.
+
+- **Notifications:**
+  - Get real-time updates on backup success or failure via:
+    - **Telegram**
+    - **Email**
+
+## Use Cases
+
+- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
+- **Cross-Environment Migration:** Easily migrate your MySQL databases across different environments using supported storage options.
+- **Secure Backup Management:** Protect your data with GPG encryption.
+
 Successfully tested on:
 - Docker
@@ -33,8 +62,9 @@ Successfully tested on:
 ## Storage:
 - Local
 - AWS S3 or any S3 Alternatives for Object Storage
-- SSH remote server
+- SSH remote storage server
+- FTP remote storage server
+- Azure Blob storage
 ## Quickstart
 
 ### Simple backup using Docker CLI
@@ -45,6 +75,7 @@ To run a one time backup, bind your local volume to `/backup` in the container a
 docker run --rm --network your_network_name \
   -v $PWD/backup:/backup/ \
   -e "DB_HOST=dbhost" \
+  -e "DB_PORT=3306" \
   -e "DB_USERNAME=username" \
   -e "DB_PASSWORD=password" \
   jkaninda/mysql-bkup backup -d database_name
@@ -58,7 +89,19 @@ Alternatively, pass a `--env-file` in order to use a full config as described be
   -v $PWD/backup:/backup/ \
   jkaninda/mysql-bkup backup -d database_name
 ```
+### Simple restore using Docker CLI
+
+To restore a database, bind your local volume to `/backup` in the container and run the `restore` command:
+
+```shell
+docker run --rm --network your_network_name \
+  -v $PWD/backup:/backup/ \
+  -e "DB_HOST=dbhost" \
+  -e "DB_PORT=3306" \
+  -e "DB_USERNAME=username" \
+  -e "DB_PASSWORD=password" \
+  jkaninda/mysql-bkup restore -d database_name -f backup_file.sql.gz
+```
 ### Simple backup in docker compose file
 
 ```yaml
@@ -79,12 +122,25 @@ services:
       - DB_NAME=foo
       - DB_USERNAME=bar
       - DB_PASSWORD=password
+      - TZ=Europe/Paris
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
 networks:
   web:
 ```
+### Docker recurring backup
+
+```shell
+docker run --rm --network network_name \
+  -v $PWD/backup:/backup/ \
+  -e "DB_HOST=hostname" \
+  -e "DB_USERNAME=user" \
+  -e "DB_PASSWORD=password" \
+  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" #@midnight
+```
+See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+
 ## Deploy on Kubernetes
 
 For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob.
@@ -101,7 +157,7 @@ spec:
   template:
     spec:
       containers:
-        - name: pg-bkup
+        - name: mysql-bkup
           # In production, it is advised to lock your image tag to a proper
           # release version instead of using `latest`.
           # Check https://github.com/jkaninda/mysql-bkup/releases
@@ -144,20 +200,14 @@ docker pull ghcr.io/jkaninda/mysql-bkup
 
 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
 
-## Supported Engines
-
-This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
-While it may work against different implementations, there are no guarantees about support for non-Docker engines.
-
 ## References
 
-We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
-- The original image is based on `ubuntu` and requires additional tools, making it heavy.
-- This image is written in Go.
-- `arm64` and `arm/v7` architectures are supported.
-- Docker in Swarm mode is supported.
-- Kubernetes is supported.
+We created this image as a simpler and more lightweight alternative to existing solutions. Here’s why:
+
+- **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
+- **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
+- **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
+- **Kubernetes Support:** Designed to work seamlessly with Kubernetes.
 
 ## License
cmd/backup.go (changed)
@@ -1,3 +1,27 @@
+/*
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
 package cmd
 
 import (
@@ -14,19 +38,20 @@ var BackupCmd = &cobra.Command{
 		if len(args) == 0 {
 			pkg.StartBackup(cmd)
 		} else {
-			utils.Fatal("Error, no argument required")
+			utils.Fatal(`"backup" accepts no argument %q`, args)
 		}
 	},
 }
 
 func init() {
-	//Backup
-	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
-	BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
-	BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled")
-	BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time")
-	BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
-	BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
+	// Backup
+	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp, azure")
+	BackupCmd.PersistentFlags().StringP("path", "P", "", "Storage path without file name. e.g: /custom_path or ssh remote path `/home/foo/backup`")
+	BackupCmd.PersistentFlags().StringP("cron-expression", "e", "", "Backup cron expression (e.g., `0 0 * * *` or `@daily`)")
+	BackupCmd.PersistentFlags().StringP("config", "c", "", "Configuration file for multi database backup. (e.g: `/backup/config.yaml`)")
 	BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
+	BackupCmd.PersistentFlags().BoolP("all-databases", "a", false, "Backup all databases")
+	BackupCmd.PersistentFlags().BoolP("all-in-one", "A", false, "Backup all databases in a single file")
+	BackupCmd.PersistentFlags().StringP("custom-name", "", "", "Custom backup name")
+
 }
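Taken together, the new flags replace the old `--mode`/`--period`/`--prune`/`--keep-last` set: scheduling now hangs directly off `--cron-expression`. A sketch of how the reworked CLI reads, using only flags defined above (database name and schedule are placeholders):

```shell
# Sketch: one-off backup to S3, and a recurring daily backup, via the new flags.
mysql-bkup backup -d mydb -s s3 --path /custom_path --custom-name nightly-mydb
mysql-bkup backup -d mydb --cron-expression "@daily"
```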
cmd/migrate.go (changed)
@@ -1,3 +1,27 @@
+/*
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
 package cmd
 
 import (
@@ -13,7 +37,7 @@ var MigrateCmd = &cobra.Command{
 		if len(args) == 0 {
 			pkg.StartMigration(cmd)
 		} else {
-			utils.Fatal("Error, no argument required")
+			utils.Fatal(`"migrate" accepts no argument %q`, args)
 		}
 	},
cmd/restore.go (changed)
@@ -1,3 +1,27 @@
+/*
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
 package cmd
 
 import (
@@ -14,7 +38,7 @@ var RestoreCmd = &cobra.Command{
 		if len(args) == 0 {
 			pkg.StartRestore(cmd)
 		} else {
-			utils.Fatal("Error, no argument required")
+			utils.Fatal(`"restore" accepts no argument %q`, args)
 		}
 	},
@@ -22,9 +46,9 @@ var RestoreCmd = &cobra.Command{
 }
 
 func init() {
-	//Restore
+	// Restore
 	RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
-	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
 	RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
 
 }
26 cmd/root.go (changed)
@@ -1,7 +1,27 @@
-// Package cmd /*
 /*
-Copyright © 2024 Jonas Kaninda
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
 */
+
 package cmd
 
 import (
@@ -18,7 +38,6 @@ var rootCmd = &cobra.Command{
 	Example: utils.MainExample,
 	Version: appVersion,
 }
-var operation = ""
 
 // Execute adds all child commands to the root command and sets flags appropriately.
 // This is called by main.main(). It only needs to happen once to the rootCmd.
@@ -31,7 +50,6 @@ func Execute() {
 
 func init() {
 	rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name")
-	rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only")
 	rootCmd.AddCommand(VersionCmd)
 	rootCmd.AddCommand(BackupCmd)
 	rootCmd.AddCommand(RestoreCmd)
cmd/version.go (changed)
@@ -1,11 +1,32 @@
-package cmd
-
 /*
-Copyright © 2024 Jonas Kaninda
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
 */
+
+package cmd
+
 import (
 	"fmt"
+	"github.com/jkaninda/mysql-bkup/utils"
 	"github.com/spf13/cobra"
 	"os"
 )
@@ -21,6 +42,6 @@ var VersionCmd = &cobra.Command{
 }
 
 func Version() {
-	fmt.Printf("Version: %s \n", appVersion)
+	fmt.Printf("Version: %s \n", utils.Version)
 	fmt.Println()
 }
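The switch from the package-local `appVersion` to `utils.Version` is what lets the new Dockerfile stamp the release tag into the binary via `-ldflags`. A sketch of the round trip, using the symbol path from the Dockerfile earlier in this diff (the `v1.5.0` tag is a placeholder):

```shell
# Sketch: inject a version string at build time and print it back.
go build -ldflags="-X 'github.com/jkaninda/mysql-bkup/utils.Version=v1.5.0'" -o mysql-bkup
./mysql-bkup version   # expected: Version: v1.5.0
```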
docker/Dockerfile (deleted)
@@ -1,83 +0,0 @@
FROM golang:1.22.5 AS build
WORKDIR /app

# Copy the source code.
COPY . .
# Installs Go dependencies
RUN go mod download

# Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup

FROM ubuntu:24.04
ENV DB_HOST="localhost"
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT=3306
ENV STORAGE=local
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
ENV AWS_ACCESS_KEY=""
ENV AWS_SECRET_KEY=""
ENV AWS_REGION="us-west-2"
ENV AWS_S3_PATH=""
ENV AWS_DISABLE_SSL="false"
ENV GPG_PASSPHRASE=""
ENV SSH_USER=""
ENV SSH_REMOTE_PATH=""
ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ENV TARGET_DB_HOST=""
ENV TARGET_DB_PORT=3306
ENV TARGET_DB_NAME="localhost"
ENV TARGET_DB_USERNAME=""
ENV TARGET_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.4"
ENV BACKUP_CRON_EXPRESSION=""
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
LABEL author="Jonas Kaninda"

RUN apt-get update -qq
RUN apt install mysql-client supervisor cron gnupg -y

# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*

RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
    touch $BACKUP_CRON_SCRIPT && \
    chmod 777 $BACKUP_CRON && \
    chmod 777 $BACKUP_CRON_SCRIPT

COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup

RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup

ADD docker/supervisord.conf /etc/supervisor/supervisord.conf

# Create backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
    chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
    chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
    chmod +x /usr/local/bin/migrate

WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
docker/supervisord.conf (deleted)
@@ -1,13 +0,0 @@
[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisor/supervisord.log
pidfile=/var/run/supervisord.pid

[program:cron]
command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
autostart=true
autorestart=true
user = root
stderr_logfile=/var/log/cron.err.log
stdout_logfile=/var/log/cron.out.log
Jekyll docs Dockerfile (deleted)
@@ -1,12 +0,0 @@
FROM ruby:3.3.4

ENV LC_ALL C.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8

WORKDIR /usr/src/app

COPY . ./
RUN gem install bundler && bundle install

EXPOSE 4000
docs/_config.yml (changed)
@@ -20,7 +20,7 @@ description: >- # this means to ignore newlines until "baseurl:"
   It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
 
 baseurl: "" # the subpath of your site, e.g. /blog
-url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
+url: "" # the base hostname & protocol for your site, e.g. http://example.com
 twitter_username: jonaskaninda
 github_username: jkaninda
Jekyll docs docker-compose file (deleted)
@@ -1,13 +0,0 @@
services:
  jekyll:
    build:
      context: ./
    ports:
      - 4000:4000
    environment:
      - JEKYLL_ENV=development
    volumes:
      - .:/usr/src/app
    stdin_open: true
    tty: true
    command: bundle exec jekyll serve -H 0.0.0.0 -t
BIN docs/favicon.ico (new binary file, 4.2 KiB; not shown)
72 docs/how-tos/azure-blob.md (new file)
@@ -0,0 +1,72 @@
---
title: Azure Blob storage
layout: default
parent: How Tos
nav_order: 5
---

# Backup to Azure Blob Storage

To store your backups on Azure Blob Storage, you can configure the backup process to use the `--storage azure` option.

This section explains how to set up and configure Azure Blob-based backups.

---

## Configuration Steps

1. **Specify the Storage Type**
   Add the `--storage azure` flag to your backup command.

2. **Set the Blob Path**
   Optionally, specify a custom folder within your Azure Blob container where backups will be stored using the `--path` flag.
   Example: `--path my-custom-path`.

3. **Required Environment Variables**
   The following environment variables are mandatory for Azure Blob-based backups:

   - `AZURE_STORAGE_CONTAINER_NAME`: The name of the Azure Blob container where backups will be stored.
   - `AZURE_STORAGE_ACCOUNT_NAME`: The name of your Azure Storage account.
   - `AZURE_STORAGE_ACCOUNT_KEY`: The access key for your Azure Storage account.

---

## Example Configuration

Below is an example `docker-compose.yml` configuration for backing up to Azure Blob Storage:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysqlbkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --storage azure -d database --path my-custom-path
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Azure Blob Configuration
      - AZURE_STORAGE_CONTAINER_NAME=backup-container
      - AZURE_STORAGE_ACCOUNT_NAME=account-name
      - AZURE_STORAGE_ACCOUNT_KEY=Ppby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==

    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Key Notes

- **Custom Path**: Use the `--path` flag to specify a folder within your Azure Blob container for organizing backups.
- **Security**: Ensure your `AZURE_STORAGE_ACCOUNT_KEY` is kept secure and not exposed in public repositories.
- **Compatibility**: This configuration works with Azure Blob Storage and other compatible storage solutions.
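The same configuration also works as a one-off `docker run`, mirroring the CLI examples elsewhere in these docs; a sketch using the same placeholder values as the compose file above (mask the real account key):

```shell
docker run --rm --network your_network_name \
  -e "DB_HOST=mysql" -e "DB_PORT=3306" \
  -e "DB_USERNAME=username" -e "DB_PASSWORD=password" \
  -e "AZURE_STORAGE_CONTAINER_NAME=backup-container" \
  -e "AZURE_STORAGE_ACCOUNT_NAME=account-name" \
  -e "AZURE_STORAGE_ACCOUNT_KEY=your-account-key" \
  jkaninda/mysql-bkup backup --storage azure -d database --path my-custom-path
```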
61
docs/how-tos/backup-all.md
Normal file
61
docs/how-tos/backup-all.md
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
---
title: Backup all databases in the server
layout: default
parent: How Tos
nav_order: 12
---

# Backup All Databases

MySQL-Bkup supports backing up all databases on the server using the `--all-databases` (`-a`) flag. By default, this creates a separate backup file for each database. If you prefer a single backup file, use the `--all-in-one` (`-A`) flag.

Backing up all databases is useful for creating a snapshot of the entire database server, whether for disaster recovery or migration purposes.

## Backup Modes

### Separate Backup Files (Default)

Using `--all-databases` without `--all-in-one` creates individual backup files for each database.

- Creates separate backup files for each database.
- Provides more flexibility when restoring individual databases or tables.
- Can be more manageable when different databases have different retention policies.
- Might take slightly longer due to multiple file operations.
- This is the default behavior when using the `--all-databases` flag.
- It does not back up system databases (`information_schema`, `performance_schema`, `mysql`, `sys`, `innodb`).

**Command:**

```bash
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=dbhost" \
  -e "DB_PORT=3306" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup --all-databases
```

### Single Backup File

Using `--all-in-one` (`-A`) creates a single backup file containing all databases.

- Creates a single backup file containing all databases.
- Easier to manage if you need to restore everything at once.
- Faster to back up and restore in bulk.
- Can be problematic if you only need to restore a specific database or table.
- Recommended for disaster recovery purposes.
- It backs up system databases as well.

**Command:**

```bash
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=dbhost" \
  -e "DB_PORT=3306" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup --all-in-one
```

### When to Use Which?

- Use `--all-in-one` if you want a quick, simple backup for disaster recovery where you'll restore everything at once.
- Use `--all-databases` if you need granularity in restoring specific databases or tables without affecting others.
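Both modes also combine with the recurring-backup options described in the backup how-to. A sketch of a nightly all-databases schedule, assuming `BACKUP_CRON_EXPRESSION` behaves exactly as in the single-database examples:

```yaml
services:
  mysql-bkup:
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --all-databases
    volumes:
      - ./backup:/backup
    environment:
      - DB_HOST=mysql
      - DB_PORT=3306
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Assumed: nightly run at 1:00 AM, as in the recurring-backup examples
      - BACKUP_CRON_EXPRESSION=0 1 * * *
    networks:
      - web

networks:
  web:
```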
---
title: Backup to FTP remote server
layout: default
parent: How Tos
nav_order: 4
---

# Backup to FTP Remote Server

To store your backups on an FTP remote server, you can configure the backup process to use the `--storage ftp` option.

This section explains how to set up and configure FTP-based backups.

---

## Configuration Steps

1. **Specify the Storage Type**
   Add the `--storage ftp` flag to your backup command.

2. **Set the Remote Path**
   Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
   Example: `--path /home/jkaninda/backups`.

3. **Required Environment Variables**
   The following environment variables are mandatory for FTP-based backups:

   - `FTP_HOST`: The hostname or IP address of the FTP server.
   - `FTP_PORT`: The FTP port (default is `21`).
   - `FTP_USER`: The username for FTP authentication.
   - `FTP_PASSWORD`: The password for FTP authentication.
   - `REMOTE_PATH`: The directory on the FTP server where backups will be stored.

---

## Example Configuration

Below is an example `docker-compose.yml` configuration for backing up to an FTP remote server:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --storage ftp -d database
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## FTP Configuration
      - FTP_HOST="hostname"
      - FTP_PORT=21
      - FTP_USER=user
      - FTP_PASSWORD=password
      - REMOTE_PATH=/home/jkaninda/backups
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Key Notes

- **Security**: FTP transmits data, including passwords, in plaintext. For better security, consider using SFTP (SSH File Transfer Protocol) or FTPS (FTP Secure) if supported by your server.
- **Remote Path**: Ensure the `REMOTE_PATH` directory exists on the FTP server and is writable by the specified `FTP_USER`.
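As with the other storage backends, an ad-hoc FTP backup can be run straight from the Docker CLI. A minimal sketch (placeholder values):

```bash
docker run --rm --network your_network_name \
  -e "DB_HOST=mysql" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  -e "FTP_HOST=hostname" \
  -e "FTP_PORT=21" \
  -e "FTP_USER=user" \
  -e "FTP_PASSWORD=password" \
  -e "REMOTE_PATH=/home/jkaninda/backups" \
  jkaninda/mysql-bkup backup --storage ftp -d database
```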
---
layout: default
parent: How Tos
nav_order: 2
---

# Backup to AWS S3

To store your backups on AWS S3, you can configure the backup process to use the `--storage s3` option. This section explains how to set up and configure S3-based backups.

---

## Configuration Steps

1. **Specify the Storage Type**
   Add the `--storage s3` flag to your backup command.

2. **Set the S3 Path**
   Optionally, specify a custom folder within your S3 bucket where backups will be stored using the `--path` flag.
   Example: `--path /my-custom-path`.

3. **Required Environment Variables**
   The following environment variables are mandatory for S3-based backups:

   - `AWS_S3_ENDPOINT`: The S3 endpoint URL (e.g., `https://s3.amazonaws.com`).
   - `AWS_S3_BUCKET_NAME`: The name of the S3 bucket where backups will be stored.
   - `AWS_REGION`: The AWS region where the bucket is located (e.g., `us-west-2`).
   - `AWS_ACCESS_KEY`: Your AWS access key.
   - `AWS_SECRET_KEY`: Your AWS secret key.
   - `AWS_DISABLE_SSL`: Set to `"true"` if using an S3 alternative like Minio without SSL (default is `"false"`).
   - `AWS_FORCE_PATH_STYLE`: Set to `"true"` if using an S3 alternative like Minio (default is `"false"`).

---

## Example Configuration

Below is an example `docker-compose.yml` configuration for backing up to AWS S3:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --storage s3 -d database --path /my-custom-path
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## AWS Configuration
      - AWS_S3_ENDPOINT=https://s3.amazonaws.com
      - AWS_S3_BUCKET_NAME=backup
      - AWS_REGION=us-west-2
      - AWS_ACCESS_KEY=xxxx
      - AWS_SECRET_KEY=xxxxx
      ## Optional: Disable SSL for S3 alternatives like Minio
      - AWS_DISABLE_SSL="false"
      ## Optional: Enable path-style access for S3 alternatives like Minio
      - AWS_FORCE_PATH_STYLE=false
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Recurring Backups to S3

To schedule recurring backups to S3, use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.

### Example: Recurring Backup Configuration

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --storage s3 -d database --cron-expression "0 1 * * *"
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## AWS Configuration
      - AWS_S3_ENDPOINT=https://s3.amazonaws.com
      - AWS_S3_BUCKET_NAME=backup
      - AWS_REGION=us-west-2
      - AWS_ACCESS_KEY=xxxx
      - AWS_SECRET_KEY=xxxxx
      ## Optional: Define a cron schedule for recurring backups
      #- BACKUP_CRON_EXPRESSION=0 1 * * *
      ## Optional: Delete old backups after a specified number of days
      #- BACKUP_RETENTION_DAYS=7
      ## Optional: Disable SSL for S3 alternatives like Minio
      - AWS_DISABLE_SSL="false"
      ## Optional: Enable path-style access for S3 alternatives like Minio
      - AWS_FORCE_PATH_STYLE=false
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Key Notes

- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
- **S3 Alternatives**: If using an S3 alternative like Minio, set `AWS_DISABLE_SSL="true"` and `AWS_FORCE_PATH_STYLE="true"` as needed.
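The same S3 configuration also works for one-off runs from the Docker CLI. A minimal sketch (placeholder values):

```bash
docker run --rm --network your_network_name \
  -e "DB_HOST=mysql" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  -e "AWS_S3_ENDPOINT=https://s3.amazonaws.com" \
  -e "AWS_S3_BUCKET_NAME=backup" \
  -e "AWS_REGION=us-west-2" \
  -e "AWS_ACCESS_KEY=xxxx" \
  -e "AWS_SECRET_KEY=xxxxx" \
  jkaninda/mysql-bkup backup --storage s3 -d database
```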
---
title: Backup to SSH or SFTP
layout: default
parent: How Tos
nav_order: 3
---

# Backup to SFTP or SSH Remote Server

To store your backups on an `SFTP` or `SSH` remote server instead of the default storage, you can configure the backup process to use the `--storage ssh` or `--storage remote` option.

This section explains how to set up and configure SSH-based backups.

---

## Configuration Steps

1. **Specify the Storage Type**
   Add the `--storage ssh` or `--storage remote` flag to your backup command.

2. **Set the Remote Path**
   Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
   Example: `--path /home/jkaninda/backups`.

3. **Required Environment Variables**
   The following environment variables are mandatory for SSH-based backups:

   - `SSH_HOST`: The hostname or IP address of the remote server.
   - `SSH_USER`: The username for SSH authentication.
   - `REMOTE_PATH`: The directory on the remote server where backups will be stored.
   - `SSH_IDENTIFY_FILE`: The path to the private key file for SSH authentication.
   - `SSH_PORT`: The SSH port (default is `22`).
   - `SSH_PASSWORD`: (Optional) Use this only if you are not using a private key for authentication.

{: .note }
**Security Recommendation**: Using a private key (`SSH_IDENTIFY_FILE`) is strongly recommended over password-based authentication (`SSH_PASSWORD`) for better security.

---

## Example Configuration

Below is an example `docker-compose.yml` configuration for backing up to an SSH remote server:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --storage remote -d database
    volumes:
      - ./id_ed25519:/tmp/id_ed25519
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## SSH Configuration
      - SSH_HOST="hostname"
      - SSH_PORT=22
      - SSH_USER=user
      - REMOTE_PATH=/home/jkaninda/backups
      - SSH_IDENTIFY_FILE=/tmp/id_ed25519
      ## Optional: Use password instead of private key (not recommended)
      #- SSH_PASSWORD=password
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Recurring Backups to SSH Remote Server

To schedule recurring backups, you can use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable.
This allows you to define a cron schedule for automated backups.

### Example: Recurring Backup Configuration

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup -d database --storage ssh --cron-expression "@daily"
    volumes:
      - ./id_ed25519:/tmp/id_ed25519
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## SSH Configuration
      - SSH_HOST="hostname"
      - SSH_PORT=22
      - SSH_USER=user
      - REMOTE_PATH=/home/jkaninda/backups
      - SSH_IDENTIFY_FILE=/tmp/id_ed25519
      ## Optional: Delete old backups after a specified number of days
      #- BACKUP_RETENTION_DAYS=7
      ## Optional: Use password instead of private key (not recommended)
      #- SSH_PASSWORD=password
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Key Notes

- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
- **Security**: Always prefer private key authentication (`SSH_IDENTIFY_FILE`) over password-based authentication (`SSH_PASSWORD`) for enhanced security.
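If you don't already have a key pair for the backup user, one can be generated with standard OpenSSH tooling. A sketch that produces the `id_ed25519` file mounted in the examples above (the user and host are placeholders):

```bash
# Generate a passphrase-less ed25519 key pair for the backup container
ssh-keygen -t ed25519 -f ./id_ed25519 -N ""

# Authorize the public key for the backup user on the remote server
ssh-copy-id -i ./id_ed25519.pub user@hostname
```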
---
parent: How Tos
nav_order: 1
---

# Backup Database

To back up your database, use the `backup` command.

This section explains how to configure and run backups, including recurring backups, using Docker or Kubernetes.

---

## Default Configuration

- **Storage**: By default, backups are stored locally in the `/backup` directory.
- **Compression**: Backups are compressed using `gzip` by default. Use the `--disable-compression` flag to disable compression.
- **Security**: It is recommended to create a dedicated user with read-only access for backup tasks (see the sketch at the end of this page).

{: .note }
The backup process supports recurring backups on Docker or Docker Swarm. On Kubernetes, it can be deployed as a CronJob.

---

## Example: Basic Backup Configuration

Below is an example `docker-compose.yml` configuration for backing up a database:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup -d database
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Backup Using Docker CLI

You can also run backups directly using the Docker CLI:

```bash
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=dbhost" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d database_name
```

---

## Recurring Backups

To schedule recurring backups, use the `--cron-expression` (`-e`) flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.

### Example: Recurring Backup Configuration

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup -d database --cron-expression @midnight
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Optional: Define a cron schedule for recurring backups
      - BACKUP_CRON_EXPRESSION=@midnight
      ## Optional: Delete old backups after a specified number of days
      #- BACKUP_RETENTION_DAYS=7
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Key Notes

- **Cron Expression**: Use the `--cron-expression` (`-e`) flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example:
  - `@midnight`: Runs the backup daily at midnight.
  - `0 1 * * *`: Runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
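The read-only user recommended under Default Configuration can be created with standard MySQL statements. A sketch (the exact privilege list is an assumption based on what `mysqldump` typically needs; adjust to your server):

```bash
# Hypothetical example: create a dedicated backup user with read-only privileges
mysql -h mysql -u root -p -e "
  CREATE USER 'backup'@'%' IDENTIFIED BY 'strong-password';
  GRANT SELECT, LOCK TABLES, SHOW VIEW, TRIGGER, PROCESS ON *.* TO 'backup'@'%';
  FLUSH PRIVILEGES;"
```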
---
title: Deploy on Kubernetes
layout: default
parent: How Tos
nav_order: 9
---

# Deploy on Kubernetes

To deploy MySQL Backup on Kubernetes, you can use a `Job` for one-time backups or restores, and a `CronJob` for recurring backups.

Below are examples for different use cases.

---

## Backup Job to S3 Storage

This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an S3-compatible storage.

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: backup-job # assumed name; these context lines are elided in the source diff
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup --storage s3
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: ""
            - name: DB_USERNAME
              value: ""
            # Use Kubernetes Secrets for sensitive data like passwords
            - name: DB_PASSWORD
              value: ""
            - name: AWS_S3_ENDPOINT
              value: "https://s3.amazonaws.com"
            - name: AWS_S3_BUCKET_NAME
              value: "xxx"
            - name: AWS_REGION
              value: "us-west-2"
            - name: AWS_ACCESS_KEY
              value: "xxxx"
            - name: AWS_SECRET_KEY
              value: "xxxx"
            - name: AWS_DISABLE_SSL
              value: "false"
            - name: AWS_FORCE_PATH_STYLE
              value: "false"
      restartPolicy: Never
```
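The manifests in this guide use literal `value:` fields for readability. In practice, pull sensitive values from a Kubernetes Secret instead. A sketch (the secret and key names are assumptions):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: db-credentials   # assumed name
type: Opaque
stringData:
  password: my-db-password
```

Then reference it from the container spec in place of a literal value:

```yaml
env:
  - name: DB_PASSWORD
    valueFrom:
      secretKeyRef:
        name: db-credentials
        key: password
```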
---

## Backup Job to SSH Remote Server

This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an SSH remote server.

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: backup-job # assumed name; these context lines are elided in the source diff
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup --storage ssh --disable-compression
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Use Kubernetes Secrets for sensitive data like passwords
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "xxx"
            - name: SSH_PORT
              value: "22"
            - name: SSH_USER
              value: "xxx"
            - name: SSH_PASSWORD
              value: "xxxx"
            - name: SSH_REMOTE_PATH
              value: "/home/toto/backup"
            # Optional: Required if you want to encrypt your backup
            - name: GPG_PASSPHRASE
              value: "xxxx"
      restartPolicy: Never
```

---

## Restore Job

This example demonstrates how to configure a Kubernetes `Job` to restore a MySQL database from a backup stored on an SSH remote server.

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: restore-job # assumed name; these context lines are elided in the source diff
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - restore --storage ssh --file store_20231219_022941.sql.gz
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Use Kubernetes Secrets for sensitive data like passwords
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "xxx"
            - name: SSH_PORT
              value: "22"
            - name: SSH_USER
              value: "xxx"
            - name: SSH_PASSWORD
              value: "xxxx"
            - name: SSH_REMOTE_PATH
              value: "/home/toto/backup"
            # Optional: Required if your backup was encrypted
            #- name: GPG_PASSPHRASE
            #  value: "xxxx"
      restartPolicy: Never
```
---

## Recurring Backup with CronJob

This example demonstrates how to configure a Kubernetes `CronJob` for recurring backups to an SSH remote server.

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-cronjob # assumed name; these context lines are elided in the source diff
spec:
  schedule: "0 1 * * *" # assumed schedule; elided in the source diff
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: mysql-bkup
              # In production, lock your image tag to a specific release version
              # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
              # for available releases.
              image: jkaninda/mysql-bkup
              command:
                - /bin/sh
                - -c
                - backup --storage ssh --disable-compression
              resources:
                limits:
                  memory: "128Mi"
                  cpu: "500m"
              env:
                - name: DB_PORT
                  value: "3306"
                - name: DB_HOST
                  value: ""
                - name: DB_NAME
                  value: "test"
                - name: DB_USERNAME
                  value: "username"
                # Use Kubernetes Secrets for sensitive data like passwords
                - name: DB_PASSWORD
                  value: ""
                - name: SSH_HOST_NAME
                  value: "192.168.1.16"
                - name: SSH_PORT
                  value: "2222"
                - name: SSH_USER
                  value: "jkaninda"
                - name: SSH_REMOTE_PATH
                  value: "/config/backup"
                - name: SSH_PASSWORD
                  value: "password"
                # Optional: Required if you want to encrypt your backup
                #- name: GPG_PASSPHRASE
                #  value: "xxx"
          restartPolicy: Never
```

---

## Kubernetes Rootless Deployment

This example demonstrates how to run the backup container in a rootless environment, suitable for platforms like OpenShift.

```yaml
apiVersion: batch/v1
kind: Job # assumed kind; these context lines are elided in the source diff
metadata:
  name: backup-rootless # assumed name
spec:
  template:
    spec:
      securityContext:
        runAsUser: 1000 # assumed; elided in the source diff
        runAsGroup: 3000
        fsGroup: 2000
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup --storage ssh --disable-compression
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "test"
            - name: DB_USERNAME
              value: "username"
            # Use Kubernetes Secrets for sensitive data like passwords
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "192.168.1.16"
            - name: SSH_PORT
              value: "2222"
            - name: SSH_USER
              value: "jkaninda"
            - name: SSH_REMOTE_PATH
              value: "/config/backup"
            - name: SSH_PASSWORD
              value: "password"
            # Optional: Required if you want to encrypt your backup
            #- name: GPG_PASSPHRASE
            #  value: "xxx"
      restartPolicy: OnFailure
```

---

## Migrate Database

This example demonstrates how to configure a Kubernetes `Job` to migrate a MySQL database from one server to another.

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: migrate-db
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - migrate
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            ## Source Database
            - name: DB_HOST
              value: "mysql"
            - name: DB_PORT
              value: "3306"
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            - name: DB_PASSWORD
              value: "password"
            ## Target Database
            - name: TARGET_DB_HOST
              value: "target-mysql"
            - name: TARGET_DB_PORT
              value: "3306"
            - name: TARGET_DB_NAME
              value: "dbname"
            - name: TARGET_DB_USERNAME
              value: "username"
            - name: TARGET_DB_PASSWORD
              value: "password"
      restartPolicy: Never
```

---

## Key Notes

- **Security**: Always use Kubernetes Secrets for sensitive data like passwords and access keys.
- **Resource Limits**: Adjust resource limits (`memory` and `cpu`) based on your workload requirements.
- **Cron Schedule**: Use standard cron expressions for scheduling recurring backups.
- **Rootless Deployment**: The image supports running in rootless environments, making it suitable for platforms like OpenShift.
---
title: Update deprecated configurations
layout: default
parent: How Tos
nav_order: 11
---
---
title: Encrypt backups using GPG
layout: default
parent: How Tos
nav_order: 8
---

# Encrypt Backup

The image supports encrypting backups using one of two methods: **GPG with a passphrase** or **GPG with a public key**. When a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted and saved as a `.sql.gpg` or `.sql.gz.gpg` file.

{: .warning }
To restore an encrypted backup, you must provide the same GPG passphrase or private key used during the backup process.

---

## Key Features

- **Cipher Algorithm**: `aes256`
- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored automatically without manual decryption.
- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption before restoration.

---

## Using GPG Passphrase

To encrypt backups using a GPG passphrase, set the `GPG_PASSPHRASE` environment variable. The backup will be encrypted and can be restored automatically.

### Example Configuration

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup -d database
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Required to encrypt backup
      - GPG_PASSPHRASE=my-secure-passphrase
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Using GPG Public Key

To encrypt backups using a GPG public key, set the `GPG_PUBLIC_KEY` environment variable to the path of your public key file. Backups encrypted with a public key require manual decryption before restoration.

### Example Configuration

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup -d database
    volumes:
      - ./backup:/backup
      - ./public_key.asc:/config/public_key.asc
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Required to encrypt backup
      - GPG_PUBLIC_KEY=/config/public_key.asc
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```
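The `public_key.asc` mounted above can be produced with standard GnuPG commands. A sketch (the key identity is a placeholder):

```bash
# Generate a key pair interactively (or use --batch with a parameter file)
gpg --full-generate-key

# Export the public key in ASCII-armored form for the container
gpg --armor --export "backup@example.com" > public_key.asc
```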
---

## Manual Decryption

If you encrypted your backup using a GPG public key, you must manually decrypt it before restoration. Use the `gnupg` tool for decryption.

### Decrypt Using a Passphrase

```bash
gpg --batch --passphrase "my-passphrase" \
  --output database_20240730_044201.sql.gz \
  --decrypt database_20240730_044201.sql.gz.gpg
```

### Decrypt Using a Private Key

```bash
gpg --output database_20240730_044201.sql.gz \
  --decrypt database_20240730_044201.sql.gz.gpg
```

---

## Key Notes

- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored directly without manual decryption.
- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption using the corresponding private key.
- **Security**: Always keep your GPG passphrase and private key secure. Use Kubernetes Secrets or other secure methods to manage sensitive data.
@@ -2,130 +2,105 @@
|
|||||||
title: Migrate database
|
title: Migrate database
|
||||||
layout: default
|
layout: default
|
||||||
parent: How Tos
|
parent: How Tos
|
||||||
nav_order: 9
|
nav_order: 10
|
||||||
---
|
---
|
||||||
|
|
||||||
# Migrate database
|
# Migrate Database
|
||||||
|
|
||||||
To migrate the database, you need to add `migrate` command.
|
To migrate a MySQL database from a source to a target database, you can use the `migrate` command. This feature simplifies the process by combining the backup and restore operations into a single step.
|
||||||
|
|
||||||
{: .note }
|
{: .note }
|
||||||
The Mysql backup has another great feature: migrating your database from a source database to a target.
|
The `migrate` command eliminates the need for separate backup and restore operations. It directly transfers data from the source database to the target database.
|
||||||
|
|
||||||
As you know, to restore a database from a source to a target database, you need 2 operations: which is to start by backing up the source database and then restoring the source backed database to the target database.
|
|
||||||
Instead of proceeding like that, you can use the integrated feature `(migrate)`, which will help you migrate your database by doing only one operation.
|
|
||||||
|
|
||||||
{: .warning }
|
{: .warning }
|
||||||
The `migrate` operation is irreversible, please backup your target database before this action.
|
The `migrate` operation is **irreversible**. Always back up your target database before performing this action.
|
||||||
|
|
||||||
### Docker compose
|
---
|
||||||
```yml
|
|
||||||
|
## Configuration Steps
|
||||||
|
|
||||||
|
1. **Source Database**: Provide connection details for the source database.
|
||||||
|
2. **Target Database**: Provide connection details for the target database.
|
||||||
|
3. **Run the Migration**: Use the `migrate` command to initiate the migration.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Example: Docker Compose Configuration
|
||||||
|
|
||||||
|
Below is an example `docker-compose.yml` configuration for migrating a database:
|
||||||
|
|
||||||
|
```yaml
|
||||||
services:
|
services:
|
||||||
mysql-bkup:
|
mysql-bkup:
|
||||||
# In production, it is advised to lock your image tag to a proper
|
# In production, lock your image tag to a specific release version
|
||||||
# release version instead of using `latest`.
|
# instead of using `latest`. Check https://github.com/jkaninda/mysqlbkup/releases
|
||||||
# Check https://github.com/jkaninda/mysql-bkup/releases
|
# for available releases.
|
||||||
# for a list of available releases.
|
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
container_name: mysql-bkup
|
container_name: mysql-bkup
|
||||||
command: migrate
|
command: migrate
|
||||||
volumes:
|
volumes:
|
||||||
- ./backup:/backup
|
- ./backup:/backup
|
||||||
environment:
|
environment:
|
||||||
## Source database
|
## Source Database
|
||||||
- DB_PORT=3306
|
- DB_PORT=3306
|
||||||
- DB_HOST=mysql
|
- DB_HOST=mysql
|
||||||
- DB_NAME=database
|
- DB_NAME=database
|
||||||
- DB_USERNAME=username
|
- DB_USERNAME=username
|
||||||
- DB_PASSWORD=password
|
- DB_PASSWORD=password
|
||||||
## Target database
|
|
||||||
- TARGET_DB_HOST=target-mysql
|
## Target Database
|
||||||
|
- TARGET_DB_HOST=target-postgres
|
||||||
- TARGET_DB_PORT=3306
|
- TARGET_DB_PORT=3306
|
||||||
- TARGET_DB_NAME=dbname
|
- TARGET_DB_NAME=dbname
|
||||||
- TARGET_DB_USERNAME=username
|
- TARGET_DB_USERNAME=username
|
||||||
- TARGET_DB_PASSWORD=password
|
- TARGET_DB_PASSWORD=password
|
||||||
# mysql-bkup container must be connected to the same network with your database
|
|
||||||
|
# Ensure the mysql-bkup container is connected to the same network as your database
|
||||||
networks:
|
networks:
|
||||||
- web
|
- web
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
web:
|
web:
|
||||||
```
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
### Migrate database using Docker CLI
|
## Migrate Database Using Docker CLI
|
||||||
|
|
||||||
|
You can also run the migration directly using the Docker CLI. Below is an example:
|
||||||
|
|
||||||
### Environment Variables

Save your source and target database connection details in an environment file (e.g., `your-env`):

```bash
## Source Database
DB_HOST=mysql
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password

## Target Database
TARGET_DB_HOST=target-mysql
TARGET_DB_PORT=3306
TARGET_DB_NAME=dbname
TARGET_DB_USERNAME=username
TARGET_DB_PASSWORD=password
```
### Run the Migration

```bash
docker run --rm --network your_network_name \
  --env-file your-env \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup migrate
```
---

## Key Notes

- **Irreversible Operation**: The `migrate` command directly transfers data from the source to the target database. Ensure you have a backup of the target database before proceeding.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your source and target databases.
docs/how-tos/mutli-backup.md (new file)

@@ -0,0 +1,103 @@
---
title: Run multiple database backup schedules in the same container
layout: default
parent: How Tos
nav_order: 11
---

# Multiple Backup Schedules

This tool supports running multiple database backup schedules within the same container.
You can configure these schedules with different settings using a **configuration file**. This flexibility allows you to manage backups for multiple databases efficiently.

---

## Configuration File Setup

The configuration file can be mounted into the container at `/config/config.yaml`, `/config/config.yml`, or specified via the `BACKUP_CONFIG_FILE` environment variable.

### Key Features

- **Global Environment Variables**: Use these for databases that share the same configuration.
- **Database-Specific Overrides**: Override global settings for individual databases by specifying them in the configuration file or using the database name as a suffix in the variable name (e.g., `DB_HOST_DATABASE1`).
- **Global Cron Expression**: Define a global `cronExpression` in the configuration file to schedule backups for all databases. If omitted, backups run immediately.
- **Configuration File Path**: Specify the configuration file path using:
  - The `BACKUP_CONFIG_FILE` environment variable.
  - The `--config` or `-c` flag for the backup command (see the example run below).
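For example, a one-off run that points the backup command at a mounted configuration file could look like this (a sketch; the network name and host paths are placeholders):

```bash
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -v $PWD/config.yaml:/backup/config.yaml \
  jkaninda/mysql-bkup backup --config /backup/config.yaml
```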
---

## Configuration File Example

Below is an example configuration file (`config.yaml`) that defines multiple databases and their respective backup settings:

```yaml
# Optional: Define a global cron expression for scheduled backups.
# Example: "@every 20m" (runs every 20 minutes). If omitted, backups run immediately.
cronExpression: ""
backupRescueMode: false # Optional: Set to true to enable rescue mode for backups.
databases:
  - host: mysql1 # Optional: Overrides DB_HOST or uses DB_HOST_DATABASE1.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_DATABASE1.
    name: database1 # Required: Database name.
    user: database1 # Optional: Overrides DB_USERNAME or uses DB_USERNAME_DATABASE1.
    password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_DATABASE1.
    path: /s3-path/database1 # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).

  - host: mysql2 # Optional: Overrides DB_HOST or uses DB_HOST_LLDAP.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_LLDAP.
    name: lldap # Required: Database name.
    user: lldap # Optional: Overrides DB_USERNAME or uses DB_USERNAME_LLDAP.
    password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_LLDAP.
    path: /s3-path/lldap # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).

  - host: mysql3 # Optional: Overrides DB_HOST or uses DB_HOST_KEYCLOAK.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_KEYCLOAK.
    name: keycloak # Required: Database name.
    user: keycloak # Optional: Overrides DB_USERNAME or uses DB_USERNAME_KEYCLOAK.
    password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_KEYCLOAK.
    path: /s3-path/keycloak # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).

  - host: mysql4 # Optional: Overrides DB_HOST or uses DB_HOST_JOPLIN.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_JOPLIN.
    name: joplin # Required: Database name.
    user: joplin # Optional: Overrides DB_USERNAME or uses DB_USERNAME_JOPLIN.
    password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_JOPLIN.
    path: /s3-path/joplin # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
```
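To illustrate the override convention, global variables can be supplied once and overridden per database by suffixing the variable with the upper-cased database name (a sketch; values are placeholders):

```bash
# Global credentials shared by all databases
DB_HOST=mysql
DB_USERNAME=admin
DB_PASSWORD=password

# Override the host for database1 only
DB_HOST_DATABASE1=mysql1
```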
---

## Docker Compose Configuration

To use the configuration file in a Docker Compose setup, mount the file and specify its path using the `BACKUP_CONFIG_FILE` environment variable.

### Example: Docker Compose File

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup # or: backup --config /backup/config.yaml
    volumes:
      - ./backup:/backup # Mount the backup directory
      - ./config.yaml:/backup/config.yaml # Mount the configuration file
    environment:
      ## Specify the path to the configuration file
      - BACKUP_CONFIG_FILE=/backup/config.yaml
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---
docs/how-tos/receive-notification.md (new file)

@@ -0,0 +1,197 @@
---
title: Receive notifications
layout: default
parent: How Tos
nav_order: 13
---

# Receive Notifications

You can configure the system to send email or Telegram notifications when a backup succeeds or fails.
This section explains how to set up and customize notifications.

---

## Email Notifications

To send email notifications, provide SMTP credentials, a sender address, and recipient addresses. Notifications will be sent for both successful and failed backup runs.

### Example: Email Notification Configuration

```yaml
services:
  mysql-bkup:
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## SMTP Configuration
      - MAIL_HOST=smtp.example.com
      - MAIL_PORT=587
      - MAIL_USERNAME=your-email@example.com
      - MAIL_PASSWORD=your-email-password
      - MAIL_FROM=Backup Jobs <backup@example.com>
      ## Multiple recipients separated by a comma
      - MAIL_TO=me@example.com,team@example.com,manager@example.com
      - MAIL_SKIP_TLS=false
      ## Time format for notifications
      - TIME_FORMAT=2006-01-02 at 15:04:05
      ## Backup reference (e.g., database/cluster name or server name)
      - BACKUP_REFERENCE=database/Paris cluster
    networks:
      - web

networks:
  web:
```

---
## Telegram Notifications

To send Telegram notifications, provide your bot token and chat ID. Notifications will be sent for both successful and failed backup runs.

### Example: Telegram Notification Configuration

```yaml
services:
  mysql-bkup:
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Telegram Configuration
      - TG_TOKEN=[BOT ID]:[BOT TOKEN]
      - TG_CHAT_ID=your-chat-id
      ## Time format for notifications
      - TIME_FORMAT=2006-01-02 at 15:04:05
      ## Backup reference (e.g., database/cluster name or server name)
      - BACKUP_REFERENCE=database/Paris cluster
    networks:
      - web

networks:
  web:
```

---
## Customize Notifications

You can customize the title and body of notifications using Go templates. Template files must be mounted inside the container at `/config/templates`. The following templates are supported:

- `email.tmpl`: Template for successful email notifications.
- `telegram.tmpl`: Template for successful Telegram notifications.
- `email-error.tmpl`: Template for failed email notifications.
- `telegram-error.tmpl`: Template for failed Telegram notifications.
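In Docker Compose, mounting a local templates directory to that path is enough to activate the overrides (a sketch; the host path `./templates` is an assumption):

```yaml
services:
  mysql-bkup:
    image: jkaninda/mysql-bkup
    command: backup
    volumes:
      - ./backup:/backup
      - ./templates:/config/templates # Custom notification templates
```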
### Template Data

The following data is passed to the templates:

- `Database`: Database name.
- `StartTime`: Backup start time.
- `EndTime`: Backup end time.
- `Storage`: Backup storage type (e.g., local, S3, SSH).
- `BackupLocation`: Backup file location.
- `BackupSize`: Backup file size in bytes.
- `BackupReference`: Backup reference (e.g., database/cluster name or server name).
- `Error`: Error message (only for error templates).

---
### Example Templates

#### `email.tmpl` (Successful Backup)

```html
<h2>Hi,</h2>
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
<h3>Backup Details:</h3>
<ul>
  <li>Database Name: {{.Database}}</li>
  <li>Backup Start Time: {{.StartTime}}</li>
  <li>Backup End Time: {{.EndTime}}</li>
  <li>Backup Storage: {{.Storage}}</li>
  <li>Backup Location: {{.BackupLocation}}</li>
  <li>Backup Size: {{.BackupSize}} bytes</li>
  <li>Backup Reference: {{.BackupReference}}</li>
</ul>
<p>Best regards,</p>
```

#### `telegram.tmpl` (Successful Backup)

```html
✅ Database Backup Notification – {{.Database}}

Hi,
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.

Backup Details:
- Database Name: {{.Database}}
- Backup Start Time: {{.StartTime}}
- Backup End Time: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}} bytes
- Backup Reference: {{.BackupReference}}
```

#### `email-error.tmpl` (Failed Backup)

```html
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <title>🔴 Urgent: Database Backup Failure Notification</title>
</head>
<body>
  <h2>Hi,</h2>
  <p>An error occurred during database backup.</p>
  <h3>Failure Details:</h3>
  <ul>
    <li>Error Message: {{.Error}}</li>
    <li>Date: {{.EndTime}}</li>
    <li>Backup Reference: {{.BackupReference}}</li>
  </ul>
</body>
</html>
```

#### `telegram-error.tmpl` (Failed Backup)

```html
🔴 Urgent: Database Backup Failure Notification

An error occurred during database backup.

Failure Details:
Error Message: {{.Error}}
Date: {{.EndTime}}
Backup Reference: {{.BackupReference}}
```

---

## Key Notes

- **SMTP Configuration**: Ensure your SMTP server supports TLS unless `MAIL_SKIP_TLS` is set to `true`.
- **Telegram Configuration**: Obtain your bot token and chat ID from Telegram.
- **Custom Templates**: Mount custom templates to `/config/templates` to override default notifications.
- **Time Format**: Use the `TIME_FORMAT` environment variable to customize the timestamp format in notifications.
@@ -2,94 +2,74 @@
title: Restore database from AWS S3
layout: default
parent: How Tos
nav_order: 6
---

# Restore Database from S3 Storage

To restore a MySQL database from a backup stored in S3, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:

- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)

---

## Configuration Steps

1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Set the Storage Type**: Add the `--storage s3` flag to indicate that the backup is stored in S3.
3. **Provide S3 Configuration**: Include the necessary AWS S3 credentials and configuration.
4. **Provide Database Credentials**: Ensure the correct database connection details are provided.

---

## Example: Restore from S3 Configuration

Below is an example `docker-compose.yml` configuration for restoring a database from S3 storage:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
    volumes:
      - ./backup:/backup # Mount the directory for local operations (if needed)
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## AWS S3 Configuration
      - AWS_S3_ENDPOINT=https://s3.amazonaws.com
      - AWS_S3_BUCKET_NAME=backup
      - AWS_REGION=us-west-2
      - AWS_ACCESS_KEY=xxxx
      - AWS_SECRET_KEY=xxxxx
      ## Optional: Disable SSL for S3 alternatives like Minio
      - AWS_DISABLE_SSL=false
      ## Optional: Enable path-style access for S3 alternatives like Minio
      - AWS_FORCE_PATH_STYLE=false
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Key Notes

- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **S3 Path**: Use the `--path` flag to specify the folder within the S3 bucket where the backup file is located.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **S3 Alternatives**: For S3-compatible storage like Minio, set `AWS_DISABLE_SSL` and `AWS_FORCE_PATH_STYLE` as needed.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
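Restoring an encrypted dump only adds the passphrase to the same setup (a sketch; the network name, env file, and passphrase are illustrative):

```bash
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  --env-file your-env-file \
  -e "GPG_PASSPHRASE=my-passphrase" \
  jkaninda/mysql-bkup restore --storage s3 --path /my-custom-path -f store_20231219_022941.sql.gz.gpg
```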
@@ -2,92 +2,73 @@
title: Restore database from SSH
layout: default
parent: How Tos
nav_order: 7
---

# Restore Database from SSH Remote Server

To restore a MySQL database from a backup stored on an SSH remote server, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:

- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)

---

## Configuration Steps

1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Set the Storage Type**: Add the `--storage ssh` flag to indicate that the backup is stored on an SSH remote server.
3. **Provide SSH Configuration**: Include the necessary SSH credentials and configuration.
4. **Provide Database Credentials**: Ensure the correct database connection details are provided.

---

## Example: Restore from SSH Remote Server Configuration

Below is an example `docker-compose.yml` configuration for restoring a database from an SSH remote server:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
    volumes:
      - ./backup:/backup # Mount the directory for local operations (if needed)
      - ./id_ed25519:/tmp/id_ed25519 # Mount the SSH private key file
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## SSH Configuration
      - SSH_HOST_NAME=hostname
      - SSH_PORT=22
      - SSH_USER=user
      - SSH_REMOTE_PATH=/home/jkaninda/backups
      - SSH_IDENTIFY_FILE=/tmp/id_ed25519
      ## Optional: Use password instead of private key (not recommended)
      #- SSH_PASSWORD=password
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Key Notes

- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **SSH Path**: Use the `--path` flag to specify the folder on the SSH remote server where the backup file is located.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **SSH Authentication**: Use a private key (`SSH_IDENTIFY_FILE`) for SSH authentication instead of a password for better security.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
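If you do not yet have a dedicated key pair, one can be generated and installed on the remote server with standard OpenSSH tooling (shown as an illustration; host and user are placeholders):

```bash
ssh-keygen -t ed25519 -f ./id_ed25519 -N ""
ssh-copy-id -i ./id_ed25519.pub user@hostname
```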
@@ -2,39 +2,63 @@
title: Restore database
layout: default
parent: How Tos
nav_order: 5
---

# Restore Database

To restore a MySQL database, use the `restore` command and specify the backup file to restore with the `--file` flag.

The system supports the following file formats:

- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)

---

## Configuration Steps

1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Provide Database Credentials**: Ensure the correct database connection details are provided.

---

## Example: Restore Configuration

Below is an example `docker-compose.yml` configuration for restoring a database:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: restore -d database -f store_20231219_022941.sql.gz
    volumes:
      - ./backup:/backup # Mount the directory containing the backup file
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web

networks:
  web:
```

---

## Key Notes

- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
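The same restore can be run without Compose; a one-off CLI equivalent looks like this (a sketch with placeholder values):

```bash
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=mysql" \
  -e "DB_NAME=database" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup restore -d database -f store_20231219_022941.sql.gz
```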
docs/index.md

@@ -6,145 +6,80 @@ nav_order: 1
# About mysql-bkup
{:.no_toc}

**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
It supports a variety of storage options and ensures data security through GPG encryption.

---

## Key Features

### Storage Options
- **Local storage**
- **AWS S3** or any S3-compatible object storage
- **FTP**
- **SFTP**
- **SSH-compatible storage**
- **Azure Blob storage**

### Data Security
- Backups can be encrypted using **GPG** to ensure data confidentiality.

### Deployment Flexibility
- Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
- Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
- Supports recurring backups of MySQL databases:
  - On Docker for automated backup schedules.
  - As a **Job** or **CronJob** on Kubernetes.

### Notifications
- Receive real-time updates on backup success or failure via:
  - **Telegram**
  - **Email**

---

## Use Cases

- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
- **Cross-Environment Migration:** Easily migrate MySQL databases across different environments using supported storage options.
- **Secure Backup Management:** Protect your data with GPG encryption.

---

## Get Involved

We welcome contributions! Feel free to give us a ⭐, submit PRs, or open issues on our [GitHub repository](https://github.com/jkaninda/mysql-bkup).

{: .fs-6 .fw-300 }

---

{: .note }
Code and documentation for the `v1` version are available on [this branch][v1-branch].

[v1-branch]: https://github.com/jkaninda/mysql-bkup

---

## Available Image Registries

The Docker image is published to both **Docker Hub** and the **GitHub Container Registry**. You can use either of the following:

```bash
docker pull jkaninda/mysql-bkup
docker pull ghcr.io/jkaninda/mysql-bkup
```

While the documentation references Docker Hub, all examples work seamlessly with `ghcr.io`.

---

## References

We created this image as a simpler and more lightweight alternative to existing solutions. Here’s why:

- **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
- **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
- **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
- **Kubernetes Support:** Designed to work seamlessly with Kubernetes.
docs/quickstart/index.md (new file)

@@ -0,0 +1,152 @@
---
title: Quickstart
layout: home
nav_order: 2
---

# Quickstart

This guide provides quick examples for running backups using the Docker CLI, Docker Compose, and Kubernetes.

---

## Simple Backup Using Docker CLI

To run a one-time backup, bind your local volume to `/backup` in the container and execute the `backup` command:

```bash
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=dbhost" \
  -e "DB_PORT=3306" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d database_name
```

### Using an Environment File

Alternatively, you can use an `--env-file` to pass a full configuration:

```bash
docker run --rm --network your_network_name \
  --env-file your-env-file \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup backup -d database_name
```

### Simple Restore Using Docker CLI

To restore a database, bind your local volume to `/backup` in the container and run the `restore` command:

```bash
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=dbhost" \
  -e "DB_PORT=3306" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup restore -d database_name -f backup_file.sql.gz
```

---

## Simple Backup Using Docker Compose

Below is an example `docker-compose.yml` configuration for running a backup:

```yaml
services:
  mysql-bkup:
    # In production, lock the image tag to a specific release version.
    # Check https://github.com/jkaninda/mysql-bkup/releases for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=foo
      - DB_USERNAME=bar
      - DB_PASSWORD=password
      - TZ=Europe/Paris
    # Ensure the mysql-bkup container is connected to the same network as your database.
    networks:
      - web

networks:
  web:
```

---

## Recurring Backup with Docker

To schedule recurring backups, use the `--cron-expression` flag:

```bash
docker run --rm --network network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=hostname" \
  -e "DB_USERNAME=user" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m"
```

For predefined schedules, refer to the [documentation](https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules).

---

## Backup Using Kubernetes

Below is an example Kubernetes `Job` configuration for running a backup:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: backup-job
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock the image tag to a specific release version.
          # Check https://github.com/jkaninda/mysql-bkup/releases for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup -d dbname
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_HOST
              value: "mysql"
            - name: DB_USERNAME
              value: "user"
            - name: DB_PASSWORD
              value: "password"
          volumeMounts:
            - mountPath: /backup
              name: backup
      volumes:
        - name: backup
          hostPath:
            path: /home/toto/backup # Directory location on the host
            type: Directory # Optional field
      restartPolicy: Never
```

---

## Key Notes

- **Volume Binding**: Ensure the `/backup` directory is mounted to persist backup files.
- **Environment Variables**: Use environment variables or an `--env-file` to pass database credentials and other configurations.
- **Cron Expressions**: Use standard cron expressions or predefined schedules for recurring backups.
- **Kubernetes Jobs**: Use Kubernetes `Job` or `CronJob` for running backups in a Kubernetes cluster.
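For recurring backups on Kubernetes, the same container can be wrapped in a `CronJob` instead of a `Job` (a minimal sketch under the same assumptions as the Job above; the schedule is illustrative):

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: backup-cronjob
spec:
  schedule: "0 1 * * *" # Every day at 1:00 AM
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: mysql-bkup
              image: jkaninda/mysql-bkup
              command:
                - /bin/sh
                - -c
                - backup -d dbname
              env:
                - name: DB_HOST
                  value: "mysql"
                - name: DB_USERNAME
                  value: "user"
                - name: DB_PASSWORD
                  value: "password"
          restartPolicy: Never
```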
@@ -1,112 +1,132 @@
---
title: Configuration Reference
layout: default
nav_order: 3
---

# Configuration Reference

MySQL backup, restore, and migration processes can be configured using **environment variables** or **CLI flags**.

## CLI Utility Usage

The `mysql-bkup` CLI provides commands and options to manage MySQL backups efficiently.

| Option                  | Short Flag | Description                                                                              |
|-------------------------|------------|------------------------------------------------------------------------------------------|
| `mysql-bkup`            | `bkup`     | CLI tool for managing MySQL backups, restoration, and migration.                         |
| `backup`                |            | Executes a backup operation.                                                             |
| `restore`               |            | Restores a database from a backup file.                                                  |
| `migrate`               |            | Migrates a database from one instance to another.                                        |
| `--storage`             | `-s`       | Specifies the storage type (`local`, `s3`, `ssh`, etc.). Default: `local`.               |
| `--file`                | `-f`       | Defines the backup file name for restoration.                                            |
| `--path`                |            | Sets the storage path (e.g., `/custom_path` for S3 or `/home/foo/backup` for SSH).       |
| `--config`              | `-c`       | Provides a configuration file for multi-database backups (e.g., `/backup/config.yaml`).  |
| `--dbname`              | `-d`       | Specifies the database name to back up or restore.                                       |
| `--port`                | `-p`       | Defines the database port. Default: `3306`.                                              |
| `--disable-compression` |            | Disables compression for database backups.                                               |
| `--cron-expression`     | `-e`       | Schedules backups using a cron expression (e.g., `0 0 * * *` or `@daily`).               |
| `--all-databases`       | `-a`       | Backs up all databases separately (e.g., `backup --all-databases`).                      |
| `--all-in-one`          | `-A`       | Backs up all databases in a single file (e.g., `backup --all-in-one`).                   |
| `--custom-name`         |            | Sets a custom backup name for a one-time backup.                                         |
| `--help`                | `-h`       | Displays the help message and exits.                                                     |
| `--version`             | `-V`       | Shows version information and exits.                                                     |
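As an illustration, these flags combine in a single invocation (a sketch; the network, env file, and bucket path are placeholders):

```bash
docker run --rm --network your_network_name \
  --env-file your-env-file \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup backup -d dbname -s s3 --path /custom_path --cron-expression "@daily"
```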
---

## Environment Variables

| Name                            | Requirement                          | Description                                                                  |
|---------------------------------|--------------------------------------|------------------------------------------------------------------------------|
| `DB_PORT`                       | Optional (default: `3306`)           | Database port number.                                                        |
| `DB_HOST`                       | Required                             | Database host.                                                               |
| `DB_NAME`                       | Optional (if provided via `-d` flag) | Database name.                                                               |
| `DB_USERNAME`                   | Required                             | Database username.                                                           |
| `DB_PASSWORD`                   | Required                             | Database password.                                                           |
| `DB_SSL_CA`                     | Optional                             | Database client CA certificate file.                                         |
| `DB_SSL_MODE`                   | Optional (`0` or `1`, default: `0`)  | Enable CA validation for the database client.                                |
| `AWS_ACCESS_KEY`                | Required for S3 storage              | AWS S3 Access Key.                                                           |
| `AWS_SECRET_KEY`                | Required for S3 storage              | AWS S3 Secret Key.                                                           |
| `AWS_BUCKET_NAME`               | Required for S3 storage              | AWS S3 Bucket Name.                                                          |
| `AWS_REGION`                    | Required for S3 storage              | AWS Region.                                                                  |
| `AWS_DISABLE_SSL`               | Optional                             | Disable SSL for S3 storage.                                                  |
| `AWS_FORCE_PATH_STYLE`          | Optional                             | Force path-style access for S3 storage.                                      |
| `FILE_NAME`                     | Optional (if provided via `--file`)  | File name for restoration (e.g., `.sql`, `.sql.gz`).                         |
| `GPG_PASSPHRASE`                | Optional                             | GPG passphrase for encrypting/decrypting backups.                            |
| `GPG_PUBLIC_KEY`                | Optional                             | GPG public key for encrypting backups (e.g., `/config/public_key.asc`).      |
| `BACKUP_CRON_EXPRESSION`        | Optional (flag `-e`)                 | Cron expression for scheduled backups.                                       |
| `BACKUP_RETENTION_DAYS`         | Optional                             | Delete backups older than the specified number of days.                      |
| `BACKUP_CONFIG_FILE`            | Optional (flag `-c`)                 | Configuration file for multi-database backup (e.g., `/backup/config.yaml`).  |
| `SSH_HOST`                      | Required for SSH storage             | SSH remote hostname or IP.                                                   |
| `SSH_USER`                      | Required for SSH storage             | SSH remote username.                                                         |
| `SSH_PASSWORD`                  | Optional                             | SSH remote user's password.                                                  |
| `SSH_IDENTIFY_FILE`             | Optional                             | SSH remote user's private key.                                               |
| `SSH_PORT`                      | Optional (default: `22`)             | SSH remote server port.                                                      |
| `REMOTE_PATH`                   | Required for SSH/FTP storage         | Remote path (e.g., `/home/toto/backup`).                                     |
| `FTP_HOST`                      | Required for FTP storage             | FTP hostname.                                                                |
| `FTP_PORT`                      | Optional (default: `21`)             | FTP server port.                                                             |
| `FTP_USER`                      | Required for FTP storage             | FTP username.                                                                |
| `FTP_PASSWORD`                  | Required for FTP storage             | FTP user password.                                                           |
| `TARGET_DB_HOST`                | Required for migration               | Target database host.                                                        |
| `TARGET_DB_PORT`                | Optional (default: `3306`)           | Target database port.                                                        |
| `TARGET_DB_NAME`                | Required for migration               | Target database name.                                                        |
| `TARGET_DB_USERNAME`            | Required for migration               | Target database username.                                                    |
| `TARGET_DB_PASSWORD`            | Required for migration               | Target database password.                                                    |
| `TARGET_DB_URL`                 | Optional                             | Target database URL in JDBC URI format.                                      |
| `TG_TOKEN`                      | Required for Telegram notifications  | Telegram token (`BOT-ID:BOT-TOKEN`).                                         |
| `TG_CHAT_ID`                    | Required for Telegram notifications  | Telegram Chat ID.                                                            |
| `TZ`                            | Optional                             | Time zone for scheduling.                                                    |
| `AZURE_STORAGE_CONTAINER_NAME`  | Required for Azure Blob Storage      | Azure storage container name.                                                |
| `AZURE_STORAGE_ACCOUNT_NAME`    | Required for Azure Blob Storage      | Azure storage account name.                                                  |
| `AZURE_STORAGE_ACCOUNT_KEY`     | Required for Azure Blob Storage      | Azure storage account key.                                                   |
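For TLS-verified database connections, the two SSL variables from the table combine like this (a sketch; the CA path is an assumption about where you mount the certificate):

```yaml
    environment:
      - DB_SSL_MODE=1 # Enable CA validation
      - DB_SSL_CA=/config/ca.pem # CA certificate mounted into the container
```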
-The syntax is:
-
-- 1: Minute (0-59)
-- 2: Hours (0-23)
-- 3: Day (0-31)
-- 4: Month (0-12 [12 == December])
-- 5: Day of the week (0-7 [7 or 0 == Sunday])
-
-Easy to remember format:
+---
+
+## Scheduled Backups
+
+### Running in Scheduled Mode
+
+- **Docker**: Use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable to schedule backups.
+- **Kubernetes**: Use a `CronJob` resource for scheduled backups.
+
+### Cron Syntax
+
+The cron syntax consists of five fields:

 ```conf
-* * * * * command to be executed
+* * * * * command
 ```

+| Field        | Description                  | Values |
+|--------------|------------------------------|--------|
+| Minute       | Minute of the hour           | `0-59` |
+| Hour         | Hour of the day              | `0-23` |
+| Day of Month | Day of the month             | `1-31` |
+| Month        | Month of the year            | `1-12` |
+| Day of Week  | Day of the week (0 = Sunday) | `0-7`  |
+
+#### Examples
+
+- **Every 30 minutes**: `*/30 * * * *`
+- **Every hour at minute 0**: `0 * * * *`
+- **Every day at 1:00 AM**: `0 1 * * *`
+
+### Predefined Schedules
+
+| Entry                      | Description                                | Equivalent To |
+|----------------------------|--------------------------------------------|---------------|
+| `@yearly` (or `@annually`) | Run once a year, midnight, Jan. 1st        | `0 0 1 1 *`   |
+| `@monthly`                 | Run once a month, midnight, first of month | `0 0 1 * *`   |
+| `@weekly`                  | Run once a week, midnight between Sat/Sun  | `0 0 * * 0`   |
+| `@daily` (or `@midnight`)  | Run once a day, midnight                   | `0 0 * * *`   |
+| `@hourly`                  | Run once an hour, beginning of hour        | `0 * * * *`   |
+
+### Intervals
+
+You can also schedule backups at fixed intervals using the format:

 ```conf
-- - - - -
-| | | | |
-| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
-| | | ------- Month (1 - 12)
-| | --------- Day of month (1 - 31)
-| ----------- Hour (0 - 23)
-------------- Minute (0 - 59)
+@every <duration>
 ```

-> At every 30th minute
+- Example: `@every 1h30m10s` runs the backup every 1 hour, 30 minutes, and 10 seconds.

-```conf
-*/30 * * * *
-```
-
-> “At minute 0.” every hour
-
-```conf
-0 * * * *
-```
-
-> “At 01:00.” every day
-
-```conf
-0 1 * * *
-```
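The scheduler behind these expressions is `robfig/cron/v3` (added to `go.mod` further down this diff), which accepts all three forms: standard five-field expressions, `@` descriptors, and `@every` intervals. A minimal sketch with placeholder job bodies:

```go
package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	// cron.New() uses the standard five-field parser and also accepts
	// the predefined @descriptors and @every intervals documented above.
	c := cron.New()

	// Standard expression: every day at 1:00 AM.
	if _, err := c.AddFunc("0 1 * * *", func() { log.Println("daily backup") }); err != nil {
		log.Fatal(err)
	}
	// Predefined schedule: midnight every day.
	if _, err := c.AddFunc("@daily", func() { log.Println("midnight backup") }); err != nil {
		log.Fatal(err)
	}
	// Fixed interval: every 1 hour, 30 minutes, and 10 seconds.
	if _, err := c.AddFunc("@every 1h30m10s", func() { log.Println("interval backup") }); err != nil {
		log.Fatal(err)
	}

	c.Start()
	select {} // block forever, as the scheduled mode in pkg/backup.go does
}
```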
@@ -21,6 +21,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## If you are using an S3 alternative such as MinIO and your MinIO instance is not secured, change this to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternatives such as MinIO
     # The mysql-bkup container must be connected to the same network as your database
     networks:
       - web
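`AWS_FORCE_PATH_STYLE` corresponds to the AWS SDK's path-style addressing flag. A sketch of the equivalent client configuration with `aws-sdk-go` (the SDK version pinned in the `go.mod` below); the endpoint, region, and credentials are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{
		Endpoint:    aws.String("http://minio:9000"), // placeholder S3-compatible endpoint
		Region:      aws.String("us-east-1"),          // placeholder region
		Credentials: credentials.NewStaticCredentials("xxxx", "xxxxx", ""),
		// MinIO and most S3 alternatives expect http://host/bucket/key
		// (path style) rather than http://bucket.host/key (virtual-host style).
		S3ForcePathStyle: aws.Bool(true),
		// Only disable TLS for unsecured, private endpoints.
		DisableSSL: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = s3.New(sess) // client is ready for upload/download calls
}
```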
@@ -5,11 +5,13 @@ services:
|
|||||||
# release version instead of using `latest`.
|
# release version instead of using `latest`.
|
||||||
image: jkaninda/mysql-bkup
|
image: jkaninda/mysql-bkup
|
||||||
container_name: mysql-bkup
|
container_name: mysql-bkup
|
||||||
command: backup --dbname database_name --mode scheduled --period "0 1 * * *"
|
command: backup --dbname database_name
|
||||||
volumes:
|
volumes:
|
||||||
- ./backup:/backup
|
- ./backup:/backup
|
||||||
environment:
|
environment:
|
||||||
- DB_PORT=3306
|
- DB_PORT=3306
|
||||||
- DB_HOST=mysql
|
- DB_HOST=mysql
|
||||||
- DB_USERNAME=userName
|
- DB_USERNAME=userName
|
||||||
- DB_PASSWORD=${DB_PASSWORD}
|
- DB_PASSWORD=${DB_PASSWORD}
|
||||||
|
# See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
|
||||||
|
- BACKUP_CRON_EXPRESSION=@daily #@every 5m|@weekly | @monthly |0 1 * * *
|
||||||
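An expression like `@daily` can be validated before the job is registered, which is what the `utils.IsValidCronExpression` check in `pkg/backup.go` further down does. A sketch of such a check using the same `robfig/cron` parser — the helper name here is hypothetical:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

// isValidCronExpression is a hypothetical stand-in for the project's
// utils.IsValidCronExpression helper.
func isValidCronExpression(expr string) bool {
	_, err := cron.ParseStandard(expr)
	return err == nil
}

func main() {
	for _, expr := range []string{"@daily", "@every 5m", "0 1 * * *", "not a cron"} {
		if !isValidCronExpression(expr) {
			fmt.Printf("%q: invalid\n", expr)
			continue
		}
		sched, _ := cron.ParseStandard(expr)
		fmt.Printf("%q: next run %s\n", expr, sched.Next(time.Now()).Format(time.RFC1123))
	}
}
```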
@@ -6,7 +6,7 @@ services:
     # for a list of available releases.
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
-    command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
+    command: backup --storage s3 -d my-database
     environment:
       - DB_PORT=3306
       - DB_HOST=mysql
@@ -21,6 +21,9 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## If you are using an S3 alternative such as MinIO and your MinIO instance is not secured, change this to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternatives such as MinIO
+      # See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
+      - BACKUP_CRON_EXPRESSION=@daily # @every 5m | @weekly | @monthly | 0 1 * * *
     # The mysql-bkup container must be connected to the same network as your database
     networks:
       - web
@@ -15,10 +15,7 @@ spec:
         command:
           - /bin/sh
          - -c
-          - bkup
-          - backup
-          - --storage
-          - s3
+          - backup --storage s3
         resources:
           limits:
             memory: "128Mi"
@@ -47,4 +44,6 @@ spec:
             value: "xxxx"
           - name: AWS_DISABLE_SSL
             value: "false"
+          - name: AWS_FORCE_PATH_STYLE
+            value: "true"
       restartPolicy: Never
go.mod (38 changes)
@@ -1,21 +1,39 @@
 module github.com/jkaninda/mysql-bkup

-go 1.22.5
+go 1.23.2

-require github.com/spf13/pflag v1.0.5
+require github.com/spf13/pflag v1.0.6 // indirect

 require (
-	github.com/aws/aws-sdk-go v1.55.3
-	github.com/bramvdbogaerde/go-scp v1.5.0
-	github.com/hpcloud/tail v1.0.0
-	github.com/spf13/cobra v1.8.0
-	golang.org/x/crypto v0.18.0
+	github.com/go-mail/mail v2.3.1+incompatible
+	github.com/jkaninda/encryptor v0.0.0-20241111100652-926393c9437e
+	github.com/jkaninda/go-storage v0.1.3
+	github.com/jkaninda/go-utils v0.1.1
+	github.com/robfig/cron/v3 v3.0.1
+	github.com/spf13/cobra v1.9.1
+	gopkg.in/yaml.v3 v3.0.1
 )

 require (
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 // indirect
+	github.com/ProtonMail/go-crypto v1.1.0 // indirect
+	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
+	github.com/ProtonMail/gopenpgp/v2 v2.8.0 // indirect
+	github.com/aws/aws-sdk-go v1.55.5 // indirect
+	github.com/bramvdbogaerde/go-scp v1.5.0 // indirect
+	github.com/cloudflare/circl v1.3.7 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jlaffaye/ftp v0.2.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	golang.org/x/sys v0.22.0 // indirect
-	gopkg.in/fsnotify.v1 v1.4.7 // indirect
-	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	golang.org/x/crypto v0.28.0 // indirect
+	golang.org/x/net v0.29.0 // indirect
+	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/text v0.19.0 // indirect
+	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
+	gopkg.in/mail.v2 v2.3.1 // indirect
 )
go.sum (135 changes)
@@ -1,38 +1,129 @@
(machine-generated checksum entries updated to match the new dependency set; full listing omitted)
main.go (31 changes)
@@ -1,12 +1,29 @@
+/*
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
 package main

-//main
-/*****
-* MySQL Backup & Restore
-* @author Jonas Kaninda
-* @license MIT License <https://opensource.org/licenses/MIT>
-* @link https://github.com/jkaninda/mysql-bkup
-**/
 import "github.com/jkaninda/mysql-bkup/cmd"

 func main() {
migrations/init.sql (new file, 35 lines)
@@ -0,0 +1,35 @@
+-- Create the testdb2, testdb3, and fakedb databases
+CREATE DATABASE IF NOT EXISTS testdb2;
+CREATE DATABASE IF NOT EXISTS testdb3;
+CREATE DATABASE IF NOT EXISTS fakedb;
+USE testdb;
+
+-- Create the 'users' table
+CREATE TABLE users (
+    id INT AUTO_INCREMENT PRIMARY KEY,
+    name VARCHAR(100) NOT NULL,
+    email VARCHAR(100) NOT NULL UNIQUE,
+    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Create the 'orders' table
+CREATE TABLE orders (
+    id INT AUTO_INCREMENT PRIMARY KEY,
+    user_id INT NOT NULL,
+    amount DECIMAL(10,2) NOT NULL,
+    status ENUM('pending', 'completed', 'canceled') NOT NULL DEFAULT 'pending',
+    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
+);
+
+-- Insert fake users
+INSERT INTO users (name, email) VALUES
+    ('Alice Smith', 'alice@example.com'),
+    ('Bob Johnson', 'bob@example.com'),
+    ('Charlie Brown', 'charlie@example.com');
+
+-- Insert fake orders
+INSERT INTO orders (user_id, amount, status) VALUES
+    (1, 100.50, 'completed'),
+    (2, 200.75, 'pending'),
+    (3, 50.00, 'canceled');
migrations/test_config.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
+#cronExpression: "@every 20s"
+#backupRescueMode: false
+databases:
+  - host: 127.0.0.1
+    port: 3306
+    name: testdb
+    user: user
+    password: password
+  - name: testdb2
+    # Database credentials from environment variables:
+    # TESTDB2_DB_USERNAME
+    # TESTDB2_DB_PASSWORD
+    # TESTDB2_DB_HOST
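This config file maps onto plain structs. A sketch of how it can be decoded with `gopkg.in/yaml.v3` (in the `go.mod` above); the struct and field names here are illustrative, though `pkg/backup.go` below does reference `CronExpression`, `BackupRescueMode`, `Databases`, and a per-database `Path`:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// Database and Config are hypothetical mirrors of the fields used in
// test_config.yaml; the project's actual types may differ.
type Database struct {
	Host     string `yaml:"host"`
	Port     int    `yaml:"port"`
	Name     string `yaml:"name"`
	User     string `yaml:"user"`
	Password string `yaml:"password"`
	Path     string `yaml:"path"`
}

type Config struct {
	CronExpression   string     `yaml:"cronExpression"`
	BackupRescueMode bool       `yaml:"backupRescueMode"`
	Databases        []Database `yaml:"databases"`
}

func main() {
	data, err := os.ReadFile("migrations/test_config.yaml")
	if err != nil {
		panic(err)
	}
	var conf Config
	if err := yaml.Unmarshal(data, &conf); err != nil {
		panic(err)
	}
	fmt.Printf("%d database(s) configured\n", len(conf.Databases))
}
```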
pkg/azure.go (new file, 128 lines)
@@ -0,0 +1,128 @@
+/*
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package pkg
+
+import (
+	"fmt"
+	"github.com/jkaninda/go-storage/pkg/azure"
+	goutils "github.com/jkaninda/go-utils"
+	"github.com/jkaninda/mysql-bkup/utils"
+
+	"os"
+	"path/filepath"
+	"time"
+)
+
+func azureBackup(db *dbConfig, config *BackupConfig) {
+	utils.Info("Backup database to Azure Blob Storage")
+
+	// Backup database
+	err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
+	if err != nil {
+		recoverMode(err, "Error backing up database")
+		return
+	}
+	finalFileName := config.backupFileName
+	if config.encryption {
+		encryptBackup(config)
+		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
+	}
+	utils.Info("Uploading backup archive to Azure Blob storage ...")
+	utils.Info("Backup name is %s", finalFileName)
+	azureConfig := loadAzureConfig()
+	azureStorage, err := azure.NewStorage(azure.Config{
+		ContainerName: azureConfig.containerName,
+		AccountName:   azureConfig.accountName,
+		AccountKey:    azureConfig.accountKey,
+		RemotePath:    config.remotePath,
+		LocalPath:     tmpPath,
+	})
+	if err != nil {
+		utils.Fatal("Error creating Azure storage: %s", err)
+	}
+	err = azureStorage.Copy(finalFileName)
+	if err != nil {
+		utils.Fatal("Error copying backup file: %s", err)
+	}
+	utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
+	// Get backup info
+	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error: %s", err)
+	}
+	backupSize = fileInfo.Size()
+	// Delete backup file from tmp folder
+	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
+	if err != nil {
+		utils.Error("Error deleting file: %v", err)
+	}
+	if config.prune {
+		err := azureStorage.Prune(config.backupRetention)
+		if err != nil {
+			utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
+		}
+	}
+
+	utils.Info("Backup name is %s", finalFileName)
+	utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
+	utils.Info("Uploading backup archive to Azure Blob storage ... done ")
+
+	duration := goutils.FormatDuration(time.Since(startTime), 0)
+
+	// Send notification
+	utils.NotifySuccess(&utils.NotificationData{
+		File:           finalFileName,
+		BackupSize:     utils.ConvertBytes(uint64(backupSize)),
+		Database:       db.dbName,
+		Storage:        config.storage,
+		BackupLocation: filepath.Join(config.remotePath, finalFileName),
+		Duration:       duration,
+	})
+	// Delete temp
+	deleteTemp()
+	utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
+}
+
+func azureRestore(db *dbConfig, conf *RestoreConfig) {
+	utils.Info("Restore database from Azure Blob storage")
+	azureConfig := loadAzureConfig()
+	azureStorage, err := azure.NewStorage(azure.Config{
+		ContainerName: azureConfig.containerName,
+		AccountName:   azureConfig.accountName,
+		AccountKey:    azureConfig.accountKey,
+		RemotePath:    conf.remotePath,
+		LocalPath:     tmpPath,
+	})
+	if err != nil {
+		utils.Fatal("Error creating Azure storage: %s", err)
+	}
+
+	err = azureStorage.CopyFrom(conf.file)
+	if err != nil {
+		utils.Fatal("Error downloading backup file: %s", err)
+	}
+	RestoreDatabase(db, conf)
+}
pkg/backup.go (587 changes)
@@ -1,280 +1,433 @@
// Package pkg /*
|
// Package internal /
|
||||||
/*
|
/*
|
||||||
Copyright © 2024 Jonas Kaninda
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2023 Jonas Kaninda
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/hpcloud/tail"
|
"github.com/jkaninda/encryptor"
|
||||||
|
"github.com/jkaninda/go-storage/pkg/local"
|
||||||
|
goutils "github.com/jkaninda/go-utils"
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
|
"github.com/robfig/cron/v3"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"log"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func StartBackup(cmd *cobra.Command) {
|
func StartBackup(cmd *cobra.Command) {
|
||||||
_, _ = cmd.Flags().GetString("operation")
|
intro()
|
||||||
//Set env
|
// Initialize backup configs
|
||||||
utils.SetEnv("STORAGE_PATH", storagePath)
|
config := initBackupConfig(cmd)
|
||||||
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
|
// Load backup configuration file
|
||||||
|
configFile, err := loadConfigFile()
|
||||||
//Get flag value and set env
|
if err != nil {
|
||||||
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
|
dbConf = initDbConfig(cmd)
|
||||||
storage = utils.GetEnv(cmd, "storage", "STORAGE")
|
if config.cronExpression == "" {
|
||||||
file = utils.GetEnv(cmd, "file", "FILE_NAME")
|
config.allowCustomName = true
|
||||||
backupRetention, _ := cmd.Flags().GetInt("keep-last")
|
createBackupTask(dbConf, config)
|
||||||
prune, _ := cmd.Flags().GetBool("prune")
|
} else {
|
||||||
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
|
if utils.IsValidCronExpression(config.cronExpression) {
|
||||||
executionMode, _ = cmd.Flags().GetString("mode")
|
scheduledMode(dbConf, config)
|
||||||
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
|
} else {
|
||||||
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
|
utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
|
||||||
|
}
|
||||||
dbConf = getDbConfig(cmd)
|
|
||||||
|
|
||||||
//
|
|
||||||
if gpqPassphrase != "" {
|
|
||||||
encryption = true
|
|
||||||
}
|
|
||||||
|
|
||||||
//Generate file name
|
|
||||||
backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
|
|
||||||
if disableCompression {
|
|
||||||
backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if executionMode == "default" {
|
|
||||||
switch storage {
|
|
||||||
case "s3":
|
|
||||||
s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
|
|
||||||
case "local":
|
|
||||||
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
|
|
||||||
case "ssh", "remote":
|
|
||||||
sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
|
|
||||||
case "ftp":
|
|
||||||
utils.Fatal("Not supported storage type: %s", storage)
|
|
||||||
default:
|
|
||||||
localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} else if executionMode == "scheduled" {
|
|
||||||
scheduledMode(dbConf, storage)
|
|
||||||
} else {
|
} else {
|
||||||
utils.Fatal("Error, unknown execution mode!")
|
startMultiBackup(config, configFile)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run in scheduled mode
|
// scheduledMode Runs backup in scheduled mode
|
||||||
func scheduledMode(db *dbConfig, storage string) {
|
func scheduledMode(db *dbConfig, config *BackupConfig) {
|
||||||
|
|
||||||
fmt.Println()
|
|
||||||
fmt.Println("**********************************")
|
|
||||||
fmt.Println(" Starting MySQL Bkup... ")
|
|
||||||
fmt.Println("***********************************")
|
|
||||||
utils.Info("Running in Scheduled mode")
|
utils.Info("Running in Scheduled mode")
|
||||||
utils.Info("Execution period %s", os.Getenv("BACKUP_CRON_EXPRESSION"))
|
utils.Info("Backup cron expression: %s", config.cronExpression)
|
||||||
utils.Info("Storage type %s ", storage)
|
utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
|
||||||
|
utils.Info("Storage type %s ", config.storage)
|
||||||
//Test database connexion
|
|
||||||
testDatabaseConnection(db)
|
|
||||||
|
|
||||||
|
// Test backup
|
||||||
|
utils.Info("Testing backup configurations...")
|
||||||
|
err := testDatabaseConnection(db)
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error connecting to database: %s", db.dbName)
|
||||||
|
utils.Fatal("Error: %s", err)
|
||||||
|
}
|
||||||
|
utils.Info("Testing backup configurations...done")
|
||||||
utils.Info("Creating backup job...")
|
utils.Info("Creating backup job...")
|
||||||
CreateCrontabScript(disableCompression, storage)
|
// Create a new cron instance
|
||||||
|
c := cron.New()
|
||||||
|
|
||||||
supervisorConfig := "/etc/supervisor/supervisord.conf"
|
_, err = c.AddFunc(config.cronExpression, func() {
|
||||||
|
createBackupTask(db, config)
|
||||||
|
utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
|
||||||
|
|
||||||
// Start Supervisor
|
})
|
||||||
cmd := exec.Command("supervisord", "-c", supervisorConfig)
|
|
||||||
err := cmd.Start()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal(fmt.Sprintf("Failed to start supervisord: %v", err))
|
return
|
||||||
}
|
}
|
||||||
|
// Start the cron scheduler
|
||||||
|
c.Start()
|
||||||
|
utils.Info("Creating backup job...done")
|
||||||
utils.Info("Backup job started")
|
utils.Info("Backup job started")
|
||||||
defer func() {
|
defer c.Stop()
|
||||||
if err := cmd.Process.Kill(); err != nil {
|
select {}
|
||||||
utils.Info("Failed to kill supervisord process: %v", err)
|
}
|
||||||
} else {
|
|
||||||
utils.Info("Supervisor stopped.")
|
// multiBackupTask backup multi database
|
||||||
|
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
|
||||||
|
for _, db := range databases {
|
||||||
|
// Check if path is defined in config file
|
||||||
|
if db.Path != "" {
|
||||||
|
bkConfig.remotePath = db.Path
|
||||||
}
|
}
|
||||||
}()
|
createBackupTask(getDatabase(db), bkConfig)
|
||||||
if _, err := os.Stat(cronLogFile); os.IsNotExist(err) {
|
|
||||||
utils.Fatal(fmt.Sprintf("Log file %s does not exist.", cronLogFile))
|
|
||||||
}
|
}
|
||||||
t, err := tail.TailFile(cronLogFile, tail.Config{Follow: true})
|
}
|
||||||
|
|
||||||
|
// createBackupTask backup task
|
||||||
|
func createBackupTask(db *dbConfig, config *BackupConfig) {
|
||||||
|
if config.all && !config.allInOne {
|
||||||
|
backupAll(db, config)
|
||||||
|
} else {
|
||||||
|
backupTask(db, config)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// backupAll backup all databases
|
||||||
|
func backupAll(db *dbConfig, config *BackupConfig) {
|
||||||
|
databases, err := listDatabases(*db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Failed to tail file: %v", err)
|
utils.Fatal("Error listing databases: %s", err)
|
||||||
|
}
|
||||||
|
for _, dbName := range databases {
|
||||||
|
if dbName == "information_schema" || dbName == "performance_schema" || dbName == "mysql" || dbName == "sys" || dbName == "innodb" || dbName == "Database" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
db.dbName = dbName
|
||||||
|
config.backupFileName = fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
|
||||||
|
backupTask(db, config)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read and print new lines from the log file
|
}
|
||||||
for line := range t.Lines {
|
|
||||||
fmt.Println(line.Text)
|
// backupTask backup task
|
||||||
|
func backupTask(db *dbConfig, config *BackupConfig) {
|
||||||
|
utils.Info("Starting backup task...")
|
||||||
|
startTime = time.Now()
|
||||||
|
prefix := db.dbName
|
||||||
|
if config.all && config.allInOne {
|
||||||
|
prefix = "all_databases"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Generate file name
|
||||||
|
backupFileName := fmt.Sprintf("%s_%s.sql.gz", prefix, time.Now().Format("20060102_150405"))
|
||||||
|
if config.disableCompression {
|
||||||
|
backupFileName = fmt.Sprintf("%s_%s.sql", prefix, time.Now().Format("20060102_150405"))
|
||||||
|
}
|
||||||
|
if config.customName != "" && config.allowCustomName && !config.all {
|
||||||
|
backupFileName = fmt.Sprintf("%s.sql.gz", config.customName)
|
||||||
|
if config.disableCompression {
|
||||||
|
backupFileName = fmt.Sprintf("%s.sql", config.customName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
config.backupFileName = backupFileName
|
||||||
|
s := strings.ToLower(config.storage)
|
||||||
|
switch s {
|
||||||
|
case "local":
|
||||||
|
localBackup(db, config)
|
||||||
|
case "s3":
|
||||||
|
s3Backup(db, config)
|
||||||
|
case "ssh", "remote", "sftp":
|
||||||
|
sshBackup(db, config)
|
||||||
|
case "ftp":
|
||||||
|
ftpBackup(db, config)
|
||||||
|
case "azure":
|
||||||
|
azureBackup(db, config)
|
||||||
|
default:
|
||||||
|
localBackup(db, config)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// startMultiBackup start multi backup
|
||||||
|
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
|
||||||
|
utils.Info("Starting Multi backup task...")
|
||||||
|
conf, err := readConf(configFile)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error reading config file: %s", err)
|
||||||
|
}
|
||||||
|
// Check if cronExpression is defined in config file
|
||||||
|
if conf.CronExpression != "" {
|
||||||
|
bkConfig.cronExpression = conf.CronExpression
|
||||||
|
}
|
||||||
|
if len(conf.Databases) == 0 {
|
||||||
|
utils.Fatal("No databases found")
|
||||||
|
}
|
||||||
|
// Check if cronExpression is defined
|
||||||
|
if bkConfig.cronExpression == "" {
|
||||||
|
multiBackupTask(conf.Databases, bkConfig)
|
||||||
|
} else {
|
||||||
|
backupRescueMode = conf.BackupRescueMode
|
||||||
|
// Check if cronExpression is valid
|
||||||
|
if utils.IsValidCronExpression(bkConfig.cronExpression) {
|
||||||
|
utils.Info("Running backup in Scheduled mode")
|
||||||
|
utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
|
||||||
|
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
|
||||||
|
utils.Info("Storage type %s ", bkConfig.storage)
|
||||||
|
|
||||||
|
// Test backup
|
||||||
|
utils.Info("Testing backup configurations...")
|
||||||
|
for _, db := range conf.Databases {
|
||||||
|
err = testDatabaseConnection(getDatabase(db))
|
||||||
|
if err != nil {
|
||||||
|
recoverMode(err, fmt.Sprintf("Error connecting to database: %s", db.Name))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
utils.Info("Testing backup configurations...done")
|
||||||
|
utils.Info("Creating backup job...")
|
||||||
|
// Create a new cron instance
|
||||||
|
c := cron.New()
|
||||||
|
|
||||||
|
_, err := c.AddFunc(bkConfig.cronExpression, func() {
|
||||||
|
multiBackupTask(conf.Databases, bkConfig)
|
||||||
|
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
|
||||||
|
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Start the cron scheduler
|
||||||
|
c.Start()
|
||||||
|
utils.Info("Creating backup job...done")
|
||||||
|
utils.Info("Backup job started")
|
||||||
|
defer c.Stop()
|
||||||
|
select {}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// BackupDatabase backup database
|
// BackupDatabase backup database
|
||||||
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
|
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression, all, singleFile bool) error {
|
||||||
storagePath = os.Getenv("STORAGE_PATH")
|
storagePath = os.Getenv("STORAGE_PATH")
|
||||||
|
|
||||||
err := utils.CheckEnvVars(dbHVars)
|
|
||||||
if err != nil {
|
|
||||||
utils.Error("Please make sure all required environment variables for database are set")
|
|
||||||
utils.Fatal("Error checking environment variables: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
utils.Info("Starting database backup...")
|
utils.Info("Starting database backup...")
|
||||||
testDatabaseConnection(db)
|
|
||||||
|
|
||||||
// Backup Database database
|
if err := testDatabaseConnection(db); err != nil {
|
||||||
utils.Info("Backing up database...")
|
return fmt.Errorf("database connection failed: %w", err)
|
||||||
|
}
|
||||||
if disableCompression {
|
|
||||||
// Execute mysqldump
|
|
||||||
cmd := exec.Command("mysqldump",
|
|
||||||
"-h", db.dbHost,
|
|
||||||
"-P", db.dbPort,
|
|
||||||
"-u", db.dbUserName,
|
|
||||||
"--password="+db.dbPassword,
|
|
||||||
db.dbName,
|
|
||||||
)
|
|
||||||
output, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// save output
|
|
||||||
file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
_, err = file.Write(output)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
utils.Done("Database has been backed up")
|
|
||||||
|
|
||||||
|
dumpArgs := []string{fmt.Sprintf("--defaults-file=%s", mysqlClientConfig)}
|
||||||
|
if all && singleFile {
|
||||||
|
dumpArgs = append(dumpArgs, "--all-databases", "--single-transaction", "--routines", "--triggers")
|
||||||
} else {
|
} else {
|
||||||
// Execute mysqldump
|
dumpArgs = append(dumpArgs, db.dbName)
|
||||||
cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName)
|
|
||||||
stdout, err := cmd.StdoutPipe()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
gzipCmd := exec.Command("gzip")
|
|
||||||
gzipCmd.Stdin = stdout
|
|
||||||
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
|
|
||||||
gzipCmd.Start()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
if err := gzipCmd.Wait(); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
utils.Done("Database has been backed up")
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
backupPath := filepath.Join(tmpPath, backupFileName)
|
||||||
|
if disableCompression {
|
||||||
|
return runCommandAndSaveOutput("mysqldump", dumpArgs, backupPath)
|
||||||
|
}
|
||||||
|
return runCommandWithCompression("mysqldump", dumpArgs, backupPath)
|
||||||
}
|
}
|
||||||
func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
|
|
||||||
|
// runCommandAndSaveOutput runs a command and saves the output to a file
|
||||||
|
func runCommandAndSaveOutput(command string, args []string, outputPath string) error {
|
||||||
|
cmd := exec.Command(command, args...)
|
||||||
|
output, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to execute %s: %v, output: %s", command, err, string(output))
|
||||||
|
}
|
||||||
|
|
||||||
|
return os.WriteFile(outputPath, output, 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
// runCommandWithCompression runs a command and compresses the output
|
||||||
|
func runCommandWithCompression(command string, args []string, outputPath string) error {
|
||||||
|
cmd := exec.Command(command, args...)
|
||||||
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create stdout pipe: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gzipCmd := exec.Command("gzip")
|
||||||
|
gzipCmd.Stdin = stdout
|
||||||
|
gzipFile, err := os.Create(outputPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create gzip file: %w", err)
|
||||||
|
}
|
||||||
|
defer func(gzipFile *os.File) {
|
||||||
|
err := gzipFile.Close()
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error closing gzip file: %v", err)
|
||||||
|
}
|
||||||
|
}(gzipFile)
|
||||||
|
gzipCmd.Stdout = gzipFile
|
||||||
|
|
||||||
|
if err := gzipCmd.Start(); err != nil {
|
||||||
|
return fmt.Errorf("failed to start gzip: %w", err)
|
||||||
|
}
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return fmt.Errorf("failed to execute %s: %w", command, err)
|
||||||
|
}
|
||||||
|
if err := gzipCmd.Wait(); err != nil {
|
||||||
|
return fmt.Errorf("failed to wait for gzip completion: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
utils.Info("Database has been backed up")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// localBackup backup database to local storage
|
||||||
|
func localBackup(db *dbConfig, config *BackupConfig) {
|
||||||
utils.Info("Backup database to local storage")
|
utils.Info("Backup database to local storage")
|
||||||
BackupDatabase(db, backupFileName, disableCompression)
|
err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
|
||||||
finalFileName := backupFileName
|
if err != nil {
|
||||||
if encrypt {
|
recoverMode(err, "Error backing up database")
|
||||||
encryptBackup(backupFileName)
|
return
|
||||||
finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
|
}
|
||||||
|
finalFileName := config.backupFileName
|
||||||
|
if config.encryption {
|
||||||
|
encryptBackup(config)
|
||||||
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
|
||||||
|
}
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error: %s", err)
|
||||||
|
}
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
|
localStorage := local.NewStorage(local.Config{
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
RemotePath: storagePath,
|
||||||
|
})
|
||||||
|
err = localStorage.Copy(finalFileName)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
}
|
}
|
||||||
utils.Info("Backup name is %s", finalFileName)
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
moveToBackup(finalFileName, storagePath)
|
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
|
||||||
//Delete old backup
|
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
|
||||||
if prune {
|
duration := goutils.FormatDuration(time.Since(startTime), 0)
|
||||||
deleteOldBackup(backupRetention)
|
|
||||||
|
// Send notification
|
||||||
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: utils.ConvertBytes(uint64(backupSize)),
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(storagePath, finalFileName),
|
||||||
|
Duration: duration,
|
||||||
|
})
|
||||||
|
// Delete old backup
|
||||||
|
if config.prune {
|
||||||
|
err = localStorage.Prune(config.backupRetention)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
//Delete temp
|
// Delete temp
|
||||||
deleteTemp()
|
deleteTemp()
|
||||||
|
utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
|
||||||
}
|
}
-func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
-	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
-	s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
-	utils.Info("Backup database to s3 storage")
-	//Backup database
-	BackupDatabase(db, backupFileName, disableCompression)
-	finalFileName := backupFileName
-	if encrypt {
-		encryptBackup(backupFileName)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
-	}
-	utils.Info("Uploading backup archive to remote storage S3 ... ")
-	utils.Info("Backup name is %s", finalFileName)
-	err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
-	if err != nil {
-		utils.Fatal("Error uploading file to S3: %s ", err)
-	}
-	//Delete backup file from tmp folder
-	err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
-	if err != nil {
-		fmt.Println("Error deleting file: ", err)
-	}
-	// Delete old backup
-	if prune {
-		err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
-		if err != nil {
-			utils.Fatal("Error deleting old backup from S3: %s ", err)
-		}
-	}
-	utils.Done("Uploading backup archive to remote storage S3 ... done ")
-	//Delete temp
-	deleteTemp()
-}
+// encryptBackup encrypt backup
+func encryptBackup(config *BackupConfig) {
+	backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
+	outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
+	if err != nil {
+		utils.Fatal("Error reading backup file: %s ", err)
+	}
+	if config.usingKey {
+		utils.Info("Encrypting backup using public key...")
+		pubKey, err := os.ReadFile(config.publicKey)
+		if err != nil {
+			utils.Fatal("Error reading public key: %s ", err)
+		}
+		err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
+		if err != nil {
+			utils.Fatal("Error encrypting backup file: %v ", err)
+		}
+		utils.Info("Encrypting backup using public key...done")
+	} else if config.passphrase != "" {
+		utils.Info("Encrypting backup using passphrase...")
+		err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
+		if err != nil {
+			utils.Fatal("error during encrypting backup %v", err)
+		}
+		utils.Info("Encrypting backup using passphrase...done")
+	}
+}
+
+// listDatabases list all databases
+func listDatabases(db dbConfig) ([]string, error) {
+	databases := []string{}
+	// Create the mysql client config file
+	if err := createMysqlClientConfigFile(db); err != nil {
+		return databases, errors.New(err.Error())
+	}
+	utils.Info("Listing databases...")
+	// Step 1: List all databases
+	cmd := exec.Command("mariadb", fmt.Sprintf("--defaults-file=%s", mysqlClientConfig), "-e", "SHOW DATABASES;")
+	var out bytes.Buffer
+	cmd.Stdout = &out
+	err := cmd.Run()
+	if err != nil {
+		return databases, fmt.Errorf("failed to list databases: %s", err)
+	}
+	// Step 2: Parse the output
+	for _, _db := range strings.Split(out.String(), "\n") {
+		if _db != "" {
+			databases = append(databases, _db)
+		}
+	}
+	return databases, nil
+}
-func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
-	utils.Info("Backup database to Remote server")
-	//Backup database
-	BackupDatabase(db, backupFileName, disableCompression)
-	finalFileName := backupFileName
-	if encrypt {
-		encryptBackup(backupFileName)
-		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
-	}
-	utils.Info("Uploading backup archive to remote storage ... ")
-	utils.Info("Backup name is %s", finalFileName)
-	err := CopyToRemote(finalFileName, remotePath)
-	if err != nil {
-		utils.Fatal("Error uploading file to the remote server: %s ", err)
-	}
-	//Delete backup file from tmp folder
-	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
-	if err != nil {
-		fmt.Println("Error deleting file: ", err)
-	}
-	if prune {
-		//TODO: Delete old backup from remote server
-		utils.Info("Deleting old backup from a remote server is not implemented yet")
-	}
-	utils.Done("Uploading backup archive to remote storage ... done ")
-	//Delete temp
-	deleteTemp()
-}
-func encryptBackup(backupFileName string) {
-	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
-	err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
-	if err != nil {
-		utils.Fatal("Error during encrypting backup %s", err)
-	}
-}
+func recoverMode(err error, msg string) {
+	if err != nil {
+		if backupRescueMode {
+			utils.NotifyError(fmt.Sprintf("%s : %v", msg, err))
+			utils.Error("Error: %s", msg)
+			utils.Error("Backup rescue mode is enabled")
+			utils.Error("Backup will continue")
+		} else {
+			utils.Error("Error: %s", msg)
+			utils.Fatal("Error: %v", err)
+			return
+		}
+	}
+}
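For orientation, a minimal sketch of how the new helpers above could fit together in an all-databases run. The driver loop and the backupSingle name are illustrative assumptions, not part of this diff:

	// Illustrative only: iterate the names returned by listDatabases and
	// hand failures to recoverMode, as the new helpers are designed for.
	databases, err := listDatabases(*dbConf)
	if err != nil {
		recoverMode(err, "Error listing databases")
		return
	}
	for _, name := range databases {
		db := *dbConf
		db.dbName = name
		backupSingle(&db) // backupSingle is a hypothetical per-database wrapper
	}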
pkg/config.go (321 changed lines)
@@ -1,12 +1,50 @@
+/*
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
 package pkg
 
 import (
+	"fmt"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"github.com/spf13/cobra"
 	"os"
+	"strconv"
+	"strings"
 )
+
+type Database struct {
+	Host     string `yaml:"host"`
+	Port     string `yaml:"port"`
+	Name     string `yaml:"name"`
+	User     string `yaml:"user"`
+	Password string `yaml:"password"`
+	Path     string `yaml:"path"`
+}
 type Config struct {
+	CronExpression   string     `yaml:"cronExpression"`
+	BackupRescueMode bool       `yaml:"backupRescueMode"`
+	Databases        []Database `yaml:"databases"`
 }
 
 type dbConfig struct {
@@ -23,13 +61,65 @@ type targetDbConfig struct {
 	targetDbPassword string
 	targetDbName string
 }
+type TgConfig struct {
+	Token  string
+	ChatId string
+}
+type BackupConfig struct {
+	backupFileName     string
+	backupRetention    int
+	disableCompression bool
+	prune              bool
+	remotePath         string
+	encryption         bool
+	usingKey           bool
+	passphrase         string
+	publicKey          string
+	storage            string
+	cronExpression     string
+	all                bool
+	allInOne           bool
+	customName         string
+	allowCustomName    bool
+}
+type FTPConfig struct {
+	host       string
+	user       string
+	password   string
+	port       int
+	remotePath string
+}
+type AzureConfig struct {
+	accountName   string
+	accountKey    string
+	containerName string
+}
-func getDbConfig(cmd *cobra.Command) *dbConfig {
-	//Set env
+// SSHConfig holds the SSH connection details
+type SSHConfig struct {
+	user         string
+	password     string
+	hostName     string
+	port         int
+	identifyFile string
+}
+type AWSConfig struct {
+	endpoint       string
+	bucket         string
+	accessKey      string
+	secretKey      string
+	region         string
+	remotePath     string
+	disableSsl     bool
+	forcePathStyle bool
+}
+
+func initDbConfig(cmd *cobra.Command) *dbConfig {
+	// Set env
 	utils.GetEnv(cmd, "dbname", "DB_NAME")
 	dConf := dbConfig{}
 	dConf.dbHost = os.Getenv("DB_HOST")
-	dConf.dbPort = os.Getenv("DB_PORT")
+	dConf.dbPort = utils.EnvWithDefault("DB_PORT", "3306")
 	dConf.dbName = os.Getenv("DB_NAME")
 	dConf.dbUserName = os.Getenv("DB_USERNAME")
 	dConf.dbPassword = os.Getenv("DB_PASSWORD")
@@ -41,10 +131,224 @@ func getDbConfig(cmd *cobra.Command) *dbConfig {
 	}
 	return &dConf
 }
-func getTargetDbConfig() *targetDbConfig {
+
+func getDatabase(database Database) *dbConfig {
+	// Set default values from environment variables if not provided
+	database.User = getEnvOrDefault(database.User, "DB_USERNAME", database.Name, "")
+	database.Password = getEnvOrDefault(database.Password, "DB_PASSWORD", database.Name, "")
+	database.Host = getEnvOrDefault(database.Host, "DB_HOST", database.Name, "")
+	database.Port = getEnvOrDefault(database.Port, "DB_PORT", database.Name, "3306")
+	return &dbConfig{
+		dbHost:     database.Host,
+		dbPort:     database.Port,
+		dbName:     database.Name,
+		dbUserName: database.User,
+		dbPassword: database.Password,
+	}
+}
+
+// Helper function to get environment variable or use a default value
+func getEnvOrDefault(currentValue, envKey, suffix, defaultValue string) string {
+	// Return the current value if it's already set
+	if currentValue != "" {
+		return currentValue
+	}
+
+	// Check for suffixed or prefixed environment variables if a suffix is provided
+	if suffix != "" {
+		suffixUpper := strings.ToUpper(suffix)
+		envSuffix := os.Getenv(fmt.Sprintf("%s_%s", envKey, suffixUpper))
+		if envSuffix != "" {
+			return envSuffix
+		}
+
+		envPrefix := os.Getenv(fmt.Sprintf("%s_%s", suffixUpper, envKey))
+		if envPrefix != "" {
+			return envPrefix
+		}
+	}
+
+	// Fall back to the default value using a helper function
+	return utils.EnvWithDefault(envKey, defaultValue)
+}
+
+// loadSSHConfig loads the SSH configuration from environment variables
+func loadSSHConfig() (*SSHConfig, error) {
+	utils.GetEnvVariable("SSH_HOST", "SSH_HOST_NAME")
+	sshVars := []string{"SSH_USER", "SSH_HOST", "SSH_PORT", "REMOTE_PATH"}
+	err := utils.CheckEnvVars(sshVars)
+	if err != nil {
+		return nil, fmt.Errorf("error missing environment variables: %w", err)
+	}
+
+	return &SSHConfig{
+		user:         os.Getenv("SSH_USER"),
+		password:     os.Getenv("SSH_PASSWORD"),
+		hostName:     os.Getenv("SSH_HOST"),
+		port:         utils.GetIntEnv("SSH_PORT"),
+		identifyFile: os.Getenv("SSH_IDENTIFY_FILE"),
+	}, nil
+}
+func loadFtpConfig() *FTPConfig {
+	// Initialize data configs
+	fConfig := FTPConfig{}
+	fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
+	fConfig.user = os.Getenv("FTP_USER")
+	fConfig.password = os.Getenv("FTP_PASSWORD")
+	fConfig.port = utils.GetIntEnv("FTP_PORT")
+	fConfig.remotePath = os.Getenv("REMOTE_PATH")
+	err := utils.CheckEnvVars(ftpVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for FTP are set")
+		utils.Fatal("Error missing environment variables: %s", err)
+	}
+	return &fConfig
+}
+func loadAzureConfig() *AzureConfig {
+	// Initialize data configs
+	aConfig := AzureConfig{}
+	aConfig.containerName = os.Getenv("AZURE_STORAGE_CONTAINER_NAME")
+	aConfig.accountName = os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
+	aConfig.accountKey = os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")
+
+	err := utils.CheckEnvVars(azureVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for Azure Blob storage are set")
+		utils.Fatal("Error missing environment variables: %s", err)
+	}
+	return &aConfig
+}
+
+func initAWSConfig() *AWSConfig {
+	// Initialize AWS configs
+	aConfig := AWSConfig{}
+	aConfig.endpoint = utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
+	aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
+	aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
+	aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	aConfig.remotePath = utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
+
+	aConfig.region = os.Getenv("AWS_REGION")
+	disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
+	if err != nil {
+		disableSsl = false
+	}
+	forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
+	if err != nil {
+		forcePathStyle = false
+	}
+	aConfig.disableSsl = disableSsl
+	aConfig.forcePathStyle = forcePathStyle
+	err = utils.CheckEnvVars(awsVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for AWS S3 are set")
+		utils.Fatal("Error checking environment variables: %s", err)
+	}
+	return &aConfig
+}
+func initBackupConfig(cmd *cobra.Command) *BackupConfig {
+	utils.SetEnv("STORAGE_PATH", storagePath)
+	utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
+	utils.GetEnv(cmd, "config", "BACKUP_CONFIG_FILE")
+	utils.GetEnv(cmd, "dbname", "DB_NAME")
+	// Get flag value and set env
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
+	storage = utils.GetEnv(cmd, "storage", "STORAGE")
+	prune := false
+	configFile := os.Getenv("BACKUP_CONFIG_FILE")
+	backupRetention := utils.GetIntEnv("BACKUP_RETENTION_DAYS")
+	if backupRetention > 0 {
+		prune = true
+	}
+	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
+	customName, _ := cmd.Flags().GetString("custom-name")
+	all, _ := cmd.Flags().GetBool("all-databases")
+	allInOne, _ := cmd.Flags().GetBool("all-in-one")
+	if allInOne {
+		all = true
+	}
+	_, _ = cmd.Flags().GetString("mode")
+	passphrase := os.Getenv("GPG_PASSPHRASE")
+	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
+
+	publicKeyFile, err := checkPubKeyFile(os.Getenv("GPG_PUBLIC_KEY"))
+	if err == nil {
+		encryption = true
+		usingKey = true
+	} else if passphrase != "" {
+		encryption = true
+		usingKey = false
+	}
+	dbName := os.Getenv("DB_NAME")
+	if dbName == "" && !all && configFile == "" {
+		utils.Fatal("Database name is required, use DB_NAME environment variable or -d flag")
+	}
+	// Initialize backup configs
+	config := BackupConfig{}
+	config.backupRetention = backupRetention
+	config.disableCompression = disableCompression
+	config.prune = prune
+	config.storage = storage
+	config.encryption = encryption
+	config.remotePath = remotePath
+	config.passphrase = passphrase
+	config.publicKey = publicKeyFile
+	config.usingKey = usingKey
+	config.cronExpression = cronExpression
+	config.all = all
+	config.allInOne = allInOne
+	config.customName = customName
+	return &config
+}
+
+type RestoreConfig struct {
+	s3Path     string
+	remotePath string
+	storage    string
+	file       string
+	bucket     string
+	usingKey   bool
+	passphrase string
+	privateKey string
+}
+
+func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
+	utils.SetEnv("STORAGE_PATH", storagePath)
+	utils.GetEnv(cmd, "path", "REMOTE_PATH")
+
+	// Get flag value and set env
+	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
+	storage = utils.GetEnv(cmd, "storage", "STORAGE")
+	file = utils.GetEnv(cmd, "file", "FILE_NAME")
+	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	passphrase := os.Getenv("GPG_PASSPHRASE")
+	privateKeyFile, err := checkPrKeyFile(os.Getenv("GPG_PRIVATE_KEY"))
+	if err == nil {
+		usingKey = true
+	} else if passphrase != "" {
+		usingKey = false
+	}
+
+	// Initialize restore configs
+	rConfig := RestoreConfig{}
+	rConfig.s3Path = s3Path
+	rConfig.remotePath = remotePath
+	rConfig.storage = storage
+	rConfig.bucket = bucket
+	rConfig.file = file
+	rConfig.storage = storage
+	rConfig.passphrase = passphrase
+	rConfig.usingKey = usingKey
+	rConfig.privateKey = privateKeyFile
+	return &rConfig
+}
+func initTargetDbConfig() *targetDbConfig {
 	tdbConfig := targetDbConfig{}
 	tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
-	tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
+	tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "3306")
 	tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
 	tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
 	tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
@@ -56,3 +360,10 @@ func getTargetDbConfig() *targetDbConfig {
 	}
 	return &tdbConfig
 }
+func loadConfigFile() (string, error) {
+	backupConfigFile, err := checkConfigFile(os.Getenv("BACKUP_CONFIG_FILE"))
+	if err == nil {
+		return backupConfigFile, nil
+	}
+	return "", fmt.Errorf("backup config file not found")
+}
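To make the lookup order in getEnvOrDefault concrete, a small worked example (the database name "shop" is a placeholder):

	// Given a Database entry named "shop" with no explicit user set,
	// getEnvOrDefault("", "DB_USERNAME", "shop", "") checks, in order:
	//   1. DB_USERNAME_SHOP  (suffixed form)
	//   2. SHOP_DB_USERNAME  (prefixed form)
	//   3. DB_USERNAME       (via utils.EnvWithDefault, else the default "")
	user := getEnvOrDefault("", "DB_USERNAME", "shop", "")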
(deleted file)
@@ -1,57 +0,0 @@
-package pkg
-
-import (
-	"github.com/jkaninda/mysql-bkup/utils"
-	"os"
-	"os/exec"
-	"strings"
-)
-
-func Decrypt(inputFile string, passphrase string) error {
-	utils.Info("Decrypting backup file: " + inputFile + " ...")
-	//Create gpg home dir
-	err := utils.MakeDir(gpgHome)
-	if err != nil {
-		return err
-	}
-	utils.SetEnv("GNUPGHOME", gpgHome)
-	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-
-	err = cmd.Run()
-	if err != nil {
-		return err
-	}
-
-	utils.Info("Backup file decrypted successful!")
-	return nil
-}
-
-func Encrypt(inputFile string, passphrase string) error {
-	utils.Info("Encrypting backup...")
-	//Create gpg home dir
-	err := utils.MakeDir(gpgHome)
-	if err != nil {
-		return err
-	}
-	utils.SetEnv("GNUPGHOME", gpgHome)
-	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-
-	err = cmd.Run()
-	if err != nil {
-		return err
-	}
-
-	utils.Info("Backup file encrypted successful!")
-	return nil
-}
-
-func RemoveLastExtension(filename string) string {
-	if idx := strings.LastIndex(filename, "."); idx != -1 {
-		return filename[:idx]
-	}
-	return filename
-}
pkg/helper.go (232 changed lines)
@@ -1,81 +1,49 @@
+/* MIT License, Copyright (c) 2023 Jonas Kaninda (same license header as in pkg/config.go above) */
 package pkg
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
+	goutils "github.com/jkaninda/go-utils"
 	"github.com/jkaninda/mysql-bkup/utils"
+	"gopkg.in/yaml.v3"
 	"os"
 	"os/exec"
 	"path/filepath"
-	"time"
+	"strings"
 )
 
-func copyToTmp(sourcePath string, backupFileName string) {
-	//Copy backup from storage to /tmp
-	err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
-	if err != nil {
-		utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
-	}
-}
-func moveToBackup(backupFileName string, destinationPath string) {
-	//Copy backup from tmp folder to storage destination
-	err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
-	if err != nil {
-		utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
-	}
-	//Delete backup file from tmp folder
-	err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
-	if err != nil {
-		fmt.Println("Error deleting file:", err)
-	}
-	utils.Done("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
-}
-func deleteOldBackup(retentionDays int) {
-	utils.Info("Deleting old backups...")
-	storagePath = os.Getenv("STORAGE_PATH")
-	// Define the directory path
-	backupDir := storagePath + "/"
-	// Get current time
-	currentTime := time.Now()
-	// Delete file
-	deleteFile := func(filePath string) error {
-		err := os.Remove(filePath)
-		if err != nil {
-			utils.Fatal(fmt.Sprintf("Error: %s", err))
-		} else {
-			utils.Done("File %s has been deleted successfully", filePath)
-		}
-		return err
-	}
-
-	// Walk through the directory and delete files modified more than specified days ago
-	err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		// Check if it's a regular file and if it was modified more than specified days ago
-		if fileInfo.Mode().IsRegular() {
-			timeDiff := currentTime.Sub(fileInfo.ModTime())
-			if timeDiff.Hours() > 24*float64(retentionDays) {
-				err := deleteFile(filePath)
-				if err != nil {
-					return err
-				}
-			}
-		}
-		return nil
-	})
-
-	if err != nil {
-		utils.Fatal(fmt.Sprintf("Error: %s", err))
-		return
-	}
-	utils.Done("Deleting old backups...done")
-}
+func intro() {
+	fmt.Println("Starting MYSQL-BKUP...")
+	fmt.Printf("Version: %s\n", utils.Version)
+	fmt.Println("Copyright (c) 2024 Jonas Kaninda")
+}
+
+// copyToTmp copy file to temporary directory
 func deleteTemp() {
 	utils.Info("Deleting %s ...", tmpPath)
 	err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
@@ -99,23 +67,137 @@ func deleteTemp() {
 	}
 }
 
 // TestDatabaseConnection tests the database connection
-func testDatabaseConnection(db *dbConfig) {
+func testDatabaseConnection(db *dbConfig) error {
+	// Create the mysql client config file
+	if err := createMysqlClientConfigFile(*db); err != nil {
+		return errors.New(err.Error())
+	}
 	utils.Info("Connecting to %s database ...", db.dbName)
+	// Set database name for notification error
+	utils.DatabaseName = db.dbName
+
-	cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName, "-e", "quit")
+	// Prepare the command to test the database connection
+	cmd := exec.Command("mariadb", fmt.Sprintf("--defaults-file=%s", mysqlClientConfig), db.dbName, "-e", "quit")
 	// Capture the output
 	var out bytes.Buffer
 	cmd.Stdout = &out
 	cmd.Stderr = &out
-	err := cmd.Run()
-	if err != nil {
-		utils.Error("Error testing database connection: %v\nOutput: %s", err, out.String())
-		os.Exit(1)
+	// Run the command
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("failed to connect to database %s: %v, output: %s", db.dbName, err, out.String())
 	}
-	utils.Info("Successfully connected to %s database", db.dbName)
+
+	utils.Info("Successfully connected to %s database", db.dbName)
+	return nil
+}
+
+// checkPubKeyFile checks gpg public key
+func checkPubKeyFile(pubKey string) (string, error) {
+	// Define possible key file names
+	keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey}
+
+	// Loop through key file names and check if they exist
+	for _, keyFile := range keyFiles {
+		if _, err := os.Stat(keyFile); err == nil {
+			// File exists
+			return keyFile, nil
+		} else if os.IsNotExist(err) {
+			// File does not exist, continue to the next one
+			continue
+		} else {
+			// An unexpected error occurred
+			return "", err
+		}
+	}
+
+	// Return an error if neither file exists
+	return "", fmt.Errorf("no public key file found")
+}
+
+// checkPrKeyFile checks private key
+func checkPrKeyFile(prKey string) (string, error) {
+	// Define possible key file names
+	keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey}
+
+	// Loop through key file names and check if they exist
+	for _, keyFile := range keyFiles {
+		if _, err := os.Stat(keyFile); err == nil {
+			// File exists
+			return keyFile, nil
+		} else if os.IsNotExist(err) {
+			// File does not exist, continue to the next one
+			continue
+		} else {
+			// An unexpected error occurred
+			return "", err
+		}
+	}
+
+	// Return an error if neither file exists
+	return "", fmt.Errorf("no public key file found")
+}
+
+// readConf reads config file and returns Config
+func readConf(configFile string) (*Config, error) {
+	if utils.FileExists(configFile) {
+		buf, err := os.ReadFile(configFile)
+		if err != nil {
+			return nil, err
+		}
+
+		c := &Config{}
+		err = yaml.Unmarshal(buf, c)
+		if err != nil {
+			return nil, fmt.Errorf("in file %q: %w", configFile, err)
+		}
+
+		return c, err
+	}
+	return nil, fmt.Errorf("config file %q not found", configFile)
+}
+
+// checkConfigFile checks config files and returns one config file
+func checkConfigFile(filePath string) (string, error) {
+	// Remove the quotes
+	filePath = strings.Trim(filePath, `"`)
+	// Define possible config file names
+	configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}
+
+	// Loop through config file names and check if they exist
+	for _, configFile := range configFiles {
+		if _, err := os.Stat(configFile); err == nil {
+			// File exists
+			return configFile, nil
+		} else if os.IsNotExist(err) {
+			// File does not exist, continue to the next one
+			continue
+		} else {
+			// An unexpected error occurred
+			return "", err
+		}
+	}
+
+	// Return an error if neither file exists
+	return "", fmt.Errorf("no config file found")
+}
+func RemoveLastExtension(filename string) string {
+	if idx := strings.LastIndex(filename, "."); idx != -1 {
+		return filename[:idx]
+	}
+	return filename
+}
+
+// Create mysql client config file
+func createMysqlClientConfigFile(db dbConfig) error {
+	caCertPath := goutils.GetStringEnvWithDefault("DB_SSL_CA", "/etc/ssl/certs/ca-certificates.crt")
+	sslMode := goutils.GetStringEnvWithDefault("DB_SSL_MODE", "0")
+	// Create the mysql client config file
+	mysqlClientConfigFile := filepath.Join(tmpPath, "my.cnf")
+	mysqlCl := fmt.Sprintf("[client]\nhost=%s\nport=%s\nuser=%s\npassword=%s\nssl-ca=%s\nssl=%s\n", db.dbHost, db.dbPort, db.dbUserName, db.dbPassword, caCertPath, sslMode)
+	if err := os.WriteFile(mysqlClientConfigFile, []byte(mysqlCl), 0644); err != nil {
+		return fmt.Errorf("failed to create mysql client config file: %v", err)
+	}
+	return nil
 }
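As a concrete illustration of what createMysqlClientConfigFile writes, the rendered /tmp/my.cnf for a hypothetical host looks like this (host, user, and password values are placeholders; the ssl-ca and ssl lines show the defaults from DB_SSL_CA and DB_SSL_MODE):

	[client]
	host=db.example.com
	port=3306
	user=backup
	password=secret
	ssl-ca=/etc/ssl/certs/ca-certificates.crt
	ssl=0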
@@ -1,3 +1,27 @@
+/* MIT License, Copyright (c) 2023 Jonas Kaninda (same license header as in pkg/config.go above) */
 package pkg
 
 import (
@@ -8,12 +32,13 @@ import (
 )
 
 func StartMigration(cmd *cobra.Command) {
+	intro()
 	utils.Info("Starting database migration...")
-	//Get DB config
-	dbConf = getDbConfig(cmd)
-	targetDbConf = getTargetDbConfig()
+	// Get DB config
+	dbConf = initDbConfig(cmd)
+	targetDbConf = initTargetDbConfig()
 
-	//Defining the target database variables
+	// Defining the target database variables
 	newDbConfig := dbConfig{}
 	newDbConfig.dbHost = targetDbConf.targetDbHost
 	newDbConfig.dbPort = targetDbConf.targetDbPort
@@ -21,13 +46,18 @@ func StartMigration(cmd *cobra.Command) {
 	newDbConfig.dbUserName = targetDbConf.targetDbUserName
 	newDbConfig.dbPassword = targetDbConf.targetDbPassword
 
-	//Generate file name
+	// Generate file name
 	backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
-	//Backup source Database
-	BackupDatabase(dbConf, backupFileName, true)
-	//Restore source database into target database
+	conf := &RestoreConfig{}
+	conf.file = backupFileName
+	// Backup source Database
+	err := BackupDatabase(dbConf, backupFileName, true, false, false)
+	if err != nil {
+		utils.Fatal("Error backing up database: %s", err)
+	}
+	// Restore source database into target database
 	utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
-	RestoreDatabase(&newDbConfig, backupFileName)
+	RestoreDatabase(&newDbConfig, conf)
 	utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
 	utils.Info("Database migration completed.")
 }
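Since StartMigration pulls both endpoints from the environment, a minimal sketch of the variables it reads via initDbConfig and initTargetDbConfig (values are placeholders):

	DB_HOST=source-db         TARGET_DB_HOST=target-db
	DB_PORT=3306              TARGET_DB_PORT=3306
	DB_NAME=app               TARGET_DB_NAME=app
	DB_USERNAME=user          TARGET_DB_USERNAME=user
	DB_PASSWORD=secret        TARGET_DB_PASSWORD=secret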
|
|||||||
228
pkg/remote.go
Normal file
228
pkg/remote.go
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
/*
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2023 Jonas Kaninda
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/jkaninda/go-storage/pkg/ftp"
|
||||||
|
"github.com/jkaninda/go-storage/pkg/ssh"
|
||||||
|
goutils "github.com/jkaninda/go-utils"
|
||||||
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
|
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func sshBackup(db *dbConfig, config *BackupConfig) {
|
||||||
|
utils.Info("Backup database to Remote server")
|
||||||
|
// Backup database
|
||||||
|
err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
|
||||||
|
if err != nil {
|
||||||
|
recoverMode(err, "Error backing up database")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
finalFileName := config.backupFileName
|
||||||
|
if config.encryption {
|
||||||
|
encryptBackup(config)
|
||||||
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
||||||
|
}
|
||||||
|
utils.Info("Uploading backup archive to remote storage ... ")
|
||||||
|
sshConfig, err := loadSSHConfig()
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error loading ssh config: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sshStorage, err := ssh.NewStorage(ssh.Config{
|
||||||
|
Host: sshConfig.hostName,
|
||||||
|
Port: sshConfig.port,
|
||||||
|
User: sshConfig.user,
|
||||||
|
Password: sshConfig.password,
|
||||||
|
IdentifyFile: sshConfig.identifyFile,
|
||||||
|
RemotePath: config.remotePath,
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error creating SSH storage: %s", err)
|
||||||
|
}
|
||||||
|
err = sshStorage.Copy(finalFileName)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
|
}
|
||||||
|
// Get backup info
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error: %s", err)
|
||||||
|
}
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
|
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
|
||||||
|
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
|
||||||
|
|
||||||
|
// Delete backup file from tmp folder
|
||||||
|
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error deleting file: %v", err)
|
||||||
|
|
||||||
|
}
|
||||||
|
if config.prune {
|
||||||
|
err := sshStorage.Prune(config.backupRetention)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
utils.Info("Uploading backup archive to remote storage ... done ")
|
||||||
|
duration := goutils.FormatDuration(time.Since(startTime), 0)
|
||||||
|
|
||||||
|
// Send notification
|
||||||
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: utils.ConvertBytes(uint64(backupSize)),
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(config.remotePath, finalFileName),
|
||||||
|
Duration: duration,
|
||||||
|
})
|
||||||
|
// Delete temp
|
||||||
|
deleteTemp()
|
||||||
|
utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
|
||||||
|
|
||||||
|
}
|
||||||
|
func remoteRestore(db *dbConfig, conf *RestoreConfig) {
|
||||||
|
utils.Info("Restore database from remote server")
|
||||||
|
sshConfig, err := loadSSHConfig()
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error loading ssh config: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sshStorage, err := ssh.NewStorage(ssh.Config{
|
||||||
|
Host: sshConfig.hostName,
|
||||||
|
Port: sshConfig.port,
|
||||||
|
User: sshConfig.user,
|
||||||
|
Password: sshConfig.password,
|
||||||
|
IdentifyFile: sshConfig.identifyFile,
|
||||||
|
RemotePath: conf.remotePath,
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error creating SSH storage: %s", err)
|
||||||
|
}
|
||||||
|
err = sshStorage.CopyFrom(conf.file)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
|
}
|
||||||
|
RestoreDatabase(db, conf)
|
||||||
|
}
|
||||||
|
func ftpRestore(db *dbConfig, conf *RestoreConfig) {
|
||||||
|
utils.Info("Restore database from FTP server")
|
||||||
|
ftpConfig := loadFtpConfig()
|
||||||
|
ftpStorage, err := ftp.NewStorage(ftp.Config{
|
||||||
|
Host: ftpConfig.host,
|
||||||
|
Port: ftpConfig.port,
|
||||||
|
User: ftpConfig.user,
|
||||||
|
Password: ftpConfig.password,
|
||||||
|
RemotePath: conf.remotePath,
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error creating SSH storage: %s", err)
|
||||||
|
}
|
||||||
|
err = ftpStorage.CopyFrom(conf.file)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
|
}
|
||||||
|
RestoreDatabase(db, conf)
|
||||||
|
}
|
||||||
|
func ftpBackup(db *dbConfig, config *BackupConfig) {
|
||||||
|
utils.Info("Backup database to the remote FTP server")
|
||||||
|
|
||||||
|
// Backup database
|
||||||
|
err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
|
||||||
|
if err != nil {
|
||||||
|
recoverMode(err, "Error backing up database")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
finalFileName := config.backupFileName
|
||||||
|
if config.encryption {
|
||||||
|
encryptBackup(config)
|
||||||
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
||||||
|
}
|
||||||
|
utils.Info("Uploading backup archive to the remote FTP server ... ")
|
||||||
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
|
ftpConfig := loadFtpConfig()
|
||||||
|
ftpStorage, err := ftp.NewStorage(ftp.Config{
|
||||||
|
Host: ftpConfig.host,
|
||||||
|
Port: ftpConfig.port,
|
||||||
|
User: ftpConfig.user,
|
||||||
|
Password: ftpConfig.password,
|
||||||
|
RemotePath: config.remotePath,
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error creating SSH storage: %s", err)
|
||||||
|
}
|
||||||
|
err = ftpStorage.Copy(finalFileName)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
|
}
|
||||||
|
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
|
||||||
|
// Get backup info
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error: %s", err)
|
||||||
|
}
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
|
// Delete backup file from tmp folder
|
||||||
|
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error deleting file: %v", err)
|
||||||
|
|
||||||
|
}
|
||||||
|
if config.prune {
|
||||||
|
err := ftpStorage.Prune(config.backupRetention)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
|
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
|
||||||
|
utils.Info("Uploading backup archive to the remote FTP server ... done ")
|
||||||
|
duration := goutils.FormatDuration(time.Since(startTime), 0)
|
||||||
|
|
||||||
|
// Send notification
|
||||||
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: utils.ConvertBytes(uint64(backupSize)),
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(config.remotePath, finalFileName),
|
||||||
|
Duration: duration,
|
||||||
|
})
|
||||||
|
// Delete temp
|
||||||
|
deleteTemp()
|
||||||
|
utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
|
||||||
|
}
|
||||||
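The four functions above all follow the same go-storage pattern: build a storage from config, then push, pull, or prune. A reduced sketch of that shared shape, with field values elided and the file name and retention value as placeholders (method signatures as used in the diff above):

	// Sketch only: the common shape shared by sshBackup/ftpBackup and the
	// restore variants. Fields are filled exactly as in the code above.
	st, err := ssh.NewStorage(ssh.Config{ /* host, port, user, ... */ })
	if err != nil {
		utils.Fatal("Error creating SSH storage: %s", err)
	}
	_ = st.Copy("backup.sql.gz")     // upload a file from tmpPath to the remote path
	_ = st.CopyFrom("backup.sql.gz") // download a remote file into tmpPath
	_ = st.Prune(7)                  // delete remote backups older than the retention days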
pkg/restore.go (213 changed lines)
@@ -1,7 +1,33 @@
+/* MIT License, Copyright (c) 2023 Jonas Kaninda (same license header as in pkg/config.go above) */
 package pkg
 
 import (
 	"fmt"
+	"github.com/jkaninda/encryptor"
+	"github.com/jkaninda/go-storage/pkg/local"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"github.com/spf13/cobra"
 	"os"
@@ -10,117 +36,118 @@ import (
 )
 
 func StartRestore(cmd *cobra.Command) {
+	intro()
+	dbConf = initDbConfig(cmd)
+	restoreConf := initRestoreConfig(cmd)
+
-	//Set env
-	utils.SetEnv("STORAGE_PATH", storagePath)
-
-	//Get flag value and set env
-	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
-	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
-	storage = utils.GetEnv(cmd, "storage", "STORAGE")
-	file = utils.GetEnv(cmd, "file", "FILE_NAME")
-	executionMode, _ = cmd.Flags().GetString("mode")
-	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
-	dbConf = getDbConfig(cmd)
-
-	switch storage {
-	case "s3":
-		restoreFromS3(dbConf, file, bucket, s3Path)
+	switch restoreConf.storage {
 	case "local":
-		utils.Info("Restore database from local")
-		copyToTmp(storagePath, file)
-		RestoreDatabase(dbConf, file)
+		localRestore(dbConf, restoreConf)
+	case "s3", "S3":
+		s3Restore(dbConf, restoreConf)
-	case "ssh":
-		restoreFromRemote(dbConf, file, remotePath)
+	case "ssh", "SSH", "remote":
+		remoteRestore(dbConf, restoreConf)
-	case "ftp":
-		utils.Fatal("Restore from FTP is not yet supported")
+	case "ftp", "FTP":
+		ftpRestore(dbConf, restoreConf)
+	case "azure":
+		azureRestore(dbConf, restoreConf)
 	default:
-		utils.Info("Restore database from local")
-		copyToTmp(storagePath, file)
-		RestoreDatabase(dbConf, file)
+		localRestore(dbConf, restoreConf)
 	}
 }
+func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
+	utils.Info("Restore database from local")
+	basePath := filepath.Dir(restoreConf.file)
+	fileName := filepath.Base(restoreConf.file)
+	restoreConf.file = fileName
+	if basePath == "" || basePath == "." {
+		basePath = storagePath
+	}
+	localStorage := local.NewStorage(local.Config{
+		RemotePath: basePath,
+		LocalPath:  tmpPath,
+	})
+	err := localStorage.CopyFrom(fileName)
+	if err != nil {
+		utils.Fatal("Error copying backup file: %s", err)
+	}
+	RestoreDatabase(dbConf, restoreConf)
+}
+
-func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
-	utils.Info("Restore database from s3")
-	err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
-	if err != nil {
-		utils.Fatal("Error download file from s3 %s %v", file, err)
-	}
-	RestoreDatabase(db, file)
-}
-func restoreFromRemote(db *dbConfig, file, remotePath string) {
-	utils.Info("Restore database from remote server")
-	err := CopyFromRemote(file, remotePath)
-	if err != nil {
-		utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
-	}
-	RestoreDatabase(db, file)
-}
-
-// RestoreDatabase restore database
-func RestoreDatabase(db *dbConfig, file string) {
-	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
-	if file == "" {
+// RestoreDatabase restores the database from a backup file
+func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
+	if conf.file == "" {
 		utils.Fatal("Error, file required")
 	}
-	err := utils.CheckEnvVars(dbHVars)
+	filePath := filepath.Join(tmpPath, conf.file)
+	rFile, err := os.ReadFile(filePath)
 	if err != nil {
-		utils.Error("Please make sure all required environment variables for database are set")
-		utils.Fatal("Error checking environment variables: %s", err)
+		utils.Fatal("Error reading backup file: %v", err)
 	}
-	extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
+	extension := filepath.Ext(filePath)
+	outputFile := RemoveLastExtension(filePath)
 	if extension == ".gpg" {
-		if gpgPassphrase == "" {
-			utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.")
-		} else {
-			//Decrypt file
-			err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
-			if err != nil {
-				utils.Fatal("Error decrypting file %s %v", file, err)
-			}
-			//Update file name
-			file = RemoveLastExtension(file)
-		}
+		decryptBackup(conf, rFile, outputFile)
 	}
-	if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) {
-		testDatabaseConnection(db)
-		utils.Info("Restoring database...")
-
-		extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
-		// Restore from compressed file / .sql.gz
-		if extension == ".gz" {
-			str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " --password=" + db.dbPassword + " " + db.dbName
-			_, err := exec.Command("bash", "-c", str).Output()
-			if err != nil {
-				utils.Fatal("Error, in restoring the database %v", err)
-			}
-			utils.Info("Restoring database... done")
-			utils.Done("Database has been restored")
-			//Delete temp
-			deleteTemp()
-		} else if extension == ".sql" {
-			//Restore from sql file
-			str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " --password=" + db.dbPassword + " " + db.dbName
-			_, err := exec.Command("bash", "-c", str).Output()
-			if err != nil {
-				utils.Fatal(fmt.Sprintf("Error in restoring the database %s", err))
-			}
-			utils.Info("Restoring database... done")
-			utils.Done("Database has been restored")
-			//Delete temp
-			deleteTemp()
-		} else {
-			utils.Fatal(fmt.Sprintf("Unknown file extension %s", extension))
-		}
-	} else {
-		utils.Fatal(fmt.Sprintf("File not found in %s", fmt.Sprintf("%s/%s", tmpPath, file)))
-	}
-}
+	restorationFile := filepath.Join(tmpPath, conf.file)
+	if !utils.FileExists(restorationFile) {
+		utils.Fatal("File not found: %s", restorationFile)
+	}
+	if err := testDatabaseConnection(db); err != nil {
+		utils.Fatal("Error connecting to the database: %v", err)
+	}
+	utils.Info("Restoring database...")
+	restoreDatabaseFile(db, restorationFile)
+}
+
+func decryptBackup(conf *RestoreConfig, rFile []byte, outputFile string) {
+	if conf.usingKey {
+		utils.Info("Decrypting backup using private key...")
+		prKey, err := os.ReadFile(conf.privateKey)
+		if err != nil {
+			utils.Fatal("Error reading private key: %v", err)
+		}
+		if err := encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase); err != nil {
+			utils.Fatal("Error decrypting backup: %v", err)
+		}
+	} else {
+		if conf.passphrase == "" {
+			utils.Fatal("Passphrase or private key required for GPG file.")
+		}
+		utils.Info("Decrypting backup using passphrase...")
+		if err := encryptor.Decrypt(rFile, outputFile, conf.passphrase); err != nil {
+			utils.Fatal("Error decrypting file: %v", err)
+		}
+		conf.file = RemoveLastExtension(conf.file)
+	}
+}
+
+func restoreDatabaseFile(db *dbConfig, restorationFile string) {
+	extension := filepath.Ext(restorationFile)
+	var cmdStr string
+
+	switch extension {
+	case ".gz":
+		cmdStr = fmt.Sprintf("zcat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName)
+	case ".sql":
+		cmdStr = fmt.Sprintf("cat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName)
+	default:
+		utils.Fatal("Unknown file extension: %s", extension)
+	}
+
+	cmd := exec.Command("sh", "-c", cmdStr)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		utils.Fatal("Error restoring database: %v\nOutput: %s", err, string(output))
+	}
+
+	utils.Info("Database has been restored successfully.")
+	deleteTemp()
+}
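A short worked example of the file-name handling above (the file name is a placeholder): a GPG-encrypted, gzipped dump is decrypted first, and the remaining extension then selects the restore pipeline.

	// "store.sql.gpg" -> decryptBackup writes "store.sql" (RemoveLastExtension
	// strips only the last extension), then restoreDatabaseFile routes on what
	// is left:
	//   .gz  -> zcat <file> | mariadb --defaults-file=... <db>
	//   .sql -> cat  <file> | mariadb --defaults-file=... <db>
	fmt.Println(RemoveLastExtension("store.sql.gpg")) // store.sql
	fmt.Println(filepath.Ext("store.sql.gz"))         // .gz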
|||||||
138
pkg/s3.go
Normal file
138
pkg/s3.go
Normal file
@@ -0,0 +1,138 @@
/*
MIT License

Copyright (c) 2023 Jonas Kaninda

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

package pkg

import (
	"fmt"
	"github.com/jkaninda/go-storage/pkg/s3"
	goutils "github.com/jkaninda/go-utils"
	"github.com/jkaninda/mysql-bkup/utils"

	"os"
	"path/filepath"
	"time"
)

func s3Backup(db *dbConfig, config *BackupConfig) {

	utils.Info("Backup database to s3 storage")
	// Backup database
	err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
	if err != nil {
		recoverMode(err, "Error backing up database")
		return
	}
	finalFileName := config.backupFileName
	if config.encryption {
		encryptBackup(config)
		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
	}
	utils.Info("Uploading backup archive to remote storage S3 ... ")
	awsConfig := initAWSConfig()
	if config.remotePath == "" {
		config.remotePath = awsConfig.remotePath
	}
	utils.Info("Backup name is %s", finalFileName)
	s3Storage, err := s3.NewStorage(s3.Config{
		Endpoint:       awsConfig.endpoint,
		Bucket:         awsConfig.bucket,
		AccessKey:      awsConfig.accessKey,
		SecretKey:      awsConfig.secretKey,
		Region:         awsConfig.region,
		DisableSsl:     awsConfig.disableSsl,
		ForcePathStyle: awsConfig.forcePathStyle,
		RemotePath:     config.remotePath,
		LocalPath:      tmpPath,
	})
	if err != nil {
		utils.Fatal("Error creating s3 storage: %s", err)
	}
	err = s3Storage.Copy(finalFileName)
	if err != nil {
		utils.Fatal("Error copying backup file: %s", err)
	}
	// Get backup info
	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
	if err != nil {
		utils.Error("Error: %s", err)
	}
	backupSize = fileInfo.Size()

	// Delete backup file from tmp folder
	err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
	if err != nil {
		fmt.Println("Error deleting file: ", err)
	}
	// Delete old backup
	if config.prune {
		err := s3Storage.Prune(config.backupRetention)
		if err != nil {
			utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
		}
	}
	utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
	utils.Info("Uploading backup archive to remote storage S3 ... done ")
	duration := goutils.FormatDuration(time.Since(startTime), 0)
	// Send notification
	utils.NotifySuccess(&utils.NotificationData{
		File:           finalFileName,
		BackupSize:     utils.ConvertBytes(uint64(backupSize)),
		Database:       db.dbName,
		Storage:        config.storage,
		BackupLocation: filepath.Join(config.remotePath, finalFileName),
		Duration:       duration,
	})
	// Delete temp
	deleteTemp()
	utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
}

func s3Restore(db *dbConfig, conf *RestoreConfig) {
	utils.Info("Restore database from s3")
	awsConfig := initAWSConfig()
	if conf.remotePath == "" {
		conf.remotePath = awsConfig.remotePath
	}
	s3Storage, err := s3.NewStorage(s3.Config{
		Endpoint:       awsConfig.endpoint,
		Bucket:         awsConfig.bucket,
		AccessKey:      awsConfig.accessKey,
		SecretKey:      awsConfig.secretKey,
		Region:         awsConfig.region,
		DisableSsl:     awsConfig.disableSsl,
		ForcePathStyle: awsConfig.forcePathStyle,
		RemotePath:     conf.remotePath,
		LocalPath:      tmpPath,
	})
	if err != nil {
		utils.Fatal("Error creating s3 storage: %s", err)
	}
	err = s3Storage.CopyFrom(conf.file)
	if err != nil {
		utils.Fatal("Error downloading file from S3 storage: %s", err)
	}
	RestoreDatabase(db, conf)
}
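initAWSConfig is referenced here but defined elsewhere in the package. Judging only from the fields consumed above and from the awsVars list in pkg/var.go, a plausible shape looks like the sketch below; the struct name and the last two environment-variable mappings are assumptions for illustration, not the repository's actual definition.

// Hypothetical reconstruction; only the field set is implied by s3Backup/s3Restore above.
type awsConfigSketch struct {
	endpoint, bucket, accessKey, secretKey, region, remotePath string
	disableSsl, forcePathStyle                                 bool
}

func initAWSConfigSketch() awsConfigSketch {
	return awsConfigSketch{
		endpoint:       os.Getenv("AWS_S3_ENDPOINT"),
		bucket:         os.Getenv("AWS_S3_BUCKET_NAME"),
		accessKey:      os.Getenv("AWS_ACCESS_KEY"),
		secretKey:      os.Getenv("AWS_SECRET_KEY"),
		region:         os.Getenv("AWS_REGION"),
		remotePath:     os.Getenv("AWS_S3_PATH"),                    // assumed variable name
		disableSsl:     os.Getenv("AWS_DISABLE_SSL") == "true",      // assumed parsing
		forcePathStyle: os.Getenv("AWS_FORCE_PATH_STYLE") == "true", // assumed parsing
	}
}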
pkg/scp.go (deleted file)
@@ -1,115 +0,0 @@
package pkg

import (
	"context"
	"errors"
	"fmt"
	"github.com/bramvdbogaerde/go-scp"
	"github.com/bramvdbogaerde/go-scp/auth"
	"github.com/jkaninda/mysql-bkup/utils"
	"golang.org/x/crypto/ssh"
	"os"
	"path/filepath"
)

func CopyToRemote(fileName, remotePath string) error {
	sshUser := os.Getenv("SSH_USER")
	sshPassword := os.Getenv("SSH_PASSWORD")
	sshHostName := os.Getenv("SSH_HOST_NAME")
	sshPort := os.Getenv("SSH_PORT")
	sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")

	err := utils.CheckEnvVars(sshHVars)
	if err != nil {
		utils.Error("Error checking environment variables: %s", err)
		os.Exit(1)
	}

	clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
	if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
		clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
	} else {
		if sshPassword == "" {
			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
		}
		utils.Warn("Accessing the remote server using password, password is not recommended")
		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
	}
	// Create a new SCP client
	client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)

	// Connect to the remote server
	err = client.Connect()
	if err != nil {
		return errors.New("Couldn't establish a connection to the remote server")
	}

	// Open a file
	file, _ := os.Open(filepath.Join(tmpPath, fileName))

	// Close client connection after the file has been copied
	defer client.Close()
	// Close the file after it has been copied
	defer file.Close()
	// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
	err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
	if err != nil {
		fmt.Println("Error while copying file ")
		return err
	}
	return nil
}

func CopyFromRemote(fileName, remotePath string) error {
	sshUser := os.Getenv("SSH_USER")
	sshPassword := os.Getenv("SSH_PASSWORD")
	sshHostName := os.Getenv("SSH_HOST_NAME")
	sshPort := os.Getenv("SSH_PORT")
	sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")

	err := utils.CheckEnvVars(sshHVars)
	if err != nil {
		utils.Error("Error checking environment variables\n: %s", err)
		os.Exit(1)
	}

	clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
	if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
		clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
	} else {
		if sshPassword == "" {
			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
		}
		utils.Warn("Accessing the remote server using password, password is not recommended")
		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
	}
	// Create a new SCP client
	client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)

	// Connect to the remote server
	err = client.Connect()
	if err != nil {
		return errors.New("Couldn't establish a connection to the remote server\n")
	}
	// Close client connection after the file has been copied
	defer client.Close()
	file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
	if err != nil {
		fmt.Println("Couldn't open the output file")
	}
	defer file.Close()

	// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
	err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))

	if err != nil {
		fmt.Println("Error while copying file ", err)
		return err
	}
	return nil
}
@@ -1,69 +0,0 @@
package pkg

/*
Copyright © 2024 Jonas Kaninda
*/
import (
	"fmt"
	"github.com/jkaninda/mysql-bkup/utils"
	"os"
	"os/exec"
)

func CreateCrontabScript(disableCompression bool, storage string) {
	// task := "/usr/local/bin/backup_cron.sh"
	touchCmd := exec.Command("touch", backupCronFile)
	if err := touchCmd.Run(); err != nil {
		utils.Fatal("Error creating file %s: %v\n", backupCronFile, err)
	}
	var disableC = ""
	if disableCompression {
		disableC = "--disable-compression"
	}

	scriptContent := fmt.Sprintf(`#!/usr/bin/env bash
set -e
/usr/local/bin/mysql-bkup backup --dbname %s --port %s --storage %s %v
`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), storage, disableC)

	if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
		utils.Fatal("Error writing to %s: %v\n", backupCronFile, err)
	}

	chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
	if err := chmodCmd.Run(); err != nil {
		utils.Fatal("Error changing permissions of %s: %v\n", backupCronFile, err)
	}

	lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
	if err := lnCmd.Run(); err != nil {
		utils.Fatal("Error creating symbolic link: %v\n", err)
	}

	touchLogCmd := exec.Command("touch", cronLogFile)
	if err := touchLogCmd.Run(); err != nil {
		utils.Fatal("Error creating file %s: %v\n", cronLogFile, err)
	}

	cronJob := "/etc/cron.d/backup_cron"
	touchCronCmd := exec.Command("touch", cronJob)
	if err := touchCronCmd.Run(); err != nil {
		utils.Fatal("Error creating file %s: %v\n", cronJob, err)
	}

	cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
`, os.Getenv("BACKUP_CRON_EXPRESSION"), cronLogFile)

	if err := utils.WriteToFile(cronJob, cronContent); err != nil {
		utils.Fatal("Error writing to %s: %v\n", cronJob, err)
	}
	utils.ChangePermission("/etc/cron.d/backup_cron", 0644)

	crontabCmd := exec.Command("crontab", "/etc/cron.d/backup_cron")
	if err := crontabCmd.Run(); err != nil {
		utils.Fatal("Error updating crontab: ", err)
	}
	utils.Info("Backup job created.")
}
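As an illustration of what this removed CreateCrontabScript wrote, here are the two generated files with example values substituted into the fmt.Sprintf templates above (DB_NAME=mydb, DB_PORT=3306, storage "s3", compression disabled, BACKUP_CRON_EXPRESSION="0 1 * * *", and the old cronLogFile constant /var/log/mysql-bkup.log; all values are examples only):

/usr/local/bin/backup_cron.sh:
	#!/usr/bin/env bash
	set -e
	/usr/local/bin/mysql-bkup backup --dbname mydb --port 3306 --storage s3 --disable-compression

/etc/cron.d/backup_cron:
	0 1 * * * root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> /var/log/mysql-bkup.log"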
pkg/var.go
@@ -1,19 +1,52 @@
+/* MIT License header (as in pkg/s3.go above) */
+
 package pkg
 
-const cronLogFile = "/var/log/mysql-bkup.log"
+import (
+	"path/filepath"
+	"time"
+)
+
 const tmpPath = "/tmp/backup"
-const backupCronFile = "/usr/local/bin/backup_cron.sh"
-const algorithm = "aes256"
-const gpgHome = "gnupg"
+const gpgHome = "/config/gnupg"
 const gpgExtension = "gpg"
+const timeFormat = "2006-01-02 at 15:04:05"
 
 var (
 	storage     = "local"
 	file        = ""
-	executionMode = "default"
 	storagePath = "/backup"
+	workingDir  = "/config"
 	disableCompression = false
 	encryption         = false
+	usingKey           = false
+	backupSize   int64 = 0
+	startTime          = time.Now()
+	backupRescueMode   = false
+	mysqlClientConfig  = filepath.Join(tmpPath, "my.cnf")
 )
 
 // dbHVars Required environment variables for database
@@ -21,11 +54,9 @@ var dbHVars = []string{
 	"DB_HOST",
 	"DB_PASSWORD",
 	"DB_USERNAME",
-	"DB_NAME",
 }
 var tdbRVars = []string{
 	"TARGET_DB_HOST",
-	"TARGET_DB_PORT",
 	"TARGET_DB_NAME",
 	"TARGET_DB_USERNAME",
 	"TARGET_DB_PASSWORD",
@@ -34,10 +65,23 @@ var tdbRVars = []string{
 var dbConf *dbConfig
 var targetDbConf *targetDbConfig
 
-// sshHVars Required environment variables for SSH remote server storage
-var sshHVars = []string{
-	"SSH_USER",
-	"SSH_REMOTE_PATH",
-	"SSH_HOST_NAME",
-	"SSH_PORT",
+var ftpVars = []string{
+	"FTP_HOST_NAME",
+	"FTP_USER",
+	"FTP_PASSWORD",
+	"FTP_PORT",
+}
+var azureVars = []string{
+	"AZURE_STORAGE_CONTAINER_NAME",
+	"AZURE_STORAGE_ACCOUNT_NAME",
+	"AZURE_STORAGE_ACCOUNT_KEY",
+}
+
+// AwsVars Required environment variables for AWS S3 storage
+var awsVars = []string{
+	"AWS_S3_ENDPOINT",
+	"AWS_S3_BUCKET_NAME",
+	"AWS_ACCESS_KEY",
+	"AWS_SECRET_KEY",
+	"AWS_REGION",
 }
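These variable groups feed utils.CheckEnvVars (its definition appears in utils/utils.go further down); a minimal usage sketch, with the call site assumed for illustration:

// Validate FTP settings before attempting an FTP transfer (hypothetical call site in package pkg).
if err := utils.CheckEnvVars(ftpVars); err != nil {
	utils.Fatal("Required FTP environment variables are missing: %v", err)
}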
templates/email-error.tmpl (new file)
@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>🔴 Urgent: Database Backup Failure</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            background-color: #f8f9fa;
            color: #333;
            margin: 0;
            padding: 20px;
        }
        h2 {
            color: #d9534f;
        }
        .details {
            background-color: #ffffff;
            border: 1px solid #ddd;
            padding: 15px;
            border-radius: 5px;
            margin-top: 10px;
        }
        .details ul {
            list-style-type: none;
            padding: 0;
        }
        .details li {
            margin: 5px 0;
        }
        a {
            color: #0275d8;
            text-decoration: none;
        }
        a:hover {
            text-decoration: underline;
        }
        footer {
            margin-top: 20px;
            font-size: 0.9em;
            color: #6c757d;
        }
    </style>
</head>
<body>
<h2>🔴 Urgent: Database Backup Failure Notification</h2>
<p>Hi,</p>
<p>An error occurred during the database backup process. Please review the details below and take the necessary actions:</p>

<div class="details">
    <h3>Failure Details:</h3>
    <ul>
        <li><strong>Database Name:</strong> {{.DatabaseName}}</li>
        <li><strong>Date:</strong> {{.EndTime}}</li>
        <li><strong>Backup Reference:</strong> {{.BackupReference}}</li>
        <li><strong>Error Message:</strong> {{.Error}}</li>
    </ul>
</div>

<p>We recommend investigating the issue as soon as possible to prevent potential data loss or service disruptions.</p>

<p>For more information, visit the <a href="https://jkaninda.github.io/mysql-bkup">mysql-bkup documentation</a>.</p>

<footer>
    © 2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a> | Automated Backup System
</footer>
</body>
</html>
templates/email.tmpl (new file)
@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>✅ Database Backup Successful – {{.Database}}</title>
    <style>
        /* Identical stylesheet to email-error.tmpl above, except h2 uses the
           success color #5cb85c instead of #d9534f. */
    </style>
</head>
<body>
<h2>✅ Database Backup Successful</h2>
<p>Hi,</p>
<p>The backup process for the <strong>{{.Database}}</strong> database was successfully completed. Please find the details below:</p>

<div class="details">
    <h3>Backup Details:</h3>
    <ul>
        <li><strong>Database Name:</strong> {{.Database}}</li>
        <li><strong>Backup Duration:</strong> {{.Duration}}</li>
        <li><strong>Backup Storage:</strong> {{.Storage}}</li>
        <li><strong>Backup Location:</strong> {{.BackupLocation}}</li>
        <li><strong>Backup Size:</strong> {{.BackupSize}}</li>
        <li><strong>Backup Reference:</strong> {{.BackupReference}}</li>
    </ul>
</div>

<p>You can access the backup at the specified location if needed. Thank you for using <a href="https://jkaninda.github.io/mysql-bkup/">mysql-bkup</a>.</p>

<footer>
    © 2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a> | Automated Backup System
</footer>
</body>
</html>
templates/telegram-error.tmpl (new file)
@@ -0,0 +1,11 @@
🔴 Urgent: Database Backup Failure Notification

Hi,
An error occurred during the database backup process.
Please review the details below and take the necessary actions:
Failure Details:
- Database Name: {{.DatabaseName}}
- Date: {{.EndTime}}
- Backup Reference: {{.BackupReference}}
- Error Message: {{.Error}}
We recommend investigating the issue as soon as possible to prevent potential data loss or service disruptions.
templates/telegram.tmpl (new file)
@@ -0,0 +1,15 @@
✅ Database Backup Successful

Hi,
The backup process for the {{.Database}} database was successfully completed.
Please find the details below:

Backup Details:
- Database Name: {{.Database}}
- Backup Duration: {{.Duration}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}}
- Backup Reference: {{.BackupReference}}

You can access the backup at the specified location if needed.
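These placeholders are filled with Go's template machinery; utils/notification.go (below) does this via parseTemplate. A self-contained sketch of the same mechanism, with inline template text standing in for the .tmpl files (the notification code itself uses html/template; text/template suffices for this plain-text illustration):

package main

import (
	"os"
	"text/template"
)

type notification struct{ Database, Duration string }

func main() {
	// Inline stand-in for telegram.tmpl-style placeholders.
	tmpl := template.Must(template.New("tg").Parse(
		"✅ Database Backup Successful\n- Database Name: {{.Database}}\n- Backup Duration: {{.Duration}}\n"))
	_ = tmpl.Execute(os.Stdout, notification{Database: "mydb", Duration: "12s"})
}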
utils/config.go (new file)
@@ -0,0 +1,95 @@
/* MIT License header (as in pkg/s3.go above) */

package utils

import "os"

type MailConfig struct {
	MailHost     string
	MailPort     int
	MailUserName string
	MailPassword string
	MailTo       string
	MailFrom     string
	SkipTls      bool
}
type NotificationData struct {
	File            string
	BackupSize      string
	Database        string
	Duration        string
	Storage         string
	BackupLocation  string
	BackupReference string
}
type ErrorMessage struct {
	Database        string
	EndTime         string
	Error           string
	BackupReference string
	DatabaseName    string
}

// loadMailConfig gets mail environment variables and returns MailConfig
func loadMailConfig() *MailConfig {
	return &MailConfig{
		MailHost:     os.Getenv("MAIL_HOST"),
		MailPort:     GetIntEnv("MAIL_PORT"),
		MailUserName: os.Getenv("MAIL_USERNAME"),
		MailPassword: os.Getenv("MAIL_PASSWORD"),
		MailTo:       os.Getenv("MAIL_TO"),
		MailFrom:     os.Getenv("MAIL_FROM"),
		SkipTls:      os.Getenv("MAIL_SKIP_TLS") == "false",
	}
}

// TimeFormat returns the format of the time
func TimeFormat() string {
	format := os.Getenv("TIME_FORMAT")
	if format == "" {
		return "2006-01-02 at 15:04:05"
	}
	return format
}

func backupReference() string {
	return os.Getenv("BACKUP_REFERENCE")
}

const templatePath = "/config/templates"

var DatabaseName = ""
var vars = []string{
	"TG_TOKEN",
	"TG_CHAT_ID",
}
var mailVars = []string{
	"MAIL_HOST",
	"MAIL_PORT",
	"MAIL_FROM",
	"MAIL_TO",
}
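A small usage sketch (example values only, and assumed to live inside package utils since loadMailConfig is unexported; GetIntEnv is defined in utils/utils.go below):

os.Setenv("MAIL_HOST", "smtp.example.com")
os.Setenv("MAIL_PORT", "587")
os.Setenv("MAIL_SKIP_TLS", "false") // note: "false" sets SkipTls to true as the comparison above is written
cfg := loadMailConfig()
fmt.Printf("%s:%d skipTLS=%v\n", cfg.MailHost, cfg.MailPort, cfg.SkipTls) // smtp.example.com:587 skipTLS=true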
@@ -1,10 +1,35 @@
+/* MIT License header (as in pkg/s3.go above) */
+
 package utils
 
-const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
-	"bkup restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
-const BackupExample = "mysql-bkup backup --dbname database --disable-compression\n" +
-	"mysql-bkup backup --dbname database --storage s3 --path /custom-path --disable-compression"
+const RestoreExample = "restore --dbname database --file db_20231219_022941.sql.gz\n" +
+	"restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
+const BackupExample = "backup --dbname database --disable-compression\n" +
+	"backup --dbname database --storage s3 --path /custom-path --disable-compression"
 
 const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
-	"mysql-bkup backup --dbname database --storage s3 --path /custom-path\n" +
-	"mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz"
+	"backup --dbname database --storage s3 --path /custom-path\n" +
+	"restore --dbname database --file db_20231219_022941.sql.gz"
+const traceLog = "trace"
utils/logger.go
@@ -1,56 +1,103 @@
+/* MIT License header (as in pkg/s3.go above) */
+
 package utils
 
 import (
 	"fmt"
+	"log"
 	"os"
-	"time"
+	"runtime"
+	"strings"
 )
 
-var currentTime = time.Now().Format("2006/01/02 15:04:05")
-
-func Info(msg string, args ...any) {
-	formattedMessage := fmt.Sprintf(msg, args...)
-	if len(args) == 0 {
-		fmt.Printf("%s INFO: %s\n", currentTime, msg)
-	} else {
-		fmt.Printf("%s INFO: %s\n", currentTime, formattedMessage)
-	}
+// Info returns info log
+func Info(msg string, args ...interface{}) {
+	log.SetOutput(getStd("/dev/stdout"))
+	logWithCaller("INFO", msg, args...)
 }
 
-// Warn warning message
-func Warn(msg string, args ...any) {
-	formattedMessage := fmt.Sprintf(msg, args...)
-	if len(args) == 0 {
-		fmt.Printf("%s WARN: %s\n", currentTime, msg)
-	} else {
-		fmt.Printf("%s WARN: %s\n", currentTime, formattedMessage)
-	}
-}
-
-func Error(msg string, args ...any) {
-	formattedMessage := fmt.Sprintf(msg, args...)
-	if len(args) == 0 {
-		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
-	} else {
-		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
-	}
-}
-
-func Done(msg string, args ...any) {
-	formattedMessage := fmt.Sprintf(msg, args...)
-	if len(args) == 0 {
-		fmt.Printf("%s INFO: %s\n", currentTime, msg)
-	} else {
-		fmt.Printf("%s INFO: %s\n", currentTime, formattedMessage)
-	}
+// Warn returns warning log
+func Warn(msg string, args ...interface{}) {
+	log.SetOutput(getStd("/dev/stdout"))
+	logWithCaller("WARN", msg, args...)
 }
 
-// Fatal logs an error message and exits the program
-func Fatal(msg string, args ...any) {
-	// Fatal logs an error message and exits the program.
-	formattedMessage := fmt.Sprintf(msg, args...)
-	if len(args) == 0 {
-		fmt.Printf("%s ERROR: %s\n", currentTime, msg)
-	} else {
-		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
-	}
+// Error logs error messages
+func Error(msg string, args ...interface{}) {
+	log.SetOutput(getStd("/dev/stderr"))
+	logWithCaller("ERROR", msg, args...)
+}
+
+func Fatal(msg string, args ...interface{}) {
+	log.SetOutput(os.Stdout)
+	// Format message if there are additional arguments
+	formattedMessage := msg
+	if len(args) > 0 {
+		formattedMessage = fmt.Sprintf(msg, args...)
+	}
+	logWithCaller("ERROR", msg, args...)
+	NotifyError(formattedMessage)
 	os.Exit(1)
 }
+
+// Helper function to format and log messages with file and line number
+func logWithCaller(level, msg string, args ...interface{}) {
+	// Format message if there are additional arguments
+	formattedMessage := msg
+	if len(args) > 0 {
+		formattedMessage = fmt.Sprintf(msg, args...)
+	}
+
+	// Get the caller's file and line number (skip 2 frames)
+	_, file, line, ok := runtime.Caller(2)
+	if !ok {
+		file = "unknown"
+		line = 0
+	}
+	// Log message with caller information if GOMA_LOG_LEVEL is trace
+	if strings.ToLower(level) != "off" {
+		if strings.ToLower(level) == traceLog {
+			log.Printf("%s: %s (File: %s, Line: %d)\n", level, formattedMessage, file, line)
+		} else {
+			log.Printf("%s: %s\n", level, formattedMessage)
+		}
+	}
+}
+
+func getStd(out string) *os.File {
+	switch out {
+	case "/dev/stdout":
+		return os.Stdout
+	case "/dev/stderr":
+		return os.Stderr
+	case "/dev/stdin":
+		return os.Stdin
+	default:
+		return os.Stdout
+	}
+}
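Under the new logger every level funnels through logWithCaller, and Fatal additionally fires NotifyError before exiting. A minimal call and its approximate output (the timestamp prefix comes from the standard log package):

utils.Info("Backup name is %s", "db_20250101.sql.gz")
// 2025/01/02 15:04:05 INFO: Backup name is db_20250101.sql.gz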
utils/notification.go (new file)
@@ -0,0 +1,179 @@
/* MIT License header (as in pkg/s3.go above) */

package utils

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"github.com/go-mail/mail"
	"html/template"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"
)

func parseTemplate[T any](data T, fileName string) (string, error) {
	// Open the file
	tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName))
	if err != nil {
		return "", err
	}

	var buf bytes.Buffer
	if err = tmpl.Execute(&buf, data); err != nil {
		return "", err
	}

	return buf.String(), nil
}

func SendEmail(subject, body string) error {
	Info("Start sending email notification....")
	config := loadMailConfig()
	emails := strings.Split(config.MailTo, ",")
	m := mail.NewMessage()
	m.SetHeader("From", config.MailFrom)
	m.SetHeader("To", emails...)
	m.SetHeader("Subject", subject)
	m.SetBody("text/html", body)
	d := mail.NewDialer(config.MailHost, config.MailPort, config.MailUserName, config.MailPassword)
	d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}

	if err := d.DialAndSend(m); err != nil {
		Error("Error could not send email : %v", err)
		return err
	}
	Info("Email notification has been sent")
	return nil
}

func sendMessage(msg string) error {
	Info("Sending Telegram notification... ")
	chatId := os.Getenv("TG_CHAT_ID")
	body, _ := json.Marshal(map[string]string{
		"chat_id": chatId,
		"text":    msg,
	})
	url := fmt.Sprintf("%s/sendMessage", getTgUrl())
	// Create an HTTP post request
	request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
	if err != nil {
		panic(err)
	}
	request.Header.Add("Content-Type", "application/json")
	client := &http.Client{}
	response, err := client.Do(request)
	if err != nil {
		return err
	}
	code := response.StatusCode
	if code == 200 {
		Info("Telegram notification has been sent")
		return nil
	} else {
		body, _ := io.ReadAll(response.Body)
		Error("Error could not send message, error: %s", string(body))
		return fmt.Errorf("error could not send message %s", string(body))
	}
}

func NotifySuccess(notificationData *NotificationData) {
	notificationData.BackupReference = backupReference()
	// Email notification
	err := CheckEnvVars(mailVars)
	if err == nil {
		body, err := parseTemplate(*notificationData, "email.tmpl")
		if err != nil {
			Error("Could not parse email template: %v", err)
		}
		err = SendEmail(fmt.Sprintf("✅ Database Backup Notification – %s", notificationData.Database), body)
		if err != nil {
			Error("Could not send email: %v", err)
		}
	}
	// Telegram notification
	err = CheckEnvVars(vars)
	if err == nil {
		message, err := parseTemplate(*notificationData, "telegram.tmpl")
		if err != nil {
			Error("Could not parse telegram template: %v", err)
		}

		err = sendMessage(message)
		if err != nil {
			Error("Could not send Telegram message: %v", err)
		}
	}
}

func NotifyError(error string) {
	// Email notification
	err := CheckEnvVars(mailVars)
	if err == nil {
		body, err := parseTemplate(ErrorMessage{
			Error:           error,
			EndTime:         time.Now().Format(TimeFormat()),
			BackupReference: os.Getenv("BACKUP_REFERENCE"),
			DatabaseName:    DatabaseName,
		}, "email-error.tmpl")
		if err != nil {
			Error("Could not parse error template: %v", err)
		}
		err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
		if err != nil {
			Error("Could not send email: %v", err)
		}
	}
	// Telegram notification
	err = CheckEnvVars(vars)
	if err == nil {
		message, err := parseTemplate(ErrorMessage{
			Error:           error,
			EndTime:         time.Now().Format(TimeFormat()),
			BackupReference: os.Getenv("BACKUP_REFERENCE"),
			DatabaseName:    DatabaseName,
		}, "telegram-error.tmpl")
		if err != nil {
			Error("Could not parse error template: %v", err)
		}

		err = sendMessage(message)
		if err != nil {
			Error("Could not send telegram message: %v", err)
		}
	}
}

func getTgUrl() string {
	return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
}
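sendMessage above targets the Telegram Bot API's sendMessage method. A stand-alone equivalent of the request it builds (token and chat id are placeholders, not real credentials):

body, _ := json.Marshal(map[string]string{"chat_id": "123456789", "text": "✅ Backup finished"})
resp, err := http.Post(
	"https://api.telegram.org/bot<TG_TOKEN>/sendMessage", // placeholder; the real token comes from TG_TOKEN
	"application/json",
	bytes.NewBuffer(body),
)
if err == nil {
	defer resp.Body.Close()
}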
utils/s3.go (deleted file)
@@ -1,169 +0,0 @@
package utils

import (
	"bytes"
	"fmt"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"time"
)

// CreateSession creates a new AWS session
func CreateSession() (*session.Session, error) {
	// AwsVars Required environment variables for AWS S3 storage
	var awsVars = []string{
		"AWS_S3_ENDPOINT",
		"AWS_S3_BUCKET_NAME",
		"AWS_ACCESS_KEY",
		"AWS_SECRET_KEY",
		"AWS_REGION",
		"AWS_REGION",
		"AWS_REGION",
	}

	endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
	accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
	secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
	_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")

	region := os.Getenv("AWS_REGION")
	awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
	if err != nil {
		Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
	}

	err = CheckEnvVars(awsVars)
	if err != nil {
		Fatal("Error checking environment variables\n: %s", err)
	}
	// S3 Config
	s3Config := &aws.Config{
		Credentials:      credentials.NewStaticCredentials(accessKey, secretKey, ""),
		Endpoint:         aws.String(endPoint),
		Region:           aws.String(region),
		DisableSSL:       aws.Bool(awsDisableSsl),
		S3ForcePathStyle: aws.Bool(true),
	}
	return session.NewSession(s3Config)
}

// UploadFileToS3 uploads a file to S3 with a given prefix
func UploadFileToS3(filePath, key, bucket, prefix string) error {
	sess, err := CreateSession()
	if err != nil {
		return err
	}

	svc := s3.New(sess)

	file, err := os.Open(filepath.Join(filePath, key))
	if err != nil {
		return err
	}
	defer file.Close()

	fileInfo, err := file.Stat()
	if err != nil {
		return err
	}

	objectKey := filepath.Join(prefix, key)

	buffer := make([]byte, fileInfo.Size())
	file.Read(buffer)
	fileBytes := bytes.NewReader(buffer)
	fileType := http.DetectContentType(buffer)

	_, err = svc.PutObject(&s3.PutObjectInput{
		Bucket:        aws.String(bucket),
		Key:           aws.String(objectKey),
		Body:          fileBytes,
		ContentLength: aws.Int64(fileInfo.Size()),
		ContentType:   aws.String(fileType),
	})
	if err != nil {
		return err
	}

	return nil
}
func DownloadFile(destinationPath, key, bucket, prefix string) error {
	sess, err := CreateSession()
	if err != nil {
		return err
	}
	Info("Download backup from S3 storage...")
	file, err := os.Create(filepath.Join(destinationPath, key))
	if err != nil {
		fmt.Println("Failed to create file", err)
		return err
	}
	defer file.Close()

	objectKey := filepath.Join(prefix, key)

	downloader := s3manager.NewDownloader(sess)
	numBytes, err := downloader.Download(file,
		&s3.GetObjectInput{
			Bucket: aws.String(bucket),
			Key:    aws.String(objectKey),
		})
	if err != nil {
		fmt.Println("Failed to download file", err)
		return err
	}
	Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)

	return nil
}
func DeleteOldBackup(bucket, prefix string, retention int) error {
	sess, err := CreateSession()
	if err != nil {
		return err
	}

	svc := s3.New(sess)

	// Get the current time and the time threshold for 7 days ago
	now := time.Now()
	backupRetentionDays := now.AddDate(0, 0, -retention)

	// List objects in the bucket
	listObjectsInput := &s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	}
	err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, object := range page.Contents {
			if object.LastModified.Before(backupRetentionDays) {
				// Object is older than retention days, delete it
				_, err := svc.DeleteObject(&s3.DeleteObjectInput{
					Bucket: aws.String(bucket),
					Key:    object.Key,
				})
				if err != nil {
					log.Printf("Failed to delete object %s: %v", *object.Key, err)
				} else {
					fmt.Printf("Deleted object %s\n", *object.Key)
				}
			}
		}
		return !lastPage
	})
	if err != nil {
		log.Fatalf("Failed to list objects: %v", err)
	}

	fmt.Println("Finished deleting old files.")
	return nil
}
utils/utils.go
@@ -1,19 +1,43 @@
+/* MIT License header (as in pkg/s3.go above) */
+
 package utils
 
-/*****
-* MySQL Backup & Restore
-* @author Jonas Kaninda
-* @license MIT License <https://opensource.org/licenses/MIT>
-* @link https://github.com/jkaninda/mysql-bkup
-**/
 import (
 	"fmt"
+	"github.com/robfig/cron/v3"
 	"github.com/spf13/cobra"
 	"io"
 	"io/fs"
 	"os"
+	"strconv"
+	"time"
 )
 
+var Version = ""
+
+// FileExists checks if the file does exist
 func FileExists(filename string) bool {
 	info, err := os.Stat(filename)
 	if os.IsNotExist(err) {
@@ -27,7 +51,13 @@ func WriteToFile(filePath, content string) error {
 	if err != nil {
 		return err
 	}
-	defer file.Close()
+	defer func(file *os.File) {
+		err := file.Close()
+		if err != nil {
+			return
+		}
+	}(file)
+
 	_, err = file.WriteString(content)
 	return err
@@ -45,14 +75,25 @@ func CopyFile(src, dst string) error {
 	if err != nil {
 		return fmt.Errorf("failed to open source file: %v", err)
 	}
-	defer sourceFile.Close()
+	defer func(sourceFile *os.File) {
+		err := sourceFile.Close()
+		if err != nil {
+			return
+		}
+	}(sourceFile)
+
 	// Create the destination file
 	destinationFile, err := os.Create(dst)
 	if err != nil {
 		return fmt.Errorf("failed to create destination file: %v", err)
 	}
-	defer destinationFile.Close()
+	defer func(destinationFile *os.File) {
+		err := destinationFile.Close()
+		if err != nil {
+			return
+		}
+	}(destinationFile)
+
 	// Copy the content from source to destination
 	_, err = io.Copy(destinationFile, sourceFile)
@@ -79,7 +120,12 @@ func IsDirEmpty(name string) (bool, error) {
 	if err != nil {
 		return false, err
 	}
-	defer f.Close()
+	defer func(f *os.File) {
+		err := f.Close()
+		if err != nil {
+			return
+		}
+	}(f)
+
 	_, err = f.Readdirnames(1)
 	if err == nil {
@@ -127,14 +173,11 @@ func GetEnvVariable(envName, oldEnvName string) string {
 		if err != nil {
 			return value
 		}
-		Warn("%s is deprecated, please use %s instead!", oldEnvName, envName)
+		Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
 	}
 	return value
 }
-func ShowHistory() {
-}
 
 // CheckEnvVars checks if all the specified environment variables are set
 func CheckEnvVars(vars []string) error {
@@ -170,3 +213,60 @@ func MakeDirAll(dirPath string) error {
 	}
 	return nil
 }
+
+func GetIntEnv(envName string) int {
+	val := os.Getenv(envName)
+	if val == "" {
+		return 0
+	}
+	ret, err := strconv.Atoi(val)
+	if err != nil {
+		Error("Error: %v", err)
+	}
+	return ret
+}
+
+func EnvWithDefault(envName string, defaultValue string) string {
+	value := os.Getenv(envName)
+	if value == "" {
+		return defaultValue
+	}
+	return value
+}
+
+// IsValidCronExpression verify cronExpression and returns boolean
+func IsValidCronExpression(cronExpr string) bool {
+	// Parse the cron expression
+	_, err := cron.ParseStandard(cronExpr)
+	return err == nil
+}
+
+// CronNextTime returns cronExpression next time
+func CronNextTime(cronExpr string) time.Time {
+	// Parse the cron expression
+	schedule, err := cron.ParseStandard(cronExpr)
+	if err != nil {
+		Error("Error parsing cron expression: %s", err)
+		return time.Time{}
+	}
+	// Get the current time
+	now := time.Now()
+	// Get the next scheduled time
+	next := schedule.Next(now)
+	return next
+}
+
+// ConvertBytes converts bytes to a human-readable string with the appropriate unit (bytes, MiB, or GiB).
+func ConvertBytes(bytes uint64) string {
+	const (
+		MiB = 1024 * 1024
+		GiB = MiB * 1024
+	)
+	switch {
+	case bytes >= GiB:
+		return fmt.Sprintf("%.2f GiB", float64(bytes)/float64(GiB))
+	case bytes >= MiB:
+		return fmt.Sprintf("%.2f MiB", float64(bytes)/float64(MiB))
+	default:
+		return fmt.Sprintf("%d bytes", bytes)
+	}
+}
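A quick usage sketch for the new helpers:

fmt.Println(utils.ConvertBytes(5 * 1024 * 1024)) // "5.00 MiB"
if utils.IsValidCronExpression("0 1 * * *") {
	fmt.Println(utils.CronNextTime("0 1 * * *")) // next 01:00, local time
}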