Compare commits

...

65 Commits

Author SHA1 Message Date
d9d44c2798 Merge pull request #182 from jkaninda/nightly
Some checks failed
Deploy Documenation site to GitHub Pages / build (push) Failing after 9m29s
Deploy Documenation site to GitHub Pages / deploy (push) Has been skipped
Lint / Run on Ubuntu (push) Successful in 18m40s
Tests / test (push) Failing after 2m19s
Nightly
2025-03-14 09:59:33 +01:00
300a592508 Merge branch 'main' of github.com:jkaninda/mysql-bkup into nightly
Some checks failed
Build / docker (push) Failing after 9s
Lint / Run on Ubuntu (push) Successful in 18m40s
Tests / test (push) Failing after 19s
2025-03-14 09:58:47 +01:00
be82e841e7 ci: set docker tests on main 2025-03-14 09:58:41 +01:00
a73a365ebf ci: set docker tests on main 2025-03-14 09:57:59 +01:00
75e965c0c5 Merge pull request #181 from jkaninda/nightly
doc: update reference
2025-03-14 09:55:02 +01:00
fc60ddb308 doc: update reference 2025-03-14 09:53:38 +01:00
573ef15ef3 Merge pull request #180 from jkaninda/nightly
ci: update  Github pages action
2025-03-14 09:50:49 +01:00
b1776d3689 ci: update Github pages action 2025-03-14 09:50:13 +01:00
376d47f738 Merge pull request #178 from jkaninda/nightly
feat: add backup all databases separately
2025-03-14 09:43:43 +01:00
eb6268f8ec ci: add Docker tests (#179) 2025-03-14 09:41:37 +01:00
731e2d789d ci: add go lint 2025-03-14 05:24:46 +01:00
6300a8f2dd feat: add backup all databases 2025-03-14 05:20:54 +01:00
cd827a9277 chore: comment code
Some checks failed
Build / docker (push) Failing after 16s
2025-03-13 14:44:22 +01:00
71cf3fae85 chore: improve log message 2025-03-13 14:26:32 +01:00
528282bbd4 feat: add backup all databases separately 2025-03-13 07:48:28 +01:00
002c93a796 Merge pull request #176 from jkaninda/dependabot/docker/golang-1.24.1
chore(deps): bump golang from 1.24.0 to 1.24.1
2025-03-12 16:29:14 +01:00
b6192f4c42 feat: add backup all databases
Some checks failed
Build / docker (push) Failing after 14s
2025-03-12 16:04:26 +01:00
d5061453b0 feat: add backup all databases 2025-03-12 15:50:30 +01:00
0bc7497512 fix: warning message when using MYSQL_PASSWORD env 2025-03-12 14:13:21 +01:00
489dfdf842 fix: backup error output 2025-03-12 13:27:31 +01:00
dependabot[bot]
907e70d552 chore(deps): bump golang from 1.24.0 to 1.24.1
Bumps golang from 1.24.0 to 1.24.1.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 09:25:40 +00:00
696477fe5c Merge pull request #175 from jkaninda/dependabot/go_modules/github.com/jkaninda/go-utils-0.1.1
chore(deps): bump github.com/jkaninda/go-utils from 0.0.0-20250122060806-26119182077a to 0.1.1
2025-02-27 12:33:17 +01:00
dependabot[bot]
56a8b51660 chore(deps): bump github.com/jkaninda/go-utils
Bumps [github.com/jkaninda/go-utils](https://github.com/jkaninda/go-utils) from 0.0.0-20250122060806-26119182077a to 0.1.1.
- [Release notes](https://github.com/jkaninda/go-utils/releases)
- [Commits](https://github.com/jkaninda/go-utils/commits/v0.1.1)

---
updated-dependencies:
- dependency-name: github.com/jkaninda/go-utils
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-24 10:27:41 +00:00
c76a00139c Merge pull request #172 from jkaninda/dependabot/go_modules/github.com/spf13/cobra-1.9.1
chore(deps): bump github.com/spf13/cobra from 1.8.1 to 1.9.1
2025-02-21 11:38:48 +01:00
0f43871765 Merge pull request #173 from jkaninda/dependabot/docker/golang-1.24.0
chore(deps): bump golang from 1.23.6 to 1.24.0
2025-02-21 11:38:37 +01:00
9ba6abe3f4 Merge pull request #174 from jkaninda/dependabot/docker/alpine-3.21.3
chore(deps): bump alpine from 3.21.2 to 3.21.3
2025-02-21 11:38:27 +01:00
dependabot[bot]
764583d88f chore(deps): bump alpine from 3.21.2 to 3.21.3
Bumps alpine from 3.21.2 to 3.21.3.

---
updated-dependencies:
- dependency-name: alpine
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-17 10:11:17 +00:00
dependabot[bot]
dbf4dc596a chore(deps): bump golang from 1.23.6 to 1.24.0
Bumps golang from 1.23.6 to 1.24.0.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-17 10:11:10 +00:00
dependabot[bot]
06c89a9b78 chore(deps): bump github.com/spf13/cobra from 1.8.1 to 1.9.1
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.8.1 to 1.9.1.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.8.1...v1.9.1)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-17 10:01:09 +00:00
ec8bdd806c Merge pull request #171 from jkaninda/dependabot/docker/golang-1.23.6
chore(deps): bump golang from 1.23.5 to 1.23.6
2025-02-10 20:15:31 +01:00
dependabot[bot]
828b11c6dd chore(deps): bump golang from 1.23.5 to 1.23.6
Bumps golang from 1.23.5 to 1.23.6.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-10 09:34:49 +00:00
1d01e13909 Merge pull request #170 from jkaninda/nightly
chore: update helper func to check env with prefix or suffix for multi backups
2025-02-05 07:44:57 +01:00
bd65db2418 chore: update helper func to check env with prefix or suffix for multi backups
Some checks failed
Build / docker (push) Failing after 14m58s
2025-02-05 07:39:52 +01:00
75b809511e fix go lint
Some checks failed
Build / docker (push) Failing after 8s
2025-01-26 13:54:41 +01:00
fc028a2c55 feat: add multiple backup rescued mode for scheduled mode 2025-01-26 13:43:39 +01:00
7fa0c6a118 Merge pull request #169 from jkaninda/nightly
Some checks failed
Deploy Documenation site to GitHub Pages / build (push) Failing after 9m23s
Deploy Documenation site to GitHub Pages / deploy (push) Has been skipped
docs: add quick restore
2025-01-26 12:12:53 +01:00
661702a97e docs: add quick restore 2025-01-26 12:11:29 +01:00
dd5f33f17d Merge pull request #168 from jkaninda/nightly
Some checks failed
Deploy Documenation site to GitHub Pages / build (push) Failing after 9m28s
Deploy Documenation site to GitHub Pages / deploy (push) Has been skipped
Nightly
2025-01-25 09:36:19 +01:00
b7cdfebd9c chore: notification remove MAIL_USERNAME and MAIL_PASSWORD from required env
Some checks failed
Build / docker (push) Failing after 13s
2025-01-25 09:19:23 +01:00
4b93becdf2 feat: add Set default values from environment variables if not provided for multiple backup 2025-01-25 09:12:28 +01:00
748cccec58 Merge pull request #167 from jkaninda/nightly
feat: add backup duration
2025-01-22 07:23:29 +01:00
3e8bfabc44 feat: add backup duration
Some checks failed
Build / docker (push) Failing after 12s
2025-01-22 07:22:56 +01:00
777b59fd7c Merge pull request #166 from jkaninda/dependabot/docker/golang-1.23.5
chore(deps): bump golang from 1.23.4 to 1.23.5
2025-01-21 02:53:48 +01:00
dependabot[bot]
2b25f39c0a chore(deps): bump golang from 1.23.4 to 1.23.5
Bumps golang from 1.23.4 to 1.23.5.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-20 10:09:46 +00:00
e5ba397bb4 Merge pull request #164 from jkaninda/nightly
Some checks failed
Deploy Documenation site to GitHub Pages / build (push) Failing after 9m27s
Deploy Documenation site to GitHub Pages / deploy (push) Has been skipped
doc: reviewed docs
2025-01-13 15:34:50 +01:00
3a1bfc512d doc: reviewed docs
Some checks failed
Build / docker (push) Failing after 9s
2025-01-13 15:34:02 +01:00
b7b09ad6fd Merge pull request #163 from jkaninda/nightly
Nightly
2025-01-13 15:06:27 +01:00
1206140a67 doc: reviewed docs 2025-01-13 15:05:50 +01:00
24573a96ad doc: reviewed docs 2025-01-13 15:04:29 +01:00
fff0b55722 Merge pull request #162 from jkaninda/nightly
feat: add backup flags for configuration and cron expression
2025-01-13 14:57:00 +01:00
68322e6b9f doc: reviewed docs 2025-01-13 14:56:08 +01:00
0f28772659 doc: reviewed docs 2025-01-13 14:40:46 +01:00
b95ccf3905 feat: add backup flags for configuration and cron expression 2025-01-13 14:23:27 +01:00
a06872834f Merge pull request #161 from jkaninda/dependabot/docker/alpine-3.21.2
chore(deps): bump alpine from 3.21.0 to 3.21.2
2025-01-13 10:53:41 +01:00
dependabot[bot]
393168c6c5 chore(deps): bump alpine from 3.21.0 to 3.21.2
Bumps alpine from 3.21.0 to 3.21.2.

---
updated-dependencies:
- dependency-name: alpine
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-13 09:52:30 +00:00
5b9ec8a224 Merge pull request #160 from jkaninda/nightly
fix: the configuration file path is not being detected when it is enc…
2025-01-12 09:36:29 +01:00
2c3f2f4a46 fix: the configuration file path is not being detected when it is enclosed in quotes 2025-01-12 07:58:32 +01:00
0df14f37b4 Merge pull request #159 from jkaninda/refactor
chore: add convert bytes to a human-readable string with the appropri…
2024-12-12 13:29:22 +01:00
1b60ca6fd2 chore: add convert bytes to a human-readable string with the appropriate unit (bytes, MiB, or GiB) 2024-12-12 13:28:09 +01:00
d880f40108 Merge pull request #158 from jkaninda/dependabot/docker/golang-1.23.4
chore(deps): bump golang from 1.23.3 to 1.23.4
2024-12-10 10:20:27 +01:00
dependabot[bot]
c845b36797 chore(deps): bump golang from 1.23.3 to 1.23.4
Bumps golang from 1.23.3 to 1.23.4.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-09 09:53:21 +00:00
63d615f838 Merge pull request #157 from jkaninda/refactor
docs: add azure configuration reference
2024-12-08 13:36:26 +01:00
6f31d35df2 docs: add azure configuration reference 2024-12-08 13:35:49 +01:00
f36d01cc96 Merge pull request #156 from jkaninda/refactor
Fix grammar issues in azure.go
2024-12-08 00:01:13 +01:00
07b7f54a75 Fix grammar issues in azure.go 2024-12-08 00:00:24 +01:00
48 changed files with 2116 additions and 1043 deletions

View File

@@ -1,7 +1,7 @@
name: Build name: Build
on: on:
push: push:
branches: ['develop'] branches: ['nightly']
env: env:
BUILDKIT_IMAGE: jkaninda/mysql-bkup BUILDKIT_IMAGE: jkaninda/mysql-bkup
jobs: jobs:
@@ -28,7 +28,7 @@ jobs:
file: "./Dockerfile" file: "./Dockerfile"
platforms: linux/amd64,linux/arm64,linux/arm/v7 platforms: linux/amd64,linux/arm64,linux/arm/v7
build-args: | build-args: |
appVersion=develop-${{ github.sha }} appVersion=nightly
tags: | tags: |
"${{vars.BUILDKIT_IMAGE}}:develop-${{ github.sha }}" "${{vars.BUILDKIT_IMAGE}}:nightly"

View File

@@ -32,14 +32,14 @@ jobs:
working-directory: docs working-directory: docs
- name: Setup Pages - name: Setup Pages
id: pages id: pages
uses: actions/configure-pages@v2 uses: actions/configure-pages@v5
- name: Build with Jekyll - name: Build with Jekyll
working-directory: docs working-directory: docs
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env: env:
JEKYLL_ENV: production JEKYLL_ENV: production
- name: Upload artifact - name: Upload artifact
uses: actions/upload-pages-artifact@v1 uses: actions/upload-pages-artifact@v3
with: with:
path: 'docs/_site/' path: 'docs/_site/'
@@ -52,4 +52,4 @@ jobs:
steps: steps:
- name: Deploy to GitHub Pages - name: Deploy to GitHub Pages
id: deployment id: deployment
uses: actions/deploy-pages@v1 uses: actions/deploy-pages@v4

23
.github/workflows/lint.yml vendored Normal file
View File

@@ -0,0 +1,23 @@
name: Lint
on:
push:
pull_request:
jobs:
lint:
name: Run on Ubuntu
runs-on: ubuntu-latest
steps:
- name: Clone the code
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.23'
- name: Run linter
uses: golangci/golangci-lint-action@v6
with:
version: v1.61

290
.github/workflows/tests.yml vendored Normal file
View File

@@ -0,0 +1,290 @@
name: Tests
on:
push:
branches:
- main
- nightly
pull_request:
branches:
- main
env:
IMAGE_NAME: mysql-bkup
jobs:
test:
runs-on: ubuntu-latest
services:
mysql:
image: mysql:9
env:
MYSQL_ROOT_PASSWORD: password
MYSQL_DATABASE: testdb
MYSQL_USER: user
MYSQL_PASSWORD: password
ports:
- 3306:3306
options: >-
--health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
--health-interval=10s
--health-timeout=5s
--health-retries=5
mysql8:
image: mysql:8
env:
MYSQL_ROOT_PASSWORD: password
MYSQL_DATABASE: testdb
MYSQL_USER: user
MYSQL_PASSWORD: password
ports:
- 3308:3306
options: >-
--health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
--health-interval=10s
--health-timeout=5s
--health-retries=5
mysql5:
image: mysql:5
env:
MYSQL_ROOT_PASSWORD: password
MYSQL_DATABASE: testdb
MYSQL_USER: user
MYSQL_PASSWORD: password
ports:
- 3305:3306
options: >-
--health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
--health-interval=10s
--health-timeout=5s
--health-retries=5
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Create Minio container
run: |
docker run -d --rm --name minio \
--network host \
-p 9000:9000 \
-e MINIO_ACCESS_KEY=minioadmin \
-e MINIO_SECRET_KEY=minioadmin \
-e MINIO_REGION_NAME="eu" \
minio/minio server /data
echo "Create Minio container completed"
- name: Install MinIO Client (mc)
run: |
curl -O https://dl.min.io/client/mc/release/linux-amd64/mc
chmod +x mc
sudo mv mc /usr/local/bin/
- name: Wait for MinIO to be ready
run: sleep 5
- name: Configure MinIO Client
run: |
mc alias set local http://localhost:9000 minioadmin minioadmin
mc alias list
- name: Create MinIO Bucket
run: |
mc mb local/backups
echo "Bucket backups created successfully."
# Build the Docker image
- name: Build Docker Image
run: |
docker buildx build --build-arg appVersion=test -t ${{ env.IMAGE_NAME }}:latest --load .
- name: Verify Docker images
run: |
docker images
- name: Wait for MySQL to be ready
run: |
docker run --rm --network host mysql:9 mysqladmin ping -h 127.0.0.1 -uuser -ppassword --wait
- name: Test restore
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=root \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
${{ env.IMAGE_NAME }}:latest restore -f init.sql
echo "Database restore completed"
- name: Test restore Mysql8
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_PORT=3308 \
-e DB_USERNAME=root \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
${{ env.IMAGE_NAME }}:latest restore -f init.sql
echo "Test restore Mysql8 completed"
- name: Test restore Mysql5
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_PORT=3305 \
-e DB_USERNAME=root \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
${{ env.IMAGE_NAME }}:latest restore -f init.sql
echo "Test restore Mysql5 completed"
- name: Test backup
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=user \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
${{ env.IMAGE_NAME }}:latest backup
echo "Database backup completed"
- name: Test backup Mysql8
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_PORT=3308 \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=user \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
${{ env.IMAGE_NAME }}:latest backup
echo "Test backup Mysql8 completed"
- name: Test backup Mysql5
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_PORT=3305 \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=user \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
${{ env.IMAGE_NAME }}:latest backup
echo "Test backup Mysql5 completed"
- name: Test encrypted backup
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=user \
-e DB_PASSWORD=password \
-e GPG_PASSPHRASE=password \
-e DB_NAME=testdb \
${{ env.IMAGE_NAME }}:latest backup --disable-compression --custom-name encrypted-bkup
echo "Database encrypted backup completed"
- name: Test restore encrypted backup | testdb -> testdb2
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=root \
-e DB_PASSWORD=password \
-e GPG_PASSPHRASE=password \
-e DB_NAME=testdb2 \
${{ env.IMAGE_NAME }}:latest restore -f /backup/encrypted-bkup.sql.gpg
echo "Test restore encrypted backup completed"
- name: Test migrate database testdb -> testdb3
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=root \
-e DB_PASSWORD=password \
-e GPG_PASSPHRASE=password \
-e DB_NAME=testdb \
-e TARGET_DB_HOST=127.0.0.1 \
-e TARGET_DB_PORT=3306 \
-e TARGET_DB_NAME=testdb3 \
-e TARGET_DB_USERNAME=root \
-e TARGET_DB_PASSWORD=password \
${{ env.IMAGE_NAME }}:latest migrate
echo "Test migrate database testdb -> testdb3 completed"
- name: Test backup all databases
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=root \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
${{ env.IMAGE_NAME }}:latest backup --all-databases
echo "Database backup completed"
- name: Test multiple backup
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e TESTDB2_DB_USERNAME=root \
-e TESTDB2_DB_PASSWORD=password \
-e TESTDB2_DB_HOST=127.0.0.1 \
${{ env.IMAGE_NAME }}:latest backup -c /backup/test_config.yaml
echo "Database backup completed"
- name: Test backup Minio (s3)
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=user \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
-e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \
-e AWS_S3_BUCKET_NAME=backups \
-e AWS_ACCESS_KEY=minioadmin \
-e AWS_SECRET_KEY=minioadmin \
-e AWS_DISABLE_SSL="true" \
-e AWS_REGION="eu" \
-e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest backup -s s3 --custom-name minio-backup
echo "Test backup Minio (s3) completed"
- name: Test restore Minio (s3)
run: |
docker run --rm --name ${{ env.IMAGE_NAME }} \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=user \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
-e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \
-e AWS_S3_BUCKET_NAME=backups \
-e AWS_ACCESS_KEY=minioadmin \
-e AWS_SECRET_KEY=minioadmin \
-e AWS_DISABLE_SSL="true" \
-e AWS_REGION="eu" \
-e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest restore -s s3 -f minio-backup.sql.gz
echo "Test backup Minio (s3) completed"
- name: Test scheduled backup
run: |
docker run -d --rm --name ${{ env.IMAGE_NAME }} \
-v ./migrations:/backup/ \
--network host \
-e DB_HOST=127.0.0.1 \
-e DB_USERNAME=user \
-e DB_PASSWORD=password \
-e DB_NAME=testdb \
${{ env.IMAGE_NAME }}:latest backup -e "@every 10s"
echo "Waiting for backup to be done..."
sleep 25
docker logs ${{ env.IMAGE_NAME }}
echo "Test scheduled backup completed"
# Cleanup: Stop and remove containers
- name: Clean up
run: |
docker stop ${{ env.IMAGE_NAME }} || true
docker rm ${{ env.IMAGE_NAME }} || true

View File

@@ -27,6 +27,7 @@ linters:
- gosimple - gosimple
- govet - govet
- ineffassign - ineffassign
# - lll
- misspell - misspell
- nakedret - nakedret
- prealloc - prealloc

View File

@@ -1,4 +1,4 @@
FROM golang:1.23.3 AS build FROM golang:1.24.1 AS build
WORKDIR /app WORKDIR /app
ARG appVersion="" ARG appVersion=""
@@ -10,7 +10,7 @@ RUN go mod download
# Build # Build
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-X 'github.com/jkaninda/mysql-bkup/utils.Version=${appVersion}'" -o /app/mysql-bkup RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-X 'github.com/jkaninda/mysql-bkup/utils.Version=${appVersion}'" -o /app/mysql-bkup
FROM alpine:3.21.0 FROM alpine:3.21.3
ENV TZ=UTC ENV TZ=UTC
ARG WORKDIR="/config" ARG WORKDIR="/config"
ARG BACKUPDIR="/backup" ARG BACKUPDIR="/backup"

View File

@@ -3,6 +3,7 @@
**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**. **MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
It supports a variety of storage options and ensures data security through GPG encryption. It supports a variety of storage options and ensures data security through GPG encryption.
[![Tests](https://github.com/jkaninda/mysql-bkup/actions/workflows/tests.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/tests.yml)
[![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml) [![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
[![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup) [![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square) ![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square)
@@ -74,6 +75,7 @@ To run a one time backup, bind your local volume to `/backup` in the container a
docker run --rm --network your_network_name \ docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \ -v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \ -e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \ -e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \ -e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name jkaninda/mysql-bkup backup -d database_name
@@ -87,7 +89,19 @@ Alternatively, pass a `--env-file` in order to use a full config as described be
-v $PWD/backup:/backup/ \ -v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name jkaninda/mysql-bkup backup -d database_name
``` ```
### Simple restore using Docker CLI
To restore a database, bind your local volume to `/backup` in the container and run the `restore` command:
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup restore -d database_name -f backup_file.sql.gz
```
### Simple backup in docker compose file ### Simple backup in docker compose file
```yaml ```yaml
@@ -188,13 +202,12 @@ Documentation references Docker Hub, but all examples will work using ghcr.io ju
## References ## References
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements: We created this image as a simpler and more lightweight alternative to existing solutions. Heres why:
- The original image is based on `Alpine` and requires additional tools, making it heavy. - **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
- This image is written in Go. - **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
- `arm64` and `arm/v7` architectures are supported. - **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
- Docker in Swarm mode is supported. - **Kubernetes Support:** Designed to work seamlessly with Kubernetes.
- Kubernetes is supported.
## License ## License

View File

@@ -44,10 +44,14 @@ var BackupCmd = &cobra.Command{
} }
func init() { func init() {
//Backup // Backup
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp") BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp, azure")
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`") BackupCmd.PersistentFlags().StringP("path", "P", "", "Storage path without file name. e.g: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression") BackupCmd.PersistentFlags().StringP("cron-expression", "e", "", "Backup cron expression (e.g., `0 0 * * *` or `@daily`)")
BackupCmd.PersistentFlags().StringP("config", "c", "", "Configuration file for multi database backup. (e.g: `/backup/config.yaml`)")
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression") BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
BackupCmd.PersistentFlags().BoolP("all-databases", "a", false, "Backup all databases")
BackupCmd.PersistentFlags().BoolP("all-in-one", "A", false, "Backup all databases in a single file")
BackupCmd.PersistentFlags().StringP("custom-name", "", "", "Custom backup name")
} }

View File

@@ -46,7 +46,7 @@ var RestoreCmd = &cobra.Command{
} }
func init() { func init() {
//Restore // Restore
RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database") RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp") RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`") RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")

View File

@@ -38,7 +38,6 @@ var rootCmd = &cobra.Command{
Example: utils.MainExample, Example: utils.MainExample,
Version: appVersion, Version: appVersion,
} }
var operation = ""
// Execute adds all child commands to the root command and sets flags appropriately. // Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd. // This is called by main.main(). It only needs to happen once to the rootCmd.

View File

@@ -20,7 +20,7 @@ description: >- # this means to ignore newlines until "baseurl:"
It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
baseurl: "" # the subpath of your site, e.g. /blog baseurl: "" # the subpath of your site, e.g. /blog
url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com url: "" # the base hostname & protocol for your site, e.g. http://example.com
twitter_username: jonaskaninda twitter_username: jonaskaninda
github_username: jkaninda github_username: jkaninda

View File

@@ -4,22 +4,43 @@ layout: default
parent: How Tos parent: How Tos
nav_order: 5 nav_order: 5
--- ---
# Azure Blob storage
{: .note } # Backup to Azure Blob Storage
As described on local backup section, to change the storage of you backup and use Azure Blob as storage. You need to add `--storage azure` (-s azure).
You can also specify a folder where you want to save you data by adding `--path my-custom-path` flag.
To store your backups on Azure Blob Storage, you can configure the backup process to use the `--storage azure` option.
## Backup to Azure Blob storage This section explains how to set up and configure Azure Blob-based backups.
```yml ---
## Configuration Steps
1. **Specify the Storage Type**
Add the `--storage azure` flag to your backup command.
2. **Set the Blob Path**
Optionally, specify a custom folder within your Azure Blob container where backups will be stored using the `--path` flag.
Example: `--path my-custom-path`.
3. **Required Environment Variables**
The following environment variables are mandatory for Azure Blob-based backups:
- `AZURE_STORAGE_CONTAINER_NAME`: The name of the Azure Blob container where backups will be stored.
- `AZURE_STORAGE_ACCOUNT_NAME`: The name of your Azure Storage account.
- `AZURE_STORAGE_ACCOUNT_KEY`: The access key for your Azure Storage account.
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to Azure Blob Storage:
```yaml
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper # In production, lock your image tag to a specific release version
# release version instead of using `latest`. # instead of using `latest`. Check https://github.com/jkaninda/mysqlbkup/releases
# Check https://github.com/jkaninda/mysql-bkup/releases # for available releases.
# for a list of available releases.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: backup --storage azure -d database --path my-custom-path command: backup --storage azure -d database --path my-custom-path
@@ -29,16 +50,23 @@ services:
- DB_NAME=database - DB_NAME=database
- DB_USERNAME=username - DB_USERNAME=username
- DB_PASSWORD=password - DB_PASSWORD=password
## Azure Blob configurations ## Azure Blob Configuration
- AZURE_STORAGE_CONTAINER_NAME=backup-container - AZURE_STORAGE_CONTAINER_NAME=backup-container
- AZURE_STORAGE_ACCOUNT_NAME=account-name - AZURE_STORAGE_ACCOUNT_NAME=account-name
- AZURE_STORAGE_ACCOUNT_KEY=Ppby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw== - AZURE_STORAGE_ACCOUNT_KEY=Ppby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
# mysql-bkup container must be connected to the same network with your database
# Ensure the mysql-bkup container is connected to the same network as your database
networks: networks:
- web - web
networks: networks:
web: web:
``` ```
---
## Key Notes
- **Custom Path**: Use the `--path` flag to specify a folder within your Azure Blob container for organizing backups.
- **Security**: Ensure your `AZURE_STORAGE_ACCOUNT_KEY` is kept secure and not exposed in public repositories.
- **Compatibility**: This configuration works with Azure Blob Storage and other compatible storage solutions.

View File

@@ -0,0 +1,61 @@
---
title: Backup all databases in the server
layout: default
parent: How Tos
nav_order: 12
---
# Backup All Databases
MySQL-Bkup supports backing up all databases on the server using the `--all-databases` (`-a`) flag. By default, this creates separate backup files for each database. If you prefer a single backup file, you can use the `--all-in-on`e (`-A`) flag.
Backing up all databases is useful for creating a snapshot of the entire database server, whether for disaster recovery or migration purposes.
## Backup Modes
### Separate Backup Files (Default)
Using --all-databases without --all-in-one creates individual backup files for each database.
- Creates separate backup files for each database.
- Provides more flexibility in restoring individual databases or tables.
- Can be more manageable in cases where different databases have different retention policies.
- Might take slightly longer due to multiple file operations.
- It is the default behavior when using the `--all-databases` flag.
- It does not backup system databases (`information_schema`, `performance_schema`, `mysql`, `sys`, `innodb`).
**Command:**
```bash
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup --all-databases
```
### Single Backup File
Using --all-in-one (-A) creates a single backup file containing all databases.
- Creates a single backup file containing all databases.
- Easier to manage if you need to restore everything at once.
- Faster to back up and restore in bulk.
- Can be problematic if you only need to restore a specific database or table.
- It is recommended to use this option for disaster recovery purposes.
- It backups system databases as well.
```bash
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup --all-in-one
```
### When to Use Which?
- Use `--all-in-one` if you want a quick, simple backup for disaster recovery where you'll restore everything at once.
- Use `--all-databases` if you need granularity in restoring specific databases or tables without affecting others.

View File

@@ -4,41 +4,72 @@ layout: default
parent: How Tos parent: How Tos
nav_order: 4 nav_order: 4
--- ---
# Backup to FTP remote server
# Backup to FTP Remote Server
As described for SSH backup section, to change the storage of your backup and use FTP Remote server as storage. You need to add `--storage ftp`. To store your backups on an FTP remote server, you can configure the backup process to use the `--storage ftp` option.
You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable.
{: .note } This section explains how to set up and configure FTP-based backups.
These environment variables are required for SSH backup `FTP_HOST`, `FTP_USER`, `REMOTE_PATH`, `FTP_PORT` or `FTP_PASSWORD`.
```yml ---
## Configuration Steps
1. **Specify the Storage Type**
Add the `--storage ftp` flag to your backup command.
2. **Set the Remote Path**
Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
Example: `--path /home/jkaninda/backups`.
3. **Required Environment Variables**
The following environment variables are mandatory for FTP-based backups:
- `FTP_HOST`: The hostname or IP address of the FTP server.
- `FTP_PORT`: The FTP port (default is `21`).
- `FTP_USER`: The username for FTP authentication.
- `FTP_PASSWORD`: The password for FTP authentication.
- `REMOTE_PATH`: The directory on the FTP server where backups will be stored.
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to an FTP remote server:
```yaml
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper # In production, lock your image tag to a specific release version
# release version instead of using `latest`. # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# Check https://github.com/jkaninda/mysql-bkup/releases # for available releases.
# for a list of available releases.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: backup --storage ftp -d database command: backup --storage ftp -d database
environment: environment:
- DB_PORT=3306 - DB_PORT=3306
- DB_HOST=postgres - DB_HOST=mysql
- DB_NAME=database - DB_NAME=database
- DB_USERNAME=username - DB_USERNAME=username
- DB_PASSWORD=password - DB_PASSWORD=password
## FTP config ## FTP Configuration
- FTP_HOST="hostname" - FTP_HOST="hostname"
- FTP_PORT=21 - FTP_PORT=21
- FTP_USER=user - FTP_USER=user
- FTP_PASSWORD=password - FTP_PASSWORD=password
- REMOTE_PATH=/home/jkaninda/backups - REMOTE_PATH=/home/jkaninda/backups
# pg-bkup container must be connected to the same network with your database # Ensure the mysql-bkup container is connected to the same network as your database
networks: networks:
- web - web
networks: networks:
web: web:
``` ```
---
## Key Notes
- **Security**: FTP transmits data, including passwords, in plaintext. For better security, consider using SFTP (SSH File Transfer Protocol) or FTPS (FTP Secure) if supported by your server.
- **Remote Path**: Ensure the `REMOTE_PATH` directory exists on the FTP server and is writable by the specified `FTP_USER`.

View File

@@ -4,85 +4,123 @@ layout: default
parent: How Tos parent: How Tos
nav_order: 2 nav_order: 2
--- ---
# Backup to AWS S3 # Backup to AWS S3
{: .note } To store your backups on AWS S3, you can configure the backup process to use the `--storage s3` option. This section explains how to set up and configure S3-based backups.
As described on local backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage s3` (-s s3).
You can also specify a specify folder where you want to save you data by adding `--path /my-custom-path` flag.
---
## Backup to S3 ## Configuration Steps
```yml 1. **Specify the Storage Type**
Add the `--storage s3` flag to your backup command.
2. **Set the S3 Path**
Optionally, specify a custom folder within your S3 bucket where backups will be stored using the `--path` flag.
Example: `--path /my-custom-path`.
3. **Required Environment Variables**
The following environment variables are mandatory for S3-based backups:
- `AWS_S3_ENDPOINT`: The S3 endpoint URL (e.g., `https://s3.amazonaws.com`).
- `AWS_S3_BUCKET_NAME`: The name of the S3 bucket where backups will be stored.
- `AWS_REGION`: The AWS region where the bucket is located (e.g., `us-west-2`).
- `AWS_ACCESS_KEY`: Your AWS access key.
- `AWS_SECRET_KEY`: Your AWS secret key.
- `AWS_DISABLE_SSL`: Set to `"true"` if using an S3 alternative like Minio without SSL (default is `"false"`).
- `AWS_FORCE_PATH_STYLE`: Set to `"true"` if using an S3 alternative like Minio (default is `"false"`).
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to AWS S3:
```yaml
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper # In production, lock your image tag to a specific release version
# release version instead of using `latest`. # instead of using `latest`. Check https://github.com/jkaninda/pg-bkup/releases
# Check https://github.com/jkaninda/mysql-bkup/releases # for available releases.
# for a list of available releases. image: jkaninda/pg-bkup
image: jkaninda/mysql-bkup container_name: pg-bkup
container_name: mysql-bkup
command: backup --storage s3 -d database --path /my-custom-path command: backup --storage s3 -d database --path /my-custom-path
environment: environment:
- DB_PORT=3306 - DB_PORT=5432
- DB_HOST=mysql - DB_HOST=postgres
- DB_NAME=database - DB_NAME=database
- DB_USERNAME=username - DB_USERNAME=username
- DB_PASSWORD=password - DB_PASSWORD=password
## AWS configurations ## AWS Configuration
- AWS_S3_ENDPOINT=https://s3.amazonaws.com - AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup - AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2" - AWS_REGION=us-west-2
- AWS_ACCESS_KEY=xxxx - AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx - AWS_SECRET_KEY=xxxxx
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true ## Optional: Disable SSL for S3 alternatives like Minio
- AWS_DISABLE_SSL="false" - AWS_DISABLE_SSL="false"
- AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio ## Optional: Enable path-style access for S3 alternatives like Minio
- AWS_FORCE_PATH_STYLE=false
# mysql-bkup container must be connected to the same network with your database
# Ensure the mysql-bkup container is connected to the same network as your database
networks: networks:
- web - web
networks: networks:
web: web:
``` ```
### Recurring backups to S3 ---
As explained above, you need just to add AWS environment variables and specify the storage type `--storage s3`. ## Recurring Backups to S3
In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below.
```yml To schedule recurring backups to S3, use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.
### Example: Recurring Backup Configuration
```yaml
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper # In production, lock your image tag to a specific release version
# release version instead of using `latest`. # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# Check https://github.com/jkaninda/mysql-bkup/releases # for available releases.
# for a list of available releases.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: backup --storage s3 -d my-database --cron-expression "0 1 * * *" command: backup --storage s3 -d database --cron-expression "0 1 * * *"
environment: environment:
- DB_PORT=3306 - DB_PORT=3306
- DB_HOST=mysql - DB_HOST=mysql
- DB_NAME=database - DB_NAME=database
- DB_USERNAME=username - DB_USERNAME=username
- DB_PASSWORD=password - DB_PASSWORD=password
## AWS configurations ## AWS Configuration
- AWS_S3_ENDPOINT=https://s3.amazonaws.com - AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup - AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2" - AWS_REGION=us-west-2
- AWS_ACCESS_KEY=xxxx - AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx - AWS_SECRET_KEY=xxxxx
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional ## Optional: Define a cron schedule for recurring backups
#Delete old backup created more than specified days ago #- BACKUP_CRON_EXPRESSION=0 1 * * *
## Optional: Delete old backups after a specified number of days
#- BACKUP_RETENTION_DAYS=7 #- BACKUP_RETENTION_DAYS=7
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true ## Optional: Disable SSL for S3 alternatives like Minio
- AWS_DISABLE_SSL="false" - AWS_DISABLE_SSL="false"
- AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio ## Optional: Enable path-style access for S3 alternatives like Minio
# mysql-bkup container must be connected to the same network with your database - AWS_FORCE_PATH_STYLE=false
# Ensure the pg-bkup container is connected to the same network as your database
networks: networks:
- web - web
networks: networks:
web: web:
``` ```
---
## Key Notes
- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
- **S3 Alternatives**: If using an S3 alternative like Minio, set `AWS_DISABLE_SSL="true"` and `AWS_FORCE_PATH_STYLE="true"` as needed.

View File

@@ -1,91 +1,129 @@
--- ---
title: Backup to SSH title: Backup to SSH or SFTP
layout: default layout: default
parent: How Tos parent: How Tos
nav_order: 3 nav_order: 3
--- ---
# Backup to SSH remote server # Backup to SFTP or SSH Remote Server
To store your backups on an `SFTP` or `SSH` remote server instead of the default storage, you can configure the backup process to use the `--storage ssh` or `--storage remote` option.
This section explains how to set up and configure SSH-based backups.
As described for s3 backup section, to change the storage of your backup and use SSH Remote server as storage. You need to add `--storage ssh` or `--storage remote`. ---
You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable.
{: .note } ## Configuration Steps
These environment variables are required for SSH backup `SSH_HOST`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server.
Accessing the remote server using password is not recommended, use private key instead.
```yml 1. **Specify the Storage Type**
Add the `--storage ssh` or `--storage remote` flag to your backup command.
2. **Set the Remote Path**
Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
Example: `--path /home/jkaninda/backups`.
3. **Required Environment Variables**
The following environment variables are mandatory for SSH-based backups:
- `SSH_HOST`: The hostname or IP address of the remote server.
- `SSH_USER`: The username for SSH authentication.
- `REMOTE_PATH`: The directory on the remote server where backups will be stored.
- `SSH_IDENTIFY_FILE`: The path to the private key file for SSH authentication.
- `SSH_PORT`: The SSH port (default is `22`).
- `SSH_PASSWORD`: (Optional) Use this only if you are not using a private key for authentication.
{: .note }
**Security Recommendation**: Using a private key (`SSH_IDENTIFY_FILE`) is strongly recommended over password-based authentication (`SSH_PASSWORD`) for better security.
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to an SSH remote server:
```yaml
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper # In production, lock your image tag to a specific release version
# release version instead of using `latest`. # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# Check https://github.com/jkaninda/mysql-bkup/releases # for available releases.
# for a list of available releases. image: jkaninda/mysql-bkup
image: jkaninda/mysql-bkup container_name: mysql-bkup
container_name: mysql-bkup command: backup --storage remote -d database
command: backup --storage remote -d database volumes:
volumes: - ./id_ed25519:/tmp/id_ed25519
- ./id_ed25519:/tmp/id_ed25519" environment:
environment: - DB_PORT=3306
- DB_PORT=3306 - DB_HOST=mysql
- DB_HOST=mysql - DB_NAME=database
#- DB_NAME=database - DB_USERNAME=username
- DB_USERNAME=username - DB_PASSWORD=password
- DB_PASSWORD=password ## SSH Configuration
## SSH config - SSH_HOST="hostname"
- SSH_HOST="hostname" - SSH_PORT=22
- SSH_PORT=22 - SSH_USER=user
- SSH_USER=user - REMOTE_PATH=/home/jkaninda/backups
- REMOTE_PATH=/home/jkaninda/backups - SSH_IDENTIFY_FILE=/tmp/id_ed25519
- SSH_IDENTIFY_FILE=/tmp/id_ed25519 ## Optional: Use password instead of private key (not recommended)
## We advise you to use a private jey instead of password #- SSH_PASSWORD=password
#- SSH_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks: networks:
web: web:
``` ```
---
### Recurring backups to SSH remote server ## Recurring Backups to SSH Remote Server
As explained above, you need just to add required environment variables and specify the storage type `--storage ssh`. To schedule recurring backups, you can use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable.
You can use `--cron-expression "* * * * *"` or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below. This allows you to define a cron schedule for automated backups.
```yml ### Example: Recurring Backup Configuration
```yaml
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper # In production, lock your image tag to a specific release version
# release version instead of using `latest`. # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# Check https://github.com/jkaninda/mysql-bkup/releases # for available releases.
# for a list of available releases.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: backup -d database --storage ssh --cron-expression "0 1 * * *" command: backup -d database --storage ssh --cron-expression "@daily"
volumes: volumes:
- ./id_ed25519:/tmp/id_ed25519" - ./id_ed25519:/tmp/id_ed25519
environment: environment:
- DB_PORT=3306 - DB_PORT=3306
- DB_HOST=mysql - DB_HOST=postgres
- DB_NAME=database - DB_NAME=database
- DB_USERNAME=username - DB_USERNAME=username
- DB_PASSWORD=password - DB_PASSWORD=password
## SSH config ## SSH Configuration
- SSH_HOST="hostname" - SSH_HOST="hostname"
- SSH_PORT=22 - SSH_PORT=22
- SSH_USER=user - SSH_USER=user
- REMOTE_PATH=/home/jkaninda/backups - REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519 - SSH_IDENTIFY_FILE=/tmp/id_ed25519
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional ## Optional: Delete old backups after a specified number of days
#Delete old backup created more than specified days ago
#- BACKUP_RETENTION_DAYS=7 #- BACKUP_RETENTION_DAYS=7
## We advise you to use a private jey instead of password ## Optional: Use password instead of private key (not recommended)
#- SSH_PASSWORD=password #- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
# Ensure the mysql-bkup container is connected to the same network as your database
networks: networks:
- web - web
networks: networks:
web: web:
``` ```
---
## Key Notes
- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
- **Security**: Always prefer private key authentication (`SSH_IDENTIFY_FILE`) over password-based authentication (`SSH_PASSWORD`) for enhanced security.
---

View File

@@ -5,26 +5,35 @@ parent: How Tos
nav_order: 1 nav_order: 1
--- ---
# Backup database # Backup Database
To backup the database, you need to add `backup` command. To back up your database, use the `backup` command.
This section explains how to configure and run backups, including recurring backups, using Docker or Kubernetes.
---
## Default Configuration
- **Storage**: By default, backups are stored locally in the `/backup` directory.
- **Compression**: Backups are compressed using `gzip` by default. Use the `--disable-compression` flag to disable compression.
- **Security**: It is recommended to create a dedicated user with read-only access for backup tasks.
{: .note } {: .note }
The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression. The backup process supports recurring backups on Docker or Docker Swarm. On Kubernetes, it can be deployed as a CronJob.
{: .warning } ---
Creating a user for backup tasks who has read-only access is recommended!
The backup process can be run in scheduled mode for the recurring backups. ## Example: Basic Backup Configuration
It handles __recurring__ backups of mysql database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
```yml Below is an example `docker-compose.yml` configuration for backing up a database:
```yaml
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper # In production, lock your image tag to a specific release version
# release version instead of using `latest`. # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# Check https://github.com/jkaninda/mysql-bkup/releases # for available releases.
# for a list of available releases.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: backup -d database command: backup -d database
@@ -36,36 +45,47 @@ services:
- DB_NAME=database - DB_NAME=database
- DB_USERNAME=username - DB_USERNAME=username
- DB_PASSWORD=password - DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
# Ensure the mysql-bkup container is connected to the same network as your database
networks: networks:
- web - web
networks: networks:
web: web:
``` ```
### Backup using Docker CLI ---
```shell ## Backup Using Docker CLI
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \ You can also run backups directly using the Docker CLI:
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \ ```bash
-e "DB_PASSWORD=password" \ docker run --rm --network your_network_name \
jkaninda/mysql-bkup backup -d database_name -v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/pg-bkup backup -d database_name
``` ```
In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below. ---
```yml ## Recurring Backups
To schedule recurring backups, use the `--cron-expression (-e)` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.
### Example: Recurring Backup Configuration
```yaml
services: services:
mysql-bkup: mysql-bkup:
# In production, it is advised to lock your image tag to a proper # In production, lock your image tag to a specific release version
# release version instead of using `latest`. # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# Check https://github.com/jkaninda/mysql-bkup/releases # for available releases.
# for a list of available releases.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: backup -d database --cron-expression "0 1 * * *" command: backup -d database --cron-expression @midnight
volumes: volumes:
- ./backup:/backup - ./backup:/backup
environment: environment:
@@ -74,13 +94,24 @@ services:
- DB_NAME=database - DB_NAME=database
- DB_USERNAME=username - DB_USERNAME=username
- DB_PASSWORD=password - DB_PASSWORD=password
- BACKUP_CRON_EXPRESSION=0 1 * * * ## Optional: Define a cron schedule for recurring backups
#Delete old backup created more than specified days ago - BACKUP_CRON_EXPRESSION=@midnight
## Optional: Delete old backups after a specified number of days
#- BACKUP_RETENTION_DAYS=7 #- BACKUP_RETENTION_DAYS=7
# mysql-bkup container must be connected to the same network with your database
# Ensure the mysql-bkup container is connected to the same network as your database
networks: networks:
- web - web
networks: networks:
web: web:
``` ```
---
## Key Notes
- **Cron Expression**: Use the `--cron-expression (-e)` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example:
- `@midnight`: Runs the backup daily at midnight.
- `0 1 * * *`: Runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.

View File

parent: How Tos
nav_order: 9
---

# Deploy on Kubernetes

To deploy MySQL Backup on Kubernetes, you can use a `Job` for one-time backups or restores, and a `CronJob` for recurring backups.
Below are examples for different use cases.

---

## Backup Job to S3 Storage

This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to S3-compatible storage.

```yaml
apiVersion: batch/v1
kind: Job
# ... (metadata omitted)
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup --storage s3
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: ""
            - name: DB_USERNAME
              value: ""
            # Use Kubernetes Secrets for sensitive data like passwords
            - name: DB_PASSWORD
              value: ""
            - name: AWS_S3_ENDPOINT
              value: "https://s3.amazonaws.com"
            - name: AWS_S3_BUCKET_NAME
              value: "xxx"
            - name: AWS_REGION
              value: "us-west-2"
            - name: AWS_ACCESS_KEY
              value: "xxxx"
            - name: AWS_SECRET_KEY
              value: "xxxx"
            - name: AWS_DISABLE_SSL
              value: "false"
            - name: AWS_FORCE_PATH_STYLE
              value: "false"
      restartPolicy: Never
```
---

## Backup Job to SSH Remote Server

This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an SSH remote server.

```yaml
apiVersion: batch/v1
kind: Job
# ... (metadata omitted)
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup --storage ssh --disable-compression
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Use Kubernetes Secrets for sensitive data like passwords
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "xxx"
            - name: SSH_PORT
              value: "22"
            - name: SSH_USER
              value: "xxx"
            - name: SSH_PASSWORD
              value: "xxxx"
            - name: SSH_REMOTE_PATH
              value: "/home/toto/backup"
            # Optional: Required if you want to encrypt your backup
            - name: GPG_PASSPHRASE
              value: "xxxx"
      restartPolicy: Never
```
---

## Restore Job

This example demonstrates how to configure a Kubernetes `Job` to restore a MySQL database from a backup stored on an SSH remote server.

```yaml
apiVersion: batch/v1
kind: Job
# ... (metadata omitted)
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - restore --storage ssh --file store_20231219_022941.sql.gz
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            # Use Kubernetes Secrets for sensitive data like passwords
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "xxx"
            - name: SSH_PORT
              value: "22"
            - name: SSH_USER
              value: "xxx"
            - name: SSH_PASSWORD
              value: "xxxx"
            - name: SSH_REMOTE_PATH
              value: "/home/toto/backup"
            # Optional: Required if your backup was encrypted
            #- name: GPG_PASSPHRASE
            #  value: "xxxx"
      restartPolicy: Never
```
---

## Recurring Backup with CronJob

This example demonstrates how to configure a Kubernetes `CronJob` for recurring backups to an SSH remote server.

```yaml
apiVersion: batch/v1
kind: CronJob
# ... (metadata, schedule, and jobTemplate omitted)
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup --storage ssh --disable-compression
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "test"
            - name: DB_USERNAME
              value: "username"
            # Use Kubernetes Secrets for sensitive data like passwords
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "192.168.1.16"
            - name: SSH_PORT
              value: "2222"
            - name: SSH_USER
              value: "jkaninda"
            - name: SSH_REMOTE_PATH
              value: "/config/backup"
            - name: SSH_PASSWORD
              value: "password"
            # Optional: Required if you want to encrypt your backup
            #- name: GPG_PASSPHRASE
            #  value: "xxx"
      restartPolicy: Never
```
---

## Kubernetes Rootless Deployment

This example demonstrates how to run the backup container in a rootless environment, suitable for platforms like OpenShift.

```yaml
apiVersion: batch/v1
# ... (kind, metadata, and enclosing spec omitted)
    spec:
      securityContext:
        # ...
        runAsGroup: 3000
        fsGroup: 2000
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - backup --storage ssh --disable-compression
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            - name: DB_PORT
              value: "3306"
            - name: DB_HOST
              value: ""
            - name: DB_NAME
              value: "test"
            - name: DB_USERNAME
              value: "username"
            # Use Kubernetes Secrets for sensitive data like passwords
            - name: DB_PASSWORD
              value: ""
            - name: SSH_HOST_NAME
              value: "192.168.1.16"
            - name: SSH_PORT
              value: "2222"
            - name: SSH_USER
              value: "jkaninda"
            - name: SSH_REMOTE_PATH
              value: "/config/backup"
            - name: SSH_PASSWORD
              value: "password"
            # Optional: Required if you want to encrypt your backup
            #- name: GPG_PASSPHRASE
            #  value: "xxx"
      restartPolicy: OnFailure
```
---

## Migrate Database

This example demonstrates how to configure a Kubernetes `Job` to migrate a MySQL database from one server to another.

```yaml
apiVersion: batch/v1
kind: Job
# ... (metadata omitted)
spec:
  template:
    spec:
      containers:
        - name: mysql-bkup
          # In production, lock your image tag to a specific release version
          # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
          # for available releases.
          image: jkaninda/mysql-bkup
          command:
            - /bin/sh
            - -c
            - migrate
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          env:
            ## Source Database
            - name: DB_HOST
              value: "mysql"
            - name: DB_PORT
              value: "3306"
            - name: DB_NAME
              value: "dbname"
            - name: DB_USERNAME
              value: "username"
            - name: DB_PASSWORD
              value: "password"
            ## Target Database
            - name: TARGET_DB_HOST
              value: "target-mysql"
            - name: TARGET_DB_PORT
              value: "3306"
            - name: TARGET_DB_NAME
              value: "dbname"
            - name: TARGET_DB_USERNAME
              value: "username"
            - name: TARGET_DB_PASSWORD
              value: "password"
      restartPolicy: Never
```
---
## Key Notes
- **Security**: Always use Kubernetes Secrets for sensitive data like passwords and access keys (see the sketch below).
- **Resource Limits**: Adjust resource limits (`memory` and `cpu`) based on your workload requirements.
- **Cron Schedule**: Use standard cron expressions for scheduling recurring backups.
- **Rootless Deployment**: The image supports running in rootless environments, making it suitable for platforms like OpenShift.
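
The Secrets recommendation above can be applied to any of these Jobs. Below is a minimal sketch; the Secret name `mysql-credentials` and its `password` key are hypothetical and must be created first:

```yaml
# Create the Secret beforehand, e.g.:
#   kubectl create secret generic mysql-credentials --from-literal=password='your-password'
env:
  - name: DB_PASSWORD
    valueFrom:
      secretKeyRef:
        name: mysql-credentials # Hypothetical Secret name
        key: password           # Key within the Secret
```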


---
title: Encrypt backups using GPG
layout: default
parent: How Tos
nav_order: 8
---

# Encrypt Backup

The image supports encrypting backups using one of two methods: **GPG with a passphrase** or **GPG with a public key**. When a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted and saved as a `.sql.gpg` or `.sql.gz.gpg` file.

{: .warning }
To restore an encrypted backup, you must provide the same GPG passphrase or private key used during the backup process.

---

## Key Features

- **Cipher Algorithm**: `aes256`
- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored automatically without manual decryption.
- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption before restoration.

---

## Using GPG Passphrase

To encrypt backups using a GPG passphrase, set the `GPG_PASSPHRASE` environment variable. The backup will be encrypted and can be restored automatically.

### Example Configuration

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup -d database
    volumes:
      - ./backup:/backup
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Required to encrypt backup
      - GPG_PASSPHRASE=my-secure-passphrase
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web
networks:
  web:
```

---

## Using GPG Public Key

To encrypt backups using a GPG public key, set the `GPG_PUBLIC_KEY` environment variable to the path of your public key file. Backups encrypted with a public key require manual decryption before restoration.

### Example Configuration

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup -d database
    volumes:
      - ./backup:/backup
      - ./public_key.asc:/config/public_key.asc
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Required to encrypt backup
      - GPG_PUBLIC_KEY=/config/public_key.asc
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web
networks:
  web:
```
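
If you need a `public_key.asc` file to mount, you can export one from an existing GPG key pair. A minimal sketch; the email identifier is a placeholder for your own key:

```bash
# Export an ASCII-armored public key (replace the identifier with your key's email or ID)
gpg --export --armor "you@example.com" > public_key.asc
```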
---
## Manual Decryption
If you encrypted your backup using a GPG public key, you must manually decrypt it before restoration. Use the `gnupg` tool for decryption.
### Decrypt Using a Passphrase
```bash
gpg --batch --passphrase "my-passphrase" \
--output database_20240730_044201.sql.gz \
--decrypt database_20240730_044201.sql.gz.gpg
```
### Decrypt Using a Private Key
```bash
gpg --output database_20240730_044201.sql.gz \
--decrypt database_20240730_044201.sql.gz.gpg
```
---
## Key Notes
- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored directly without manual decryption.
- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption using the corresponding private key.
- **Security**: Always keep your GPG passphrase and private key secure. Use Kubernetes Secrets or other secure methods to manage sensitive data.


parent: How Tos
nav_order: 10
---

# Migrate Database

To migrate a MySQL database from a source to a target database, you can use the `migrate` command. This feature simplifies the process by combining the backup and restore operations into a single step.

{: .note }
The `migrate` command eliminates the need for separate backup and restore operations. It directly transfers data from the source database to the target database.

{: .warning }
The `migrate` operation is **irreversible**. Always back up your target database before performing this action.

---

## Configuration Steps

1. **Source Database**: Provide connection details for the source database.
2. **Target Database**: Provide connection details for the target database.
3. **Run the Migration**: Use the `migrate` command to initiate the migration.

---

## Example: Docker Compose Configuration

Below is an example `docker-compose.yml` configuration for migrating a database:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: migrate
    volumes:
      - ./backup:/backup
    environment:
      ## Source Database
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Target Database
      - TARGET_DB_HOST=target-mysql
      - TARGET_DB_PORT=3306
      - TARGET_DB_NAME=dbname
      - TARGET_DB_USERNAME=username
      - TARGET_DB_PASSWORD=password
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web
networks:
  web:
```

---

## Migrate Database Using Docker CLI

You can also run the migration directly using the Docker CLI.

### Environment Variables

Save your source and target database connection details in an environment file (e.g., `your-env`):

```bash
## Source Database
DB_HOST=mysql
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password
## Target Database
TARGET_DB_HOST=target-mysql
TARGET_DB_PORT=3306
TARGET_DB_NAME=dbname
TARGET_DB_USERNAME=username
TARGET_DB_PASSWORD=password
```

### Run the Migration

```bash
docker run --rm --network your_network_name \
  --env-file your-env \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup migrate
```
---
## Key Notes
- **Irreversible Operation**: The `migrate` command directly transfers data from the source to the target database. Ensure you have a backup of the target database before proceeding (see the sketch below).
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your source and target databases.
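
Since the operation is irreversible, you may want to snapshot the target first using the documented `backup` command. A minimal sketch; the network name and credential values are placeholders:

```bash
# Hypothetical safety backup of the target database before migrating
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=target-mysql" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup -d dbname
```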


---
title: Run multiple database backup schedules in the same container
layout: default
parent: How Tos
nav_order: 11
---

# Multiple Backup Schedules

This tool supports running multiple database backup schedules within the same container.
You can configure these schedules with different settings using a **configuration file**, which allows you to manage backups for multiple databases efficiently.

---

## Configuration File Setup

The configuration file can be mounted into the container at `/config/config.yaml`, `/config/config.yml`, or specified via the `BACKUP_CONFIG_FILE` environment variable.

### Key Features

- **Global Environment Variables**: Use these for databases that share the same configuration.
- **Database-Specific Overrides**: Override global settings for individual databases by specifying them in the configuration file or using the database name as a suffix in the variable name (e.g., `DB_HOST_DATABASE1`); see the sketch at the end of this page.
- **Global Cron Expression**: Define a global `cronExpression` in the configuration file to schedule backups for all databases. If omitted, backups run immediately.
- **Configuration File Path**: Specify the configuration file path using:
  - The `BACKUP_CONFIG_FILE` environment variable.
  - The `--config` or `-c` flag for the backup command.

---

## Configuration File Example

Below is an example configuration file (`config.yaml`) that defines multiple databases and their respective backup settings:

```yaml
# Optional: Define a global cron expression for scheduled backups.
# Example: "@every 20m" (runs every 20 minutes). If omitted, backups run immediately.
cronExpression: ""
# Optional: Set to true to enable rescue mode for backups.
backupRescueMode: false
databases:
  - host: mysql1 # Optional: Overrides DB_HOST or uses DB_HOST_DATABASE1.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_DATABASE1.
    name: database1 # Required: Database name.
    user: database1 # Optional: Overrides DB_USERNAME or uses DB_USERNAME_DATABASE1.
    password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_DATABASE1.
    path: /s3-path/database1 # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).

  - host: mysql2 # Optional: Overrides DB_HOST or uses DB_HOST_LLDAP.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_LLDAP.
    name: lldap # Required: Database name.
    user: lldap # Optional: Overrides DB_USERNAME or uses DB_USERNAME_LLDAP.
    password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_LLDAP.
    path: /s3-path/lldap # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).

  - host: mysql3 # Optional: Overrides DB_HOST or uses DB_HOST_KEYCLOAK.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_KEYCLOAK.
    name: keycloak # Required: Database name.
    user: keycloak # Optional: Overrides DB_USERNAME or uses DB_USERNAME_KEYCLOAK.
    password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_KEYCLOAK.
    path: /s3-path/keycloak # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).

  - host: mysql4 # Optional: Overrides DB_HOST or uses DB_HOST_JOPLIN.
    port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_JOPLIN.
    name: joplin # Required: Database name.
    user: joplin # Optional: Overrides DB_USERNAME or uses DB_USERNAME_JOPLIN.
    password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_JOPLIN.
    path: /s3-path/joplin # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
```

---

## Docker Compose Configuration

To use the configuration file in a Docker Compose setup, mount the file and specify its path using the `BACKUP_CONFIG_FILE` environment variable.

### Example: Docker Compose File

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup # Or pass the file as a flag: backup --config /backup/config.yaml
    volumes:
      - ./backup:/backup # Mount the backup directory
      - ./config.yaml:/backup/config.yaml # Mount the configuration file
    environment:
      ## Specify the path to the configuration file
      - BACKUP_CONFIG_FILE=/backup/config.yaml
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web
networks:
  web:
```
---
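
As a minimal sketch of the suffix convention mentioned above, per-database values can also be supplied as environment variables; the `DATABASE1` suffix below matches the `database1` entry in the example config, and the values are placeholders:

```yaml
    environment:
      - BACKUP_CONFIG_FILE=/backup/config.yaml
      - DB_PASSWORD=shared-password # Global fallback for all databases
      - DB_PASSWORD_DATABASE1=database1-secret # Override for database1 only
```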


---
title: Receive notifications
layout: default
parent: How Tos
nav_order: 13
---

# Receive Notifications

You can configure the system to send email or Telegram notifications when a backup succeeds or fails.
This section explains how to set up and customize notifications.

---

## Email Notifications

To send email notifications, provide SMTP credentials, a sender address, and recipient addresses. Notifications will be sent for both successful and failed backup runs.

### Example: Email Notification Configuration

```yaml
services:
  mysql-bkup:
    # ... (image, command, and database connection settings as in the other examples)
    environment:
      # ...
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## SMTP Configuration
      - MAIL_HOST=smtp.example.com
      - MAIL_PORT=587
      - MAIL_USERNAME=your-email@example.com
      - MAIL_PASSWORD=your-email-password
      - MAIL_FROM=Backup Jobs <backup@example.com>
      ## Multiple recipients separated by a comma
      - MAIL_TO=me@example.com,team@example.com,manager@example.com
      - MAIL_SKIP_TLS=false
      ## Time format for notifications
      - TIME_FORMAT=2006-01-02 at 15:04:05
      ## Backup reference (e.g., database/cluster name or server name)
      - BACKUP_REFERENCE=database/Paris cluster
    networks:
      - web
networks:
  web:
```

---

## Telegram Notifications

To send Telegram notifications, provide your bot token and chat ID. Notifications will be sent for both successful and failed backup runs.

### Example: Telegram Notification Configuration

```yaml
services:
  mysql-bkup:
    # ... (image, command, and database connection settings as in the other examples)
    environment:
      # ...
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Telegram Configuration
      - TG_TOKEN=[BOT ID]:[BOT TOKEN]
      - TG_CHAT_ID=your-chat-id
      ## Time format for notifications
      - TIME_FORMAT=2006-01-02 at 15:04:05
      ## Backup reference (e.g., database/cluster name or server name)
      - BACKUP_REFERENCE=database/Paris cluster
    networks:
      - web
networks:
  web:
```

---

## Customize Notifications

You can customize the title and body of notifications using Go templates. Template files must be mounted inside the container at `/config/templates`. The following templates are supported:

- `email.tmpl`: Template for successful email notifications.
- `telegram.tmpl`: Template for successful Telegram notifications.
- `email-error.tmpl`: Template for failed email notifications.
- `telegram-error.tmpl`: Template for failed Telegram notifications.

### Template Data

The following data is passed to the templates:

- `Database`: Database name.
- `StartTime`: Backup start time.
- `EndTime`: Backup end time.
- `Storage`: Backup storage type (e.g., local, S3, SSH).
- `BackupLocation`: Backup file location.
- `BackupSize`: Backup file size in bytes.
- `BackupReference`: Backup reference (e.g., database/cluster name or server name).
- `Error`: Error message (only for error templates).

---

### Example Templates

#### `email.tmpl` (Successful Backup)

```html
<h2>Hi,</h2>
<!-- ... -->
    <li>Backup Storage: {{.Storage}}</li>
    <li>Backup Location: {{.BackupLocation}}</li>
    <li>Backup Size: {{.BackupSize}} bytes</li>
    <li>Backup Reference: {{.BackupReference}}</li>
</ul>
<p>Best regards,</p>
```

#### `telegram.tmpl` (Successful Backup)

```html
✅ Database Backup Notification {{.Database}}

Hi,
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.

Backup Details:
- Database Name: {{.Database}}
- Backup Start Time: {{.StartTime}}
- Backup End Time: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}} bytes
- Backup Reference: {{.BackupReference}}
```

#### `email-error.tmpl` (Failed Backup)

```html
<!DOCTYPE html>
<!-- ... -->
<p>An error occurred during database backup.</p>
<h3>Failure Details:</h3>
<ul>
    <li>Error Message: {{.Error}}</li>
    <li>Date: {{.EndTime}}</li>
    <li>Backup Reference: {{.BackupReference}}</li>
</ul>
</body>
</html>
```

#### `telegram-error.tmpl` (Failed Backup)

```html
🔴 Urgent: Database Backup Failure Notification

Hi,
An error occurred during database backup.

Failure Details:
Error Message: {{.Error}}
Date: {{.EndTime}}
Backup Reference: {{.BackupReference}}
```
---
## Key Notes
- **SMTP Configuration**: Ensure your SMTP server supports TLS unless `MAIL_SKIP_TLS` is set to `true`.
- **Telegram Configuration**: Obtain your bot token and chat ID from Telegram.
- **Custom Templates**: Mount custom templates to `/config/templates` to override default notifications (see the sketch below).
- **Time Format**: Use the `TIME_FORMAT` environment variable to customize the timestamp format in notifications.
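
A minimal compose sketch of that mount; the local `./templates` directory is a placeholder for wherever you keep your template files:

```yaml
    volumes:
      - ./templates/email.tmpl:/config/templates/email.tmpl
      - ./templates/telegram.tmpl:/config/templates/telegram.tmpl
```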


parent: How Tos
nav_order: 6
---

# Restore Database from S3 Storage

To restore a MySQL database from a backup stored in S3, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:

- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)

---

## Configuration Steps

1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Set the Storage Type**: Add the `--storage s3` flag to indicate that the backup is stored in S3.
3. **Provide S3 Configuration**: Include the necessary AWS S3 credentials and configuration.
4. **Provide Database Credentials**: Ensure the correct database connection details are provided.

---

## Example: Restore from S3 Configuration

Below is an example `docker-compose.yml` configuration for restoring a database from S3 storage:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
    volumes:
      - ./backup:/backup # Mount the directory for local operations (if needed)
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## AWS S3 Configuration
      - AWS_S3_ENDPOINT=https://s3.amazonaws.com
      - AWS_S3_BUCKET_NAME=backup
      - AWS_REGION=us-west-2
      - AWS_ACCESS_KEY=xxxx
      - AWS_SECRET_KEY=xxxxx
      ## Optional: Disable SSL for S3 alternatives like Minio
      - AWS_DISABLE_SSL=false
      ## Optional: Enable path-style access for S3 alternatives like Minio
      - AWS_FORCE_PATH_STYLE=false
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web
networks:
  web:
```
---
## Key Notes
- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **S3 Path**: Use the `--path` flag to specify the folder within the S3 bucket where the backup file is located.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **S3 Alternatives**: For S3-compatible storage like Minio, set `AWS_DISABLE_SSL` and `AWS_FORCE_PATH_STYLE` as needed (see the sketch below).
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
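
For an S3 alternative such as Minio, the endpoint and the two flags above typically change. A minimal sketch of the relevant environment entries; the `http://minio:9000` endpoint is a placeholder:

```yaml
      - AWS_S3_ENDPOINT=http://minio:9000 # Hypothetical local Minio endpoint
      - AWS_DISABLE_SSL=true
      - AWS_FORCE_PATH_STYLE=true
```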


layout: default
parent: How Tos
nav_order: 7
---

# Restore Database from SSH Remote Server

To restore a MySQL database from a backup stored on an SSH remote server, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:

- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)

---

## Configuration Steps

1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Set the Storage Type**: Add the `--storage ssh` flag to indicate that the backup is stored on an SSH remote server.
3. **Provide SSH Configuration**: Include the necessary SSH credentials and configuration.
4. **Provide Database Credentials**: Ensure the correct database connection details are provided.

---

## Example: Restore from SSH Remote Server Configuration

Below is an example `docker-compose.yml` configuration for restoring a database from an SSH remote server:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
    volumes:
      - ./backup:/backup # Mount the directory for local operations (if needed)
      - ./id_ed25519:/tmp/id_ed25519 # Mount the SSH private key file
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## SSH Configuration
      - SSH_HOST_NAME=hostname
      - SSH_PORT=22
      - SSH_USER=user
      - SSH_REMOTE_PATH=/home/jkaninda/backups
      - SSH_IDENTIFY_FILE=/tmp/id_ed25519
      ## Optional: Use password instead of private key (not recommended)
      #- SSH_PASSWORD=password
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web
networks:
  web:
```
---
## Key Notes
- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **SSH Path**: Use the `--path` flag to specify the folder on the SSH remote server where the backup file is located.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **SSH Authentication**: Use a private key (`SSH_IDENTIFY_FILE`) for SSH authentication instead of a password for better security (see the sketch below).
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
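
If you do not have a key pair yet, one way to prepare the `id_ed25519` file mounted above is sketched below; the user and host are placeholders:

```bash
# Generate an ed25519 key pair with an empty passphrase
ssh-keygen -t ed25519 -f ./id_ed25519 -N ""
# Install the public key on the remote server
ssh-copy-id -i ./id_ed25519.pub user@hostname
```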


parent: How Tos
nav_order: 5
---

# Restore Database

To restore a MySQL database, use the `restore` command and specify the backup file to restore with the `--file` flag.

The system supports the following file formats:
- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)

---

## Configuration Steps

1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Provide Database Credentials**: Ensure the correct database connection details are provided.

---

## Example: Restore Configuration

Below is an example `docker-compose.yml` configuration for restoring a database:

```yaml
services:
  mysql-bkup:
    # In production, lock your image tag to a specific release version
    # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
    # for available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: restore -d database -f store_20231219_022941.sql.gz
    volumes:
      - ./backup:/backup # Mount the directory containing the backup file
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
    # Ensure the mysql-bkup container is connected to the same network as your database
    networks:
      - web
networks:
  web:
```
---
## Key Notes
- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.


**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
It supports a variety of storage options and ensures data security through GPG encryption.

---

## Key Features

### Storage Options
- **Local storage**
- **AWS S3** or any S3-compatible object storage
- **FTP**
- **SFTP**
- **SSH-compatible storage**
- **Azure Blob storage**

### Data Security
- Backups can be encrypted using **GPG** to ensure data confidentiality.

### Deployment Flexibility
- Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
- Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
- Supports recurring backups of MySQL databases:
  - On Docker for automated backup schedules.
  - As a **Job** or **CronJob** on Kubernetes.

### Notifications
- Receive real-time updates on backup success or failure via:
  - **Telegram**
  - **Email**

---

## Use Cases

- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
- **Cross-Environment Migration:** Easily migrate MySQL databases across different environments using supported storage options.
- **Secure Backup Management:** Protect your data with GPG encryption.

---

## Get Involved

We welcome contributions! Feel free to give us a ⭐, submit PRs, or open issues on our [GitHub repository](https://github.com/jkaninda/mysql-bkup).
{: .fs-6 .fw-300 }

---

{: .note }
Code and documentation for the `v1` version are available on [this branch][v1-branch].

[v1-branch]: https://github.com/jkaninda/mysql-bkup

---

## Available Image Registries

The Docker image is published to both **Docker Hub** and the **GitHub Container Registry**. You can use either of the following:

```bash
docker pull jkaninda/mysql-bkup
docker pull ghcr.io/jkaninda/mysql-bkup
```

While the documentation references Docker Hub, all examples work seamlessly with `ghcr.io`.

---

## References

We created this image as a simpler and more lightweight alternative to existing solutions. Here's why:

- **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
- **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
- **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
- **Kubernetes Support:** Designed to work seamlessly with Kubernetes.

---
title: Quickstart
layout: home
nav_order: 2
---
# Quickstart
This guide provides quick examples for running backups using Docker CLI, Docker Compose, and Kubernetes.
---
## Simple Backup Using Docker CLI
To run a one-time backup, bind your local volume to `/backup` in the container and execute the `backup` command:
```bash
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
### Using an Environment File
Alternatively, you can use an `--env-file` to pass a full configuration:
```bash
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
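
A sketch of what `your-env-file` might contain, using the variables from the configuration reference; all values are placeholders:

```bash
DB_HOST=dbhost
DB_PORT=3306
DB_NAME=database_name
DB_USERNAME=username
DB_PASSWORD=password
```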
### Simple Restore Using Docker CLI
To restore a database, bind your local volume to `/backup` in the container and run the `restore` command:

```bash
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup restore -d database_name -f backup_file.sql.gz
```
---
## Simple Backup Using Docker Compose
Below is an example `docker-compose.yml` configuration for running a backup:
```yaml
services:
mysql-bkup:
# In production, lock the image tag to a specific release version.
# Check https://github.com/jkaninda/mysql-bkup/releases for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
- TZ=Europe/Paris
# Ensure the mysql-bkup container is connected to the same network as your database.
networks:
- web
networks:
web:
```
---
## Recurring Backup with Docker
To schedule recurring backups, use the `--cron-expression` flag:
```bash
docker run --rm --network network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m"
```
For predefined schedules, refer to the [documentation](https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules).
---
## Backup Using Kubernetes
Below is an example Kubernetes `Job` configuration for running a backup:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, lock the image tag to a specific release version.
# Check https://github.com/jkaninda/mysql-bkup/releases for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "postgres"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # Directory location on the host
type: Directory # Optional field
restartPolicy: Never
```
---
## Key Notes
- **Volume Binding**: Ensure the `/backup` directory is mounted to persist backup files.
- **Environment Variables**: Use environment variables or an `--env-file` to pass database credentials and other configurations.
- **Cron Expressions**: Use standard cron expressions or predefined schedules for recurring backups.
- **Kubernetes Jobs**: Use Kubernetes `Job` or `CronJob` for running backups in a Kubernetes cluster (a `CronJob` sketch follows below).
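
A minimal sketch of the `CronJob` variant of the Job above; the name and schedule are placeholders to adjust for your cluster:

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: mysql-backup-cron # Hypothetical name
spec:
  schedule: "0 1 * * *" # Placeholder schedule: daily at 1:00 AM
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: mysql-bkup
              image: jkaninda/mysql-bkup
              command:
                - /bin/sh
                - -c
                - backup -d dbname
              env:
                - name: DB_HOST
                  value: "mysql"
                - name: DB_USERNAME
                  value: "username"
                - name: DB_PASSWORD
                  value: "password"
          restartPolicy: Never
```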


---
title: Configuration Reference
layout: default
nav_order: 3
---

# Configuration Reference

MySQL backup, restore, and migration processes can be configured using **environment variables** or **CLI flags**.

## CLI Utility Usage

The `mysql-bkup` CLI provides commands and options to manage MySQL backups efficiently.

| Option                  | Short Flag | Description                                                                              |
|-------------------------|------------|------------------------------------------------------------------------------------------|
| `mysql-bkup`            | `bkup`     | CLI tool for managing MySQL backups, restoration, and migration.                          |
| `backup`                |            | Executes a backup operation.                                                               |
| `restore`               |            | Restores a database from a backup file.                                                    |
| `migrate`               |            | Migrates a database from one instance to another.                                          |
| `--storage`             | `-s`       | Specifies the storage type (`local`, `s3`, `ssh`, etc.). Default: `local`.                 |
| `--file`                | `-f`       | Defines the backup file name for restoration.                                              |
| `--path`                |            | Sets the storage path (e.g., `/custom_path` for S3 or `/home/foo/backup` for SSH).         |
| `--config`              | `-c`       | Provides a configuration file for multi-database backups (e.g., `/backup/config.yaml`).    |
| `--dbname`              | `-d`       | Specifies the database name to back up or restore.                                         |
| `--port`                | `-p`       | Defines the database port. Default: `3306`.                                                |
| `--disable-compression` |            | Disables compression for database backups.                                                 |
| `--cron-expression`     | `-e`       | Schedules backups using a cron expression (e.g., `0 0 * * *` or `@daily`).                 |
| `--all-databases`       | `-a`       | Backs up all databases separately (e.g., `backup --all-databases`).                        |
| `--all-in-one`          | `-A`       | Backs up all databases in a single file (e.g., `backup --all-in-one`).                     |
| `--custom-name`         |            | Sets a custom backup name for a one-time backup.                                           |
| `--help`                | `-h`       | Displays the help message and exits.                                                       |
| `--version`             | `-V`       | Shows version information and exits.                                                       |
## Environment variables
| Name | Requirement | Description |
|------------------------|---------------------------------------------------------------|-----------------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| AWS_FORCE_PATH_STYLE | Optional, required for S3 storage | Force path style |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| GPG_PUBLIC_KEY | Optional, required to encrypt backup | GPG public key, used to encrypt backup (/config/public_key.asc) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode |
| BACKUP_RETENTION_DAYS | Optional | Delete old backup created more than specified days ago |
| SSH_HOST | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| REMOTE_PATH | Optional, required for SSH or FTP storage | remote path (/home/toto/backup) |
| FTP_HOST | Optional, required for FTP storage | FTP host name |
| FTP_PORT | Optional, required for FTP storage | FTP server port number |
| FTP_USER | Optional, required for FTP storage | FTP user |
| FTP_PASSWORD | Optional, required for FTP storage | FTP user password |
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token (`BOT-ID:BOT-TOKEN`) |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
| TZ | Optional | Time Zone |
--- ---
## Run in Scheduled mode
This image can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources. ## Environment Variables
For Docker, you need to run it in scheduled mode by adding `--cron-expression "* * * * *"` flag or by defining `BACKUP_CRON_EXPRESSION=0 1 * * *` environment variable.
## Syntax of crontab (field description) | Name | Requirement | Description |
|--------------------------------|--------------------------------------|----------------------------------------------------------------------------|
| `DB_PORT` | Optional (default: `3306`) | Database port number. |
| `DB_HOST` | Required | Database host. |
| `DB_NAME` | Optional (if provided via `-d` flag) | Database name. |
| `DB_USERNAME` | Required | Database username. |
| `DB_PASSWORD` | Required | Database password. |
| `DB_SSL_CA` | Optional | Database client CA certificate file |
| `DB_SSL_MODE` | Optional(`0 or 1`) default: `0` | Database client Enable CA validation |
| `AWS_ACCESS_KEY` | Required for S3 storage | AWS S3 Access Key. |
| `AWS_SECRET_KEY` | Required for S3 storage | AWS S3 Secret Key. |
| `AWS_BUCKET_NAME` | Required for S3 storage | AWS S3 Bucket Name. |
| `AWS_REGION` | Required for S3 storage | AWS Region. |
| `AWS_DISABLE_SSL` | Optional | Disable SSL for S3 storage. |
| `AWS_FORCE_PATH_STYLE` | Optional | Force path-style access for S3 storage. |
| `FILE_NAME` | Optional (if provided via `--file`) | File name for restoration (e.g., `.sql`, `.sql.gz`). |
| `GPG_PASSPHRASE` | Optional | GPG passphrase for encrypting/decrypting backups. |
| `GPG_PUBLIC_KEY` | Optional | GPG public key for encrypting backups (e.g., `/config/public_key.asc`). |
| `BACKUP_CRON_EXPRESSION` | Optional (flag `-e`) | Cron expression for scheduled backups. |
| `BACKUP_RETENTION_DAYS` | Optional | Delete backups older than the specified number of days. |
| `BACKUP_CONFIG_FILE` | Optional (flag `-c`) | Configuration file for multi database backup. (e.g: `/backup/config.yaml`) |
| `SSH_HOST` | Required for SSH storage | SSH remote hostname or IP. |
| `SSH_USER` | Required for SSH storage | SSH remote username. |
| `SSH_PASSWORD` | Optional | SSH remote user's password. |
| `SSH_IDENTIFY_FILE` | Optional | SSH remote user's private key. |
| `SSH_PORT` | Optional (default: `22`) | SSH remote server port. |
| `REMOTE_PATH` | Required for SSH/FTP storage | Remote path (e.g., `/home/toto/backup`). |
| `FTP_HOST` | Required for FTP storage | FTP hostname. |
| `FTP_PORT` | Optional (default: `21`) | FTP server port. |
| `FTP_USER` | Required for FTP storage | FTP username. |
| `FTP_PASSWORD` | Required for FTP storage | FTP user password. |
| `TARGET_DB_HOST` | Required for migration | Target database host. |
| `TARGET_DB_PORT` | Optional (default: `5432`) | Target database port. |
| `TARGET_DB_NAME` | Required for migration | Target database name. |
| `TARGET_DB_USERNAME` | Required for migration | Target database username. |
| `TARGET_DB_PASSWORD` | Required for migration | Target database password. |
| `TARGET_DB_URL` | Optional | Target database URL in JDBC URI format. |
| `TG_TOKEN` | Required for Telegram notifications | Telegram token (`BOT-ID:BOT-TOKEN`). |
| `TG_CHAT_ID` | Required for Telegram notifications | Telegram Chat ID. |
| `TZ` | Optional | Time zone for scheduling. |
| `AZURE_STORAGE_CONTAINER_NAME` | Required for Azure Blob Storage | Azure storage container name. |
| `AZURE_STORAGE_ACCOUNT_NAME` | Required for Azure Blob Storage | Azure storage account name. |
| `AZURE_STORAGE_ACCOUNT_KEY` | Required for Azure Blob Storage | Azure storage account key. |
The syntax is: ---
- 1: Minute (0-59) ## Scheduled Backups
- 2: Hours (0-23)
- 3: Day (0-31)
- 4: Month (0-12 [12 == December])
- 5: Day of the week(0-7 [7 or 0 == sunday])
Easy to remember format: ### Running in Scheduled Mode
- **Docker**: Use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable to schedule backups.
- **Kubernetes**: Use a `CronJob` resource for scheduled backups.
### Cron Syntax
The cron syntax consists of five fields:
```conf ```conf
* * * * * command to be executed * * * * * command
``` ```
| Field | Description | Values |
|---------------|------------------------------|----------------|
| Minute | Minute of the hour | `0-59` |
| Hour | Hour of the day | `0-23` |
| Day of Month | Day of the month | `1-31` |
| Month | Month of the year | `1-12` |
| Day of Week | Day of the week (0 = Sunday) | `0-7` |
#### Examples
- **Every 30 minutes**: `*/30 * * * *`
- **Every hour at minute 0**: `0 * * * *`
- **Every day at 1:00 AM**: `0 1 * * *`
### Predefined Schedules
| Entry | Description | Equivalent To |
|----------------------------|--------------------------------------------|---------------|
| `@yearly` (or `@annually`) | Run once a year, midnight, Jan. 1st | `0 0 1 1 *` |
| `@monthly` | Run once a month, midnight, first of month | `0 0 1 * *` |
| `@weekly` | Run once a week, midnight between Sat/Sun | `0 0 * * 0` |
| `@daily` (or `@midnight`) | Run once a day, midnight | `0 0 * * *` |
| `@hourly` | Run once an hour, beginning of hour | `0 * * * *` |
### Intervals
You can also schedule backups at fixed intervals using the format:
```conf ```conf
- - - - -
| | | | |
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
| | | ------- Month (1 - 12)
| | --------- Day of month (1 - 31)
| ----------- Hour (0 - 23)
------------- Minute (0 - 59)
```
> At every 30th minute
```conf
*/30 * * * *
```
> “At minute 0.” every hour
```conf
0 * * * *
```
> “At 01:00.” every day
```conf
0 1 * * *
```
## Predefined schedules
You may use one of several pre-defined schedules in place of a cron expression.
| Entry | Description | Equivalent To |
|------------------------|--------------------------------------------|---------------|
| @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * |
| @monthly | Run once a month, midnight, first of month | 0 0 1 * * |
| @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 |
| @daily (or @midnight) | Run once a day, midnight | 0 0 * * * |
| @hourly | Run once an hour, beginning of hour | 0 * * * * |
### Intervals
You may also schedule backup task at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this:
@every <duration> @every <duration>
where "duration" is a string accepted by time. ```
For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that. - Example: `@every 1h30m10s` runs the backup every 1 hour, 30 minutes, and 10 seconds.

go.mod

@@ -2,14 +2,15 @@ module github.com/jkaninda/mysql-bkup
 go 1.23.2
-require github.com/spf13/pflag v1.0.5 // indirect
+require github.com/spf13/pflag v1.0.6 // indirect
 require (
 	github.com/go-mail/mail v2.3.1+incompatible
 	github.com/jkaninda/encryptor v0.0.0-20241111100652-926393c9437e
 	github.com/jkaninda/go-storage v0.1.3
+	github.com/jkaninda/go-utils v0.1.1
 	github.com/robfig/cron/v3 v3.0.1
-	github.com/spf13/cobra v1.8.1
+	github.com/spf13/cobra v1.9.1
 	gopkg.in/yaml.v3 v3.0.1
 )

go.sum

@@ -22,7 +22,7 @@ github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZ
 github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
 github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
 github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -43,6 +43,8 @@ github.com/jkaninda/encryptor v0.0.0-20241111100652-926393c9437e h1:jtFKZHt/PLGQ
 github.com/jkaninda/encryptor v0.0.0-20241111100652-926393c9437e/go.mod h1:Y1EXpPWQ9PNd7y7E6ez3xgnzZc8fuDWXwX/1/dXNCE4=
 github.com/jkaninda/go-storage v0.1.3 h1:lEpHVgFLKSvjsi/6tAek96Y07za3vxmsXF2/+jiCMZU=
 github.com/jkaninda/go-storage v0.1.3/go.mod h1:zVRnLprBk/9AUz2+za6Y03MgoNYrqKLy3edVtjqMaps=
+github.com/jkaninda/go-utils v0.1.1 h1:PMrtXR9d51YzHo85y9Z6YVL0YyBURbRTPemHVbFDqZg=
+github.com/jkaninda/go-utils v0.1.1/go.mod h1:pf0/U6k4JbxlablM2G4eSTZdQ2LFshfAsCK5Q8qNfGo=
 github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
 github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@@ -66,10 +68,10 @@ github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=

migrations/init.sql (new file)

@@ -0,0 +1,35 @@
-- Create the testdb2, testdb3, and fakedb databases
CREATE DATABASE IF NOT EXISTS testdb2;
CREATE DATABASE IF NOT EXISTS testdb3;
CREATE DATABASE IF NOT EXISTS fakedb;
USE testdb;
-- Create the 'users' table
CREATE TABLE users (
id INT AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(100) NOT NULL,
email VARCHAR(100) NOT NULL UNIQUE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Create the 'orders' table
CREATE TABLE orders (
id INT AUTO_INCREMENT PRIMARY KEY,
user_id INT NOT NULL,
amount DECIMAL(10,2) NOT NULL,
status ENUM('pending', 'completed', 'canceled') NOT NULL DEFAULT 'pending',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
-- Insert fake users
INSERT INTO users (name, email) VALUES
('Alice Smith', 'alice@example.com'),
('Bob Johnson', 'bob@example.com'),
('Charlie Brown', 'charlie@example.com');
-- Insert fake orders
INSERT INTO orders (user_id, amount, status) VALUES
(1, 100.50, 'completed'),
(2, 200.75, 'pending'),
(3, 50.00, 'canceled');


@@ -0,0 +1,13 @@
#cronExpression: "@every 20s"
#backupRescueMode: false
databases:
  - host: 127.0.0.1
    port: 3306
    name: testdb
    user: user
    password: password
  - name: testdb2
    # Database credentials resolved from environment variables:
    #   TESTDB2_DB_USERNAME
    #   TESTDB2_DB_PASSWORD
    #   TESTDB2_DB_HOST


@@ -27,6 +27,7 @@ package pkg
 import (
 	"fmt"
 	"github.com/jkaninda/go-storage/pkg/azure"
+	goutils "github.com/jkaninda/go-utils"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"os"
@@ -35,11 +36,14 @@
 )
 func azureBackup(db *dbConfig, config *BackupConfig) {
-	utils.Info("Backup database to the remote FTP server")
+	utils.Info("Backup database to Azure Blob Storage")
-	startTime = time.Now().Format(utils.TimeFormat())
 	// Backup database
-	BackupDatabase(db, config.backupFileName, disableCompression)
+	err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
+	if err != nil {
+		recoverMode(err, "Error backing up database")
+		return
+	}
 	finalFileName := config.backupFileName
 	if config.encryption {
 		encryptBackup(config)
@@ -83,21 +87,24 @@
 	}
+	utils.Info("Backup name is %s", finalFileName)
+	utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
 	utils.Info("Uploading backup archive to Azure Blob storage ... done ")
+	duration := goutils.FormatDuration(time.Since(startTime), 0)
 	// Send notification
 	utils.NotifySuccess(&utils.NotificationData{
 		File:           finalFileName,
-		BackupSize:     backupSize,
+		BackupSize:     utils.ConvertBytes(uint64(backupSize)),
 		Database:       db.dbName,
 		Storage:        config.storage,
 		BackupLocation: filepath.Join(config.remotePath, finalFileName),
-		StartTime:      startTime,
-		EndTime:        time.Now().Format(utils.TimeFormat()),
+		Duration:       duration,
 	})
 	// Delete temp
 	deleteTemp()
-	utils.Info("Backup completed successfully")
+	utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
 }
 func azureRestore(db *dbConfig, conf *RestoreConfig) {
 	utils.Info("Restore database from Azure Blob storage")


@@ -26,16 +26,19 @@ SOFTWARE.
 package pkg
 import (
+	"bytes"
+	"errors"
 	"fmt"
 	"github.com/jkaninda/encryptor"
 	"github.com/jkaninda/go-storage/pkg/local"
+	goutils "github.com/jkaninda/go-utils"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"github.com/robfig/cron/v3"
 	"github.com/spf13/cobra"
-	"log"
 	"os"
 	"os/exec"
 	"path/filepath"
+	"strings"
 	"time"
 )
@@ -48,7 +51,8 @@ func StartBackup(cmd *cobra.Command) {
 	if err != nil {
 		dbConf = initDbConfig(cmd)
 		if config.cronExpression == "" {
-			BackupTask(dbConf, config)
+			config.allowCustomName = true
+			createBackupTask(dbConf, config)
 		} else {
 			if utils.IsValidCronExpression(config.cronExpression) {
 				scheduledMode(dbConf, config)
@@ -71,14 +75,18 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
 	// Test backup
 	utils.Info("Testing backup configurations...")
-	testDatabaseConnection(db)
+	err := testDatabaseConnection(db)
+	if err != nil {
+		utils.Error("Error connecting to database: %s", db.dbName)
+		utils.Fatal("Error: %s", err)
+	}
 	utils.Info("Testing backup configurations...done")
 	utils.Info("Creating backup job...")
 	// Create a new cron instance
 	c := cron.New()
-	_, err := c.AddFunc(config.cronExpression, func() {
-		BackupTask(db, config)
+	_, err = c.AddFunc(config.cronExpression, func() {
+		createBackupTask(db, config)
 		utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
 	})
@@ -100,27 +108,66 @@ func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
 		if db.Path != "" {
 			bkConfig.remotePath = db.Path
 		}
-		BackupTask(getDatabase(db), bkConfig)
+		createBackupTask(getDatabase(db), bkConfig)
 	}
 }
-// BackupTask backups database
-func BackupTask(db *dbConfig, config *BackupConfig) {
+// createBackupTask backup task
+func createBackupTask(db *dbConfig, config *BackupConfig) {
+	if config.all && !config.allInOne {
+		backupAll(db, config)
+	} else {
+		backupTask(db, config)
+	}
+}
+// backupAll backup all databases
+func backupAll(db *dbConfig, config *BackupConfig) {
+	databases, err := listDatabases(*db)
+	if err != nil {
+		utils.Fatal("Error listing databases: %s", err)
+	}
+	for _, dbName := range databases {
+		if dbName == "information_schema" || dbName == "performance_schema" || dbName == "mysql" || dbName == "sys" || dbName == "innodb" || dbName == "Database" {
+			continue
+		}
+		db.dbName = dbName
+		config.backupFileName = fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
+		backupTask(db, config)
+	}
+}
+// backupTask backup task
+func backupTask(db *dbConfig, config *BackupConfig) {
 	utils.Info("Starting backup task...")
+	startTime = time.Now()
+	prefix := db.dbName
+	if config.all && config.allInOne {
+		prefix = "all_databases"
+	}
 	// Generate file name
-	backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
+	backupFileName := fmt.Sprintf("%s_%s.sql.gz", prefix, time.Now().Format("20060102_150405"))
 	if config.disableCompression {
-		backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
+		backupFileName = fmt.Sprintf("%s_%s.sql", prefix, time.Now().Format("20060102_150405"))
 	}
+	if config.customName != "" && config.allowCustomName && !config.all {
+		backupFileName = fmt.Sprintf("%s.sql.gz", config.customName)
+		if config.disableCompression {
+			backupFileName = fmt.Sprintf("%s.sql", config.customName)
+		}
+	}
 	config.backupFileName = backupFileName
-	switch config.storage {
+	s := strings.ToLower(config.storage)
+	switch s {
 	case "local":
 		localBackup(db, config)
-	case "s3", "S3":
+	case "s3":
 		s3Backup(db, config)
-	case "ssh", "SSH", "remote":
+	case "ssh", "remote", "sftp":
 		sshBackup(db, config)
-	case "ftp", "FTP":
+	case "ftp":
 		ftpBackup(db, config)
 	case "azure":
 		azureBackup(db, config)
@@ -128,8 +175,10 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
 		localBackup(db, config)
 	}
 }
+// startMultiBackup start multi backup
 func startMultiBackup(bkConfig *BackupConfig, configFile string) {
-	utils.Info("Starting backup task...")
+	utils.Info("Starting Multi backup task...")
 	conf, err := readConf(configFile)
 	if err != nil {
 		utils.Fatal("Error reading config file: %s", err)
@@ -145,6 +194,7 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
 	if bkConfig.cronExpression == "" {
 		multiBackupTask(conf.Databases, bkConfig)
 	} else {
+		backupRescueMode = conf.BackupRescueMode
 		// Check if cronExpression is valid
 		if utils.IsValidCronExpression(bkConfig.cronExpression) {
 			utils.Info("Running backup in Scheduled mode")
@@ -155,7 +205,11 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
 		// Test backup
 		utils.Info("Testing backup configurations...")
 		for _, db := range conf.Databases {
-			testDatabaseConnection(getDatabase(db))
+			err = testDatabaseConnection(getDatabase(db))
+			if err != nil {
+				recoverMode(err, fmt.Sprintf("Error connecting to database: %s", db.Name))
+				continue
+			}
 		}
 		utils.Info("Testing backup configurations...done")
 		utils.Info("Creating backup job...")
@@ -185,79 +239,83 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
 }
 // BackupDatabase backup database
-func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
+func BackupDatabase(db *dbConfig, backupFileName string, disableCompression, all, singleFile bool) error {
 	storagePath = os.Getenv("STORAGE_PATH")
 	utils.Info("Starting database backup...")
-	err := os.Setenv("MYSQL_PWD", db.dbPassword)
-	if err != nil {
-		return
-	}
-	testDatabaseConnection(db)
-	// Backup Database database
-	utils.Info("Backing up database...")
-	// Verify is compression is disabled
-	if disableCompression {
-		// Execute mysqldump
-		cmd := exec.Command("mysqldump",
-			"-h", db.dbHost,
-			"-P", db.dbPort,
-			"-u", db.dbUserName,
-			db.dbName,
-		)
-		output, err := cmd.Output()
-		if err != nil {
-			utils.Fatal(err.Error())
-		}
-		// save output
-		file, err := os.Create(filepath.Join(tmpPath, backupFileName))
-		if err != nil {
-			utils.Fatal(err.Error())
-		}
-		defer func(file *os.File) {
-			err := file.Close()
-			if err != nil {
-				utils.Fatal(err.Error())
-			}
-		}(file)
-		_, err = file.Write(output)
-		if err != nil {
-			utils.Fatal(err.Error())
-		}
-		utils.Info("Database has been backed up")
-	} else {
-		// Execute mysqldump
-		cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName)
-		stdout, err := cmd.StdoutPipe()
-		if err != nil {
-			log.Fatal(err)
-		}
-		gzipCmd := exec.Command("gzip")
-		gzipCmd.Stdin = stdout
-		gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
-		err = gzipCmd.Start()
-		if err != nil {
-			return
-		}
-		if err := cmd.Run(); err != nil {
-			log.Fatal(err)
-		}
-		if err := gzipCmd.Wait(); err != nil {
-			log.Fatal(err)
-		}
-		utils.Info("Database has been backed up")
-	}
+	if err := testDatabaseConnection(db); err != nil {
+		return fmt.Errorf("database connection failed: %w", err)
+	}
+	dumpArgs := []string{fmt.Sprintf("--defaults-file=%s", mysqlClientConfig)}
+	if all && singleFile {
+		dumpArgs = append(dumpArgs, "--all-databases", "--single-transaction", "--routines", "--triggers")
+	} else {
+		dumpArgs = append(dumpArgs, db.dbName)
+	}
+	backupPath := filepath.Join(tmpPath, backupFileName)
+	if disableCompression {
+		return runCommandAndSaveOutput("mysqldump", dumpArgs, backupPath)
+	}
+	return runCommandWithCompression("mysqldump", dumpArgs, backupPath)
 }
+// runCommandAndSaveOutput runs a command and saves the output to a file
+func runCommandAndSaveOutput(command string, args []string, outputPath string) error {
+	cmd := exec.Command(command, args...)
+	output, err := cmd.Output()
+	if err != nil {
+		return fmt.Errorf("failed to execute %s: %v, output: %s", command, err, string(output))
+	}
+	return os.WriteFile(outputPath, output, 0644)
+}
+// runCommandWithCompression runs a command and compresses the output
+func runCommandWithCompression(command string, args []string, outputPath string) error {
+	cmd := exec.Command(command, args...)
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return fmt.Errorf("failed to create stdout pipe: %w", err)
+	}
+	gzipCmd := exec.Command("gzip")
+	gzipCmd.Stdin = stdout
+	gzipFile, err := os.Create(outputPath)
+	if err != nil {
+		return fmt.Errorf("failed to create gzip file: %w", err)
+	}
+	defer func(gzipFile *os.File) {
+		err := gzipFile.Close()
+		if err != nil {
+			utils.Error("Error closing gzip file: %v", err)
+		}
+	}(gzipFile)
+	gzipCmd.Stdout = gzipFile
+	if err := gzipCmd.Start(); err != nil {
+		return fmt.Errorf("failed to start gzip: %w", err)
+	}
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("failed to execute %s: %w", command, err)
+	}
+	if err := gzipCmd.Wait(); err != nil {
+		return fmt.Errorf("failed to wait for gzip completion: %w", err)
+	}
+	utils.Info("Database has been backed up")
+	return nil
+}
+// localBackup backup database to local storage
 func localBackup(db *dbConfig, config *BackupConfig) {
 	utils.Info("Backup database to local storage")
-	startTime = time.Now().Format(utils.TimeFormat())
-	BackupDatabase(db, config.backupFileName, disableCompression)
+	err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
+	if err != nil {
+		recoverMode(err, "Error backing up database")
+		return
+	}
 	finalFileName := config.backupFileName
 	if config.encryption {
 		encryptBackup(config)
@@ -268,7 +326,6 @@ func localBackup(db *dbConfig, config *BackupConfig) {
 		utils.Error("Error: %s", err)
 	}
 	backupSize = fileInfo.Size()
-	utils.Info("Backup name is %s", finalFileName)
 	localStorage := local.NewStorage(local.Config{
 		LocalPath:  tmpPath,
 		RemotePath: storagePath,
@@ -277,16 +334,19 @@
 	if err != nil {
 		utils.Fatal("Error copying backup file: %s", err)
 	}
+	utils.Info("Backup name is %s", finalFileName)
+	utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
 	utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
+	duration := goutils.FormatDuration(time.Since(startTime), 0)
 	// Send notification
 	utils.NotifySuccess(&utils.NotificationData{
 		File:           finalFileName,
-		BackupSize:     backupSize,
+		BackupSize:     utils.ConvertBytes(uint64(backupSize)),
 		Database:       db.dbName,
 		Storage:        config.storage,
 		BackupLocation: filepath.Join(storagePath, finalFileName),
-		StartTime:      startTime,
-		EndTime:        time.Now().Format(utils.TimeFormat()),
+		Duration:       duration,
 	})
 	// Delete old backup
 	if config.prune {
@@ -298,9 +358,10 @@ func localBackup(db *dbConfig, config *BackupConfig) {
 	}
 	// Delete temp
 	deleteTemp()
-	utils.Info("Backup completed successfully")
+	utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
 }
+// encryptBackup encrypt backup
 func encryptBackup(config *BackupConfig) {
 	backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
 	outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
@@ -330,3 +391,43 @@ func encryptBackup(config *BackupConfig) {
 	}
 }
+// listDatabases list all databases
+func listDatabases(db dbConfig) ([]string, error) {
+	databases := []string{}
+	// Create the mysql client config file
+	if err := createMysqlClientConfigFile(db); err != nil {
+		return databases, errors.New(err.Error())
+	}
+	utils.Info("Listing databases...")
+	// Step 1: List all databases
+	cmd := exec.Command("mariadb", fmt.Sprintf("--defaults-file=%s", mysqlClientConfig), "-e", "SHOW DATABASES;")
+	var out bytes.Buffer
+	cmd.Stdout = &out
+	err := cmd.Run()
+	if err != nil {
+		return databases, fmt.Errorf("failed to list databases: %s", err)
+	}
+	// Step 2: Parse the output
+	for _, _db := range strings.Split(out.String(), "\n") {
+		if _db != "" {
+			databases = append(databases, _db)
+		}
+	}
+	return databases, nil
+}
+func recoverMode(err error, msg string) {
+	if err != nil {
+		if backupRescueMode {
+			utils.NotifyError(fmt.Sprintf("%s : %v", msg, err))
+			utils.Error("Error: %s", msg)
+			utils.Error("Backup rescue mode is enabled")
+			utils.Error("Backup will continue")
+		} else {
+			utils.Error("Error: %s", msg)
+			utils.Fatal("Error: %v", err)
+			return
+		}
+	}
}


@@ -30,6 +30,7 @@ import (
 	"github.com/spf13/cobra"
 	"os"
 	"strconv"
+	"strings"
 )
 type Database struct {
@@ -41,8 +42,9 @@ type Database struct {
 	Path string `yaml:"path"`
 }
 type Config struct {
-	Databases      []Database `yaml:"databases"`
-	CronExpression string     `yaml:"cronExpression"`
+	CronExpression   string     `yaml:"cronExpression"`
+	BackupRescueMode bool       `yaml:"backupRescueMode"`
+	Databases        []Database `yaml:"databases"`
 }
 type dbConfig struct {
@@ -75,6 +77,10 @@ type BackupConfig struct {
 	publicKey      string
 	storage        string
 	cronExpression string
+	all             bool
+	allInOne        bool
+	customName      string
+	allowCustomName bool
 }
 type FTPConfig struct {
 	host string
@@ -113,7 +119,7 @@ func initDbConfig(cmd *cobra.Command) *dbConfig {
 	utils.GetEnv(cmd, "dbname", "DB_NAME")
 	dConf := dbConfig{}
 	dConf.dbHost = os.Getenv("DB_HOST")
-	dConf.dbPort = os.Getenv("DB_PORT")
+	dConf.dbPort = utils.EnvWithDefault("DB_PORT", "3306")
 	dConf.dbName = os.Getenv("DB_NAME")
 	dConf.dbUserName = os.Getenv("DB_USERNAME")
 	dConf.dbPassword = os.Getenv("DB_PASSWORD")
@@ -127,6 +133,11 @@
 }
 func getDatabase(database Database) *dbConfig {
+	// Set default values from environment variables if not provided
+	database.User = getEnvOrDefault(database.User, "DB_USERNAME", database.Name, "")
+	database.Password = getEnvOrDefault(database.Password, "DB_PASSWORD", database.Name, "")
+	database.Host = getEnvOrDefault(database.Host, "DB_HOST", database.Name, "")
+	database.Port = getEnvOrDefault(database.Port, "DB_PORT", database.Name, "3306")
 	return &dbConfig{
 		dbHost: database.Host,
 		dbPort: database.Port,
@@ -136,6 +147,31 @@ func getDatabase(database Database) *dbConfig {
 	}
 }
+// Helper function to get environment variable or use a default value
+func getEnvOrDefault(currentValue, envKey, suffix, defaultValue string) string {
+	// Return the current value if it's already set
+	if currentValue != "" {
+		return currentValue
+	}
+	// Check for suffixed or prefixed environment variables if a suffix is provided
+	if suffix != "" {
+		suffixUpper := strings.ToUpper(suffix)
+		envSuffix := os.Getenv(fmt.Sprintf("%s_%s", envKey, suffixUpper))
+		if envSuffix != "" {
+			return envSuffix
+		}
+		envPrefix := os.Getenv(fmt.Sprintf("%s_%s", suffixUpper, envKey))
+		if envPrefix != "" {
+			return envPrefix
+		}
+	}
+	// Fall back to the default value using a helper function
+	return utils.EnvWithDefault(envKey, defaultValue)
+}
 // loadSSHConfig loads the SSH configuration from environment variables
 func loadSSHConfig() (*SSHConfig, error) {
 	utils.GetEnvVariable("SSH_HOST", "SSH_HOST_NAME")
@@ -214,15 +250,23 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
 	utils.SetEnv("STORAGE_PATH", storagePath)
 	utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
 	utils.GetEnv(cmd, "path", "REMOTE_PATH")
+	utils.GetEnv(cmd, "config", "BACKUP_CONFIG_FILE")
 	// Get flag value and set env
 	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	prune := false
+	configFile := os.Getenv("BACKUP_CONFIG_FILE")
 	backupRetention := utils.GetIntEnv("BACKUP_RETENTION_DAYS")
 	if backupRetention > 0 {
 		prune = true
 	}
 	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
+	customName, _ := cmd.Flags().GetString("custom-name")
+	all, _ := cmd.Flags().GetBool("all-databases")
+	allInOne, _ := cmd.Flags().GetBool("all-in-one")
+	if allInOne {
+		all = true
+	}
 	_, _ = cmd.Flags().GetString("mode")
 	passphrase := os.Getenv("GPG_PASSPHRASE")
 	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
@@ -236,6 +280,10 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
 		encryption = true
 		usingKey = false
 	}
+	dbName := os.Getenv("DB_NAME")
+	if dbName == "" && !all && configFile == "" {
+		utils.Fatal("Database name is required, use DB_NAME environment variable or -d flag")
+	}
 	// Initialize backup configs
 	config := BackupConfig{}
 	config.backupRetention = backupRetention
@@ -248,6 +296,9 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
 	config.publicKey = publicKeyFile
 	config.usingKey = usingKey
 	config.cronExpression = cronExpression
+	config.all = all
+	config.allInOne = allInOne
+	config.customName = customName
 	return &config
 }


@@ -26,7 +26,9 @@ package pkg
 import (
 	"bytes"
+	"errors"
 	"fmt"
+	goutils "github.com/jkaninda/go-utils"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"gopkg.in/yaml.v3"
 	"os"
@@ -36,7 +38,7 @@ import (
 )
 func intro() {
-	fmt.Println("Starting MySQL Backup...")
+	fmt.Println("Starting MYSQL-BKUP...")
 	fmt.Printf("Version: %s\n", utils.Version)
 	fmt.Println("Copyright (c) 2024 Jonas Kaninda")
 }
@@ -65,27 +67,30 @@
 	}
 }
 // TestDatabaseConnection tests the database connection
-func testDatabaseConnection(db *dbConfig) {
-	err := os.Setenv("MYSQL_PWD", db.dbPassword)
-	if err != nil {
-		return
-	}
+func testDatabaseConnection(db *dbConfig) error {
+	// Create the mysql client config file
+	if err := createMysqlClientConfigFile(*db); err != nil {
+		return errors.New(err.Error())
+	}
 	utils.Info("Connecting to %s database ...", db.dbName)
 	// Set database name for notification error
 	utils.DatabaseName = db.dbName
-	cmd := exec.Command("mariadb", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
+	// Prepare the command to test the database connection
+	cmd := exec.Command("mariadb", fmt.Sprintf("--defaults-file=%s", mysqlClientConfig), db.dbName, "-e", "quit")
 	// Capture the output
 	var out bytes.Buffer
 	cmd.Stdout = &out
 	cmd.Stderr = &out
-	err = cmd.Run()
-	if err != nil {
-		utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
+	// Run the command
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("failed to connect to database %s: %v, output: %s", db.dbName, err, out.String())
 	}
 	utils.Info("Successfully connected to %s database", db.dbName)
+	return nil
 }
 // checkPubKeyFile checks gpg public key
@@ -155,6 +160,8 @@ func readConf(configFile string) (*Config, error) {
 // checkConfigFile checks config files and returns one config file
 func checkConfigFile(filePath string) (string, error) {
+	// Remove the quotes
+	filePath = strings.Trim(filePath, `"`)
 	// Define possible config file names
 	configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}
@@ -181,3 +188,16 @@ func RemoveLastExtension(filename string) string {
 	}
 	return filename
 }
+// Create mysql client config file
+func createMysqlClientConfigFile(db dbConfig) error {
+	caCertPath := goutils.GetStringEnvWithDefault("DB_SSL_CA", "/etc/ssl/certs/ca-certificates.crt")
+	sslMode := goutils.GetStringEnvWithDefault("DB_SSL_MODE", "0")
+	// Create the mysql client config file
+	mysqlClientConfigFile := filepath.Join(tmpPath, "my.cnf")
+	mysqlCl := fmt.Sprintf("[client]\nhost=%s\nport=%s\nuser=%s\npassword=%s\nssl-ca=%s\nssl=%s\n", db.dbHost, db.dbPort, db.dbUserName, db.dbPassword, caCertPath, sslMode)
+	if err := os.WriteFile(mysqlClientConfigFile, []byte(mysqlCl), 0644); err != nil {
+		return fmt.Errorf("failed to create mysql client config file: %v", err)
+	}
+	return nil
+}


@@ -51,7 +51,10 @@ func StartMigration(cmd *cobra.Command) {
 	conf := &RestoreConfig{}
 	conf.file = backupFileName
 	// Backup source Database
-	BackupDatabase(dbConf, backupFileName, true)
+	err := BackupDatabase(dbConf, backupFileName, true, false, false)
+	if err != nil {
+		utils.Fatal("Error backing up database: %s", err)
+	}
 	// Restore source database into target database
 	utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
 	RestoreDatabase(&newDbConfig, conf)


@@ -28,6 +28,7 @@ import (
 	"fmt"
 	"github.com/jkaninda/go-storage/pkg/ftp"
 	"github.com/jkaninda/go-storage/pkg/ssh"
+	goutils "github.com/jkaninda/go-utils"
 	"github.com/jkaninda/mysql-bkup/utils"
 	"os"
@@ -37,16 +38,18 @@
 func sshBackup(db *dbConfig, config *BackupConfig) {
 	utils.Info("Backup database to Remote server")
-	startTime = time.Now().Format(utils.TimeFormat())
 	// Backup database
-	BackupDatabase(db, config.backupFileName, disableCompression)
+	err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
+	if err != nil {
+		recoverMode(err, "Error backing up database")
+		return
+	}
 	finalFileName := config.backupFileName
 	if config.encryption {
 		encryptBackup(config)
 		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
 	}
 	utils.Info("Uploading backup archive to remote storage ... ")
-	utils.Info("Backup name is %s", finalFileName)
 	sshConfig, err := loadSSHConfig()
 	if err != nil {
 		utils.Fatal("Error loading ssh config: %s", err)
@@ -74,6 +77,8 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
 		utils.Error("Error: %s", err)
 	}
 	backupSize = fileInfo.Size()
+	utils.Info("Backup name is %s", finalFileName)
+	utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
 	utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
 	// Delete backup file from tmp folder
@@ -90,19 +95,20 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
 	}
 	utils.Info("Uploading backup archive to remote storage ... done ")
+	duration := goutils.FormatDuration(time.Since(startTime), 0)
 	// Send notification
 	utils.NotifySuccess(&utils.NotificationData{
 		File:           finalFileName,
-		BackupSize:     backupSize,
+		BackupSize:     utils.ConvertBytes(uint64(backupSize)),
 		Database:       db.dbName,
 		Storage:        config.storage,
 		BackupLocation: filepath.Join(config.remotePath, finalFileName),
-		StartTime:      startTime,
-		EndTime:        time.Now().Format(utils.TimeFormat()),
+		Duration:       duration,
 	})
 	// Delete temp
 	deleteTemp()
-	utils.Info("Backup completed successfully")
+	utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
 }
 func remoteRestore(db *dbConfig, conf *RestoreConfig) {
@@ -152,10 +158,13 @@ func ftpRestore(db *dbConfig, conf *RestoreConfig) {
 }
 func ftpBackup(db *dbConfig, config *BackupConfig) {
 	utils.Info("Backup database to the remote FTP server")
-	startTime = time.Now().Format(utils.TimeFormat())
 	// Backup database
-	BackupDatabase(db, config.backupFileName, disableCompression)
+	err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
+	if err != nil {
+		recoverMode(err, "Error backing up database")
+		return
+	}
 	finalFileName := config.backupFileName
 	if config.encryption {
 		encryptBackup(config)
@@ -199,20 +208,21 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
 		}
 	}
+	utils.Info("Backup name is %s", finalFileName)
+	utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
 	utils.Info("Uploading backup archive to the remote FTP server ... done ")
+	duration := goutils.FormatDuration(time.Since(startTime), 0)
 	// Send notification
 	utils.NotifySuccess(&utils.NotificationData{
 		File:           finalFileName,
-		BackupSize:     backupSize,
+		BackupSize:     utils.ConvertBytes(uint64(backupSize)),
 		Database:       db.dbName,
 		Storage:        config.storage,
 		BackupLocation: filepath.Join(config.remotePath, finalFileName),
-		StartTime:      startTime,
-		EndTime:        time.Now().Format(utils.TimeFormat()),
+		Duration:       duration,
 	})
 	// Delete temp
 	deleteTemp()
-	utils.Info("Backup completed successfully")
+	utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
 }


@@ -25,6 +25,7 @@ SOFTWARE.
package pkg package pkg
import ( import (
"fmt"
"github.com/jkaninda/encryptor" "github.com/jkaninda/encryptor"
"github.com/jkaninda/go-storage/pkg/local" "github.com/jkaninda/go-storage/pkg/local"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
@@ -56,11 +57,17 @@ func StartRestore(cmd *cobra.Command) {
} }
func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) { func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
utils.Info("Restore database from local") utils.Info("Restore database from local")
basePath := filepath.Dir(restoreConf.file)
fileName := filepath.Base(restoreConf.file)
restoreConf.file = fileName
if basePath == "" || basePath == "." {
basePath = storagePath
}
localStorage := local.NewStorage(local.Config{ localStorage := local.NewStorage(local.Config{
RemotePath: storagePath, RemotePath: basePath,
LocalPath: tmpPath, LocalPath: tmpPath,
}) })
err := localStorage.CopyFrom(restoreConf.file) err := localStorage.CopyFrom(fileName)
if err != nil { if err != nil {
utils.Fatal("Error copying backup file: %s", err) utils.Fatal("Error copying backup file: %s", err)
} }
@@ -68,88 +75,79 @@ func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
} }
// RestoreDatabase restore database // RestoreDatabase restores the database from a backup file
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) { func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
if conf.file == "" { if conf.file == "" {
utils.Fatal("Error, file required") utils.Fatal("Error, file required")
} }
extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file)) filePath := filepath.Join(tmpPath, conf.file)
outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file)) rFile, err := os.ReadFile(filePath)
if err != nil { if err != nil {
utils.Fatal("Error reading backup file: %s ", err) utils.Fatal("Error reading backup file: %v", err)
} }
extension := filepath.Ext(filePath)
outputFile := RemoveLastExtension(filePath)
if extension == ".gpg" { if extension == ".gpg" {
decryptBackup(conf, rFile, outputFile)
if conf.usingKey {
utils.Info("Decrypting backup using private key...")
utils.Warn("Backup decryption using a private key is not fully supported")
prKey, err := os.ReadFile(conf.privateKey)
if err != nil {
utils.Fatal("Error reading public key: %s ", err)
}
-    err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
-    if err != nil {
-        utils.Fatal("error during decrypting backup %v", err)
-    }
-    utils.Info("Decrypting backup using private key...done")
-    } else {
-        if conf.passphrase == "" {
-            utils.Error("Error, passphrase or private key required")
-            utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
-        } else {
-            utils.Info("Decrypting backup using passphrase...")
-            // decryptWithGPG file
-            err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
-            if err != nil {
-                utils.Fatal("Error decrypting file %s %v", file, err)
-            }
-            utils.Info("Decrypting backup using passphrase...done")
-            // Update file name
-            conf.file = RemoveLastExtension(file)
-        }
-    }
-}
-if utils.FileExists(filepath.Join(tmpPath, conf.file)) {
-    err := os.Setenv("MYSQL_PWD", db.dbPassword)
-    if err != nil {
-        return
-    }
-    testDatabaseConnection(db)
-    utils.Info("Restoring database...")
-    extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
-    // Restore from compressed file / .sql.gz
-    if extension == ".gz" {
-        str := "zcat " + filepath.Join(tmpPath, conf.file) + " | mariadb -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
-        _, err := exec.Command("sh", "-c", str).Output()
-        if err != nil {
-            utils.Fatal("Error, in restoring the database %v", err)
-        }
-        utils.Info("Restoring database... done")
-        utils.Info("Database has been restored")
-        // Delete temp
-        deleteTemp()
-    } else if extension == ".sql" {
-        // Restore from sql file
-        str := "cat " + filepath.Join(tmpPath, conf.file) + " | mariadb -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
-        _, err := exec.Command("sh", "-c", str).Output()
-        if err != nil {
-            utils.Fatal("Error in restoring the database %v", err)
-        }
-        utils.Info("Restoring database... done")
-        utils.Info("Database has been restored")
-        // Delete temp
-        deleteTemp()
-    } else {
-        utils.Fatal("Unknown file extension %s", extension)
-    }
-} else {
-    utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
-}
-}
+    }
+    restorationFile := filepath.Join(tmpPath, conf.file)
+    if !utils.FileExists(restorationFile) {
+        utils.Fatal("File not found: %s", restorationFile)
+    }
+    if err := testDatabaseConnection(db); err != nil {
+        utils.Fatal("Error connecting to the database: %v", err)
+    }
+    utils.Info("Restoring database...")
+    restoreDatabaseFile(db, restorationFile)
+}
+func decryptBackup(conf *RestoreConfig, rFile []byte, outputFile string) {
+    if conf.usingKey {
+        utils.Info("Decrypting backup using private key...")
+        prKey, err := os.ReadFile(conf.privateKey)
+        if err != nil {
+            utils.Fatal("Error reading private key: %v", err)
+        }
+        if err := encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase); err != nil {
+            utils.Fatal("Error decrypting backup: %v", err)
+        }
+    } else {
+        if conf.passphrase == "" {
+            utils.Fatal("Passphrase or private key required for GPG file.")
+        }
+        utils.Info("Decrypting backup using passphrase...")
+        if err := encryptor.Decrypt(rFile, outputFile, conf.passphrase); err != nil {
+            utils.Fatal("Error decrypting file: %v", err)
+        }
+        conf.file = RemoveLastExtension(conf.file)
+    }
+}
+func restoreDatabaseFile(db *dbConfig, restorationFile string) {
+    extension := filepath.Ext(restorationFile)
+    var cmdStr string
+    switch extension {
+    case ".gz":
+        cmdStr = fmt.Sprintf("zcat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName)
+    case ".sql":
+        cmdStr = fmt.Sprintf("cat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName)
+    default:
+        utils.Fatal("Unknown file extension: %s", extension)
+    }
+    cmd := exec.Command("sh", "-c", cmdStr)
+    output, err := cmd.CombinedOutput()
+    if err != nil {
+        utils.Fatal("Error restoring database: %v\nOutput: %s", err, string(output))
+    }
+    utils.Info("Database has been restored successfully.")
+    deleteTemp()
+}
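Note on the refactor above: the old path exported the password via the MYSQL_PWD environment variable and inlined the shell pipelines, while the new restoreDatabaseFile authenticates through --defaults-file (mysqlClientConfig, defined as /tmp/backup/my.cnf in the vars hunk further below), so a .sql.gz restore reduces to: zcat /tmp/backup/<file>.sql.gz | mariadb --defaults-file=/tmp/backup/my.cnf <dbName>, keeping the password out of both the environment and the command line. The code that writes my.cnf is not part of this compare view; a minimal sketch of what such a helper could look like, assuming the pkg package context (dbConfig, mysqlClientConfig) and a hypothetical helper name:

// writeMysqlClientConfig is a hypothetical name; the real writer is not shown
// in this diff. Assumes package pkg with "fmt" and "os" imported.
func writeMysqlClientConfig(db *dbConfig) error {
    // Render a [client] section so mariadb can authenticate via
    // --defaults-file instead of MYSQL_PWD or a -p argument.
    content := fmt.Sprintf(
        "[client]\nhost=%s\nport=%s\nuser=%s\npassword=%s\n",
        db.dbHost, db.dbPort, db.dbUserName, db.dbPassword,
    )
    // 0600 keeps the credential file readable only by the process owner.
    return os.WriteFile(mysqlClientConfig, []byte(content), 0o600)
}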

View File

@@ -27,6 +27,7 @@ package pkg
 import (
     "fmt"
     "github.com/jkaninda/go-storage/pkg/s3"
+    goutils "github.com/jkaninda/go-utils"
     "github.com/jkaninda/mysql-bkup/utils"
     "os"
@@ -37,9 +38,12 @@ import (
 func s3Backup(db *dbConfig, config *BackupConfig) {
     utils.Info("Backup database to s3 storage")
-    startTime = time.Now().Format(utils.TimeFormat())
     // Backup database
-    BackupDatabase(db, config.backupFileName, disableCompression)
+    err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
+    if err != nil {
+        recoverMode(err, "Error backing up database")
+        return
+    }
     finalFileName := config.backupFileName
     if config.encryption {
         encryptBackup(config)
@@ -91,19 +95,19 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
     }
     utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
     utils.Info("Uploading backup archive to remote storage S3 ... done ")
+    duration := goutils.FormatDuration(time.Since(startTime), 0)
     // Send notification
     utils.NotifySuccess(&utils.NotificationData{
         File:           finalFileName,
-        BackupSize:     backupSize,
+        BackupSize:     utils.ConvertBytes(uint64(backupSize)),
         Database:       db.dbName,
         Storage:        config.storage,
         BackupLocation: filepath.Join(config.remotePath, finalFileName),
-        StartTime:      startTime,
-        EndTime:        time.Now().Format(utils.TimeFormat()),
+        Duration:       duration,
     })
     // Delete temp
     deleteTemp()
-    utils.Info("Backup completed successfully")
+    utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
 }
 func s3Restore(db *dbConfig, conf *RestoreConfig) {
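recoverMode is called here but its definition is not included in this compare view; judging from the call site and the backupRescueMode flag added in the vars hunk below, it presumably reports the failure instead of aborting a scheduled run. A speculative sketch only, not the repository's actual implementation:

// Speculative: the real recoverMode body is not shown in this diff.
// Assumes utils.Error accepts a format string like utils.Info/utils.Fatal;
// utils.NotifyError(string) is confirmed by the notification hunk below.
func recoverMode(err error, msg string) {
    if err == nil {
        return
    }
    backupRescueMode = true // flag introduced in this changeset
    utils.Error("%s: %v", msg, err)
    utils.NotifyError(fmt.Sprintf("%s: %v", msg, err))
}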

View File

@@ -24,6 +24,11 @@ SOFTWARE.
 package pkg
+import (
+    "path/filepath"
+    "time"
+)
 const tmpPath = "/tmp/backup"
 const gpgHome = "/config/gnupg"
 const gpgExtension = "gpg"
@@ -39,20 +44,19 @@ var (
     encryption        = false
     usingKey          = false
     backupSize  int64 = 0
-    startTime string
+    startTime         = time.Now()
+    backupRescueMode  = false
+    mysqlClientConfig = filepath.Join(tmpPath, "my.cnf")
 )
 // dbHVars Required environment variables for database
 var dbHVars = []string{
     "DB_HOST",
-    "DB_PORT",
     "DB_PASSWORD",
     "DB_USERNAME",
-    "DB_NAME",
 }
 var tdbRVars = []string{
     "TARGET_DB_HOST",
-    "TARGET_DB_PORT",
     "TARGET_DB_NAME",
     "TARGET_DB_USERNAME",
     "TARGET_DB_PASSWORD",
@@ -61,13 +65,6 @@ var tdbRVars = []string{
 var dbConf *dbConfig
 var targetDbConf *targetDbConfig
-// sshVars Required environment variables for SSH remote server storage
-var sshVars = []string{
-    "SSH_USER",
-    "SSH_HOST_NAME",
-    "SSH_PORT",
-    "REMOTE_PATH",
-}
 var ftpVars = []string{
     "FTP_HOST_NAME",
     "FTP_USER",

View File

@@ -45,25 +45,25 @@
 </head>
 <body>
 <h2>🔴 Urgent: Database Backup Failure Notification</h2>
-<p>Dear Team,</p>
+<p>Hi,</p>
 <p>An error occurred during the database backup process. Please review the details below and take the necessary actions:</p>
 <div class="details">
     <h3>Failure Details:</h3>
     <ul>
         <li><strong>Database Name:</strong> {{.DatabaseName}}</li>
+        <li><strong>Error Message:</strong> {{.Error}}</li>
         <li><strong>Date:</strong> {{.EndTime}}</li>
         <li><strong>Backup Reference:</strong> {{.BackupReference}}</li>
-        <li><strong>Error Message:</strong> {{.Error}}</li>
     </ul>
 </div>
 <p>We recommend investigating the issue as soon as possible to prevent potential data loss or service disruptions.</p>
-<p>For more information, visit the <a href="https://jkaninda.github.io/pg-bkup">pg-bkup documentation</a>.</p>
+<p>For more information, visit the <a href="https://jkaninda.github.io/mysql-bkup">mysql-bkup documentation</a>.</p>
 <footer>
-    &copy; 2024 <a href="https://github.com/jkaninda/pg-bkup">pg-bkup</a> | Automated Backup System
+    &copy; 2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a> | Automated Backup System
 </footer>
 </body>
 </html>

View File

@@ -45,18 +45,17 @@
 </head>
 <body>
 <h2>✅ Database Backup Successful</h2>
-<p>Dear Team,</p>
+<p>Hi,</p>
 <p>The backup process for the <strong>{{.Database}}</strong> database was successfully completed. Please find the details below:</p>
 <div class="details">
     <h3>Backup Details:</h3>
     <ul>
         <li><strong>Database Name:</strong> {{.Database}}</li>
-        <li><strong>Backup Start Time:</strong> {{.StartTime}}</li>
-        <li><strong>Backup End Time:</strong> {{.EndTime}}</li>
+        <li><strong>Backup Duration:</strong> {{.Duration}}</li>
         <li><strong>Backup Storage:</strong> {{.Storage}}</li>
         <li><strong>Backup Location:</strong> {{.BackupLocation}}</li>
-        <li><strong>Backup Size:</strong> {{.BackupSize}} bytes</li>
+        <li><strong>Backup Size:</strong> {{.BackupSize}}</li>
         <li><strong>Backup Reference:</strong> {{.BackupReference}}</li>
     </ul>
 </div>

View File

@@ -1,6 +1,6 @@
 🔴 Urgent: Database Backup Failure Notification
-Dear Team,
+Hi,
 An error occurred during the database backup process.
 Please review the details below and take the necessary actions:
 Failure Details:

View File

@@ -1,16 +1,15 @@
 ✅ Database Backup Successful
-Dear Team,
+Hi,
 The backup process for the {{.Database}} database was successfully completed.
 Please find the details below:
 Backup Details:
 - Database Name: {{.Database}}
-- Backup Start Time: {{.StartTime}}
-- Backup EndTime: {{.EndTime}}
+- Backup Duration: {{.Duration}}
 - Backup Storage: {{.Storage}}
 - Backup Location: {{.BackupLocation}}
-- Backup Size: {{.BackupSize}} bytes
+- Backup Size: {{.BackupSize}}
 - Backup Reference: {{.BackupReference}}
 You can access the backup at the specified location if needed.

View File

@@ -37,10 +37,9 @@ type MailConfig struct {
 }
 type NotificationData struct {
     File            string
-    BackupSize      int64
+    BackupSize      string
     Database        string
-    StartTime       string
-    EndTime         string
+    Duration        string
     Storage         string
     BackupLocation  string
     BackupReference string
@@ -84,3 +83,13 @@ func backupReference() string {
 const templatePath = "/config/templates"
 var DatabaseName = ""
+var vars = []string{
+    "TG_TOKEN",
+    "TG_CHAT_ID",
+}
+var mailVars = []string{
+    "MAIL_HOST",
+    "MAIL_PORT",
+    "MAIL_FROM",
+    "MAIL_TO",
+}
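With BackupSize and Duration now strings, callers pass preformatted values; mirroring the s3Backup call site above, a success notification now looks roughly like this (illustrative values):

utils.NotifySuccess(&utils.NotificationData{
    File:           "store_20250314.sql.gz",      // illustrative
    BackupSize:     utils.ConvertBytes(52428800), // "50.00 MiB"
    Database:       "store",
    Duration:       goutils.FormatDuration(90*time.Second, 0),
    Storage:        "s3",
    BackupLocation: "/backups/store_20250314.sql.gz",
    // BackupReference is filled in by NotifySuccess itself.
})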

View File

@@ -107,19 +107,6 @@ func sendMessage(msg string) error {
 }
 func NotifySuccess(notificationData *NotificationData) {
     notificationData.BackupReference = backupReference()
-    var vars = []string{
-        "TG_TOKEN",
-        "TG_CHAT_ID",
-    }
-    var mailVars = []string{
-        "MAIL_HOST",
-        "MAIL_PORT",
-        "MAIL_USERNAME",
-        "MAIL_PASSWORD",
-        "MAIL_FROM",
-        "MAIL_TO",
-    }
     // Email notification
     err := CheckEnvVars(mailVars)
     if err == nil {
@@ -147,18 +134,6 @@ func NotifySuccess(notificationData *NotificationData) {
     }
 }
 func NotifyError(error string) {
-    var vars = []string{
-        "TG_TOKEN",
-        "TG_CHAT_ID",
-    }
-    var mailVars = []string{
-        "MAIL_HOST",
-        "MAIL_PORT",
-        "MAIL_USERNAME",
-        "MAIL_PASSWORD",
-        "MAIL_FROM",
-        "MAIL_TO",
-    }
     // Email notification
     err := CheckEnvVars(mailVars)

View File

@@ -254,3 +254,19 @@ func CronNextTime(cronExpr string) time.Time {
     next := schedule.Next(now)
     return next
 }
+// ConvertBytes converts bytes to a human-readable string with the appropriate unit (bytes, MiB, or GiB).
+func ConvertBytes(bytes uint64) string {
+    const (
+        MiB = 1024 * 1024
+        GiB = MiB * 1024
+    )
+    switch {
+    case bytes >= GiB:
+        return fmt.Sprintf("%.2f GiB", float64(bytes)/float64(GiB))
+    case bytes >= MiB:
+        return fmt.Sprintf("%.2f MiB", float64(bytes)/float64(MiB))
+    default:
+        return fmt.Sprintf("%d bytes", bytes)
+    }
+}
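A quick check of the unit boundaries, following directly from the switch above:

fmt.Println(ConvertBytes(512))                    // "512 bytes"
fmt.Println(ConvertBytes(5 * 1024 * 1024))        // "5.00 MiB"
fmt.Println(ConvertBytes(3 * 1024 * 1024 * 1024)) // "3.00 GiB"

Note that the function deliberately has no KiB tier: any value under 1 MiB, e.g. 204800 (200 KiB), still prints as raw bytes.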