Compare commits


249 Commits

Author SHA1 Message Date
8ac6ffc394 Merge pull request #197 from jkaninda/dependabot/go_modules/github.com/jkaninda/go-utils-0.1.3
chore(deps): bump github.com/jkaninda/go-utils from 0.1.1 to 0.1.3
2025-09-10 05:56:13 +02:00
59296c0c17 Merge branch 'main' into dependabot/go_modules/github.com/jkaninda/go-utils-0.1.3 2025-09-10 05:55:13 +02:00
19b64bbad1 Merge pull request #198 from jkaninda/dependabot/docker/golang-1.25.1
chore(deps): bump golang from 1.24.6 to 1.25.1
2025-09-10 05:54:28 +02:00
640e11b924 Merge pull request #199 from jkaninda/dependabot/go_modules/github.com/spf13/cobra-1.10.1
chore(deps): bump github.com/spf13/cobra from 1.9.1 to 1.10.1
2025-09-10 05:54:15 +02:00
dependabot[bot]
a746366348 chore(deps): bump github.com/spf13/cobra from 1.9.1 to 1.10.1
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.9.1 to 1.10.1.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.9.1...v1.10.1)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-version: 1.10.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-08 09:25:00 +00:00
dependabot[bot]
64d57a077e chore(deps): bump golang from 1.24.6 to 1.25.1
Bumps golang from 1.24.6 to 1.25.1.

---
updated-dependencies:
- dependency-name: golang
  dependency-version: 1.25.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-08 09:21:49 +00:00
dependabot[bot]
995db83b7c chore(deps): bump github.com/jkaninda/go-utils from 0.1.1 to 0.1.3
Bumps [github.com/jkaninda/go-utils](https://github.com/jkaninda/go-utils) from 0.1.1 to 0.1.3.
- [Release notes](https://github.com/jkaninda/go-utils/releases)
- [Commits](https://github.com/jkaninda/go-utils/compare/v0.1.1...v0.1.3)

---
updated-dependencies:
- dependency-name: github.com/jkaninda/go-utils
  dependency-version: 0.1.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-08-25 17:37:38 +00:00
5d5cbea60b Merge pull request #194 from jkaninda/dependabot/docker/alpine-3.22.1
chore(deps): bump alpine from 3.22.0 to 3.22.1
2025-08-13 04:45:07 +02:00
dependabot[bot]
2cb5603b88 chore(deps): bump alpine from 3.22.0 to 3.22.1
Bumps alpine from 3.22.0 to 3.22.1.

---
updated-dependencies:
- dependency-name: alpine
  dependency-version: 3.22.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-08-11 18:28:38 +00:00
548de725dd Merge pull request #195 from jkaninda/dependabot/docker/golang-1.24.6
chore(deps): bump golang from 1.24.5 to 1.24.6
2025-08-11 20:24:27 +02:00
dependabot[bot]
37b2867974 chore(deps): bump golang from 1.24.5 to 1.24.6
Bumps golang from 1.24.5 to 1.24.6.

---
updated-dependencies:
- dependency-name: golang
  dependency-version: 1.24.6
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-08-11 14:49:26 +00:00
d6c01c9a4a Merge pull request #193 from jkaninda/dependabot/docker/golang-1.24.5
chore(deps): bump golang from 1.24.4 to 1.24.5
2025-07-19 08:00:22 +02:00
dependabot[bot]
bd92cc6844 chore(deps): bump golang from 1.24.4 to 1.24.5
Bumps golang from 1.24.4 to 1.24.5.

---
updated-dependencies:
- dependency-name: golang
  dependency-version: 1.24.5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-07-14 12:14:33 +00:00
3006abac3e Merge pull request #192 from jkaninda/dependabot/docker/golang-1.24.4
chore(deps): bump golang from 1.24.3 to 1.24.4
2025-06-09 19:53:25 +02:00
dependabot[bot]
3a16f6929c chore(deps): bump golang from 1.24.3 to 1.24.4
Bumps golang from 1.24.3 to 1.24.4.

---
updated-dependencies:
- dependency-name: golang
  dependency-version: 1.24.4
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-09 10:17:20 +00:00
2f3ed7d0d8 Merge pull request #191 from jkaninda/dependabot/docker/alpine-3.22.0
chore(deps): bump alpine from 3.21.3 to 3.22.0
2025-06-06 20:01:27 +02:00
dependabot[bot]
a92bba05e4 chore(deps): bump alpine from 3.21.3 to 3.22.0
Bumps alpine from 3.21.3 to 3.22.0.

---
updated-dependencies:
- dependency-name: alpine
  dependency-version: 3.22.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-02 11:09:07 +00:00
7b565d54bd Merge pull request #190 from jkaninda/dependabot/docker/golang-1.24.3
chore(deps): bump golang from 1.24.2 to 1.24.3
2025-05-14 23:03:58 +02:00
dependabot[bot]
d10321dac6 chore(deps): bump golang from 1.24.2 to 1.24.3
Bumps golang from 1.24.2 to 1.24.3.

---
updated-dependencies:
- dependency-name: golang
  dependency-version: 1.24.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-12 09:54:31 +00:00
d151c50caa Merge pull request #189 from jkaninda/nightly
Nightly
2025-05-11 20:10:05 +02:00
07ecec57b3 Add why use MySQL-BKUP 2025-05-11 20:09:19 +02:00
bc4aab6ed0 Add Docker labels 2025-05-11 20:05:16 +02:00
12c17c18d6 doc: update key features (#188)
2025-05-10 17:47:12 +02:00
80cd70e153 Merge pull request #187 from jkaninda/dependabot/docker/golang-1.24.2
chore(deps): bump golang from 1.24.1 to 1.24.2
2025-04-08 19:29:23 +02:00
dependabot[bot]
80abdb4299 chore(deps): bump golang from 1.24.1 to 1.24.2
Bumps golang from 1.24.1 to 1.24.2.

---
updated-dependencies:
- dependency-name: golang
  dependency-version: 1.24.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-07 10:37:49 +00:00
72b21f00e6 Merge pull request #186 from jkaninda/nightly
Nightly
2025-03-17 09:09:09 +01:00
3439c74257 Merge branch 'main' of github.com:jkaninda/mysql-bkup into nightly 2025-03-17 09:05:24 +01:00
4110ff4e64 Merge branch 'docs' into nightly 2025-03-17 09:05:16 +01:00
cb299a35bf enh: enhancement of logging 2025-03-17 09:05:04 +01:00
ba7c096bf3 Merge pull request #185 from jkaninda/docs
doc: update deployment examples
2025-03-16 11:50:32 +01:00
6ac8dcef9e doc: update deployment examples
2025-03-16 11:49:36 +01:00
6bcc5d6bd4 Merge pull request #184 from jkaninda/docs
doc: update configuration deployment
2025-03-16 05:55:14 +01:00
fd35fabf97 doc: update configuration deployment 2025-03-16 05:45:12 +01:00
23cce10e8c Merge branch 'main' of github.com:jkaninda/mysql-bkup into nightly 2025-03-16 05:37:34 +01:00
e666466d27 fix: database name not set error when using flag -d (#183)
* fix: database name not set error when using flag -d
2025-03-14 14:38:06 +01:00
36bca254a9 ci: Set -d flag for database name
2025-03-14 14:34:25 +01:00
ad18d42145 fix: database name not set error when using flag -d 2025-03-14 14:32:38 +01:00
d9d44c2798 Merge pull request #182 from jkaninda/nightly
Nightly
2025-03-14 09:59:33 +01:00
300a592508 Merge branch 'main' of github.com:jkaninda/mysql-bkup into nightly
2025-03-14 09:58:47 +01:00
be82e841e7 ci: set docker tests on main 2025-03-14 09:58:41 +01:00
a73a365ebf ci: set docker tests on main 2025-03-14 09:57:59 +01:00
75e965c0c5 Merge pull request #181 from jkaninda/nightly
doc: update reference
2025-03-14 09:55:02 +01:00
fc60ddb308 doc: update reference 2025-03-14 09:53:38 +01:00
573ef15ef3 Merge pull request #180 from jkaninda/nightly
ci: update GitHub Pages action
2025-03-14 09:50:49 +01:00
b1776d3689 ci: update GitHub Pages action 2025-03-14 09:50:13 +01:00
376d47f738 Merge pull request #178 from jkaninda/nightly
feat: add backup all databases separately
2025-03-14 09:43:43 +01:00
eb6268f8ec ci: add Docker tests (#179) 2025-03-14 09:41:37 +01:00
731e2d789d ci: add go lint 2025-03-14 05:24:46 +01:00
6300a8f2dd feat: add backup all databases 2025-03-14 05:20:54 +01:00
cd827a9277 chore: comment code
2025-03-13 14:44:22 +01:00
71cf3fae85 chore: improve log message 2025-03-13 14:26:32 +01:00
528282bbd4 feat: add backup all databases separately 2025-03-13 07:48:28 +01:00
002c93a796 Merge pull request #176 from jkaninda/dependabot/docker/golang-1.24.1
chore(deps): bump golang from 1.24.0 to 1.24.1
2025-03-12 16:29:14 +01:00
b6192f4c42 feat: add backup all databases
2025-03-12 16:04:26 +01:00
d5061453b0 feat: add backup all databases 2025-03-12 15:50:30 +01:00
0bc7497512 fix: warning message when using MYSQL_PASSWORD env 2025-03-12 14:13:21 +01:00
489dfdf842 fix: backup error output 2025-03-12 13:27:31 +01:00
dependabot[bot]
907e70d552 chore(deps): bump golang from 1.24.0 to 1.24.1
Bumps golang from 1.24.0 to 1.24.1.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 09:25:40 +00:00
696477fe5c Merge pull request #175 from jkaninda/dependabot/go_modules/github.com/jkaninda/go-utils-0.1.1
chore(deps): bump github.com/jkaninda/go-utils from 0.0.0-20250122060806-26119182077a to 0.1.1
2025-02-27 12:33:17 +01:00
dependabot[bot]
56a8b51660 chore(deps): bump github.com/jkaninda/go-utils
Bumps [github.com/jkaninda/go-utils](https://github.com/jkaninda/go-utils) from 0.0.0-20250122060806-26119182077a to 0.1.1.
- [Release notes](https://github.com/jkaninda/go-utils/releases)
- [Commits](https://github.com/jkaninda/go-utils/commits/v0.1.1)

---
updated-dependencies:
- dependency-name: github.com/jkaninda/go-utils
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-24 10:27:41 +00:00
c76a00139c Merge pull request #172 from jkaninda/dependabot/go_modules/github.com/spf13/cobra-1.9.1
chore(deps): bump github.com/spf13/cobra from 1.8.1 to 1.9.1
2025-02-21 11:38:48 +01:00
0f43871765 Merge pull request #173 from jkaninda/dependabot/docker/golang-1.24.0
chore(deps): bump golang from 1.23.6 to 1.24.0
2025-02-21 11:38:37 +01:00
9ba6abe3f4 Merge pull request #174 from jkaninda/dependabot/docker/alpine-3.21.3
chore(deps): bump alpine from 3.21.2 to 3.21.3
2025-02-21 11:38:27 +01:00
dependabot[bot]
764583d88f chore(deps): bump alpine from 3.21.2 to 3.21.3
Bumps alpine from 3.21.2 to 3.21.3.

---
updated-dependencies:
- dependency-name: alpine
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-17 10:11:17 +00:00
dependabot[bot]
dbf4dc596a chore(deps): bump golang from 1.23.6 to 1.24.0
Bumps golang from 1.23.6 to 1.24.0.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-17 10:11:10 +00:00
dependabot[bot]
06c89a9b78 chore(deps): bump github.com/spf13/cobra from 1.8.1 to 1.9.1
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.8.1 to 1.9.1.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.8.1...v1.9.1)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-17 10:01:09 +00:00
ec8bdd806c Merge pull request #171 from jkaninda/dependabot/docker/golang-1.23.6
chore(deps): bump golang from 1.23.5 to 1.23.6
2025-02-10 20:15:31 +01:00
dependabot[bot]
828b11c6dd chore(deps): bump golang from 1.23.5 to 1.23.6
Bumps golang from 1.23.5 to 1.23.6.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-10 09:34:49 +00:00
1d01e13909 Merge pull request #170 from jkaninda/nightly
chore: update helper func to check env with prefix or suffix for multi backups
2025-02-05 07:44:57 +01:00
bd65db2418 chore: update helper func to check env with prefix or suffix for multi backups
2025-02-05 07:39:52 +01:00
75b809511e fix go lint
2025-01-26 13:54:41 +01:00
fc028a2c55 feat: add multiple backup rescued mode for scheduled mode 2025-01-26 13:43:39 +01:00
7fa0c6a118 Merge pull request #169 from jkaninda/nightly
docs: add quick restore
2025-01-26 12:12:53 +01:00
661702a97e docs: add quick restore 2025-01-26 12:11:29 +01:00
dd5f33f17d Merge pull request #168 from jkaninda/nightly
Nightly
2025-01-25 09:36:19 +01:00
b7cdfebd9c chore: notification remove MAIL_USERNAME and MAIL_PASSWORD from required env
2025-01-25 09:19:23 +01:00
4b93becdf2 feat: set default values from environment variables if not provided for multiple backup 2025-01-25 09:12:28 +01:00
748cccec58 Merge pull request #167 from jkaninda/nightly
feat: add backup duration
2025-01-22 07:23:29 +01:00
3e8bfabc44 feat: add backup duration
2025-01-22 07:22:56 +01:00
777b59fd7c Merge pull request #166 from jkaninda/dependabot/docker/golang-1.23.5
chore(deps): bump golang from 1.23.4 to 1.23.5
2025-01-21 02:53:48 +01:00
dependabot[bot]
2b25f39c0a chore(deps): bump golang from 1.23.4 to 1.23.5
Bumps golang from 1.23.4 to 1.23.5.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-20 10:09:46 +00:00
e5ba397bb4 Merge pull request #164 from jkaninda/nightly
doc: reviewed docs
2025-01-13 15:34:50 +01:00
3a1bfc512d doc: reviewed docs
2025-01-13 15:34:02 +01:00
b7b09ad6fd Merge pull request #163 from jkaninda/nightly
Nightly
2025-01-13 15:06:27 +01:00
1206140a67 doc: reviewed docs 2025-01-13 15:05:50 +01:00
24573a96ad doc: reviewed docs 2025-01-13 15:04:29 +01:00
fff0b55722 Merge pull request #162 from jkaninda/nightly
feat: add backup flags for configuration and cron expression
2025-01-13 14:57:00 +01:00
68322e6b9f doc: reviewed docs 2025-01-13 14:56:08 +01:00
0f28772659 doc: reviewed docs 2025-01-13 14:40:46 +01:00
b95ccf3905 feat: add backup flags for configuration and cron expression 2025-01-13 14:23:27 +01:00
a06872834f Merge pull request #161 from jkaninda/dependabot/docker/alpine-3.21.2
chore(deps): bump alpine from 3.21.0 to 3.21.2
2025-01-13 10:53:41 +01:00
dependabot[bot]
393168c6c5 chore(deps): bump alpine from 3.21.0 to 3.21.2
Bumps alpine from 3.21.0 to 3.21.2.

---
updated-dependencies:
- dependency-name: alpine
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-13 09:52:30 +00:00
5b9ec8a224 Merge pull request #160 from jkaninda/nightly
fix: the configuration file path is not being detected when it is enc…
2025-01-12 09:36:29 +01:00
2c3f2f4a46 fix: the configuration file path is not being detected when it is enclosed in quotes 2025-01-12 07:58:32 +01:00
0df14f37b4 Merge pull request #159 from jkaninda/refactor
chore: add convert bytes to a human-readable string with the appropri…
2024-12-12 13:29:22 +01:00
1b60ca6fd2 chore: add convert bytes to a human-readable string with the appropriate unit (bytes, MiB, or GiB) 2024-12-12 13:28:09 +01:00
d880f40108 Merge pull request #158 from jkaninda/dependabot/docker/golang-1.23.4
chore(deps): bump golang from 1.23.3 to 1.23.4
2024-12-10 10:20:27 +01:00
dependabot[bot]
c845b36797 chore(deps): bump golang from 1.23.3 to 1.23.4
Bumps golang from 1.23.3 to 1.23.4.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-09 09:53:21 +00:00
63d615f838 Merge pull request #157 from jkaninda/refactor
docs: add azure configuration reference
2024-12-08 13:36:26 +01:00
6f31d35df2 docs: add azure configuration reference 2024-12-08 13:35:49 +01:00
f36d01cc96 Merge pull request #156 from jkaninda/refactor
Fix grammar issues in azure.go
2024-12-08 00:01:13 +01:00
07b7f54a75 Fix grammar issues in azure.go 2024-12-08 00:00:24 +01:00
7ff9a32f08 Merge pull request #155 from jkaninda/develop
chore: update notification template
2024-12-07 20:29:13 +01:00
95a81cb6b7 fix: SSH storage key identify file 2024-12-07 20:14:30 +01:00
057d5277b0 fix: deprecation warning message, replace mysql by mariadb command 2024-12-07 17:54:44 +01:00
8e58d7a4c3 chore: update notification template 2024-12-07 17:36:05 +01:00
4bd7d9fa72 Merge pull request #154 from jkaninda/refactor
chore: update .env.example
2024-12-07 03:24:35 +01:00
Jonas Kaninda
156f22f1e5 chore: update .env.example 2024-12-07 03:24:06 +01:00
fd444293b4 Merge pull request #153 from jkaninda/refactor
fix: S3 remote path when backing up multiple databases
2024-12-07 02:34:42 +01:00
Jonas Kaninda
1940ceba9a fix: S3 remote path when backing up multiple databases 2024-12-07 02:25:22 +01:00
Jonas Kaninda
07d580a8a9 refactoring of code 2024-12-07 02:23:38 +01:00
9a261b22ec Merge pull request #152 from jkaninda/refactor
docs: update features
2024-12-06 22:09:12 +01:00
Jonas Kaninda
e7a58f0569 docs: update features 2024-12-06 22:08:51 +01:00
1b529725d7 Merge pull request #151 from jkaninda/refactor
fix: fatal logger notification
2024-12-06 21:03:47 +01:00
Jonas Kaninda
d8c73560b8 fix: fatal logger notification 2024-12-06 21:00:26 +01:00
Jonas Kaninda
d5a0adc981 refactoring of code 2024-12-06 20:53:46 +01:00
6df3bae9e2 Merge pull request #150 from jkaninda/feature/azure-blob
chore: update base image tag version
2024-12-06 20:23:46 +01:00
Jonas Kaninda
f7d624fd15 chore: update base image tag version 2024-12-06 20:23:08 +01:00
1e9e1ed951 Merge pull request #149 from jkaninda/feature/azure-blob
chore: update app package
2024-12-06 20:17:57 +01:00
Jonas Kaninda
917ba8947f chore: update app package 2024-12-06 20:16:56 +01:00
94a1dcdff7 Merge pull request #148 from jkaninda/feature/azure-blob
Feature/azure blob
2024-12-06 18:34:46 +01:00
Jonas Kaninda
f70e549b16 docs: update Azure storage 2024-12-06 18:33:54 +01:00
Jonas Kaninda
607478fcc6 docs: update Azure storage 2024-12-06 18:33:17 +01:00
2862e504f5 Merge pull request #147 from jkaninda/feature/azure-blob
feat: add Azure Blob storage
2024-12-06 18:31:08 +01:00
Jonas Kaninda
29420ee13e feat: add Azure Blob storage 2024-12-06 18:30:38 +01:00
f53272ccf0 Merge pull request #146 from jkaninda/feature/azure-blob
feat: add Azure Blob storage
2024-12-06 18:29:34 +01:00
Jonas Kaninda
c360441445 feat: add Azure Blob storage 2024-12-06 18:27:25 +01:00
Jonas Kaninda
f6916231f7 docs: update core features 2024-12-06 14:23:06 +01:00
Jonas Kaninda
afd4afc83b refactor: refactoring of code 2024-12-06 14:21:55 +01:00
Jonas Kaninda
9016a9ec7a Add LICENSE 2024-12-06 03:25:38 +01:00
4ecd96e75c Merge pull request #145 from jkaninda/dependabot/docker/golang-1.23.3
chore(deps): bump golang from 1.23.2 to 1.23.3
2024-11-11 11:04:56 +01:00
dependabot[bot]
8a88e4a727 chore(deps): bump golang from 1.23.2 to 1.23.3
Bumps golang from 1.23.2 to 1.23.3.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-11 09:58:46 +00:00
62f86adea9 Merge pull request #144 from jkaninda/refactor
refactor: Restructure project files for better organization, readabil…
2024-11-04 09:02:57 +01:00
eb414d818c refactor: Restructure project files for better organization, readability, and maintainability 2024-11-04 09:02:41 +01:00
6721cc430d Merge pull request #143 from jkaninda/refactor
refactor: update logger, fix warning message
2024-10-23 12:06:49 +02:00
Jonas Kaninda
8e20e9595f refactor: update logger, fix warning message 2024-10-23 12:06:24 +02:00
02e3267237 Merge pull request #142 from jkaninda/refactor
refactor: update logger, fix warning message
2024-10-23 12:04:50 +02:00
Jonas Kaninda
448ef4d988 refactor: update logger, fix warning message 2024-10-23 12:04:19 +02:00
70ac78c2cd Merge pull request #141 from jkaninda/refactor
docs: update supported storage
2024-10-23 10:31:48 +02:00
Jonas Kaninda
72f5ef4839 docs: update supported storage 2024-10-23 10:31:24 +02:00
6a51f591a5 Merge pull request #138 from jkaninda/dependabot/go_modules/github.com/spf13/cobra-1.8.1
chore(deps): bump github.com/spf13/cobra from 1.8.0 to 1.8.1
2024-10-23 10:03:18 +02:00
d55ade3c21 Merge pull request #139 from jkaninda/dependabot/go_modules/golang.org/x/crypto-0.28.0
chore(deps): bump golang.org/x/crypto from 0.18.0 to 0.28.0
2024-10-23 10:02:54 +02:00
dependabot[bot]
cdbd6dcd6a chore(deps): bump golang.org/x/crypto from 0.18.0 to 0.28.0
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.18.0 to 0.28.0.
- [Commits](https://github.com/golang/crypto/compare/v0.18.0...v0.28.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-23 08:01:21 +00:00
dependabot[bot]
307e18d9ff chore(deps): bump github.com/spf13/cobra from 1.8.0 to 1.8.1
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.8.0 to 1.8.1.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.8.0...v1.8.1)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-23 08:01:20 +00:00
8d366f0302 Merge pull request #140 from jkaninda/dependabot/go_modules/github.com/aws/aws-sdk-go-1.55.5
chore(deps): bump github.com/aws/aws-sdk-go from 1.55.3 to 1.55.5
2024-10-23 10:00:07 +02:00
05e32c3cc1 Merge pull request #137 from jkaninda/dependabot/docker/golang-1.23.2
chore(deps): bump golang from 1.22.5 to 1.23.2
2024-10-23 09:59:27 +02:00
dependabot[bot]
edd13907d0 chore(deps): bump github.com/aws/aws-sdk-go from 1.55.3 to 1.55.5
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.55.3 to 1.55.5.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.55.3...v1.55.5)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-23 07:51:28 +00:00
dependabot[bot]
7cb1c50927 chore(deps): bump golang from 1.22.5 to 1.23.2
Bumps golang from 1.22.5 to 1.23.2.

---
updated-dependencies:
- dependency-name: golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-23 07:51:07 +00:00
f545704b02 Merge pull request #136 from jkaninda/refactor
Set up automated updates for Docker base images and Go packages
2024-10-23 09:50:23 +02:00
90f5391b24 Set up automated updates for Docker base images and Go packages 2024-10-23 09:50:09 +02:00
ca241b4fef Merge pull request #135 from jkaninda/refactor
fix: add identifyFile
2024-10-23 09:37:56 +02:00
Jonas Kaninda
3911296921 fix: add identifyFile 2024-10-23 09:36:58 +02:00
8d04d276ba Merge pull request #134 from jkaninda/refactor
chore: integrate external storage module
2024-10-23 04:05:04 +02:00
Jonas Kaninda
221079e0ea chore: integrate external storage module 2024-10-23 04:04:38 +02:00
590b2d8bc6 Merge pull request #133 from jkaninda/refactor
Refactor
2024-10-22 17:24:09 +02:00
Jonas Kaninda
d2aeb55ebc docs: update notification template 2024-10-22 17:22:45 +02:00
Jonas Kaninda
431be36210 refactor: create storage interface, refactor local, s3, ftp and ssh storage 2024-10-22 17:21:01 +02:00
ef2c5c80cd Merge pull request #132 from jkaninda/refactor
Refactor
2024-10-21 08:41:50 +02:00
Jonas Kaninda
3a0137d6ea doc: update deployment using s3 storage 2024-10-21 08:35:57 +02:00
Jonas Kaninda
8afb5ace40 chore: add ssh, ftp to storage list 2024-10-21 08:33:59 +02:00
Jonas Kaninda
5569258a71 refactor: clean up Dockerfile 2024-10-21 08:26:59 +02:00
Jonas Kaninda
f3ec395e37 refactor: clean up code 2024-10-21 08:22:06 +02:00
ba432997c8 Merge pull request #131 from jkaninda/refactor
refactor: ineffectual assignment
2024-10-20 08:22:02 +02:00
Jonas Kaninda
dc20ea9635 refactor: ineffectual assignment 2024-10-20 08:21:36 +02:00
40557af437 Merge pull request #130 from jkaninda/refactor
Refactor
2024-10-20 06:58:20 +02:00
Jonas Kaninda
1dcb9586a6 chore: add command usage error 2024-10-20 06:57:06 +02:00
Jonas Kaninda
2c6336e84a chore: add backup prune, replace period flag by BACKUP_RETENTION_DAYS environment variable 2024-10-20 06:52:36 +02:00
c16ee3a492 Merge pull request #129 from jkaninda/refactor
chore: replace prune and keep-last flags by BACKUP_RETENTION_DAYS env…
2024-10-19 05:31:31 +02:00
Jonas Kaninda
3f7d28ea49 chore: replace prune and keep-last flags by BACKUP_RETENTION_DAYS env variable 2024-10-19 05:30:57 +02:00
cea1ef9c3b Merge pull request #128 from jkaninda/refactor
doc: update using s3 storage example
2024-10-18 08:51:17 +02:00
Jonas Kaninda
56c271bc29 doc: update using s3 storage example 2024-10-18 08:51:04 +02:00
45c30dca5f Merge pull request #127 from jkaninda/refactor
Refactor
2024-10-15 16:57:44 +02:00
Jonas Kaninda
b0ae212578 docs: update scheduled backup docker deployment example 2024-10-15 16:53:37 +02:00
Jonas Kaninda
6e2d3a9f21 add env.example 2024-10-15 16:52:25 +02:00
Jonas Kaninda
dd314aa4cb chore: clean up Dockerfile 2024-10-15 16:50:37 +02:00
Jonas Kaninda
24ccdaa671 refactor: add default env variable value 2024-10-15 16:43:02 +02:00
45e3452376 Merge pull request #126 from jkaninda/refactor
docs: update readme
2024-10-13 14:53:31 +02:00
Jonas Kaninda
3527b4cdcd docs: update readme 2024-10-13 14:53:04 +02:00
dc6fe2f4b9 Merge pull request #125 from jkaninda/refactor
chore: switch to encryptor module
2024-10-13 14:34:18 +02:00
Jonas Kaninda
f0afc0f4e0 chore: switch to encryptor module 2024-10-13 14:33:54 +02:00
7d7c813bb0 Merge pull request #124 from jkaninda/template
docs: update restoration supported extensions
2024-10-12 11:14:08 +02:00
Jonas Kaninda
6b8491cdc0 docs: update restoration supported extensions 2024-10-12 11:13:45 +02:00
a1dd6e3f58 Merge pull request #123 from jkaninda/template
docs: update email notification example
2024-10-12 10:47:28 +02:00
Jonas Kaninda
86ba3530c9 docs: update email notification example 2024-10-12 10:47:14 +02:00
e1f3b15003 Merge pull request #122 from jkaninda/template
docs: update Kubernetes deployments
2024-10-10 21:22:08 +02:00
Jonas Kaninda
1577e92a66 docs: update Kubernetes deployments 2024-10-10 21:21:52 +02:00
7b67f88769 Merge pull request #121 from jkaninda/template
Template
2024-10-10 21:11:14 +02:00
Jonas Kaninda
043233dabe docs: update Kubernetes deployments 2024-10-10 21:10:40 +02:00
Jonas Kaninda
d6652cfb75 chore: update github link 2024-10-10 21:03:07 +02:00
140ed608ab Merge pull request #120 from jkaninda/fix-dockerfile
fix: Dockerfile backup, restore, and migrate scripts since the migration of base image from Ubuntu to alpine
2024-10-10 10:03:44 +02:00
Jonas Kaninda
98211a27b8 fix: Dockerfile backup, restore, and migrate scripts since the migration of base image from Ubuntu to alpine 2024-10-10 10:02:42 +02:00
4e4d45e555 Merge pull request #119 from jkaninda/fix-notification
fix: fix multi backup s3 path
2024-10-10 05:51:46 +02:00
Jonas Kaninda
01e41acb5c fix: fix multi backup s3 path 2024-10-10 05:51:18 +02:00
3dce2017f8 Merge pull request #118 from jkaninda/fix-notification
fix: fix multi backup s3 path
2024-10-10 05:32:08 +02:00
Jonas Kaninda
ed2f1b8d9c fix: fix multi backup s3 path 2024-10-10 05:31:18 +02:00
b64875df21 Merge pull request #117 from jkaninda/fix-notification
docs: correct grammar in receive-notification.md
2024-10-10 04:28:52 +02:00
Jonas Kaninda
fc90507b3f docs: correct grammar in receive-notification.md 2024-10-10 04:28:02 +02:00
df0efd24d3 Merge pull request #116 from jkaninda/fix-notification
chore: fix infinity calling Fatal, add a backup reference
2024-10-10 04:15:12 +02:00
Jonas Kaninda
e5dd7e76ce chore: fix infinity calling Fatal, add a backup reference 2024-10-10 04:14:42 +02:00
12fbb67a09 Merge pull request #115 from jkaninda/email-notification
docs: update send notification
2024-10-09 22:38:35 +02:00
Jonas Kaninda
df490af7b6 docs: update send notification 2024-10-09 22:38:07 +02:00
d930c3e2f6 Merge pull request #114 from jkaninda/email-notification
feat: add email notification for failed and success backup
2024-10-09 22:32:44 +02:00
Jonas Kaninda
e4258cb12e feat: add email notification for failed and success backup 2024-10-09 22:31:52 +02:00
4c44166921 Merge pull request #113 from jkaninda/develop
Develop
2024-10-09 12:51:15 +02:00
554df819ab Merge pull request #112 from jkaninda/multi-backup
docs: add multi database backup example
2024-10-09 12:49:46 +02:00
Jonas Kaninda
ca5633882e docs: add multi database backup example 2024-10-09 12:45:55 +02:00
c5cca82841 Merge pull request #111 from jkaninda/multi-backup
Add Multi database backup
2024-10-09 12:24:37 +02:00
Jonas Kaninda
bbd5422089 ci: change Dockerfile path 2024-10-09 12:23:45 +02:00
Jonas Kaninda
d72156f890 feat: add multi database backup 2024-10-09 12:23:14 +02:00
Jonas Kaninda
909a50dbe7 docs: update backup encryption example 2024-10-08 23:20:50 +02:00
Jonas Kaninda
94ceb71da2 docs: update backup encryption example 2024-10-08 23:05:10 +02:00
Jonas Kaninda
fe05fe5110 feat: add encrypt backup using public key, migrate gpg to go gpg dependency 2024-10-08 23:02:46 +02:00
dabba2050a Merge pull request #110 from jkaninda/refactor
chore: remove os.kill.signal
2024-10-05 10:42:55 +02:00
Jonas Kaninda
47e1ac407b chore: remove os.kill.signal 2024-10-05 10:41:46 +02:00
28f6ed3a82 Merge pull request #109 from jkaninda/refactor
fix: logging time
2024-10-05 10:40:11 +02:00
Jonas Kaninda
504926c7cd fix: logging time 2024-10-05 10:39:49 +02:00
737f473f92 Merge pull request #108 from jkaninda/refactor
Refactor
2024-10-03 18:19:12 +02:00
Jonas Kaninda
300d2a8205 chore: remove testDatabaseConnection function for scheduled mode 2024-10-03 18:18:47 +02:00
Jonas Kaninda
a4ad0502cf chore: add storage type alt for lowercase and uppercase 2024-10-03 18:17:48 +02:00
f344867edf Merge pull request #107 from jkaninda/refactor
docs: update configuration reference
2024-10-02 04:26:05 +02:00
Jonas Kaninda
d774584f64 docs: update configuration reference 2024-10-02 04:25:35 +02:00
96927cd57e Merge pull request #106 from jkaninda/refactor
Refactor
2024-10-02 04:13:20 +02:00
Jonas Kaninda
ceacfa1d9d docs: update ssh and ftp deployment example 2024-10-02 04:09:42 +02:00
Jonas Kaninda
9380a18b45 refactor: remove old arguments, refactor aws and ssh configuration 2024-10-02 04:07:14 +02:00
Jonas Kaninda
d186071df9 Merge pull request #105 from jkaninda/refactor
chore: update app version
2024-09-30 17:49:21 +02:00
Jonas Kaninda
71429b0e1a chore: update app version 2024-09-30 17:48:56 +02:00
Jonas Kaninda
0bed86ded4 Merge pull request #104 from jkaninda/refactor
chore: add Time Zone
2024-09-30 17:45:38 +02:00
Jonas Kaninda
e891801125 chore: add Time Zone 2024-09-30 17:44:45 +02:00
Jonas Kaninda
01cf8a3392 Merge pull request #103 from jkaninda/refactor
fix: MySQL 8.x -Plugin caching_sha2_password could not be loaded
2024-09-30 07:58:39 +02:00
Jonas Kaninda
efea81833a fix: MySQL 8.x -Plugin caching_sha2_password could not be loaded 2024-09-30 07:57:42 +02:00
Jonas Kaninda
1cbf65d686 Merge pull request #102 from jkaninda/refactor
fix: backup date and time
2024-09-30 02:03:08 +02:00
Jonas Kaninda
73d19913f8 fix: backup date and time 2024-09-30 02:02:37 +02:00
Jonas Kaninda
b0224e43ef Merge pull request #101 from jkaninda/docs
docs: add FTP storage
2024-09-30 00:58:42 +02:00
Jonas Kaninda
fa0485bb5a docs: add FTP storage 2024-09-30 00:58:20 +02:00
Jonas Kaninda
65ef6d3e8f Merge pull request #100 from jkaninda/develop
Merge develop
2024-09-30 00:55:42 +02:00
Jonas Kaninda
a7b6abb101 feat: add ftp backup storage 2024-09-30 00:40:35 +02:00
Jonas Kaninda
3b21c109bc chore: migrate baseos from Ubuntu to Alpine 2024-09-29 20:44:11 +02:00
Jonas Kaninda
a50a1ef6f9 Merge pull request #99 from jkaninda/refactor
refactor: replace function params by config struct
2024-09-29 20:09:02 +02:00
Jonas Kaninda
76bbfa35c4 refactor: replace function params by config struct 2024-09-29 20:08:36 +02:00
Jonas Kaninda
599d93bef4 Merge pull request #98 from jkaninda/refactor
refactoring of code
2024-09-29 19:51:07 +02:00
Jonas Kaninda
247e90f73e refactoring of code 2024-09-29 19:50:26 +02:00
Jonas Kaninda
7d544aca68 Merge pull request #97 from jkaninda/docs
chore: add test configurations before running in scheduled mode
2024-09-29 07:35:45 +02:00
Jonas Kaninda
1722ee0eeb chore: add test configurations before running in scheduled mode 2024-09-29 07:35:27 +02:00
Jonas Kaninda
726fd14831 Merge pull request #96 from jkaninda/docs
docs: add docker recurring backup examples
2024-09-29 07:01:27 +02:00
Jonas Kaninda
fdc88e6064 docs: add docker recurring backup examples 2024-09-29 07:00:55 +02:00
Jonas Kaninda
2ba1b516e9 Merge pull request #95 from jkaninda/docs
docs: fix environment variables table
2024-09-28 21:23:43 +02:00
Jonas Kaninda
301594676b docs: fix environment variables table 2024-09-28 21:23:03 +02:00
Jonas Kaninda
d06f2f2d7e Merge pull request #94 from jkaninda/docs
docs: update deployment example
2024-09-28 21:18:37 +02:00
Jonas Kaninda
2f06bd1c3a docs: update deployment example 2024-09-28 21:17:34 +02:00
67 changed files with 4877 additions and 2049 deletions

.env.example (new file, +81 lines)

@@ -0,0 +1,81 @@
### Database
DB_HOST=
DB_PORT=3306
DB_USERNAME=
DB_PASSWORD=
DB_NAME=
TZ=Europe/Paris
### Database Migration
#TARGET_DB_HOST=
#TARGET_DB_PORT=3306
#TARGET_DB_NAME=
#TARGET_DB_USERNAME=
#TARGET_DB_PASSWORD=
### Backup restoration
#FILE_NAME=
### AWS S3 Storage
#ACCESS_KEY=
#SECRET_KEY=
#AWS_S3_BUCKET_NAME=
#AWS_S3_ENDPOINT=
#AWS_REGION=
#AWS_S3_PATH=
#AWS_DISABLE_SSL=false
#AWS_FORCE_PATH_STYLE=true
### Backup Cron Expression
#BACKUP_CRON_EXPRESSION=@midnight
##Delete old backup created more than specified days ago
#BACKUP_RETENTION_DAYS=7
####SSH Storage
#SSH_HOST_NAME=
#SSH_PORT=22
#SSH_USER=
#SSH_PASSWORD=
#SSH_IDENTIFY_FILE=/tmp/id_ed25519
####FTP Storage
#FTP_PASSWORD=
#FTP_HOST_NAME=
#FTP_USER=
#FTP_PORT=21
#REMOTE_PATH=
## Azure Blob storage
AZURE_STORAGE_CONTAINER_NAME=
AZURE_STORAGE_ACCOUNT_NAME=
AZURE_STORAGE_ACCOUNT_KEY=
#### Backup encryption
#GPG_PUBLIC_KEY=/config/public_key.asc
#GPG_PRIVATE_KEY=/config/private_key.asc
#GPG_PASSPHRASE=Your strong passphrase
## For multiple database backup on Docker or Docker in Swarm mode
#BACKUP_CONFIG_FILE=/config/config.yaml
### Database restoration
#FILE_NAME=
### Notification
#BACKUP_REFERENCE=K8s/Paris cluster
## Telegram
#TG_TOKEN=
#TG_CHAT_ID=
### Email
#MAIL_HOST=
#MAIL_PORT=
#MAIL_USERNAME=
#MAIL_PASSWORD=
#MAIL_FROM=Backup Jobs <backup-jobs@example.com>
#MAIL_TO=backup@example.com,me@example.com,team@example.com
#MAIL_SKIP_TLS=false
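
The commented `BACKUP_CONFIG_FILE` entry above points to a YAML file describing several databases to back up in one run (the test workflow below exercises this with `test_config.yaml` and per-database variables such as `TESTDB2_DB_USERNAME`). A minimal sketch of such a file follows; the key names here are assumptions for illustration, so check the project documentation for the exact schema:

```yaml
# Hypothetical /config/config.yaml referenced by BACKUP_CONFIG_FILE.
# Key names are illustrative; per-database credentials can also come from
# prefixed environment variables (e.g. TESTDB2_DB_USERNAME) when omitted here.
databases:
  - host: mysql-host-1
    port: 3306
    name: database1
    user: user1
    password: password1
    path: /s3-path/database1   # remote sub-path for this database's backups
  - host: mysql-host-2
    port: 3306
    name: database2
    user: user2
    password: password2
```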

.github/dependabot.yml (new file, +10 lines)

@@ -0,0 +1,10 @@
version: 2
updates:
  - package-ecosystem: docker
    directory: /
    schedule:
      interval: weekly
  - package-ecosystem: gomod
    directory: /
    schedule:
      interval: weekly


@@ -1,7 +1,7 @@
 name: Build
 on:
   push:
-    branches: ['develop']
+    branches: ['nightly']
 env:
   BUILDKIT_IMAGE: jkaninda/mysql-bkup
 jobs:
@@ -25,8 +25,10 @@ jobs:
         uses: docker/build-push-action@v3
         with:
           push: true
-          file: "./docker/Dockerfile"
+          file: "./Dockerfile"
           platforms: linux/amd64,linux/arm64,linux/arm/v7
+          build-args: |
+            appVersion=nightly
           tags: |
-            "${{env.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"
+            "${{vars.BUILDKIT_IMAGE}}:nightly"


@@ -32,14 +32,14 @@ jobs:
         working-directory: docs
       - name: Setup Pages
         id: pages
-        uses: actions/configure-pages@v2
+        uses: actions/configure-pages@v5
       - name: Build with Jekyll
         working-directory: docs
         run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:
           JEKYLL_ENV: production
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v1
+        uses: actions/upload-pages-artifact@v3
         with:
           path: 'docs/_site/'
@@ -52,4 +52,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v1
+        uses: actions/deploy-pages@v4

.github/workflows/lint.yml (new file, +23 lines)

@@ -0,0 +1,23 @@
name: Lint
on:
  push:
  pull_request:
jobs:
  lint:
    name: Run on Ubuntu
    runs-on: ubuntu-latest
    steps:
      - name: Clone the code
        uses: actions/checkout@v4
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: '~1.23'
      - name: Run linter
        uses: golangci/golangci-lint-action@v6
        with:
          version: v1.61


@@ -39,11 +39,13 @@ jobs:
         uses: docker/build-push-action@v3
         with:
           push: true
-          file: "./docker/Dockerfile"
+          file: "./Dockerfile"
           platforms: linux/amd64,linux/arm64,linux/arm/v7
           build-args: |
             appVersion=${{ env.TAG_NAME }}
           tags: |
-            "${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
-            "${{env.BUILDKIT_IMAGE}}:latest"
-            "ghcr.io/${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
-            "ghcr.io/${{env.BUILDKIT_IMAGE}}:latest"
+            "${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
+            "${{vars.BUILDKIT_IMAGE}}:latest"
+            "ghcr.io/${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
+            "ghcr.io/${{vars.BUILDKIT_IMAGE}}:latest"

.github/workflows/tests.yml (new file, +289 lines)

@@ -0,0 +1,289 @@
name: Tests
on:
  push:
    branches:
      - main
      - nightly
  pull_request:
    branches:
      - main
env:
  IMAGE_NAME: mysql-bkup
jobs:
  test:
    runs-on: ubuntu-latest
    services:
      mysql:
        image: mysql:9
        env:
          MYSQL_ROOT_PASSWORD: password
          MYSQL_DATABASE: testdb
          MYSQL_USER: user
          MYSQL_PASSWORD: password
        ports:
          - 3306:3306
        options: >-
          --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5
      mysql8:
        image: mysql:8
        env:
          MYSQL_ROOT_PASSWORD: password
          MYSQL_DATABASE: testdb
          MYSQL_USER: user
          MYSQL_PASSWORD: password
        ports:
          - 3308:3306
        options: >-
          --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5
      mysql5:
        image: mysql:5
        env:
          MYSQL_ROOT_PASSWORD: password
          MYSQL_DATABASE: testdb
          MYSQL_USER: user
          MYSQL_PASSWORD: password
        ports:
          - 3305:3306
        options: >-
          --health-cmd="mysqladmin ping -h 127.0.0.1 -uuser -ppassword"
          --health-interval=10s
          --health-timeout=5s
          --health-retries=5
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Create Minio container
        run: |
          docker run -d --rm --name minio \
            --network host \
            -p 9000:9000 \
            -e MINIO_ACCESS_KEY=minioadmin \
            -e MINIO_SECRET_KEY=minioadmin \
            -e MINIO_REGION_NAME="eu" \
            minio/minio server /data
          echo "Create Minio container completed"
      - name: Install MinIO Client (mc)
        run: |
          curl -O https://dl.min.io/client/mc/release/linux-amd64/mc
          chmod +x mc
          sudo mv mc /usr/local/bin/
      - name: Wait for MinIO to be ready
        run: sleep 5
      - name: Configure MinIO Client
        run: |
          mc alias set local http://localhost:9000 minioadmin minioadmin
          mc alias list
      - name: Create MinIO Bucket
        run: |
          mc mb local/backups
          echo "Bucket backups created successfully."
      # Build the Docker image
      - name: Build Docker Image
        run: |
          docker buildx build --build-arg appVersion=test -t ${{ env.IMAGE_NAME }}:latest --load .
      - name: Verify Docker images
        run: |
          docker images
      - name: Wait for MySQL to be ready
        run: |
          docker run --rm --network host mysql:9 mysqladmin ping -h 127.0.0.1 -uuser -ppassword --wait
      - name: Test restore
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest restore -f init.sql
          echo "Database restore completed"
      - name: Test restore Mysql8
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_PORT=3308 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest restore -f init.sql
          echo "Test restore Mysql8 completed"
      - name: Test restore Mysql5
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_PORT=3305 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest restore -f init.sql
          echo "Test restore Mysql5 completed"
      - name: Test backup
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup
          echo "Database backup completed"
      - name: Test backup Mysql8
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_PORT=3308 \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup
          echo "Test backup Mysql8 completed"
      - name: Test backup Mysql5
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_PORT=3305 \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup
          echo "Test backup Mysql5 completed"
      - name: Test encrypted backup
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e GPG_PASSPHRASE=password \
            ${{ env.IMAGE_NAME }}:latest backup -d testdb --disable-compression --custom-name encrypted-bkup
          echo "Database encrypted backup completed"
      - name: Test restore encrypted backup | testdb -> testdb2
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e GPG_PASSPHRASE=password \
            -e DB_NAME=testdb2 \
            ${{ env.IMAGE_NAME }}:latest restore -f /backup/encrypted-bkup.sql.gpg
          echo "Test restore encrypted backup completed"
      - name: Test migrate database testdb -> testdb3
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e GPG_PASSPHRASE=password \
            -e DB_NAME=testdb \
            -e TARGET_DB_HOST=127.0.0.1 \
            -e TARGET_DB_PORT=3306 \
            -e TARGET_DB_NAME=testdb3 \
            -e TARGET_DB_USERNAME=root \
            -e TARGET_DB_PASSWORD=password \
            ${{ env.IMAGE_NAME }}:latest migrate
          echo "Test migrate database testdb -> testdb3 completed"
      - name: Test backup all databases
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=root \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup --all-databases
          echo "Database backup completed"
      - name: Test multiple backup
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e TESTDB2_DB_USERNAME=root \
            -e TESTDB2_DB_PASSWORD=password \
            -e TESTDB2_DB_HOST=127.0.0.1 \
            ${{ env.IMAGE_NAME }}:latest backup -c /backup/test_config.yaml
          echo "Database backup completed"
      - name: Test backup Minio (s3)
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            -e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \
            -e AWS_S3_BUCKET_NAME=backups \
            -e AWS_ACCESS_KEY=minioadmin \
            -e AWS_SECRET_KEY=minioadmin \
            -e AWS_DISABLE_SSL="true" \
            -e AWS_REGION="eu" \
            -e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest backup -s s3 --custom-name minio-backup
          echo "Test backup Minio (s3) completed"
      - name: Test restore Minio (s3)
        run: |
          docker run --rm --name ${{ env.IMAGE_NAME }} \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            -e AWS_S3_ENDPOINT="http://127.0.0.1:9000" \
            -e AWS_S3_BUCKET_NAME=backups \
            -e AWS_ACCESS_KEY=minioadmin \
            -e AWS_SECRET_KEY=minioadmin \
            -e AWS_DISABLE_SSL="true" \
            -e AWS_REGION="eu" \
            -e AWS_FORCE_PATH_STYLE="true" ${{ env.IMAGE_NAME }}:latest restore -s s3 -f minio-backup.sql.gz
          echo "Test restore Minio (s3) completed"
      - name: Test scheduled backup
        run: |
          docker run -d --rm --name ${{ env.IMAGE_NAME }} \
            -v ./migrations:/backup/ \
            --network host \
            -e DB_HOST=127.0.0.1 \
            -e DB_USERNAME=user \
            -e DB_PASSWORD=password \
            -e DB_NAME=testdb \
            ${{ env.IMAGE_NAME }}:latest backup -e "@every 10s"
          echo "Waiting for backup to be done..."
          sleep 25
          docker logs ${{ env.IMAGE_NAME }}
          echo "Test scheduled backup completed"
      # Cleanup: Stop and remove containers
      - name: Clean up
        run: |
          docker stop ${{ env.IMAGE_NAME }} || true
          docker rm ${{ env.IMAGE_NAME }} || true
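
The MinIO-backed S3 steps above can be reproduced outside CI for debugging; a rough local sketch, assuming Docker is available, a MySQL instance listens on 127.0.0.1:3306 with the same test credentials, and the image has been built as `mysql-bkup:latest` with the workflow's `docker buildx build` command:

```shell
# Throwaway MinIO server plus the bucket the tests expect
docker run -d --rm --name minio --network host \
  -e MINIO_ACCESS_KEY=minioadmin -e MINIO_SECRET_KEY=minioadmin \
  minio/minio server /data
mc alias set local http://localhost:9000 minioadmin minioadmin
mc mb local/backups

# Mirror of the "Test backup Minio (s3)" step
docker run --rm --network host \
  -e DB_HOST=127.0.0.1 -e DB_USERNAME=user -e DB_PASSWORD=password -e DB_NAME=testdb \
  -e AWS_S3_ENDPOINT="http://127.0.0.1:9000" -e AWS_S3_BUCKET_NAME=backups \
  -e AWS_ACCESS_KEY=minioadmin -e AWS_SECRET_KEY=minioadmin \
  -e AWS_DISABLE_SSL="true" -e AWS_REGION="eu" -e AWS_FORCE_PATH_STYLE="true" \
  mysql-bkup:latest backup -s s3 --custom-name minio-backup
```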

.golangci.yml (new file, +44 lines)

@@ -0,0 +1,44 @@
run:
  timeout: 5m
  allow-parallel-runners: true
issues:
  # don't skip warning about doc comments
  # don't exclude the default set of lint
  exclude-use-default: false
  # restore some of the defaults
  # (fill in the rest as needed)
  exclude-rules:
    - path: "internal/*"
      linters:
        - dupl
        - lll
        - goimports
linters:
  disable-all: true
  enable:
    - dupl
    - errcheck
    - copyloopvar
    - ginkgolinter
    - goconst
    - gocyclo
    - gofmt
    - gosimple
    - govet
    - ineffassign
    # - lll
    - misspell
    - nakedret
    - prealloc
    - revive
    - staticcheck
    - typecheck
    - unconvert
    - unparam
    - unused
linters-settings:
  revive:
    rules:
      - name: comment-spacings
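
This configuration is what the Lint workflow above executes. To reproduce the CI result locally, assuming golangci-lint v1.61 is installed to match the version pinned in lint.yml:

```shell
# Runs the linters enabled in .golangci.yml (picked up automatically from the repo root)
golangci-lint run ./...
```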

Dockerfile (new file, +47 lines)

@@ -0,0 +1,47 @@
FROM golang:1.25.1 AS build
WORKDIR /app
ARG appVersion=""
# Copy the source code.
COPY . .
# Installs Go dependencies
RUN go mod download
# Build
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-X 'github.com/jkaninda/mysql-bkup/utils.Version=${appVersion}'" -o /app/mysql-bkup
FROM alpine:3.22.1
ENV TZ=UTC
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG TEMPLATES_DIR="/config/templates"
ARG appVersion=""
ENV VERSION=${appVersion}
LABEL org.opencontainers.image.title="mysql-bkup"
LABEL org.opencontainers.image.description="A lightweight MySQL backup and restore tool"
LABEL org.opencontainers.image.licenses="MIT"
LABEL org.opencontainers.image.authors="Jonas Kaninda <me@jonaskaninda.com>"
LABEL org.opencontainers.image.version=${appVersion}
LABEL org.opencontainers.image.source="https://github.com/jkaninda/mysql-bkup"
RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata ca-certificates
RUN mkdir -p $WORKDIR $BACKUPDIR $TEMPLATES_DIR $BACKUP_TMP_DIR && \
chmod a+rw $WORKDIR $BACKUPDIR $BACKUP_TMP_DIR
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
COPY ./templates/* $TEMPLATES_DIR/
RUN chmod +x /usr/local/bin/mysql-bkup && \
ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
# Create backup script and make it executable
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
chmod +x /usr/local/bin/migrate
WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]
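
The `backup`, `restore`, and `migrate` wrapper scripts baked into the image forward their arguments to the matching `mysql-bkup` subcommand, which keeps invocations short when exec-ing into a running container. A usage sketch, assuming a container named `mysql-bkup` is already running with database credentials in its environment:

```shell
# Each wrapper is /bin/sh plus `mysql-bkup <subcommand> "$@"`
docker exec mysql-bkup backup -d mydatabase
docker exec mysql-bkup restore -d mydatabase -f /backup/mydatabase.sql.gz
```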

README.md (modified, 281 lines changed)

@@ -1,21 +1,59 @@
-# MySQL Backup
-MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
-It also supports __encrypting__ your backups using GPG.
+# MYSQL-BKUP
-The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
-It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
+**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
+It supports a variety of storage options and ensures data security through GPG encryption.
+It also supports database __encryption__ using GPG.
+MYSQL-BKUP is designed for seamless deployment on **Docker** and **Kubernetes**, simplifying MySQL backup, restoration, and migration across environments.
+It is a lightweight, multi-architecture solution compatible with **Docker**, **Docker Swarm**, **Kubernetes**, and other container orchestration platforms.
+[![Tests](https://github.com/jkaninda/mysql-bkup/actions/workflows/tests.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/tests.yml)
+[![Build](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml/badge.svg)](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
+[![Go Report](https://goreportcard.com/badge/github.com/jkaninda/mysql-bkup)](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
+![Docker Image Size (latest by date)](https://img.shields.io/docker/image-size/jkaninda/mysql-bkup?style=flat-square)
+![Docker Pulls](https://img.shields.io/docker/pulls/jkaninda/mysql-bkup?style=flat-square)
+<a href="https://ko-fi.com/jkaninda"><img src="https://uploads-ssl.webflow.com/5c14e387dab576fe667689cf/5cbed8a4ae2b88347c06c923_BuyMeACoffee_blue.png" height="20" alt="buy me a coffee"></a>
-Successfully tested on:
+## Features
+- **Flexible Storage Backends:**
+  - Local filesystem
+  - Amazon S3 & S3-compatible storage (e.g., MinIO, Wasabi)
+  - FTP
+  - SSH-compatible storage
+  - Azure Blob storage
+- **Data Security:**
+  - Backups can be encrypted using **GPG** to ensure confidentiality.
+- **Deployment Flexibility:**
+  - Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
+  - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
+  - Supports recurring backups of MySQL databases when deployed:
+    - On Docker for automated backup schedules.
+    - As a **Job** or **CronJob** on Kubernetes.
+- **Notifications:**
+  - Get real-time updates on backup success or failure via:
+    - **Telegram**
+    - **Email**
+## 💡Use Cases
+- **Scheduled Backups**: Automate recurring backups using Docker or Kubernetes.
+- **Disaster Recovery:** Quickly restore backups to a clean MySQL instance.
+- **Database Migration**: Seamlessly move data across environments using the built-in `migrate` feature.
+- **Secure Archiving:** Keep backups encrypted and safely stored in the cloud or remote servers.
+## ✅ Verified Platforms:
+MYSQL-BKUP has been tested and runs successfully on:
 - Docker
-- Docker in Swarm mode
+- Docker Swarm
 - Kubernetes
 - OpenShift
@@ -31,44 +69,75 @@ Successfully tested on:
 - [PostgreSQL](https://github.com/jkaninda/pg-bkup)
-## Storage:
-- Local
-- AWS S3 or any S3 Alternatives for Object Storage
-- SSH remote server
 ## Quickstart
-### Simple backup using Docker CLI
+### Simple Backup Using Docker CLI
-To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:
+To perform a one-time backup, bind your local volume to `/backup` in the container and run the `backup` command:
 ```shell
-docker run --rm --network your_network_name \
-  -v $PWD/backup:/backup/ \
-  -e "DB_HOST=dbhost" \
-  -e "DB_USERNAME=username" \
-  -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup backup -d database_name
+docker run --rm --network your_network_name \
+  -v $PWD/backup:/backup/ \
+  -e "DB_HOST=dbhost" \
+  -e "DB_PORT=3306" \
+  -e "DB_USERNAME=username" \
+  -e "DB_PASSWORD=password" \
+  jkaninda/mysql-bkup backup -d database_name
 ```
-Alternatively, pass a `--env-file` in order to use a full config as described below.
+Alternatively, use an environment file (`--env-file`) for configuration:
-```yaml
-docker run --rm --network your_network_name \
-  --env-file your-env-file \
-  -v $PWD/backup:/backup/ \
-  jkaninda/mysql-bkup backup -d database_name
+```shell
+docker run --rm --network your_network_name \
+  --env-file your-env-file \
+  -v $PWD/backup:/backup/ \
+  jkaninda/mysql-bkup backup -d database_name
 ```
-### Simple backup in docker compose file
+### Backup All Databases
+To back up all databases on the server, use the `--all-databases` or `-a` flag. By default, this creates individual backup files for each database.
+```shell
+docker run --rm --network your_network_name \
+  -v $PWD/backup:/backup/ \
+  -e "DB_HOST=dbhost" \
+  -e "DB_PORT=3306" \
+  -e "DB_USERNAME=username" \
+  -e "DB_PASSWORD=password" \
+  jkaninda/mysql-bkup backup --all-databases --disable-compression
+```
+> **Note:** Use the `--all-in-one` or `-A` flag to combine backups into a single file.
+---
+### Simple Restore Using Docker CLI
+To restore a database, bind your local volume to `/backup` and run the `restore` command:
+```shell
+docker run --rm --network your_network_name \
+  -v $PWD/backup:/backup/ \
+  -e "DB_HOST=dbhost" \
+  -e "DB_PORT=3306" \
+  -e "DB_USERNAME=username" \
+  -e "DB_PASSWORD=password" \
+  jkaninda/mysql-bkup restore -d database_name -f backup_file.sql.gz
+```
+---
+### Backup with Docker Compose
+Below is an example of a `docker-compose.yml` file for running a one-time backup:
 ```yaml
 services:
-  mysql-bkup:
-    # In production, it is advised to lock your image tag to a proper
-    # release version instead of using `latest`.
-    # Check https://github.com/jkaninda/mysql-bkup/releases
-    # for a list of available releases.
+  pg-bkup:
+    # In production, pin your image tag to a specific release version instead of `latest`.
+    # See available releases: https://github.com/jkaninda/mysql-bkup/releases
     image: jkaninda/mysql-bkup
     container_name: mysql-bkup
     command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
- TZ=Europe/Paris
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
### Recurring Backups with Docker
You can schedule recurring backups using the `--cron-expression` or `-e` flag:
```shell
docker run --rm --network network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m"
```
For predefined schedules, refer to the [documentation](https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules).
---
## Deploy on Kubernetes
For Kubernetes, you can deploy `mysql-bkup` as a Job or CronJob. Below are examples for both.
### Kubernetes Backup Job
This example defines a one-time backup job:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job # illustrative name
spec:
template:
spec:
containers:
- name: mysql-bkup
# Pin the image tag to a specific release version in production.
# See available releases: https://github.com/jkaninda/mysql-bkup/releases
image: jkaninda/mysql-bkup
command: ["backup", "-d", "dbname"]
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # Directory location on the host
type: Directory # Optional field
restartPolicy: Never
```
### Kubernetes CronJob for Scheduled Backups
For scheduled backups, use a `CronJob`:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: mysql-bkup-cronjob
spec:
schedule: "0 2 * * *" # Runs daily at 2 AM
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command: ["backup", "-d", "dbname"]
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup
type: Directory
restartPolicy: OnFailure
```
---
## 🚀 Why Use MYSQL-BKUP?
**MYSQL-BKUP** isn't just another MySQL backup tool; it's a robust, production-ready solution purpose-built for modern DevOps workflows.
Here's why developers, sysadmins, and DevOps engineers choose **MYSQL-BKUP**:
### ✅ All-in-One Backup, Restore & Migration
Whether you're backing up a single database, restoring critical data, or migrating across environments, MYSQL-BKUP handles it all with a **single, unified CLI**, no scripting gymnastics required.
### 🔄 Works Everywhere You Deploy
Designed to be cloud-native:
* **Runs seamlessly on Docker, Docker Swarm, and Kubernetes**
* Supports **CronJobs** for automated scheduled backups
* Compatible with GitOps and CI/CD workflows
### ☁️ Flexible Storage Integrations
Store your backups **anywhere**:
* Local disks
* Amazon S3, MinIO, Wasabi, Azure Blob, FTP, SSH
### 🔒 Enterprise-Grade Security
* **GPG Encryption**: Protect sensitive data with optional encryption before storing backups locally or in the cloud.
* **Secure Storage Options:** Supports S3, Azure Blob, SFTP, and SSH with encrypted transfers, keeping backups safe from unauthorized access.
### 📬 Instant Notifications
Stay in the loop with real-time notifications via **Telegram** and **Email**. Know immediately when a backup succeeds or fails.
### 🏃‍♂️ Lightweight and Fast
Written in **Go**, MYSQL-BKUP is fast, multi-arch compatible (`amd64`, `arm64`, `arm/v7`), and optimized for minimal memory and CPU usage. Ideal for both cloud and edge deployments.
### 🧪 Tested. Verified. Trusted.
Actively maintained with **automated testing**, **Docker image size optimizations**, and verified support across major container platforms.
---
## Available image registries
This Docker image is published to both Docker Hub and the GitHub container registry.
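You can pull it from either registry:
```shell
docker pull jkaninda/mysql-bkup
docker pull ghcr.io/jkaninda/mysql-bkup
```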
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
## Supported Engines
This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
While it may work with other container engines, there are no guarantees about support for non-Docker engines.
## References
We created this image as a simpler and more lightweight alternative to existing solutions. Here's why:
- **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
- **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
- **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
- **Kubernetes Support:** Designed to work seamlessly with Kubernetes.
## License

/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)

var BackupCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
pkg.StartBackup(cmd)
} else {
utils.Fatal("Error, no argument required")
utils.Fatal(`"backup" accepts no argument %q`, args)
}
},
}
func init() {
// Backup
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp, azure")
BackupCmd.PersistentFlags().StringP("path", "P", "", "Storage path without file name. e.g: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("cron-expression", "e", "", "Backup cron expression (e.g., `0 0 * * *` or `@daily`)")
BackupCmd.PersistentFlags().StringP("config", "c", "", "Configuration file for multi database backup. (e.g: `/backup/config.yaml`)")
BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
BackupCmd.PersistentFlags().BoolP("all-databases", "a", false, "Backup all databases")
BackupCmd.PersistentFlags().BoolP("all-in-one", "A", false, "Backup all databases in a single file")
BackupCmd.PersistentFlags().StringP("custom-name", "", "", "Custom backup name")
}

/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)

var MigrateCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
pkg.StartMigration(cmd)
} else {
utils.Fatal("Error, no argument required")
utils.Fatal(`"migrate" accepts no argument %q`, args)
}

/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (
"github.com/jkaninda/mysql-bkup/pkg"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)

var RestoreCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
pkg.StartRestore(cmd)
} else {
utils.Fatal("Error, no argument required")
utils.Fatal(`"restore" accepts no argument %q`, args)
}
}
func init() {
// Restore
RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
}

/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
)

var rootCmd = &cobra.Command{
Example: utils.MainExample,
Version: appVersion,
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.

/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
)
)

var VersionCmd = &cobra.Command{
}
func Version() {
fmt.Printf("Version: %s \n", appVersion)
fmt.Printf("Version: %s \n", utils.Version)
fmt.Println()
}

FROM golang:1.22.5 AS build
WORKDIR /app
# Copy the source code.
COPY . .
# Installs Go dependencies
RUN go mod download
# Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
FROM ubuntu:24.04
ENV DB_HOST="localhost"
ENV DB_NAME=""
ENV DB_USERNAME=""
ENV DB_PASSWORD=""
ENV DB_PORT=3306
ENV STORAGE=local
ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME=""
ENV AWS_ACCESS_KEY=""
ENV AWS_SECRET_KEY=""
ENV AWS_REGION="us-west-2"
ENV AWS_S3_PATH=""
ENV AWS_DISABLE_SSL="false"
ENV GPG_PASSPHRASE=""
ENV SSH_USER=""
ENV SSH_REMOTE_PATH=""
ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME=""
ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22"
ENV TARGET_DB_HOST=""
ENV TARGET_DB_PORT=3306
ENV TARGET_DB_NAME="localhost"
ENV TARGET_DB_USERNAME=""
ENV TARGET_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.8"
ENV BACKUP_CRON_EXPRESSION=""
ENV TG_TOKEN=""
ENV TG_CHAT_ID=""
ARG WORKDIR="/config"
ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron"
ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh"
LABEL author="Jonas Kaninda"
RUN apt-get update -qq
RUN apt install mysql-client cron gnupg -y
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \
touch $BACKUP_CRON_SCRIPT && \
chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup
RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
# Create backup script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
chmod +x /usr/local/bin/backup
# Create restore script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
chmod +x /usr/local/bin/restore
# Create migrate script and make it executable
RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
chmod +x /usr/local/bin/migrate
WORKDIR $WORKDIR
ENTRYPOINT ["/usr/local/bin/mysql-bkup"]

[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisor/supervisord.log
pidfile=/var/run/supervisord.pid
[program:cron]
command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
autostart=true
autorestart=true
user = root
stderr_logfile=/var/log/cron.err.log
stdout_logfile=/var/log/cron.out.log

description: >- # this means to ignore newlines until "baseurl:"
It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage.
baseurl: "" # the subpath of your site, e.g. /blog
url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com
url: "" # the base hostname & protocol for your site, e.g. http://example.com
twitter_username: jonaskaninda
github_username: jkaninda

docs/favicon.ico (new binary file, 4.2 KiB)

---
title: Azure Blob storage
layout: default
parent: How Tos
nav_order: 5
---
# Backup to Azure Blob Storage
To store your backups on Azure Blob Storage, you can configure the backup process to use the `--storage azure` option.
This section explains how to set up and configure Azure Blob-based backups.
---
## Configuration Steps
1. **Specify the Storage Type**
Add the `--storage azure` flag to your backup command.
2. **Set the Blob Path**
Optionally, specify a custom folder within your Azure Blob container where backups will be stored using the `--path` flag.
Example: `--path my-custom-path`.
3. **Required Environment Variables**
The following environment variables are mandatory for Azure Blob-based backups:
- `AZURE_STORAGE_CONTAINER_NAME`: The name of the Azure Blob container where backups will be stored.
- `AZURE_STORAGE_ACCOUNT_NAME`: The name of your Azure Storage account.
- `AZURE_STORAGE_ACCOUNT_KEY`: The access key for your Azure Storage account.
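For a one-off backup, the same settings can also be passed directly to the Docker CLI; a minimal sketch using the variables above, with placeholder values:
```shell
docker run --rm --network your_network_name \
-e "DB_HOST=mysql" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
-e "AZURE_STORAGE_CONTAINER_NAME=backup-container" \
-e "AZURE_STORAGE_ACCOUNT_NAME=account-name" \
-e "AZURE_STORAGE_ACCOUNT_KEY=xxxx" \
jkaninda/mysql-bkup backup --storage azure -d database --path my-custom-path
```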
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to Azure Blob Storage:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage azure -d database --path my-custom-path
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Azure Blob Configuration
- AZURE_STORAGE_CONTAINER_NAME=backup-container
- AZURE_STORAGE_ACCOUNT_NAME=account-name
- AZURE_STORAGE_ACCOUNT_KEY=Ppby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Custom Path**: Use the `--path` flag to specify a folder within your Azure Blob container for organizing backups.
- **Security**: Ensure your `AZURE_STORAGE_ACCOUNT_KEY` is kept secure and not exposed in public repositories.
- **Compatibility**: This configuration works with Azure Blob Storage and other compatible storage solutions.

View File

@@ -0,0 +1,61 @@
---
title: Backup all databases in the server
layout: default
parent: How Tos
nav_order: 12
---
# Backup All Databases
MySQL-Bkup supports backing up all databases on the server using the `--all-databases` (`-a`) flag. By default, this creates separate backup files for each database. If you prefer a single backup file, you can use the `--all-in-one` (`-A`) flag.
Backing up all databases is useful for creating a snapshot of the entire database server, whether for disaster recovery or migration purposes.
## Backup Modes
### Separate Backup Files (Default)
Using `--all-databases` without `--all-in-one` creates individual backup files for each database.
- Creates separate backup files for each database.
- Provides more flexibility in restoring individual databases or tables.
- Can be more manageable in cases where different databases have different retention policies.
- Might take slightly longer due to multiple file operations.
- It is the default behavior when using the `--all-databases` flag.
- It does not back up system databases (`information_schema`, `performance_schema`, `mysql`, `sys`, `innodb`, ...).
**Command:**
```bash
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup --all-databases
```
### Single Backup File
Using `--all-in-one` (`-A`) creates a single backup file containing all databases.
- Creates a single backup file containing all databases.
- Easier to manage if you need to restore everything at once.
- Faster to back up and restore in bulk.
- Can be problematic if you only need to restore a specific database or table.
- It is recommended to use this option for disaster recovery purposes.
- It backs up system databases as well.
```bash
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup --all-in-one
```
### When to Use Which?
- Use `--all-in-one` if you want a quick, simple backup for disaster recovery where you'll restore everything at once.
- Use `--all-databases` if you need granularity in restoring specific databases or tables without affecting others.

View File

@@ -0,0 +1,75 @@
---
title: Backup to FTP remote server
layout: default
parent: How Tos
nav_order: 4
---
# Backup to FTP Remote Server
To store your backups on an FTP remote server, you can configure the backup process to use the `--storage ftp` option.
This section explains how to set up and configure FTP-based backups.
---
## Configuration Steps
1. **Specify the Storage Type**
Add the `--storage ftp` flag to your backup command.
2. **Set the Remote Path**
Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
Example: `--path /home/jkaninda/backups`.
3. **Required Environment Variables**
The following environment variables are mandatory for FTP-based backups:
- `FTP_HOST`: The hostname or IP address of the FTP server.
- `FTP_PORT`: The FTP port (default is `21`).
- `FTP_USER`: The username for FTP authentication.
- `FTP_PASSWORD`: The password for FTP authentication.
- `REMOTE_PATH`: The directory on the FTP server where backups will be stored.
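For a one-off backup, the same settings can also be passed directly to the Docker CLI; a minimal sketch using the variables above, with placeholder values:
```shell
docker run --rm --network your_network_name \
-e "DB_HOST=mysql" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
-e "FTP_HOST=hostname" \
-e "FTP_PORT=21" \
-e "FTP_USER=user" \
-e "FTP_PASSWORD=password" \
-e "REMOTE_PATH=/home/jkaninda/backups" \
jkaninda/mysql-bkup backup --storage ftp -d database
```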
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to an FTP remote server:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage ftp -d database
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## FTP Configuration
- FTP_HOST="hostname"
- FTP_PORT=21
- FTP_USER=user
- FTP_PASSWORD=password
- REMOTE_PATH=/home/jkaninda/backups
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Security**: FTP transmits data, including passwords, in plaintext. For better security, consider using SFTP (SSH File Transfer Protocol) or FTPS (FTP Secure) if supported by your server.
- **Remote Path**: Ensure the `REMOTE_PATH` directory exists on the FTP server and is writable by the specified `FTP_USER`.

---
# Backup to AWS S3
To store your backups on AWS S3, you can configure the backup process to use the `--storage s3` option. This section explains how to set up and configure S3-based backups.
---
## Configuration Steps
1. **Specify the Storage Type**
Add the `--storage s3` flag to your backup command.
2. **Set the S3 Path**
Optionally, specify a custom folder within your S3 bucket where backups will be stored using the `--path` flag.
Example: `--path /my-custom-path`.
3. **Required Environment Variables**
The following environment variables are mandatory for S3-based backups:
- `AWS_S3_ENDPOINT`: The S3 endpoint URL (e.g., `https://s3.amazonaws.com`).
- `AWS_S3_BUCKET_NAME`: The name of the S3 bucket where backups will be stored.
- `AWS_REGION`: The AWS region where the bucket is located (e.g., `us-west-2`).
- `AWS_ACCESS_KEY`: Your AWS access key.
- `AWS_SECRET_KEY`: Your AWS secret key.
- `AWS_DISABLE_SSL`: Set to `"true"` if using an S3 alternative like Minio without SSL (default is `"false"`).
- `AWS_FORCE_PATH_STYLE`: Set to `"true"` if using an S3 alternative like Minio (default is `"false"`).
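For a one-off backup, the same settings can also be passed directly to the Docker CLI; a minimal sketch with placeholder values:
```shell
docker run --rm --network your_network_name \
-e "DB_HOST=mysql" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
-e "AWS_S3_ENDPOINT=https://s3.amazonaws.com" \
-e "AWS_S3_BUCKET_NAME=backup" \
-e "AWS_REGION=us-west-2" \
-e "AWS_ACCESS_KEY=xxxx" \
-e "AWS_SECRET_KEY=xxxx" \
jkaninda/mysql-bkup backup --storage s3 -d database --path /my-custom-path
```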
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to AWS S3:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage s3 -d database --path /my-custom-path
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS Configuration
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION=us-west-2
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## Optional: Disable SSL for S3 alternatives like Minio
- AWS_DISABLE_SSL="false"
## Optional: Enable path-style access for S3 alternatives like Minio
- AWS_FORCE_PATH_STYLE=false
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Recurring Backups to S3
To schedule recurring backups to S3, use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.
### Example: Recurring Backup Configuration
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage s3 -d database --cron-expression "0 1 * * *"
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS Configuration
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION=us-west-2
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## Optional: Define a cron schedule for recurring backups
#- BACKUP_CRON_EXPRESSION=0 1 * * *
## Optional: Delete old backups after a specified number of days
#- BACKUP_RETENTION_DAYS=7
## Optional: Disable SSL for S3 alternatives like Minio
- AWS_DISABLE_SSL="false"
## Optional: Enable path-style access for S3 alternatives like Minio
- AWS_FORCE_PATH_STYLE=false
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
- **S3 Alternatives**: If using an S3 alternative like Minio, set `AWS_DISABLE_SSL="true"` and `AWS_FORCE_PATH_STYLE="true"` as needed.

View File

---
title: Backup to SSH or SFTP
layout: default
parent: How Tos
nav_order: 3
---
# Backup to SFTP or SSH Remote Server
To store your backups on an `SFTP` or `SSH` remote server instead of the default storage, you can configure the backup process to use the `--storage ssh` or `--storage remote` option.
This section explains how to set up and configure SSH-based backups.
---
## Configuration Steps
1. **Specify the Storage Type**
Add the `--storage ssh` or `--storage remote` flag to your backup command.
2. **Set the Remote Path**
Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable.
Example: `--path /home/jkaninda/backups`.
3. **Required Environment Variables**
The following environment variables are mandatory for SSH-based backups:
- `SSH_HOST`: The hostname or IP address of the remote server.
- `SSH_USER`: The username for SSH authentication.
- `REMOTE_PATH`: The directory on the remote server where backups will be stored.
- `SSH_IDENTIFY_FILE`: The path to the private key file for SSH authentication.
- `SSH_PORT`: The SSH port (default is `22`).
- `SSH_PASSWORD`: (Optional) Use this only if you are not using a private key for authentication.
{: .note }
**Security Recommendation**: Using a private key (`SSH_IDENTIFY_FILE`) is strongly recommended over password-based authentication (`SSH_PASSWORD`) for better security.
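For a one-off backup, the same settings can also be passed directly to the Docker CLI; a minimal sketch using the variables above, with placeholder values and an identity file mounted into the container:
```shell
docker run --rm --network your_network_name \
-v $PWD/id_ed25519:/tmp/id_ed25519 \
-e "DB_HOST=mysql" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
-e "SSH_HOST=hostname" \
-e "SSH_PORT=22" \
-e "SSH_USER=user" \
-e "REMOTE_PATH=/home/jkaninda/backups" \
-e "SSH_IDENTIFY_FILE=/tmp/id_ed25519" \
jkaninda/mysql-bkup backup --storage ssh -d database
```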
---
## Example Configuration
Below is an example `docker-compose.yml` configuration for backing up to an SSH remote server:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage remote -d database
volumes:
- ./id_ed25519:/tmp/id_ed25519
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH Configuration
- SSH_HOST="hostname"
- SSH_PORT=22
- SSH_USER=user
- REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## Optional: Use password instead of private key (not recommended)
#- SSH_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Recurring Backups to SSH Remote Server
To schedule recurring backups, you can use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable.
This allows you to define a cron schedule for automated backups.
### Example: Recurring Backup Configuration
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database --storage ssh --cron-expression "@daily"
volumes:
- ./id_ed25519:/tmp/id_ed25519
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH Configuration
- SSH_HOST="hostname"
- SSH_PORT=22
- SSH_USER=user
- REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## Optional: Delete old backups after a specified number of days
#- BACKUP_RETENTION_DAYS=7
## Optional: Use password instead of private key (not recommended)
#- SSH_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.
- **Security**: Always prefer private key authentication (`SSH_IDENTIFY_FILE`) over password-based authentication (`SSH_PASSWORD`) for enhanced security.
---

nav_order: 1
---
# Backup Database
To back up your database, use the `backup` command.
This section explains how to configure and run backups, including recurring backups, using Docker or Kubernetes.
---
## Default Configuration
- **Storage**: By default, backups are stored locally in the `/backup` directory.
- **Compression**: Backups are compressed using `gzip` by default. Use the `--disable-compression` flag to disable compression.
- **Security**: It is recommended to create a dedicated user with read-only access for backup tasks.
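One way to provision such a user is sketched below; the user name, host, and exact grants are illustrative and may need adjusting for your MySQL version and backup options:
```shell
mysql -h mysql -u root -p -e "
CREATE USER 'bkup'@'%' IDENTIFIED BY 'strong-password';
GRANT SELECT, SHOW VIEW, TRIGGER, EVENT, LOCK TABLES, PROCESS ON *.* TO 'bkup'@'%';
FLUSH PRIVILEGES;"
```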
The backup process supports recurring backups on Docker or Docker Swarm. On Kubernetes, it can be deployed as a CronJob.
---
## Example: Basic Backup Configuration
Below is an example `docker-compose.yml` configuration for backing up a database:
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Backup Using Docker CLI
You can also run backups directly using the Docker CLI:
```bash
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
---
## Recurring Backups
To schedule recurring backups, use the `--cron-expression` (`-e`) flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups.
### Example: Recurring Backup Configuration
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database --cron-expression "0 1 * * *"
command: backup -d database --cron-expression @midnight
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Optional: Define a cron schedule for recurring backups
- BACKUP_CRON_EXPRESSION=@midnight
## Optional: Delete old backups after a specified number of days
#- BACKUP_RETENTION_DAYS=7
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Cron Expression**: Use the `--cron-expression` (`-e`) flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example:
- `@midnight`: Runs the backup daily at midnight.
- `0 1 * * *`: Runs the backup daily at 1:00 AM.
- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days.

title: Deploy on Kubernetes
layout: default
parent: How Tos
nav_order: 9
---
# Deploy on Kubernetes
To deploy MySQL Backup on Kubernetes, you can use a `Job` for one-time backups or restores, and a `CronJob` for recurring backups.
Below are examples for different use cases.
---
## Backup Job to S3 Storage
This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an S3-compatible storage.
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job # illustrative name
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage s3
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Use Kubernetes Secrets for sensitive data like passwords
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
- name: AWS_FORCE_PATH_STYLE
value: "false"
restartPolicy: Never
```
---
## Backup Job to SSH Remote Server
This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an SSH remote server.
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-to-ssh # illustrative name
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage ssh --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
# Please use secret!
value: "postgres"
# Use Kubernetes Secrets for sensitive data like passwords
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxxx"
- name: SSH_REMOTE_PATH
value: "/home/toto/backup"
# Optional: Required if you want to encrypt your backup
- name: GPG_PASSPHRASE
value: "xxxx"
restartPolicy: Never
```
---
## Restore Job
This example demonstrates how to configure a Kubernetes `Job` to restore a MySQL database from a backup stored on an SSH remote server.
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: restore-job # illustrative name
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- restore --storage ssh --file store_20231219_022941.sql.gz
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "postgres"
# Use Kubernetes Secrets for sensitive data like passwords
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "xxx"
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_PASSWORD
value: "xxxx"
- name: SSH_REMOTE_PATH
value: "/home/toto/backup"
# Optional: Required if your backup was encrypted
#- name: GPG_PASSPHRASE
# value: "xxxx"
restartPolicy: Never
```
---
## Recurring Backup with CronJob
This example demonstrates how to configure a Kubernetes `CronJob` for recurring backups to an SSH remote server.
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: backup-cronjob # illustrative name
spec:
schedule: "0 1 * * *" # illustrative schedule
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage ssh --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "test"
- name: DB_USERNAME
value: "postgres"
# Use Kubernetes Secrets for sensitive data like passwords
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "192.168.1.16"
- name: SSH_PORT
value: "2222"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/config/backup"
- name: SSH_PASSWORD
value: "password"
# Optional: Required if you want to encrypt your backup
#- name: GPG_PASSPHRASE
# value: "xxx"
restartPolicy: Never
```
---
## Kubernetes Rootless Deployment
This example demonstrates how to run the backup container in a rootless environment, suitable for platforms like OpenShift.
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-rootless # illustrative name
spec:
template:
spec:
securityContext:
runAsUser: 1000 # illustrative user ID
runAsGroup: 3000
fsGroup: 2000
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup --storage ssh --disable-compression
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: "test"
- name: DB_USERNAME
value: "postgres"
# Use Kubernetes Secrets for sensitive data like passwords
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: "192.168.1.16"
- name: SSH_PORT
value: "2222"
- name: SSH_USER
value: "jkaninda"
- name: SSH_REMOTE_PATH
value: "/config/backup"
- name: SSH_PASSWORD
value: "password"
# Optional: Required if you want to encrypt your backup
#- name: GPG_PASSPHRASE
# value: "xxx"
restartPolicy: OnFailure
```
---
## Migrate Database
This example demonstrates how to configure a Kubernetes `Job` to migrate a MySQL database from one server to another.
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: migrate-db
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- migrate
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
## Source Database
- name: DB_HOST
value: "postgres"
- name: DB_PORT
value: "3306"
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
- name: DB_PASSWORD
value: "password"
## Target Database
- name: TARGET_DB_HOST
value: "target-postgres"
- name: TARGET_DB_PORT
value: "3306"
- name: TARGET_DB_NAME
value: "dbname"
- name: TARGET_DB_USERNAME
value: "username"
- name: TARGET_DB_PASSWORD
value: "password"
restartPolicy: Never
```
---
## Key Notes
- **Security**: Always use Kubernetes Secrets for sensitive data like passwords and access keys.
- **Resource Limits**: Adjust resource limits (`memory` and `cpu`) based on your workload requirements.
- **Cron Schedule**: Use standard cron expressions for scheduling recurring backups.
- **Rootless Deployment**: The image supports running in rootless environments, making it suitable for platforms like OpenShift.
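For the first point, a secret can be created once and referenced from the manifests instead of hard-coding passwords; a minimal sketch with placeholder names:
```shell
kubectl create secret generic db-credentials --from-literal=password='your-password'
```
The container can then read it through `valueFrom.secretKeyRef` on the `DB_PASSWORD` environment variable instead of a literal `value`.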

---
title: Update deprecated configurations
layout: default
parent: How Tos
nav_order: 11
---

title: Encrypt backups using GPG
layout: default
parent: How Tos
nav_order: 8
---
# Encrypt Backup
The image supports encrypting backups using one of two methods: **GPG with a passphrase** or **GPG with a public key**. When a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted and saved as a `.sql.gpg` or `.sql.gz.gpg` file.
{: .warning }
To restore an encrypted backup, you must provide the same GPG passphrase or private key used during the backup process.
---
## Key Features
- **GPG Home Directory**: `/config/gnupg`
- **Cipher Algorithm**: `aes256`
- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored automatically without manual decryption.
- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption before restoration.
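For example, a passphrase-encrypted archive can be restored directly; a minimal sketch, assuming the backup was created with the same `GPG_PASSPHRASE` (the file name is illustrative):
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=mysql" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
-e "GPG_PASSPHRASE=my-secure-passphrase" \
jkaninda/mysql-bkup restore -d database -f backup_file.sql.gz.gpg
```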
### Backup
---
```yml
## Using GPG Passphrase
To encrypt backups using a GPG passphrase, set the `GPG_PASSPHRASE` environment variable. The backup will be encrypted and can be restored automatically.
### Example Configuration
```yaml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database
@@ -46,9 +46,75 @@ services:
- DB_PASSWORD=password
## Required to encrypt backup
- GPG_PASSPHRASE=my-secure-passphrase
# mysql-bkup container must be connected to the same network with your database
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Using GPG Public Key
To encrypt backups using a GPG public key, set the `GPG_PUBLIC_KEY` environment variable to the path of your public key file. Backups encrypted with a public key require manual decryption before restoration.
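If you do not yet have a public key file to mount, one can typically be exported with standard `gnupg` tooling. A minimal sketch, assuming a key already exists for the placeholder identity `backup@example.com`:
```bash
# Export the ASCII-armored public key to a file the container can mount
gpg --armor --export backup@example.com > public_key.asc
```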
### Example Configuration
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup -d database
volumes:
- ./backup:/backup
- ./public_key.asc:/config/public_key.asc
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Required to encrypt backup
- GPG_PUBLIC_KEY=/config/public_key.asc
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Manual Decryption
If you encrypted your backup using a GPG public key, you must manually decrypt it before restoration. Use the `gnupg` tool for decryption.
### Decrypt Using a Passphrase
```bash
gpg --batch --passphrase "my-passphrase" \
--output database_20240730_044201.sql.gz \
--decrypt database_20240730_044201.sql.gz.gpg
```
### Decrypt Using a Private Key
```bash
gpg --output database_20240730_044201.sql.gz \
--decrypt database_20240730_044201.sql.gz.gpg
```
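Decrypting with a private key assumes that key is present in your local keyring. A minimal sketch of importing it first (`private_key.asc` is a placeholder file name):
```bash
# Import the private key into the local GPG keyring before decrypting
gpg --import private_key.asc
```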
---
## Key Notes
- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored directly without manual decryption.
- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption using the corresponding private key.
- **Security**: Always keep your GPG passphrase and private key secure. Use Kubernetes Secrets or other secure methods to manage sensitive data.

View File

@@ -2,130 +2,105 @@
title: Migrate database
layout: default
parent: How Tos
nav_order: 9
nav_order: 10
---
# Migrate database
# Migrate Database
To migrate the database, you need to add `migrate` command.
To migrate a MySQL database from a source to a target database, you can use the `migrate` command. This feature simplifies the process by combining the backup and restore operations into a single step.
{: .note }
The Mysql backup has another great feature: migrating your database from a source database to a target.
As you know, to restore a database from a source to a target database, you need 2 operations: which is to start by backing up the source database and then restoring the source backed database to the target database.
Instead of proceeding like that, you can use the integrated feature `(migrate)`, which will help you migrate your database by doing only one operation.
The `migrate` command eliminates the need for separate backup and restore operations. It directly transfers data from the source database to the target database.
{: .warning }
The `migrate` operation is irreversible, please backup your target database before this action.
The `migrate` operation is **irreversible**. Always back up your target database before performing this action.
### Docker compose
```yml
---
## Configuration Steps
1. **Source Database**: Provide connection details for the source database.
2. **Target Database**: Provide connection details for the target database.
3. **Run the Migration**: Use the `migrate` command to initiate the migration.
---
## Example: Docker Compose Configuration
Below is an example `docker-compose.yml` configuration for migrating a database:
```yaml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: migrate
volumes:
- ./backup:/backup
environment:
## Source database
## Source Database
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Target database
- TARGET_DB_HOST=target-mysql
## Target Database
- TARGET_DB_HOST=target-mysql
- TARGET_DB_PORT=3306
- TARGET_DB_NAME=dbname
- TARGET_DB_USERNAME=username
- TARGET_DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
### Migrate database using Docker CLI
## Migrate Database Using Docker CLI
You can also run the migration directly using the Docker CLI. Below is an example:
```
## Source database
DB_HOST=mysql
### Environment Variables
Save your source and target database connection details in an environment file (e.g., `your-env`):
```bash
## Source Database
DB_HOST=mysql
DB_PORT=3306
DB_NAME=dbname
DB_USERNAME=username
DB_PASSWORD=password
## Target database
TARGET_DB_HOST=target-mysql
## Target Database
TARGET_DB_HOST=target-mysql
TARGET_DB_PORT=3306
TARGET_DB_NAME=dbname
TARGET_DB_USERNAME=username
TARGET_DB_PASSWORD=password
```
```shell
docker run --rm --network your_network_name \
--env-file your-env
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup migrate
### Run the Migration
```bash
docker run --rm --network your_network_name \
--env-file your-env \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup migrate
```
## Kubernetes
---
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: migrate-db
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- migrate
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
## Source Database
- name: DB_HOST
value: "mysql"
- name: DB_PORT
value: "3306"
- name: DB_NAME
value: "dbname"
- name: DB_USERNAME
value: "username"
- name: DB_PASSWORD
value: "password"
## Target Database
- name: TARGET_DB_HOST
value: "target-mysql"
- name: TARGET_DB_PORT
value: "3306"
- name: TARGET_DB_NAME
value: "dbname"
- name: TARGET_DB_USERNAME
value: "username"
- name: TARGET_DB_PASSWORD
value: "password"
restartPolicy: Never
```
## Key Notes
- **Irreversible Operation**: The `migrate` command directly transfers data from the source to the target database. Ensure you have a backup of the target database before proceeding.
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your source and target databases.

View File

@@ -0,0 +1,103 @@
---
title: Run multiple database backup schedules in the same container
layout: default
parent: How Tos
nav_order: 11
---
# Multiple Backup Schedules
This tool supports running multiple database backup schedules within the same container.
You can configure these schedules with different settings using a **configuration file**. This flexibility allows you to manage backups for multiple databases efficiently.
---
## Configuration File Setup
The configuration file can be mounted into the container at `/config/config.yaml`, `/config/config.yml`, or specified via the `BACKUP_CONFIG_FILE` environment variable.
### Key Features:
- **Global Environment Variables**: Use these for databases that share the same configuration.
- **Database-Specific Overrides**: Override global settings for individual databases by specifying them in the configuration file or by using the database name as a prefix or suffix in the variable name (e.g., `DB_HOST_DATABASENAME` or `DATABASENAME_DB_HOST`); see the sketch after this list.
- **Global Cron Expression**: Define a global `cronExpression` in the configuration file to schedule backups for all databases. If omitted, backups will run immediately.
- **Configuration File Path**: Specify the configuration file path using:
- The `BACKUP_CONFIG_FILE` environment variable.
- The `--config` or `-c` flag for the backup command.
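For example, overriding settings for a single database via suffixed environment variables might look like the following sketch (all values are placeholders):
```yaml
environment:
  ## Global settings shared by all databases
  - DB_USERNAME=backup
  - DB_PASSWORD=global-password
  ## Overrides applied only to the database named "database1"
  - DB_HOST_DATABASE1=mysql1
  - DB_PASSWORD_DATABASE1=database1-password
```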
---
## Configuration File Example
Below is an example configuration file (`config.yaml`) that defines multiple databases and their respective backup settings:
```yaml
# Optional: Define a global cron expression for scheduled backups.
# Example: "@every 20m" (runs every 20 minutes). If omitted, backups run immediately.
cronExpression: "" # Optional: Define a global cron expression for scheduled backups.
backupRescueMode: false # Optional: Set to true to enable rescue mode for backups.
databases:
- host: mysql1 # Optional: Overrides DB_HOST or uses DB_HOST_DATABASE1.
port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_DATABASE1.
name: database1 # Required: Database name.
user: database1 # Optional: Overrides DB_USERNAME or uses DB_USERNAME_DATABASE1.
password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_DATABASE1.
path: /s3-path/database1 # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
- host: mysql2 # Optional: Overrides DB_HOST or uses DB_HOST_LLDAP.
port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_LLDAP.
name: lldap # Required: Database name.
user: lldap # Optional: Overrides DB_USERNAME or uses DB_USERNAME_LLDAP.
password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_LLDAP.
path: /s3-path/lldap # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
- host: mysql3 # Optional: Overrides DB_HOST or uses DB_HOST_KEYCLOAK.
port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_KEYCLOAK.
name: keycloak # Required: Database name.
user: keycloak # Optional: Overrides DB_USERNAME or uses DB_USERNAME_KEYCLOAK.
password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_KEYCLOAK.
path: /s3-path/keycloak # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
- host: mysql4 # Optional: Overrides DB_HOST or uses DB_HOST_JOPLIN.
port: 3306 # Optional: Default is 3306. Overrides DB_PORT or uses DB_PORT_JOPLIN.
name: joplin # Required: Database name.
user: joplin # Optional: Overrides DB_USERNAME or uses DB_USERNAME_JOPLIN.
password: password # Optional: Overrides DB_PASSWORD or uses DB_PASSWORD_JOPLIN.
path: /s3-path/joplin # Required: Backup path for SSH, FTP, or S3 (e.g., /home/toto/backup/).
```
---
## Docker Compose Configuration
To use the configuration file in a Docker Compose setup, mount the file and specify its path using the `BACKUP_CONFIG_FILE` environment variable.
### Example: Docker Compose File
```yaml
services:
mysql-bkup:
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup # Or: backup --config /backup/config.yaml
volumes:
- ./backup:/backup # Mount the backup directory
- ./config.yaml:/backup/config.yaml # Mount the configuration file
environment:
## Specify the path to the configuration file
- BACKUP_CONFIG_FILE=/backup/config.yaml
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---

View File

@@ -0,0 +1,197 @@
---
title: Receive notifications
layout: default
parent: How Tos
nav_order: 13
---
# Receive Notifications
You can configure the system to send email or Telegram notifications when a backup succeeds or fails.
This section explains how to set up and customize notifications.
---
## Email Notifications
To send email notifications, provide SMTP credentials, a sender address, and recipient addresses. Notifications will be sent for both successful and failed backup runs.
### Example: Email Notification Configuration
```yaml
services:
mysql-bkup:
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SMTP Configuration
- MAIL_HOST=smtp.example.com
- MAIL_PORT=587
- MAIL_USERNAME=your-email@example.com
- MAIL_PASSWORD=your-email-password
- MAIL_FROM=Backup Jobs <backup@example.com>
## Multiple recipients separated by a comma
- MAIL_TO=me@example.com,team@example.com,manager@example.com
- MAIL_SKIP_TLS=false
## Time format for notifications
- TIME_FORMAT=2006-01-02 at 15:04:05
## Backup reference (e.g., database/cluster name or server name)
- BACKUP_REFERENCE=database/Paris cluster
networks:
- web
networks:
web:
```
---
## Telegram Notifications
To send Telegram notifications, provide your bot token and chat ID. Notifications will be sent for both successful and failed backup runs.
### Example: Telegram Notification Configuration
```yaml
services:
mysql-bkup:
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Telegram Configuration
- TG_TOKEN=[BOT ID]:[BOT TOKEN]
- TG_CHAT_ID=your-chat-id
## Time format for notifications
- TIME_FORMAT=2006-01-02 at 15:04:05
## Backup reference (e.g., database/cluster name or server name)
- BACKUP_REFERENCE=database/Paris cluster
networks:
- web
networks:
web:
```
---
## Customize Notifications
You can customize the title and body of notifications using Go templates. Template files must be mounted inside the container at `/config/templates` (see the mounting sketch after this list). The following templates are supported:
- `email.tmpl`: Template for successful email notifications.
- `telegram.tmpl`: Template for successful Telegram notifications.
- `email-error.tmpl`: Template for failed email notifications.
- `telegram-error.tmpl`: Template for failed Telegram notifications.
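A minimal Docker Compose sketch of mounting custom templates at the expected path (the local `./templates` directory is an example):
```yaml
services:
  mysql-bkup:
    image: jkaninda/mysql-bkup
    command: backup
    volumes:
      - ./backup:/backup
      ## Custom notification templates override the defaults
      - ./templates:/config/templates
```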
### Template Data
The following data is passed to the templates:
- `Database`: Database name.
- `StartTime`: Backup start time.
- `EndTime`: Backup end time.
- `Storage`: Backup storage type (e.g., local, S3, SSH).
- `BackupLocation`: Backup file location.
- `BackupSize`: Backup file size in bytes.
- `BackupReference`: Backup reference (e.g., database/cluster name or server name).
- `Error`: Error message (only for error templates).
---
### Example Templates
#### `email.tmpl` (Successful Backup)
```html
<h2>Hi,</h2>
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
<h3>Backup Details:</h3>
<ul>
<li>Database Name: {{.Database}}</li>
<li>Backup Start Time: {{.StartTime}}</li>
<li>Backup End Time: {{.EndTime}}</li>
<li>Backup Storage: {{.Storage}}</li>
<li>Backup Location: {{.BackupLocation}}</li>
<li>Backup Size: {{.BackupSize}} bytes</li>
<li>Backup Reference: {{.BackupReference}}</li>
</ul>
<p>Best regards,</p>
```
#### `telegram.tmpl` (Successful Backup)
```html
✅ Database Backup Notification {{.Database}}
Hi,
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.
Backup Details:
- Database Name: {{.Database}}
- Backup Start Time: {{.StartTime}}
- Backup End Time: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}} bytes
- Backup Reference: {{.BackupReference}}
```
#### `email-error.tmpl` (Failed Backup)
```html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>🔴 Urgent: Database Backup Failure Notification</title>
</head>
<body>
<h2>Hi,</h2>
<p>An error occurred during database backup.</p>
<h3>Failure Details:</h3>
<ul>
<li>Error Message: {{.Error}}</li>
<li>Date: {{.EndTime}}</li>
<li>Backup Reference: {{.BackupReference}}</li>
</ul>
</body>
</html>
```
#### `telegram-error.tmpl` (Failed Backup)
```html
🔴 Urgent: Database Backup Failure Notification
An error occurred during database backup.
Failure Details:
Error Message: {{.Error}}
Date: {{.EndTime}}
Backup Reference: {{.BackupReference}}
```
---
## Key Notes
- **SMTP Configuration**: Ensure your SMTP server supports TLS unless `MAIL_SKIP_TLS` is set to `true`.
- **Telegram Configuration**: Obtain your bot token and chat ID from Telegram.
- **Custom Templates**: Mount custom templates to `/config/templates` to override default notifications.
- **Time Format**: Use the `TIME_FORMAT` environment variable to customize the timestamp format in notifications.

View File

@@ -2,94 +2,74 @@
title: Restore database from AWS S3
layout: default
parent: How Tos
nav_order: 5
nav_order: 6
---
# Restore database from S3 storage
# Restore Database from S3 Storage
To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
To restore a MySQL database from a backup stored in S3, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:
{: .note }
It supports __.sql__ and __.sql.gz__ compressed file.
- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)
### Restore
---
```yml
## Configuration Steps
1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Set the Storage Type**: Add the `--storage s3` flag to indicate that the backup is stored in S3.
3. **Provide S3 Configuration**: Include the necessary AWS S3 credentials and configuration.
4. **Provide Database Credentials**: Ensure the correct database connection details are provided.
---
## Example: Restore from S3 Configuration
Below is an example `docker-compose.yml` configuration for restoring a database from S3 storage:
```yaml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path
volumes:
- ./backup:/backup
- ./backup:/backup # Mount the directory for local operations (if needed)
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## AWS configurations
## AWS S3 Configuration
- AWS_S3_ENDPOINT=https://s3.amazonaws.com
- AWS_S3_BUCKET_NAME=backup
- AWS_REGION="us-west-2"
- AWS_REGION=us-west-2
- AWS_ACCESS_KEY=xxxx
- AWS_SECRET_KEY=xxxxx
## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
- AWS_DISABLE_SSL="false"
# mysql-bkup container must be connected to the same network with your database
## Optional: Disable SSL for S3 alternatives like Minio
- AWS_DISABLE_SSL=false
## Optional: Enable path-style access for S3 alternatives like Minio
- AWS_FORCE_PATH_STYLE=false
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
## Restore on Kubernetes
---
Simple Kubernetes restore Job:
## Key Notes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: restore-db
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: AWS_S3_ENDPOINT
value: "https://s3.amazonaws.com"
- name: AWS_S3_BUCKET_NAME
value: "xxx"
- name: AWS_REGION
value: "us-west-2"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: AWS_SECRET_KEY
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
restartPolicy: Never
backoffLimit: 4
```
- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **S3 Path**: Use the `--path` flag to specify the folder within the S3 bucket where the backup file is located.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **S3 Alternatives**: For S3-compatible storage such as Minio, set `AWS_DISABLE_SSL` and `AWS_FORCE_PATH_STYLE` as needed (see the sketch below).
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
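For example, a Minio-style configuration might look like this sketch (endpoint and credentials are placeholders):
```yaml
environment:
  - AWS_S3_ENDPOINT=http://minio:9000
  - AWS_S3_BUCKET_NAME=backup
  - AWS_REGION=us-east-1
  - AWS_ACCESS_KEY=minio-access-key
  - AWS_SECRET_KEY=minio-secret-key
  ## Minio over plain HTTP, without virtual-hosted bucket addressing
  - AWS_DISABLE_SSL=true
  - AWS_FORCE_PATH_STYLE=true
```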

View File

@@ -2,92 +2,73 @@
title: Restore database from SSH
layout: default
parent: How Tos
nav_order: 6
nav_order: 7
---
# Restore database from SSH remote server
To restore the database from your remote server, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
# Restore Database from SSH Remote Server
{: .note }
It supports __.sql__ and __.sql.gz__ compressed file.
To restore a MySQL database from a backup stored on an SSH remote server, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats:
### Restore
- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)
```yml
---
## Configuration Steps
1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Set the Storage Type**: Add the `--storage ssh` flag to indicate that the backup is stored on an SSH remote server.
3. **Provide SSH Configuration**: Include the necessary SSH credentials and configuration.
4. **Provide Database Credentials**: Ensure the correct database connection details are provided.
---
## Example: Restore from SSH Remote Server Configuration
Below is an example `docker-compose.yml` configuration for restoring a database from an SSH remote server:
```yaml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups
volumes:
- ./backup:/backup
- ./backup:/backup # Mount the directory for local operations (if needed)
- ./id_ed25519:/tmp/id_ed25519 # Mount the SSH private key file
environment:
- DB_PORT=3306
- DB_HOST=postgres
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## SSH config
- SSH_HOST_NAME="hostname"
## SSH Configuration
- SSH_HOST_NAME=hostname
- SSH_PORT=22
- SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password
## Optional: Use password instead of private key (not recommended)
#- SSH_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
## Restore on Kubernetes
Simple Kubernetes restore Job:
---
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: restore-db
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- restore -s ssh -f store_20231219_022941.sql.gz
env:
- name: DB_PORT
value: "3306"
- name: DB_HOST
value: ""
- name: DB_NAME
value: ""
- name: DB_USERNAME
value: ""
# Please use secret!
- name: DB_PASSWORD
value: ""
- name: SSH_HOST_NAME
value: ""
- name: SSH_PORT
value: "22"
- name: SSH_USER
value: "xxx"
- name: SSH_REMOTE_PATH
value: "/home/jkaninda/backups"
- name: AWS_ACCESS_KEY
value: "xxxx"
- name: SSH_IDENTIFY_FILE
value: "/tmp/id_ed25519"
restartPolicy: Never
backoffLimit: 4
```
## Key Notes
- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **SSH Path**: Use the `--path` flag to specify the folder on the SSH remote server where the backup file is located.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption.
- **SSH Authentication**: Use a private key (`SSH_IDENTIFY_FILE`) for SSH authentication instead of a password for better security (see the key-generation sketch after this list).
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
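As an illustration, a key pair suitable for `SSH_IDENTIFY_FILE` can be generated with standard OpenSSH tooling (file names and the remote address are examples):
```bash
# Generate an ed25519 key pair without a passphrase
ssh-keygen -t ed25519 -f ./id_ed25519 -N ""
# Install the public key for the backup user on the remote server
ssh-copy-id -i ./id_ed25519.pub user@remote-server
```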

View File

@@ -2,39 +2,63 @@
title: Restore database
layout: default
parent: How Tos
nav_order: 4
nav_order: 5
---
# Restore database
To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.
# Restore Database
{: .note }
It supports __.sql__ and __.sql.gz__ compressed file.
To restore a MySQL database, use the `restore` command and specify the backup file to restore with the `--file` flag.
### Restore
The system supports the following file formats:
```yml
- `.sql` (uncompressed SQL dump)
- `.sql.gz` (gzip-compressed SQL dump)
- `.sql.gpg` (GPG-encrypted SQL dump)
- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump)
---
## Configuration Steps
1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore.
2. **Provide Database Credentials**: Ensure the correct database connection details are provided.
---
## Example: Restore Configuration
Below is an example `docker-compose.yml` configuration for restoring a database:
```yaml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
# In production, lock your image tag to a specific release version
# instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases
# for available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: restore -d database -f store_20231219_022941.sql.gz
volumes:
- ./backup:/backup
- ./backup:/backup # Mount the directory containing the backup file
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web
networks:
web:
```
---
## Key Notes
- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files.
- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption (see the sketch after this list).
- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database.
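As an illustration, restoring a passphrase-encrypted backup from the Docker CLI might look like this sketch (all connection values are placeholders):
```shell
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=mysql" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  -e "GPG_PASSPHRASE=my-secure-passphrase" \
  jkaninda/mysql-bkup restore -d database -f store_20231219_022941.sql.gz.gpg
```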

View File

@@ -6,146 +6,92 @@ nav_order: 1
# About mysql-bkup
{:.no_toc}
MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH remote storage.
It also supports __encrypting__ your backups using GPG.
We are open to receiving stars, PRs, and issues!
**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
It supports a variety of storage options and ensures data security through GPG encryption.
**MYSQL-BKUP** is designed for seamless deployment on **Docker** and **Kubernetes**, simplifying MySQL backup, restoration, and migration across environments.
It is a lightweight, multi-architecture solution compatible with **Docker**, **Docker Swarm**, **Kubernetes**, and other container orchestration platforms.
---
## Key Features
### Storage Options
- **Local storage**
- **AWS S3** or any S3-compatible object storage
- **FTP**
- **SFTP**
- **SSH-compatible storage**
- **Azure Blob storage**
### Data Security
- Backups can be encrypted using **GPG** to ensure data confidentiality.
### Deployment Flexibility
- Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
- Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
- Supports recurring backups of MySQL databases:
- On Docker for automated backup schedules.
- As a **Job** or **CronJob** on Kubernetes.
### Notifications
- Receive real-time updates on backup success or failure via:
- **Telegram**
- **Email**
---
## 💡 Use Cases
- **Scheduled Backups**: Automate recurring backups using Docker or Kubernetes.
- **Disaster Recovery**: Quickly restore backups to a clean MySQL instance.
- **Database Migration**: Seamlessly move data across environments using the built-in `migrate` feature.
- **Secure Archiving**: Keep backups encrypted and safely stored in the cloud or on remote servers.
## ✅ Verified Platforms
MYSQL-BKUP has been tested and runs successfully on:
- Docker
- Docker Swarm
- Kubernetes
- OpenShift
---
## Get Involved
We welcome contributions! Feel free to give us a ⭐, submit PRs, or open issues on our [GitHub repository](https://github.com/jkaninda/mysql-bkup).
{: .fs-6 .fw-300 }
---
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of MySQL databases on Docker and can be deployed as a __CronJob on Kubernetes__ using local, AWS S3, or SSH-compatible storage.
It also supports database __encryption__ using GPG.
{: .note }
Code and documentation for `v1` version on [this branch][v1-branch].
Code and documentation for the `v1` version are available on [this branch][v1-branch].
[v1-branch]: https://github.com/jkaninda/mysql-bkup
---
## Quickstart
## Available Image Registries
### Simple backup using Docker CLI
The Docker image is published to both **Docker Hub** and the **GitHub Container Registry**. You can use either of the following:
To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command:
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
Alternatively, pass a `--env-file` in order to use a full config as described below.
```yaml
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
### Simple backup in docker compose file
```yaml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
# mysql-bkup container must be connected to the same network with your database
networks:
- web
networks:
web:
```
## Kubernetes
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # directory location on host
type: Directory # this field is optional
restartPolicy: Never
```
## Available image registries
This Docker image is published to both Docker Hub and the GitHub container registry.
Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`:
```
```bash
docker pull jkaninda/mysql-bkup
docker pull ghcr.io/jkaninda/mysql-bkup
```
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
While the documentation references Docker Hub, all examples work seamlessly with `ghcr.io`.
## Supported Engines
This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
While it may work against different implementations, there are no guarantees about support for non-Docker engines.
---
## References
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
We created this image as a simpler and more lightweight alternative to existing solutions. Here's why:
- The original image is based on `ubuntu` and requires additional tools, making it heavy.
- This image is written in Go.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
- Kubernetes is supported.
- **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage.
- **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures.
- **Docker Swarm Support:** Fully compatible with Docker in Swarm mode.
- **Kubernetes Support:** Designed to work seamlessly with Kubernetes.

212
docs/quickstart/index.md Normal file
View File

@@ -0,0 +1,212 @@
---
title: Quickstart
layout: home
nav_order: 2
---
# Quickstart
This guide provides quick examples for running backups using Docker CLI, Docker Compose, and Kubernetes.
---
### Simple Backup Using Docker CLI
To perform a one-time backup, bind your local volume to `/backup` in the container and run the `backup` command:
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d database_name
```
Alternatively, use an environment file (`--env-file`) for configuration:
```shell
docker run --rm --network your_network_name \
--env-file your-env-file \
-v $PWD/backup:/backup/ \
jkaninda/mysql-bkup backup -d database_name
```
### Backup All Databases
To back up all databases on the server, use the `--all-databases` or `-a` flag. By default, this creates individual backup files for each database.
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup --all-databases --disable-compression
```
> **Note:** Use the `--all-in-one` or `-A` flag to combine backups into a single file.
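For example, a single-file backup of every database might look like this sketch (connection values are placeholders):
```shell
docker run --rm --network your_network_name \
  -v $PWD/backup:/backup/ \
  -e "DB_HOST=dbhost" \
  -e "DB_PORT=3306" \
  -e "DB_USERNAME=username" \
  -e "DB_PASSWORD=password" \
  jkaninda/mysql-bkup backup --all-in-one
```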
---
### Simple Restore Using Docker CLI
To restore a database, bind your local volume to `/backup` and run the `restore` command:
```shell
docker run --rm --network your_network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=dbhost" \
-e "DB_PORT=3306" \
-e "DB_USERNAME=username" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup restore -d database_name -f backup_file.sql.gz
```
---
### Backup with Docker Compose
Below is an example of a `docker-compose.yml` file for running a one-time backup:
```yaml
services:
mysql-bkup:
# In production, pin your image tag to a specific release version instead of `latest`.
# See available releases: https://github.com/jkaninda/mysql-bkup/releases
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup
volumes:
- ./backup:/backup
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=foo
- DB_USERNAME=bar
- DB_PASSWORD=password
- TZ=Europe/Paris
networks:
- web
networks:
web:
```
---
### Recurring Backups with Docker
You can schedule recurring backups using the `--cron-expression` or `-e` flag:
```shell
docker run --rm --network network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m"
```
For predefined schedules, refer to the [documentation](https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules).
---
## Deploy on Kubernetes
For Kubernetes, you can deploy `mysql-bkup` as a Job or CronJob. Below are examples for both.
### Kubernetes Backup Job
This example defines a one-time backup job:
```yaml
apiVersion: batch/v1
kind: Job
metadata:
name: backup-job
spec:
ttlSecondsAfterFinished: 100
template:
spec:
containers:
- name: mysql-bkup
# Pin the image tag to a specific release version in production.
# See available releases: https://github.com/jkaninda/mysql-bkup/releases
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
resources:
limits:
memory: "128Mi"
cpu: "500m"
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup # Directory location on the host
type: Directory # Optional field
restartPolicy: Never
```
### Kubernetes CronJob for Scheduled Backups
For scheduled backups, use a `CronJob`:
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: mysql-bkup-cronjob
spec:
schedule: "0 2 * * *" # Runs daily at 2 AM
jobTemplate:
spec:
template:
spec:
containers:
- name: mysql-bkup
image: jkaninda/mysql-bkup
command:
- /bin/sh
- -c
- backup -d dbname
env:
- name: DB_HOST
value: "mysql"
- name: DB_USERNAME
value: "user"
- name: DB_PASSWORD
value: "password"
volumeMounts:
- mountPath: /backup
name: backup
volumes:
- name: backup
hostPath:
path: /home/toto/backup
type: Directory
restartPolicy: OnFailure
```
---
## Key Notes
- **Volume Binding**: Ensure the `/backup` directory is mounted to persist backup files.
- **Environment Variables**: Use environment variables or an `--env-file` to pass database credentials and other configurations.
- **Cron Expressions**: Use standard cron expressions or predefined schedules for recurring backups.
- **Kubernetes Jobs**: Use Kubernetes `Job` or `CronJob` for running backups in a Kubernetes cluster.

View File

@@ -1,130 +1,132 @@
---
title: Configuration Reference
layout: default
nav_order: 2
nav_order: 3
---
# Configuration reference
# Configuration Reference
Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
MySQL backup, restore, and migration processes can be configured using **environment variables** or **CLI flags**.
## CLI Utility Usage
The `mysql-bkup` CLI provides commands and options to manage MySQL backups efficiently.
| Option | Short Flag | Description |
|-------------------------|------------|-----------------------------------------------------------------------------------------|
| `mysql-bkup` | `bkup` | CLI tool for managing MySQL backups, restoration, and migration. |
| `backup` | | Executes a backup operation. |
| `restore` | | Restores a database from a backup file. |
| `migrate` | | Migrates a database from one instance to another. |
| `--storage` | `-s` | Specifies the storage type (`local`, `s3`, `ssh`, etc.). Default: `local`. |
| `--file` | `-f` | Defines the backup file name for restoration. |
| `--path` | | Sets the storage path (e.g., `/custom_path` for S3 or `/home/foo/backup` for SSH). |
| `--config` | `-c` | Provides a configuration file for multi-database backups (e.g., `/backup/config.yaml`). |
| `--dbname` | `-d` | Specifies the database name to back up or restore. |
| `--port` | `-p` | Defines the database port. Default: `3306`. |
| `--disable-compression` | | Disables compression for database backups. |
| `--cron-expression` | `-e` | Schedules backups using a cron expression (e.g., `0 0 * * *` or `@daily`). |
| `--all-databases` | `-a` | Backs up all databases separately (e.g., `backup --all-databases`). |
| `--all-in-one` | `-A` | Backs up all databases into a single file (e.g., `backup --all-in-one`). |
| `--custom-name` | | Sets a custom backup file name for one-time backups. |
| `--help` | `-h` | Displays the help message and exits. |
| `--version` | `-V` | Shows version information and exits. |
### CLI utility Usage
| Options | Shorts | Usage |
|-----------------------|--------|----------------------------------------------------------------------------------------|
| mysql-bkup | bkup | CLI utility |
| backup | | Backup database operation |
| restore | | Restore database operation |
| migrate | | Migrate database from one instance to another one |
| --storage | -s | Storage. local or s3 (default: local) |
| --file | -f | File name for restoration |
| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
| --dbname | -d | Database name |
| --port | -p | Database port (default: 3306) |
| --disable-compression | | Disable database backup compression |
| --prune | | Delete old backup, default disabled |
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily |
| --help | -h | Print this help message and exit |
| --version | -V | Print version information and exit |
## Environment variables
| Name | Requirement | Description |
|------------------------|--------------------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --cron-expression flag | Backup cron expression for docker in scheduled mode |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
| TARGET_DB_HOST | Optional, required for database migration | Target database host |
| TARGET_DB_PORT | Optional, required for database migration | Target database port |
| TARGET_DB_NAME | Optional, required for database migration | Target database name |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
---
## Run in Scheduled mode
This image can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources.
For Docker, you need to run it in scheduled mode by adding `--cron-expression "* * * * *"` flag or by defining `BACKUP_CRON_EXPRESSION=0 1 * * *` environment variable.
## Environment Variables
## Syntax of crontab (field description)
| Name | Requirement | Description |
|--------------------------------|--------------------------------------|----------------------------------------------------------------------------|
| `DB_PORT` | Optional (default: `3306`) | Database port number. |
| `DB_HOST` | Required | Database host. |
| `DB_NAME` | Optional (if provided via `-d` flag) | Database name. |
| `DB_USERNAME` | Required | Database username. |
| `DB_PASSWORD` | Required | Database password. |
| `DB_SSL_CA` | Optional | Path to the database client CA certificate file. |
| `DB_SSL_MODE` | Optional (`0` or `1`), default: `0` | Enable CA validation for the database client. |
| `AWS_ACCESS_KEY` | Required for S3 storage | AWS S3 Access Key. |
| `AWS_SECRET_KEY` | Required for S3 storage | AWS S3 Secret Key. |
| `AWS_BUCKET_NAME` | Required for S3 storage | AWS S3 Bucket Name. |
| `AWS_REGION` | Required for S3 storage | AWS Region. |
| `AWS_DISABLE_SSL` | Optional | Disable SSL for S3 storage. |
| `AWS_FORCE_PATH_STYLE` | Optional | Force path-style access for S3 storage. |
| `FILE_NAME` | Optional (if provided via `--file`) | File name for restoration (e.g., `.sql`, `.sql.gz`). |
| `GPG_PASSPHRASE` | Optional | GPG passphrase for encrypting/decrypting backups. |
| `GPG_PUBLIC_KEY` | Optional | GPG public key for encrypting backups (e.g., `/config/public_key.asc`). |
| `BACKUP_CRON_EXPRESSION` | Optional (flag `-e`) | Cron expression for scheduled backups. |
| `BACKUP_RETENTION_DAYS` | Optional | Delete backups older than the specified number of days. |
| `BACKUP_CONFIG_FILE` | Optional (flag `-c`) | Configuration file for multi-database backups (e.g., `/backup/config.yaml`). |
| `SSH_HOST` | Required for SSH storage | SSH remote hostname or IP. |
| `SSH_USER` | Required for SSH storage | SSH remote username. |
| `SSH_PASSWORD` | Optional | SSH remote user's password. |
| `SSH_IDENTIFY_FILE` | Optional | SSH remote user's private key. |
| `SSH_PORT` | Optional (default: `22`) | SSH remote server port. |
| `REMOTE_PATH` | Required for SSH/FTP storage | Remote path (e.g., `/home/toto/backup`). |
| `FTP_HOST` | Required for FTP storage | FTP hostname. |
| `FTP_PORT` | Optional (default: `21`) | FTP server port. |
| `FTP_USER` | Required for FTP storage | FTP username. |
| `FTP_PASSWORD` | Required for FTP storage | FTP user password. |
| `TARGET_DB_HOST` | Required for migration | Target database host. |
| `TARGET_DB_PORT` | Optional (default: `3306`) | Target database port. |
| `TARGET_DB_NAME` | Required for migration | Target database name. |
| `TARGET_DB_USERNAME` | Required for migration | Target database username. |
| `TARGET_DB_PASSWORD` | Required for migration | Target database password. |
| `TARGET_DB_URL` | Optional | Target database URL in JDBC URI format. |
| `TG_TOKEN` | Required for Telegram notifications | Telegram token (`BOT-ID:BOT-TOKEN`). |
| `TG_CHAT_ID` | Required for Telegram notifications | Telegram Chat ID. |
| `TZ` | Optional | Time zone for scheduling. |
| `AZURE_STORAGE_CONTAINER_NAME` | Required for Azure Blob Storage | Azure storage container name. |
| `AZURE_STORAGE_ACCOUNT_NAME` | Required for Azure Blob Storage | Azure storage account name. |
| `AZURE_STORAGE_ACCOUNT_KEY` | Required for Azure Blob Storage | Azure storage account key. |
The syntax is:
---
- 1: Minute (0-59)
- 2: Hours (0-23)
- 3: Day (0-31)
- 4: Month (0-12 [12 == December])
- 5: Day of the week(0-7 [7 or 0 == sunday])
## Scheduled Backups
Easy to remember format:
### Running in Scheduled Mode
- **Docker**: Use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable to schedule backups (see the sketch after this list).
- **Kubernetes**: Use a `CronJob` resource for scheduled backups.
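For example, the two Docker options might look like this sketch (network, env file, and schedule values are examples):
```shell
# Schedule via flag
docker run --rm --network your_network_name \
  --env-file your-env-file \
  -v $PWD/backup:/backup/ \
  jkaninda/mysql-bkup backup -d dbname --cron-expression "@daily"

# Or schedule via environment variable
docker run --rm --network your_network_name \
  --env-file your-env-file \
  -v $PWD/backup:/backup/ \
  -e "BACKUP_CRON_EXPRESSION=0 1 * * *" \
  jkaninda/mysql-bkup backup -d dbname
```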
### Cron Syntax
The cron syntax consists of five fields:
```conf
* * * * * command to be executed
* * * * * command
```
```conf
- - - - -
| | | | |
| | | | ----- Day of week (0 - 7) (Sunday=0 or 7)
| | | ------- Month (1 - 12)
| | --------- Day of month (1 - 31)
| ----------- Hour (0 - 23)
------------- Minute (0 - 59)
```
| Field | Description | Values |
|---------------|------------------------------|----------------|
| Minute | Minute of the hour | `0-59` |
| Hour | Hour of the day | `0-23` |
| Day of Month | Day of the month | `1-31` |
| Month | Month of the year | `1-12` |
| Day of Week | Day of the week (0 = Sunday) | `0-7` |
> At every 30th minute
#### Examples
```conf
*/30 * * * *
```
> “At minute 0.” every hour
```conf
0 * * * *
```
- **Every 30 minutes**: `*/30 * * * *`
- **Every hour at minute 0**: `0 * * * *`
- **Every day at 1:00 AM**: `0 1 * * *`
> “At 01:00.” every day
### Predefined Schedules
```conf
0 1 * * *
```
## Predefined schedules
You may use one of several pre-defined schedules in place of a cron expression.
| Entry | Description | Equivalent To |
|------------------------|--------------------------------------------|---------------|
| @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * |
| @monthly | Run once a month, midnight, first of month | 0 0 1 * * |
| @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 |
| @daily (or @midnight) | Run once a day, midnight | 0 0 * * * |
| @hourly | Run once an hour, beginning of hour | 0 * * * * |
| Entry | Description | Equivalent To |
|----------------------------|--------------------------------------------|---------------|
| `@yearly` (or `@annually`) | Run once a year, midnight, Jan. 1st | `0 0 1 1 *` |
| `@monthly` | Run once a month, midnight, first of month | `0 0 1 * *` |
| `@weekly` | Run once a week, midnight between Sat/Sun | `0 0 * * 0` |
| `@daily` (or `@midnight`) | Run once a day, midnight | `0 0 * * *` |
| `@hourly` | Run once an hour, beginning of hour | `0 * * * *` |
### Intervals
You may also schedule a job to execute at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this:
You can also schedule backups at fixed intervals using the format:
```conf
@every <duration>
```
where `<duration>` is a string accepted by Go's `time.ParseDuration`.
For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that.
- Example: `@every 1h30m10s` runs the backup every 1 hour, 30 minutes, and 10 seconds.

View File

@@ -21,6 +21,7 @@ services:
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as Minio and your Minio instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
- AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web

View File

@@ -5,7 +5,7 @@ services:
# release version instead of using `latest`.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --dbname database_name --mode scheduled --period "0 1 * * *"
command: backup --dbname database_name
volumes:
- ./backup:/backup
environment:
@@ -13,3 +13,5 @@ services:
- DB_HOST=mysql
- DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD}
# See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
- BACKUP_CRON_EXPRESSION=@daily # e.g. @every 5m | @weekly | @monthly | 0 1 * * *

View File

@@ -6,7 +6,7 @@ services:
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *"
command: backup --storage s3 -d my-database
environment:
- DB_PORT=3306
- DB_HOST=mysql
@@ -21,6 +21,9 @@ services:
- AWS_SECRET_KEY=xxxxx
## If you are using an S3 alternative such as Minio and your Minio instance is not secured, change this to true
- AWS_DISABLE_SSL="false"
- AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
# See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
- BACKUP_CRON_EXPRESSION=@daily # e.g. @every 5m | @weekly | @monthly | 0 1 * * *
# Ensure the mysql-bkup container is connected to the same network as your database
networks:
- web

View File

@@ -44,4 +44,6 @@ spec:
value: "xxxx"
- name: AWS_DISABLE_SSL
value: "false"
- name: AWS_FORCE_PATH_STYLE
value: "true"
restartPolicy: Never

39
go.mod
View File

@@ -1,22 +1,39 @@
module github.com/jkaninda/mysql-bkup
go 1.22.5
go 1.24.5
require github.com/spf13/pflag v1.0.5
require github.com/spf13/pflag v1.0.9 // indirect
require (
github.com/aws/aws-sdk-go v1.55.3
github.com/bramvdbogaerde/go-scp v1.5.0
github.com/hpcloud/tail v1.0.0
github.com/spf13/cobra v1.8.0
golang.org/x/crypto v0.18.0
github.com/go-mail/mail v2.3.1+incompatible
github.com/jkaninda/encryptor v0.0.0-20241111100652-926393c9437e
github.com/jkaninda/go-storage v0.1.3
github.com/jkaninda/go-utils v0.1.3
github.com/robfig/cron/v3 v3.0.1
github.com/spf13/cobra v1.10.1
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 // indirect
github.com/ProtonMail/go-crypto v1.1.0 // indirect
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
github.com/ProtonMail/gopenpgp/v2 v2.8.0 // indirect
github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/bramvdbogaerde/go-scp v1.5.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jlaffaye/ftp v0.2.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
golang.org/x/sys v0.22.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
github.com/pkg/errors v0.9.1 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/net v0.29.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/mail.v2 v2.3.1 // indirect
)

133
go.sum
View File

@@ -1,40 +1,129 @@
github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/ProtonMail/go-crypto v1.1.0 h1:OnlSGxXflfrWJESDsGQOmACNQRM9IflG3q8XTrOqvbE=
github.com/ProtonMail/go-crypto v1.1.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
github.com/ProtonMail/gopenpgp/v2 v2.8.0 h1:WvMv3CMcFsqKSM4/Qf8sf3tgyQkzDqQmoSE49bnBuP4=
github.com/ProtonMail/gopenpgp/v2 v2.8.0/go.mod h1:qb2GUSnmA9ipBW5GVtCtEhkummSlqs2A8Ar3S0HBgSY=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-mail/mail v2.3.1+incompatible h1:UzNOn0k5lpfVtO31cK3hn6I4VEVGhe3lX8AJBAxXExM=
github.com/go-mail/mail v2.3.1+incompatible/go.mod h1:VPWjmmNyRsWXQZHVHT3g0YbIINUkSmuKOiLIDkWbL6M=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jkaninda/encryptor v0.0.0-20241111100652-926393c9437e h1:jtFKZHt/PLGQWXNgjEFTEwVbxiQQRMoJ7m37trbkJGw=
github.com/jkaninda/encryptor v0.0.0-20241111100652-926393c9437e/go.mod h1:Y1EXpPWQ9PNd7y7E6ez3xgnzZc8fuDWXwX/1/dXNCE4=
github.com/jkaninda/go-storage v0.1.3 h1:lEpHVgFLKSvjsi/6tAek96Y07za3vxmsXF2/+jiCMZU=
github.com/jkaninda/go-storage v0.1.3/go.mod h1:zVRnLprBk/9AUz2+za6Y03MgoNYrqKLy3edVtjqMaps=
github.com/jkaninda/go-utils v0.1.3 h1:BhVRHpR6JIGhE7JVgqOgiFketIqmNtvMvYpveVwDqqY=
github.com/jkaninda/go-utils v0.1.3/go.mod h1:Aa54jEAcDykc3CnOdreqZG80UfSZOvrYecyusu+oPb4=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk=
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

main.go

@@ -1,9 +1,27 @@
// Package main /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package main
import "github.com/jkaninda/mysql-bkup/cmd"

migrations/init.sql (new file)

@@ -0,0 +1,35 @@
-- Create the testdb2, testdb3, and fakedb databases
CREATE DATABASE IF NOT EXISTS testdb2;
CREATE DATABASE IF NOT EXISTS testdb3;
CREATE DATABASE IF NOT EXISTS fakedb;
USE testdb;
-- Create the 'users' table
CREATE TABLE users (
id INT AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(100) NOT NULL,
email VARCHAR(100) NOT NULL UNIQUE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Create the 'orders' table
CREATE TABLE orders (
id INT AUTO_INCREMENT PRIMARY KEY,
user_id INT NOT NULL,
amount DECIMAL(10,2) NOT NULL,
status ENUM('pending', 'completed', 'canceled') NOT NULL DEFAULT 'pending',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
-- Insert fake users
INSERT INTO users (name, email) VALUES
('Alice Smith', 'alice@example.com'),
('Bob Johnson', 'bob@example.com'),
('Charlie Brown', 'charlie@example.com');
-- Insert fake orders
INSERT INTO orders (user_id, amount, status) VALUES
(1, 100.50, 'completed'),
(2, 200.75, 'pending'),
(3, 50.00, 'canceled');

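This seed script presumably runs through the MySQL image's init mechanism (e.g., mounted under /docker-entrypoint-initdb.d) to give the test suite known databases. A minimal Go check of the seeded data — assuming the go-sql-driver/mysql driver, which is not a dependency of this module, and the credentials from the sample config below:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed driver, not part of this module
)

func main() {
	// DSN matches the sample config below: user/password on 127.0.0.1:3306, database testdb.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/testdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	var users int
	if err := db.QueryRow("SELECT COUNT(*) FROM users").Scan(&users); err != nil {
		log.Fatal(err)
	}
	fmt.Println("seeded users:", users) // the script inserts 3
}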

@@ -0,0 +1,13 @@
#cronExpression: "@every 20s"
#backupRescueMode: false
databases:
- host: 127.0.0.1
port: 3306
name: testdb
user: user
password: password
- name: testdb2
# database credentials from environment variables
#TESTDB2_DB_USERNAME
#TESTDB2_DB_PASSWORD
#TESTDB2_DB_HOST

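The commented-out variables illustrate the per-database fallback implemented by getEnvOrDefault in the pkg/config.go changes further down: a credential missing from the file is looked up first as DB_USERNAME_TESTDB2, then TESTDB2_DB_USERNAME, then plain DB_USERNAME, before the default applies. A standalone sketch of that resolution order:

package main

import (
	"fmt"
	"os"
	"strings"
)

// resolve mirrors getEnvOrDefault: explicit value, suffixed env var,
// prefixed env var, bare env var, then the default.
func resolve(current, envKey, dbName, def string) string {
	if current != "" {
		return current
	}
	if dbName != "" {
		up := strings.ToUpper(dbName)
		if v := os.Getenv(envKey + "_" + up); v != "" { // e.g. DB_USERNAME_TESTDB2
			return v
		}
		if v := os.Getenv(up + "_" + envKey); v != "" { // e.g. TESTDB2_DB_USERNAME
			return v
		}
	}
	if v := os.Getenv(envKey); v != "" {
		return v
	}
	return def
}

func main() {
	os.Setenv("TESTDB2_DB_USERNAME", "user2")
	fmt.Println(resolve("", "DB_USERNAME", "testdb2", "root")) // prints user2
}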
pkg/azure.go (new file)

@@ -0,0 +1,128 @@
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"fmt"
"github.com/jkaninda/go-storage/pkg/azure"
goutils "github.com/jkaninda/go-utils"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"path/filepath"
"time"
)
func azureBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Azure Blob Storage")
// Backup database
err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
if err != nil {
recoverMode(err, "Error backing up database")
return
}
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to Azure Blob storage ...")
utils.Info("Backup name is %s", finalFileName)
azureConfig := loadAzureConfig()
azureStorage, err := azure.NewStorage(azure.Config{
ContainerName: azureConfig.containerName,
AccountName: azureConfig.accountName,
AccountKey: azureConfig.accountKey,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating Azure storage: %s", err)
}
err = azureStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
if config.prune {
err := azureStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Backup name is %s", finalFileName)
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
utils.Info("Uploading backup archive to Azure Blob storage ... done ")
duration := goutils.FormatDuration(time.Since(startTime), 0)
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: utils.ConvertBytes(uint64(backupSize)),
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
Duration: duration,
})
// Delete temp
deleteTemp()
utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
}
func azureRestore(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from Azure Blob storage")
azureConfig := loadAzureConfig()
azureStorage, err := azure.NewStorage(azure.Config{
ContainerName: azureConfig.containerName,
AccountName: azureConfig.accountName,
AccountKey: azureConfig.accountKey,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = azureStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error downloading backup file: %s", err)
}
RestoreDatabase(db, conf)
}


@@ -1,56 +1,94 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
// Package pkg /
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"bytes"
"errors"
"fmt"
"github.com/jkaninda/encryptor"
"github.com/jkaninda/go-storage/pkg/local"
goutils "github.com/jkaninda/go-utils"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
func StartBackup(cmd *cobra.Command) {
intro()
dbConf = initDbConfig(cmd)
//Initialize backup configs
// Initialize backup configs
config := initBackupConfig(cmd)
if config.cronExpression == "" {
BackupTask(dbConf, config)
} else {
if utils.IsValidCronExpression(config.cronExpression) {
scheduledMode(dbConf, config)
// Load backup configuration file
configFile, err := loadConfigFile()
if err != nil {
dbConf = initDbConfig(cmd)
if config.cronExpression == "" {
config.allowCustomName = true
createBackupTask(dbConf, config)
} else {
utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
if utils.IsValidCronExpression(config.cronExpression) {
scheduledMode(dbConf, config)
} else {
utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
}
}
} else {
startMultiBackup(config, configFile)
}
}
// Run in scheduled mode
// scheduledMode runs backups on the configured cron schedule
func scheduledMode(db *dbConfig, config *BackupConfig) {
utils.Info("Running in Scheduled mode")
utils.Info("Backup cron expression: %s", config.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", config.storage)
// Test database connection
testDatabaseConnection(db)
// Test backup
utils.Info("Testing backup configurations...")
err := testDatabaseConnection(db)
if err != nil {
utils.Error("Error connecting to database: %s", db.dbName)
utils.Fatal("Error: %s", err)
}
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()
_, err := c.AddFunc(config.cronExpression, func() {
BackupTask(db, config)
_, err = c.AddFunc(config.cronExpression, func() {
createBackupTask(db, config)
utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
})
if err != nil {
return
@@ -62,200 +100,339 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
defer c.Stop()
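// Block forever; scheduled backups fire on the cron scheduler's goroutine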
select {}
}
func BackupTask(db *dbConfig, config *BackupConfig) {
//Generate backup file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20240102_150405"))
// multiBackupTask runs a backup for each configured database
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
for _, db := range databases {
// Check if path is defined in config file
if db.Path != "" {
bkConfig.remotePath = db.Path
}
createBackupTask(getDatabase(db), bkConfig)
}
}
// createBackupTask runs a single backup, or fans out to all databases when requested
func createBackupTask(db *dbConfig, config *BackupConfig) {
if config.all && !config.allInOne {
backupAll(db, config)
} else {
if db.dbName == "" && !config.all {
utils.Fatal("Database name is required, use DB_NAME environment variable or -d flag")
}
backupTask(db, config)
}
}
// backupAll backs up every database, one file per database
func backupAll(db *dbConfig, config *BackupConfig) {
databases, err := listDatabases(*db)
if err != nil {
utils.Fatal("Error listing databases: %s", err)
}
for _, dbName := range databases {
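// Skip system schemas; "Database" is the column header that SHOW DATABASES prints, since listDatabases does not pass --skip-column-names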
if dbName == "information_schema" || dbName == "performance_schema" || dbName == "mysql" || dbName == "sys" || dbName == "innodb" || dbName == "Database" {
continue
}
db.dbName = dbName
config.backupFileName = fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
backupTask(db, config)
}
}
// backupTask performs one backup run and dispatches to the configured storage
func backupTask(db *dbConfig, config *BackupConfig) {
utils.Info("Starting backup task...")
startTime = time.Now()
prefix := db.dbName
if config.all && config.allInOne {
prefix = "all_databases"
}
// Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", prefix, time.Now().Format("20060102_150405"))
if config.disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20240102_150405"))
backupFileName = fmt.Sprintf("%s_%s.sql", prefix, time.Now().Format("20060102_150405"))
}
if config.customName != "" && config.allowCustomName && !config.all {
backupFileName = fmt.Sprintf("%s.sql.gz", config.customName)
if config.disableCompression {
backupFileName = fmt.Sprintf("%s.sql", config.customName)
}
}
config.backupFileName = backupFileName
switch config.storage {
case "s3":
s3Backup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
s := strings.ToLower(config.storage)
switch s {
case "local":
localBackup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
case "ssh", "remote":
sshBackup(db, config.backupFileName, config.remotePath, config.disableCompression, config.prune, config.backupRetention, config.encryption)
localBackup(db, config)
case "s3":
s3Backup(db, config)
case "ssh", "remote", "sftp":
sshBackup(db, config)
case "ftp":
utils.Fatal("Not supported storage type: %s", config.storage)
ftpBackup(db, config)
case "azure":
azureBackup(db, config)
default:
localBackup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
localBackup(db, config)
}
}
// startMultiBackup starts backups for all databases in the config file
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Starting Multi backup task...")
conf, err := readConf(configFile)
if err != nil {
utils.Fatal("Error reading config file: %s", err)
}
// Check if cronExpression is defined in config file
if conf.CronExpression != "" {
bkConfig.cronExpression = conf.CronExpression
}
if len(conf.Databases) == 0 {
utils.Fatal("No databases found")
}
// Check if cronExpression is defined
if bkConfig.cronExpression == "" {
multiBackupTask(conf.Databases, bkConfig)
} else {
backupRescueMode = conf.BackupRescueMode
// Check if cronExpression is valid
if utils.IsValidCronExpression(bkConfig.cronExpression) {
utils.Info("Running backup in Scheduled mode")
utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", bkConfig.storage)
// Test backup
utils.Info("Testing backup configurations...")
for _, db := range conf.Databases {
err = testDatabaseConnection(getDatabase(db))
if err != nil {
recoverMode(err, fmt.Sprintf("Error connecting to database: %s", db.Name))
continue
}
}
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()
_, err := c.AddFunc(bkConfig.cronExpression, func() {
multiBackupTask(conf.Databases, bkConfig)
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
})
if err != nil {
return
}
// Start the cron scheduler
c.Start()
utils.Info("Creating backup job...done")
utils.Info("Backup job started")
defer c.Stop()
select {}
} else {
utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
}
}
}
// BackupDatabase backs up the database
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression, all, singleFile bool) error {
storagePath = os.Getenv("STORAGE_PATH")
utils.Info("Starting database backup...")
err := utils.CheckEnvVars(dbHVars)
if err != nil {
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
if err := testDatabaseConnection(db); err != nil {
return fmt.Errorf("database connection failed: %w", err)
}
utils.Info("Starting database backup...")
err = os.Setenv("MYSQL_PWD", db.dbPassword)
dumpArgs := []string{fmt.Sprintf("--defaults-file=%s", mysqlClientConfig)}
if all && singleFile {
utils.Info("Backing up all databases...")
dumpArgs = append(dumpArgs, "--all-databases", "--single-transaction", "--routines", "--triggers")
} else {
utils.Info("Backing up %s database...", db.dbName)
dumpArgs = append(dumpArgs, db.dbName)
}
backupPath := filepath.Join(tmpPath, backupFileName)
if disableCompression {
return runCommandAndSaveOutput("mysqldump", dumpArgs, backupPath)
}
return runCommandWithCompression("mysqldump", dumpArgs, backupPath)
}
// runCommandAndSaveOutput runs a command and saves the output to a file
func runCommandAndSaveOutput(command string, args []string, outputPath string) error {
cmd := exec.Command(command, args...)
output, err := cmd.Output()
if err != nil {
return fmt.Errorf("failed to execute %s: %v, output: %s", command, err, string(output))
}
return os.WriteFile(outputPath, output, 0644)
}
// runCommandWithCompression runs a command and compresses the output
func runCommandWithCompression(command string, args []string, outputPath string) error {
cmd := exec.Command(command, args...)
stdout, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("failed to create stdout pipe: %w", err)
}
gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout
gzipFile, err := os.Create(outputPath)
if err != nil {
return fmt.Errorf("failed to create gzip file: %w", err)
}
defer func(gzipFile *os.File) {
err := gzipFile.Close()
if err != nil {
utils.Error("Error closing gzip file: %v", err)
}
}(gzipFile)
gzipCmd.Stdout = gzipFile
if err := gzipCmd.Start(); err != nil {
return fmt.Errorf("failed to start gzip: %w", err)
}
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to execute %s: %w", command, err)
}
if err := gzipCmd.Wait(); err != nil {
return fmt.Errorf("failed to wait for gzip completion: %w", err)
}
utils.Info("Database has been backed up")
return nil
}
// localBackup backup database to local storage
func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to local storage")
err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
if err != nil {
recoverMode(err, "Error backing up database")
return
}
testDatabaseConnection(db)
// Back up the database
utils.Info("Backing up database...")
if disableCompression {
// Execute mysqldump
cmd := exec.Command("mysqldump",
"-h", db.dbHost,
"-P", db.dbPort,
"-u", db.dbUserName,
db.dbName,
)
output, err := cmd.Output()
if err != nil {
log.Fatal(err)
}
// save output
file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
if err != nil {
log.Fatal(err)
}
defer file.Close()
_, err = file.Write(output)
if err != nil {
log.Fatal(err)
}
utils.Done("Database has been backed up")
} else {
// Execute mysqldump
cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName)
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
}
gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout
gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName))
gzipCmd.Start()
if err != nil {
log.Fatal(err)
}
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
if err := gzipCmd.Wait(); err != nil {
log.Fatal(err)
}
utils.Done("Database has been backed up")
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
}
}
func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to local storage")
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension)
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
localStorage := local.NewStorage(local.Config{
LocalPath: tmpPath,
RemotePath: storagePath,
})
err = localStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup name is %s", finalFileName)
moveToBackup(finalFileName, storagePath)
//Send notification
utils.NotifySuccess(finalFileName)
//Delete old backup
if prune {
deleteOldBackup(backupRetention)
}
//Delete temp
deleteTemp()
}
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
duration := goutils.FormatDuration(time.Since(startTime), 0)
func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
utils.Info("Backup database to s3 storage")
//Backup database
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage S3 ... ")
utils.Info("Backup name is %s", finalFileName)
err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
if err != nil {
utils.Fatal("Error uploading file to S3: %s ", err)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
}
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: utils.ConvertBytes(uint64(backupSize)),
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(storagePath, finalFileName),
Duration: duration,
})
// Delete old backup
if prune {
err := utils.DeleteOldBackup(bucket, s3Path, backupRetention)
if config.prune {
err = localStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from S3: %s ", err)
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Done("Uploading backup archive to remote storage S3 ... done ")
//Send notification
utils.NotifySuccess(finalFileName)
//Delete temp
deleteTemp()
}
// sshBackup backup database to SSH remote server
func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
utils.Info("Backup database to Remote server")
//Backup database
BackupDatabase(db, backupFileName, disableCompression)
finalFileName := backupFileName
if encrypt {
encryptBackup(backupFileName)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName)
err := CopyToRemote(finalFileName, remotePath)
if err != nil {
utils.Fatal("Error uploading file to the remote server: %s ", err)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
}
if prune {
//TODO: Delete old backup from remote server
utils.Info("Deleting old backup from a remote server is not implemented yet")
}
utils.Done("Uploading backup archive to remote storage ... done ")
//Send notification
utils.NotifySuccess(finalFileName)
//Delete temp
// Delete temp
deleteTemp()
utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
}
// encryptBackup encrypts the backup file
func encryptBackup(backupFileName string) {
gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
func encryptBackup(config *BackupConfig) {
backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
if err != nil {
utils.Fatal("Error during encrypting backup %s", err)
utils.Fatal("Error reading backup file: %s ", err)
}
if config.usingKey {
utils.Info("Encrypting backup using public key...")
pubKey, err := os.ReadFile(config.publicKey)
if err != nil {
utils.Fatal("Error reading public key: %s ", err)
}
err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
if err != nil {
utils.Fatal("Error encrypting backup file: %v ", err)
}
utils.Info("Encrypting backup using public key...done")
} else if config.passphrase != "" {
utils.Info("Encrypting backup using passphrase...")
err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
if err != nil {
utils.Fatal("error during encrypting backup %v", err)
}
utils.Info("Encrypting backup using passphrase...done")
}
}
// listDatabases lists all databases
func listDatabases(db dbConfig) ([]string, error) {
databases := []string{}
// Create the mysql client config file
if err := createMysqlClientConfigFile(db); err != nil {
return databases, errors.New(err.Error())
}
utils.Info("Listing databases...")
// Step 1: List all databases
cmd := exec.Command("mariadb", fmt.Sprintf("--defaults-file=%s", mysqlClientConfig), "-e", "SHOW DATABASES;")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return databases, fmt.Errorf("failed to list databases: %s", err)
}
// Step 2: Parse the output
for _, _db := range strings.Split(out.String(), "\n") {
if _db != "" {
databases = append(databases, _db)
}
}
return databases, nil
}
func recoverMode(err error, msg string) {
if err != nil {
if backupRescueMode {
utils.NotifyError(fmt.Sprintf("%s : %v", msg, err))
utils.Error("Error: %s", msg)
utils.Error("Backup rescue mode is enabled")
utils.Error("Backup will continue")
} else {
utils.Error("Error: %s", msg)
utils.Fatal("Error: %v", err)
return
}
}
}

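runCommandWithCompression above pipes mysqldump into an external gzip process, which keeps memory use flat but requires a gzip binary in the image. The same streaming effect is possible in-process with the standard library's compress/gzip — a sketch, not the project's code:

package main

import (
	"compress/gzip"
	"os"
	"os/exec"
)

// dumpCompressed streams mysqldump output through an in-process gzip writer.
func dumpCompressed(args []string, outputPath string) error {
	out, err := os.Create(outputPath)
	if err != nil {
		return err
	}
	defer out.Close()
	gz := gzip.NewWriter(out)
	cmd := exec.Command("mysqldump", args...)
	cmd.Stdout = gz // stream the dump straight into the gzip writer
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return err
	}
	return gz.Close() // flush the remaining compressed bytes
}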

@@ -1,18 +1,50 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
"strconv"
"strings"
)
type Database struct {
Host string `yaml:"host"`
Port string `yaml:"port"`
Name string `yaml:"name"`
User string `yaml:"user"`
Password string `yaml:"password"`
Path string `yaml:"path"`
}
type Config struct {
CronExpression string `yaml:"cronExpression"`
BackupRescueMode bool `yaml:"backupRescueMode"`
Databases []Database `yaml:"databases"`
}
type dbConfig struct {
@@ -29,33 +61,65 @@ type targetDbConfig struct {
targetDbPassword string
targetDbName string
}
type TgConfig struct {
Token string
ChatId string
}
type BackupConfig struct {
backupFileName string
backupRetention int
disableCompression bool
prune bool
encryption bool
remotePath string
gpqPassphrase string
encryption bool
usingKey bool
passphrase string
publicKey string
storage string
cronExpression string
all bool
allInOne bool
customName string
allowCustomName bool
}
type RestoreConfig struct {
s3Path string
remotePath string
storage string
file string
bucket string
gpqPassphrase string
type FTPConfig struct {
host string
user string
password string
port int
remotePath string
}
type AzureConfig struct {
accountName string
accountKey string
containerName string
}
// SSHConfig holds the SSH connection details
type SSHConfig struct {
user string
password string
hostName string
port int
identifyFile string
}
type AWSConfig struct {
endpoint string
bucket string
accessKey string
secretKey string
region string
remotePath string
disableSsl bool
forcePathStyle bool
}
func initDbConfig(cmd *cobra.Command) *dbConfig {
//Set env
// Set env
utils.GetEnv(cmd, "dbname", "DB_NAME")
dConf := dbConfig{}
dConf.dbHost = os.Getenv("DB_HOST")
dConf.dbPort = os.Getenv("DB_PORT")
dConf.dbPort = utils.EnvWithDefault("DB_PORT", "3306")
dConf.dbName = os.Getenv("DB_NAME")
dConf.dbUserName = os.Getenv("DB_USERNAME")
dConf.dbPassword = os.Getenv("DB_PASSWORD")
@@ -67,26 +131,155 @@ func initDbConfig(cmd *cobra.Command) *dbConfig {
}
return &dConf
}
func getDatabase(database Database) *dbConfig {
// Set default values from environment variables if not provided
database.User = getEnvOrDefault(database.User, "DB_USERNAME", database.Name, "")
database.Password = getEnvOrDefault(database.Password, "DB_PASSWORD", database.Name, "")
database.Host = getEnvOrDefault(database.Host, "DB_HOST", database.Name, "")
database.Port = getEnvOrDefault(database.Port, "DB_PORT", database.Name, "3306")
return &dbConfig{
dbHost: database.Host,
dbPort: database.Port,
dbName: database.Name,
dbUserName: database.User,
dbPassword: database.Password,
}
}
// Helper function to get environment variable or use a default value
func getEnvOrDefault(currentValue, envKey, suffix, defaultValue string) string {
// Return the current value if it's already set
if currentValue != "" {
return currentValue
}
// Check for suffixed or prefixed environment variables if a suffix is provided
if suffix != "" {
suffixUpper := strings.ToUpper(suffix)
envSuffix := os.Getenv(fmt.Sprintf("%s_%s", envKey, suffixUpper))
if envSuffix != "" {
return envSuffix
}
envPrefix := os.Getenv(fmt.Sprintf("%s_%s", suffixUpper, envKey))
if envPrefix != "" {
return envPrefix
}
}
// Fall back to the default value using a helper function
return utils.EnvWithDefault(envKey, defaultValue)
}
// loadSSHConfig loads the SSH configuration from environment variables
func loadSSHConfig() (*SSHConfig, error) {
utils.GetEnvVariable("SSH_HOST", "SSH_HOST_NAME")
sshVars := []string{"SSH_USER", "SSH_HOST", "SSH_PORT", "REMOTE_PATH"}
err := utils.CheckEnvVars(sshVars)
if err != nil {
return nil, fmt.Errorf("error missing environment variables: %w", err)
}
return &SSHConfig{
user: os.Getenv("SSH_USER"),
password: os.Getenv("SSH_PASSWORD"),
hostName: os.Getenv("SSH_HOST"),
port: utils.GetIntEnv("SSH_PORT"),
identifyFile: os.Getenv("SSH_IDENTIFY_FILE"),
}, nil
}
func loadFtpConfig() *FTPConfig {
// Initialize data configs
fConfig := FTPConfig{}
fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
fConfig.user = os.Getenv("FTP_USER")
fConfig.password = os.Getenv("FTP_PASSWORD")
fConfig.port = utils.GetIntEnv("FTP_PORT")
fConfig.remotePath = os.Getenv("REMOTE_PATH")
err := utils.CheckEnvVars(ftpVars)
if err != nil {
utils.Error("Please make sure all required environment variables for FTP are set")
utils.Fatal("Error missing environment variables: %s", err)
}
return &fConfig
}
func loadAzureConfig() *AzureConfig {
// Initialize data configs
aConfig := AzureConfig{}
aConfig.containerName = os.Getenv("AZURE_STORAGE_CONTAINER_NAME")
aConfig.accountName = os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
aConfig.accountKey = os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")
err := utils.CheckEnvVars(azureVars)
if err != nil {
utils.Error("Please make sure all required environment variables for Azure Blob storage are set")
utils.Fatal("Error missing environment variables: %s", err)
}
return &aConfig
}
func initAWSConfig() *AWSConfig {
// Initialize AWS configs
aConfig := AWSConfig{}
aConfig.endpoint = utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
aConfig.remotePath = utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
aConfig.region = os.Getenv("AWS_REGION")
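// AWS_DISABLE_SSL and AWS_FORCE_PATH_STYLE default to false when unset or not parsable as booleans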
disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil {
disableSsl = false
}
forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
if err != nil {
forcePathStyle = false
}
aConfig.disableSsl = disableSsl
aConfig.forcePathStyle = forcePathStyle
err = utils.CheckEnvVars(awsVars)
if err != nil {
utils.Error("Please make sure all required environment variables for AWS S3 are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &aConfig
}
func initBackupConfig(cmd *cobra.Command) *BackupConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
//Get flag value and set env
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
utils.GetEnv(cmd, "path", "REMOTE_PATH")
utils.GetEnv(cmd, "config", "BACKUP_CONFIG_FILE")
// Get flag value and set env
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
backupRetention, _ := cmd.Flags().GetInt("keep-last")
prune, _ := cmd.Flags().GetBool("prune")
prune := false
backupRetention := utils.GetIntEnv("BACKUP_RETENTION_DAYS")
if backupRetention > 0 {
prune = true
}
disableCompression, _ = cmd.Flags().GetBool("disable-compression")
customName, _ := cmd.Flags().GetString("custom-name")
all, _ := cmd.Flags().GetBool("all-databases")
allInOne, _ := cmd.Flags().GetBool("all-in-one")
if allInOne {
all = true
}
_, _ = cmd.Flags().GetString("mode")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
passphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
if gpqPassphrase != "" {
publicKeyFile, err := checkPubKeyFile(os.Getenv("GPG_PUBLIC_KEY"))
if err == nil {
encryption = true
usingKey = true
} else if passphrase != "" {
encryption = true
usingKey = false
}
//Initialize backup configs
// Initialize backup configs
config := BackupConfig{}
config.backupRetention = backupRetention
config.disableCompression = disableCompression
@@ -94,22 +287,46 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
config.storage = storage
config.encryption = encryption
config.remotePath = remotePath
config.gpqPassphrase = gpqPassphrase
config.passphrase = passphrase
config.publicKey = publicKeyFile
config.usingKey = usingKey
config.cronExpression = cronExpression
config.all = all
config.allInOne = allInOne
config.customName = customName
return &config
}
type RestoreConfig struct {
s3Path string
remotePath string
storage string
file string
bucket string
usingKey bool
passphrase string
privateKey string
}
func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env
// Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME")
_, _ = cmd.Flags().GetString("mode")
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
//Initialize restore configs
passphrase := os.Getenv("GPG_PASSPHRASE")
privateKeyFile, err := checkPrKeyFile(os.Getenv("GPG_PRIVATE_KEY"))
if err == nil {
usingKey = true
} else if passphrase != "" {
usingKey = false
}
// Initialize restore configs
rConfig := RestoreConfig{}
rConfig.s3Path = s3Path
rConfig.remotePath = remotePath
@@ -117,13 +334,15 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
rConfig.bucket = bucket
rConfig.file = file
rConfig.storage = storage
rConfig.gpqPassphrase = gpqPassphrase
rConfig.passphrase = passphrase
rConfig.usingKey = usingKey
rConfig.privateKey = privateKeyFile
return &rConfig
}
func initTargetDbConfig() *targetDbConfig {
tdbConfig := targetDbConfig{}
tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "3306")
tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
@@ -135,3 +354,10 @@ func initTargetDbConfig() *targetDbConfig {
}
return &tdbConfig
}
func loadConfigFile() (string, error) {
backupConfigFile, err := checkConfigFile(os.Getenv("BACKUP_CONFIG_FILE"))
if err == nil {
return backupConfigFile, nil
}
return "", fmt.Errorf("backup config file not found")
}

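loadConfigFile delegates to checkConfigFile (in the helper changes below), so a config.yaml or config.yml in the working directory is picked up even before the path set in BACKUP_CONFIG_FILE. A sketch of that resolution order, with the working directory path as an assumed stand-in:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// firstExisting returns the first path that exists on disk.
func firstExisting(paths ...string) (string, error) {
	for _, p := range paths {
		if _, err := os.Stat(p); err == nil {
			return p, nil
		}
	}
	return "", fmt.Errorf("no config file found")
}

func main() {
	workingDir := "/config" // assumption: stand-in for the package-level workingDir
	found, err := firstExisting(
		filepath.Join(workingDir, "config.yaml"),
		filepath.Join(workingDir, "config.yml"),
		os.Getenv("BACKUP_CONFIG_FILE"),
	)
	fmt.Println(found, err)
}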

@@ -1,63 +0,0 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"github.com/jkaninda/mysql-bkup/utils"
"os"
"os/exec"
"strings"
)
func Decrypt(inputFile string, passphrase string) error {
utils.Info("Decrypting backup file: " + inputFile + " ...")
//Create gpg home dir
err := utils.MakeDirAll(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
return err
}
utils.Info("Backup file decrypted successful!")
return nil
}
func Encrypt(inputFile string, passphrase string) error {
utils.Info("Encrypting backup...")
//Create gpg home dir
err := utils.MakeDirAll(gpgHome)
if err != nil {
return err
}
utils.SetEnv("GNUPGHOME", gpgHome)
cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
return err
}
utils.Info("Backup file encrypted successful!")
return nil
}
func RemoveLastExtension(filename string) string {
if idx := strings.LastIndex(filename, "."); idx != -1 {
return filename[:idx]
}
return filename
}


@@ -1,87 +1,49 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"bytes"
"errors"
"fmt"
goutils "github.com/jkaninda/go-utils"
"github.com/jkaninda/mysql-bkup/utils"
"gopkg.in/yaml.v3"
"os"
"os/exec"
"path/filepath"
"time"
"strings"
)
func copyToTmp(sourcePath string, backupFileName string) {
//Copy backup from storage to /tmp
err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
if err != nil {
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
}
func intro() {
fmt.Println("Starting MYSQL-BKUP...")
fmt.Printf("Version: %s\n", utils.Version)
fmt.Println("Copyright (c) 2024 Jonas Kaninda")
}
func moveToBackup(backupFileName string, destinationPath string) {
//Copy backup from tmp folder to storage destination
err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
if err != nil {
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
if err != nil {
fmt.Println("Error deleting file:", err)
}
utils.Done("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
}
func deleteOldBackup(retentionDays int) {
utils.Info("Deleting old backups...")
storagePath = os.Getenv("STORAGE_PATH")
// Define the directory path
backupDir := storagePath + "/"
// Get current time
currentTime := time.Now()
// Delete file
deleteFile := func(filePath string) error {
err := os.Remove(filePath)
if err != nil {
utils.Fatal(fmt.Sprintf("Error: %s", err))
} else {
utils.Done("File %s has been deleted successfully", filePath)
}
return err
}
// Walk through the directory and delete files modified more than specified days ago
err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if it's a regular file and if it was modified more than specified days ago
if fileInfo.Mode().IsRegular() {
timeDiff := currentTime.Sub(fileInfo.ModTime())
if timeDiff.Hours() > 24*float64(retentionDays) {
err := deleteFile(filePath)
if err != nil {
return err
}
}
}
return nil
})
if err != nil {
utils.Fatal(fmt.Sprintf("Error: %s", err))
return
}
utils.Done("Deleting old backups...done")
}
// deleteTemp removes the contents of the temporary directory
func deleteTemp() {
utils.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
@@ -105,27 +67,137 @@ func deleteTemp() {
}
}
// TestDatabaseConnection tests the database connection
func testDatabaseConnection(db *dbConfig) {
err := os.Setenv("MYSQL_PWD", db.dbPassword)
if err != nil {
return
// testDatabaseConnection tests the database connection
func testDatabaseConnection(db *dbConfig) error {
// Create the mysql client config file
if err := createMysqlClientConfigFile(*db); err != nil {
return errors.New(err.Error())
}
utils.Info("Connecting to %s database ...", db.dbName)
cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, db.dbName, "-e", "quit")
// Set database name for notification error
utils.DatabaseName = db.dbName
// Prepare the command to test the database connection
cmd := exec.Command("mariadb", fmt.Sprintf("--defaults-file=%s", mysqlClientConfig), db.dbName, "-e", "quit")
// Capture the output
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err = cmd.Run()
if err != nil {
utils.Fatal("Error testing database connection: %v\nOutput: %s", err, out.String())
// Run the command
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to connect to database %s: %v, output: %s", db.dbName, err, out.String())
}
utils.Info("Successfully connected to %s database", db.dbName)
utils.Info("Successfully connected to %s database", db.dbName)
return nil
}
func intro() {
utils.Info("Starting MySQL Backup...")
utils.Info("Copyright © 2024 Jonas Kaninda")
}
// checkPubKeyFile checks gpg public key
func checkPubKeyFile(pubKey string) (string, error) {
// Define possible key file names
keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey}
// Loop through key file names and check if they exist
for _, keyFile := range keyFiles {
if _, err := os.Stat(keyFile); err == nil {
// File exists
return keyFile, nil
} else if os.IsNotExist(err) {
// File does not exist, continue to the next one
continue
} else {
// An unexpected error occurred
return "", err
}
}
// Return an error if no key file exists
return "", fmt.Errorf("no public key file found")
}
// checkPrKeyFile checks private key
func checkPrKeyFile(prKey string) (string, error) {
// Define possible key file names
keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey}
// Loop through key file names and check if they exist
for _, keyFile := range keyFiles {
if _, err := os.Stat(keyFile); err == nil {
// File exists
return keyFile, nil
} else if os.IsNotExist(err) {
// File does not exist, continue to the next one
continue
} else {
// An unexpected error occurred
return "", err
}
}
// Return an error if none of the files exist
return "", fmt.Errorf("no private key file found")
}
// readConf reads config file and returns Config
func readConf(configFile string) (*Config, error) {
if utils.FileExists(configFile) {
buf, err := os.ReadFile(configFile)
if err != nil {
return nil, err
}
c := &Config{}
err = yaml.Unmarshal(buf, c)
if err != nil {
return nil, fmt.Errorf("in file %q: %w", configFile, err)
}
return c, nil
}
return nil, fmt.Errorf("config file %q not found", configFile)
}
// checkConfigFile checks config files and returns one config file
func checkConfigFile(filePath string) (string, error) {
// Remove the quotes
filePath = strings.Trim(filePath, `"`)
// Define possible config file names
configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}
// Loop through config file names and check if they exist
for _, configFile := range configFiles {
if _, err := os.Stat(configFile); err == nil {
// File exists
return configFile, nil
} else if os.IsNotExist(err) {
// File does not exist, continue to the next one
continue
} else {
// An unexpected error occurred
return "", err
}
}
// Return an error if none of the files exist
return "", fmt.Errorf("no config file found")
}
func RemoveLastExtension(filename string) string {
if idx := strings.LastIndex(filename, "."); idx != -1 {
return filename[:idx]
}
return filename
}
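A quick note on RemoveLastExtension: it strips only the final extension, which is exactly what the .gpg and .gz handling in this package relies on. Illustrative calls:

RemoveLastExtension("db_20231219_022941.sql.gz") // "db_20231219_022941.sql"
RemoveLastExtension("db_20231219_022941.sql")    // "db_20231219_022941"
RemoveLastExtension("backup")                    // "backup" (no dot, returned unchanged)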
// createMysqlClientConfigFile creates the mysql client config file used by the backup and restore commands
func createMysqlClientConfigFile(db dbConfig) error {
caCertPath := goutils.GetStringEnvWithDefault("DB_SSL_CA", "/etc/ssl/certs/ca-certificates.crt")
sslMode := goutils.GetStringEnvWithDefault("DB_SSL_MODE", "0")
// Create the mysql client config file
mysqlClientConfigFile := filepath.Join(tmpPath, "my.cnf")
mysqlCl := fmt.Sprintf("[client]\nhost=%s\nport=%s\nuser=%s\npassword=%s\nssl-ca=%s\nssl=%s\n", db.dbHost, db.dbPort, db.dbUserName, db.dbPassword, caCertPath, sslMode)
if err := os.WriteFile(mysqlClientConfigFile, []byte(mysqlCl), 0644); err != nil {
return fmt.Errorf("failed to create mysql client config file: %v", err)
}
return nil
}
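For illustration, with DB_HOST=db, DB_PORT=3306, DB_USERNAME=user, DB_PASSWORD=secret (placeholder values) and the defaults above, the format string produces a /tmp/backup/my.cnf along these lines:

[client]
host=db
port=3306
user=user
password=secret
ssl-ca=/etc/ssl/certs/ca-certificates.crt
ssl=0

Passing credentials through --defaults-file keeps the password out of the process list and out of MYSQL_PWD, which the old code exported; note the file is written with 0644 permissions, where 0600 would be a tighter choice since it embeds the password.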


@@ -1,9 +1,27 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
@@ -16,11 +34,11 @@ import (
func StartMigration(cmd *cobra.Command) {
intro()
utils.Info("Starting database migration...")
// Get DB config
dbConf = initDbConfig(cmd)
targetDbConf = initTargetDbConfig()
// Defining the target database variables
newDbConfig := dbConfig{}
newDbConfig.dbHost = targetDbConf.targetDbHost
newDbConfig.dbPort = targetDbConf.targetDbPort
@@ -28,13 +46,18 @@ func StartMigration(cmd *cobra.Command) {
newDbConfig.dbUserName = targetDbConf.targetDbUserName
newDbConfig.dbPassword = targetDbConf.targetDbPassword
// Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
conf := &RestoreConfig{}
conf.file = backupFileName
// Backup source Database
err := BackupDatabase(dbConf, backupFileName, true, false, false)
if err != nil {
utils.Fatal("Error backing up database: %s", err)
}
// Restore source database into target database
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
RestoreDatabase(&newDbConfig, conf)
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
utils.Info("Database migration completed.")
}

pkg/remote.go (new file, 228 lines)

@@ -0,0 +1,228 @@
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"fmt"
"github.com/jkaninda/go-storage/pkg/ftp"
"github.com/jkaninda/go-storage/pkg/ssh"
goutils "github.com/jkaninda/go-utils"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"path/filepath"
"time"
)
func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Remote server")
// Backup database
err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
if err != nil {
recoverMode(err, "Error backing up database")
return
}
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage ... ")
sshConfig, err := loadSSHConfig()
if err != nil {
utils.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
IdentifyFile: sshConfig.identifyFile,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
utils.Info("Backup name is %s", finalFileName)
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
if config.prune {
err := sshStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Uploading backup archive to remote storage ... done ")
duration := goutils.FormatDuration(time.Since(startTime), 0)
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: utils.ConvertBytes(uint64(backupSize)),
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
Duration: duration,
})
// Delete temp
deleteTemp()
utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
}
func remoteRestore(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from remote server")
sshConfig, err := loadSSHConfig()
if err != nil {
utils.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
IdentifyFile: sshConfig.identifyFile,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}
func ftpRestore(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from FTP server")
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}
func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server")
// Backup database
err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
if err != nil {
recoverMode(err, "Error backing up database")
return
}
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName)
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
if config.prune {
err := ftpStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Backup name is %s", finalFileName)
utils.Info("Backup size: %s", utils.ConvertBytes(uint64(backupSize)))
utils.Info("Uploading backup archive to the remote FTP server ... done ")
duration := goutils.FormatDuration(time.Since(startTime), 0)
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: utils.ConvertBytes(uint64(backupSize)),
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
Duration: duration,
})
// Delete temp
deleteTemp()
utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
}


@@ -1,13 +1,33 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"fmt"
"github.com/jkaninda/encryptor"
"github.com/jkaninda/go-storage/pkg/local"
"github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra"
"os"
@@ -21,107 +41,113 @@ func StartRestore(cmd *cobra.Command) {
restoreConf := initRestoreConfig(cmd)
switch restoreConf.storage {
case "s3":
restoreFromS3(dbConf, restoreConf.file, restoreConf.bucket, restoreConf.s3Path)
case "local":
utils.Info("Restore database from local")
copyToTmp(storagePath, restoreConf.file)
RestoreDatabase(dbConf, restoreConf.file)
case "ssh":
restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath)
case "ftp":
utils.Fatal("Restore from FTP is not yet supported")
localRestore(dbConf, restoreConf)
case "s3", "S3":
s3Restore(dbConf, restoreConf)
case "ssh", "SSH", "remote":
remoteRestore(dbConf, restoreConf)
case "ftp", "FTP":
ftpRestore(dbConf, restoreConf)
case "azure":
azureRestore(dbConf, restoreConf)
default:
utils.Info("Restore database from local")
copyToTmp(storagePath, restoreConf.file)
RestoreDatabase(dbConf, restoreConf.file)
localRestore(dbConf, restoreConf)
}
}
func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
utils.Info("Restore database from local")
basePath := filepath.Dir(restoreConf.file)
fileName := filepath.Base(restoreConf.file)
restoreConf.file = fileName
if basePath == "" || basePath == "." {
basePath = storagePath
}
localStorage := local.NewStorage(local.Config{
RemotePath: basePath,
LocalPath: tmpPath,
})
err := localStorage.CopyFrom(fileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(dbConf, restoreConf)
}
func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil {
utils.Fatal("Error download file from s3 %s %v", file, err)
}
RestoreDatabase(db, file)
}
func restoreFromRemote(db *dbConfig, file, remotePath string) {
utils.Info("Restore database from remote server")
err := CopyFromRemote(file, remotePath)
if err != nil {
utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err)
}
RestoreDatabase(db, file)
}
// RestoreDatabase restores the database from a backup file
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
if conf.file == "" {
utils.Fatal("Error, file required")
}
filePath := filepath.Join(tmpPath, conf.file)
rFile, err := os.ReadFile(filePath)
if err != nil {
utils.Fatal("Error reading backup file: %v", err)
}
extension := filepath.Ext(filePath)
outputFile := RemoveLastExtension(filePath)
if extension == ".gpg" {
decryptBackup(conf, rFile, outputFile)
}
restorationFile := filepath.Join(tmpPath, conf.file)
if !utils.FileExists(restorationFile) {
utils.Fatal("File not found: %s", restorationFile)
}
if err := testDatabaseConnection(db); err != nil {
utils.Fatal("Error connecting to the database: %v", err)
}
utils.Info("Restoring database...")
restoreDatabaseFile(db, restorationFile)
}
func decryptBackup(conf *RestoreConfig, rFile []byte, outputFile string) {
if conf.usingKey {
utils.Info("Decrypting backup using private key...")
prKey, err := os.ReadFile(conf.privateKey)
if err != nil {
utils.Fatal("Error reading private key: %v", err)
}
if err := encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase); err != nil {
utils.Fatal("Error decrypting backup: %v", err)
}
} else {
if conf.passphrase == "" {
utils.Fatal("Passphrase or private key required for GPG file.")
}
utils.Info("Decrypting backup using passphrase...")
if err := encryptor.Decrypt(rFile, outputFile, conf.passphrase); err != nil {
utils.Fatal("Error decrypting file: %v", err)
}
conf.file = RemoveLastExtension(conf.file)
}
}
func restoreDatabaseFile(db *dbConfig, restorationFile string) {
extension := filepath.Ext(restorationFile)
var cmdStr string
switch extension {
case ".gz":
cmdStr = fmt.Sprintf("zcat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName)
case ".sql":
cmdStr = fmt.Sprintf("cat %s | mariadb --defaults-file=%s %s", restorationFile, mysqlClientConfig, db.dbName)
default:
utils.Fatal("Unknown file extension: %s", extension)
}
cmd := exec.Command("sh", "-c", cmdStr)
output, err := cmd.CombinedOutput()
if err != nil {
utils.Fatal("Error restoring database: %v\nOutput: %s", err, string(output))
}
utils.Info("Database has been restored successfully.")
deleteTemp()
}
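Concretely, restoring a compressed dump called db_20231219_022941.sql.gz into a database named mydb (placeholder names) makes the switch above build the pipeline zcat /tmp/backup/db_20231219_022941.sql.gz | mariadb --defaults-file=/tmp/backup/my.cnf mydb, executed through sh -c so that the shell handles the pipe.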

pkg/s3.go (new file, 138 lines)

@@ -0,0 +1,138 @@
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"fmt"
"github.com/jkaninda/go-storage/pkg/s3"
goutils "github.com/jkaninda/go-utils"
"github.com/jkaninda/mysql-bkup/utils"
"os"
"path/filepath"
"time"
)
func s3Backup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to s3 storage")
// Backup database
err := BackupDatabase(db, config.backupFileName, disableCompression, config.all, config.allInOne)
if err != nil {
recoverMode(err, "Error backing up database")
return
}
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage S3 ... ")
awsConfig := initAWSConfig()
if config.remotePath == "" {
config.remotePath = awsConfig.remotePath
}
utils.Info("Backup name is %s", finalFileName)
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
}
// Delete old backup
if config.prune {
err := s3Storage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
utils.Info("Uploading backup archive to remote storage S3 ... done ")
duration := goutils.FormatDuration(time.Since(startTime), 0)
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: utils.ConvertBytes(uint64(backupSize)),
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
Duration: duration,
})
// Delete temp
deleteTemp()
utils.Info("The backup of the %s database has been completed in %s", db.dbName, duration)
}
func s3Restore(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from s3")
awsConfig := initAWSConfig()
if conf.remotePath == "" {
conf.remotePath = awsConfig.remotePath
}
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error download file from S3 storage: %s", err)
}
RestoreDatabase(db, conf)
}


@@ -1,121 +0,0 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg
import (
"context"
"errors"
"fmt"
"github.com/bramvdbogaerde/go-scp"
"github.com/bramvdbogaerde/go-scp/auth"
"github.com/jkaninda/mysql-bkup/utils"
"golang.org/x/crypto/ssh"
"os"
"path/filepath"
)
func CopyToRemote(fileName, remotePath string) error {
sshUser := os.Getenv("SSH_USER")
sshPassword := os.Getenv("SSH_PASSWORD")
sshHostName := os.Getenv("SSH_HOST_NAME")
sshPort := os.Getenv("SSH_PORT")
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
err := utils.CheckEnvVars(sshHVars)
if err != nil {
utils.Error("Error checking environment variables: %s", err)
os.Exit(1)
}
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
} else {
if sshPassword == "" {
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
}
utils.Warn("Accessing the remote server using password, password is not recommended")
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
}
// Create a new SCP client
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
// Connect to the remote server
err = client.Connect()
if err != nil {
return errors.New("Couldn't establish a connection to the remote server")
}
// Open a file
file, _ := os.Open(filepath.Join(tmpPath, fileName))
// Close client connection after the file has been copied
defer client.Close()
// Close the file after it has been copied
defer file.Close()
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
if err != nil {
fmt.Println("Error while copying file ")
return err
}
return nil
}
func CopyFromRemote(fileName, remotePath string) error {
sshUser := os.Getenv("SSH_USER")
sshPassword := os.Getenv("SSH_PASSWORD")
sshHostName := os.Getenv("SSH_HOST_NAME")
sshPort := os.Getenv("SSH_PORT")
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
err := utils.CheckEnvVars(sshHVars)
if err != nil {
utils.Error("Error checking environment variables\n: %s", err)
os.Exit(1)
}
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
} else {
if sshPassword == "" {
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
}
utils.Warn("Accessing the remote server using password, password is not recommended")
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
}
// Create a new SCP client
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
// Connect to the remote server
err = client.Connect()
if err != nil {
return errors.New("Couldn't establish a connection to the remote server\n")
}
// Close client connection after the file has been copied
defer client.Close()
file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
fmt.Println("Couldn't open the output file")
}
defer file.Close()
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
if err != nil {
fmt.Println("Error while copying file ", err)
return err
}
return nil
}


@@ -1,24 +1,52 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"path/filepath"
"time"
)
const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
const algorithm = "aes256"
const gpgHome = "/config/gnupg"
const gpgExtension = "gpg"
const timeFormat = "2006-01-02 at 15:04:05"
var (
storage = "local"
file = ""
storagePath = "/backup"
disableCompression = false
encryption = false
storage = "local"
file = ""
storagePath = "/backup"
workingDir = "/config"
disableCompression = false
encryption = false
usingKey = false
backupSize int64 = 0
startTime = time.Now()
backupRescueMode = false
mysqlClientConfig = filepath.Join(tmpPath, "my.cnf")
)
// dbHVars Required environment variables for database
@@ -26,11 +54,9 @@ var dbHVars = []string{
"DB_HOST",
"DB_PASSWORD",
"DB_USERNAME",
"DB_NAME",
}
var tdbRVars = []string{
"TARGET_DB_HOST",
"TARGET_DB_PORT",
"TARGET_DB_NAME",
"TARGET_DB_USERNAME",
"TARGET_DB_PASSWORD",
@@ -39,10 +65,23 @@ var tdbRVars = []string{
var dbConf *dbConfig
var targetDbConf *targetDbConfig
// sshHVars Required environment variables for SSH remote server storage
var sshHVars = []string{
"SSH_USER",
"SSH_REMOTE_PATH",
"SSH_HOST_NAME",
"SSH_PORT",
var ftpVars = []string{
"FTP_HOST_NAME",
"FTP_USER",
"FTP_PASSWORD",
"FTP_PORT",
}
var azureVars = []string{
"AZURE_STORAGE_CONTAINER_NAME",
"AZURE_STORAGE_ACCOUNT_NAME",
"AZURE_STORAGE_ACCOUNT_KEY",
}
// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{
"AWS_S3_ENDPOINT",
"AWS_S3_BUCKET_NAME",
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY",
"AWS_REGION",
}
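As an illustration (placeholder values, not part of the change), a minimal S3 setup would provide all five required variables:

AWS_S3_ENDPOINT=https://s3.example.com
AWS_S3_BUCKET_NAME=backups
AWS_ACCESS_KEY=example-access-key
AWS_SECRET_KEY=example-secret-key
AWS_REGION=us-east-1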


@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>🔴 Urgent: Database Backup Failure</title>
<style>
body {
font-family: Arial, sans-serif;
background-color: #f8f9fa;
color: #333;
margin: 0;
padding: 20px;
}
h2 {
color: #d9534f;
}
.details {
background-color: #ffffff;
border: 1px solid #ddd;
padding: 15px;
border-radius: 5px;
margin-top: 10px;
}
.details ul {
list-style-type: none;
padding: 0;
}
.details li {
margin: 5px 0;
}
a {
color: #0275d8;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
footer {
margin-top: 20px;
font-size: 0.9em;
color: #6c757d;
}
</style>
</head>
<body>
<h2>🔴 Urgent: Database Backup Failure Notification</h2>
<p>Hi,</p>
<p>An error occurred during the database backup process. Please review the details below and take the necessary actions:</p>
<div class="details">
<h3>Failure Details:</h3>
<ul>
<li><strong>Database Name:</strong> {{.DatabaseName}}</li>
<li><strong>Date:</strong> {{.EndTime}}</li>
<li><strong>Backup Reference:</strong> {{.BackupReference}}</li>
<li><strong>Error Message:</strong> {{.Error}}</li>
</ul>
</div>
<p>We recommend investigating the issue as soon as possible to prevent potential data loss or service disruptions.</p>
<p>For more information, visit the <a href="https://jkaninda.github.io/mysql-bkup">mysql-bkup documentation</a>.</p>
<footer>
&copy; 2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a> | Automated Backup System
</footer>
</body>
</html>

templates/email.tmpl (new file, 69 lines)

@@ -0,0 +1,69 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>✅ Database Backup Successful {{.Database}}</title>
<style>
body {
font-family: Arial, sans-serif;
background-color: #f8f9fa;
color: #333;
margin: 0;
padding: 20px;
}
h2 {
color: #5cb85c;
}
.details {
background-color: #ffffff;
border: 1px solid #ddd;
padding: 15px;
border-radius: 5px;
margin-top: 10px;
}
.details ul {
list-style-type: none;
padding: 0;
}
.details li {
margin: 5px 0;
}
a {
color: #0275d8;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
footer {
margin-top: 20px;
font-size: 0.9em;
color: #6c757d;
}
</style>
</head>
<body>
<h2>✅ Database Backup Successful</h2>
<p>Hi,</p>
<p>The backup process for the <strong>{{.Database}}</strong> database was successfully completed. Please find the details below:</p>
<div class="details">
<h3>Backup Details:</h3>
<ul>
<li><strong>Database Name:</strong> {{.Database}}</li>
<li><strong>Backup Duration:</strong> {{.Duration}}</li>
<li><strong>Backup Storage:</strong> {{.Storage}}</li>
<li><strong>Backup Location:</strong> {{.BackupLocation}}</li>
<li><strong>Backup Size:</strong> {{.BackupSize}}</li>
<li><strong>Backup Reference:</strong> {{.BackupReference}}</li>
</ul>
</div>
<p>You can access the backup at the specified location if needed. Thank you for using <a href="https://jkaninda.github.io/mysql-bkup/">mysql-bkup</a>.</p>
<footer>
&copy; 2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a> | Automated Backup System
</footer>
</body>
</html>


@@ -0,0 +1,11 @@
🔴 Urgent: Database Backup Failure Notification
Hi,
An error occurred during the database backup process.
Please review the details below and take the necessary actions:
Failure Details:
- Database Name: {{.DatabaseName}}
- Date: {{.EndTime}}
- Backup Reference: {{.BackupReference}}
- Error Message: {{.Error}}
We recommend investigating the issue as soon as possible to prevent potential data loss or service disruptions.

templates/telegram.tmpl (new file, 15 lines)

@@ -0,0 +1,15 @@
✅ Database Backup Successful
Hi,
The backup process for the {{.Database}} database was successfully completed.
Please find the details below:
Backup Details:
- Database Name: {{.Database}}
- Backup Duration: {{.Duration}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}}
- Backup Reference: {{.BackupReference}}
You can access the backup at the specified location if needed.

utils/config.go (new file, 95 lines)

@@ -0,0 +1,95 @@
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import "os"
type MailConfig struct {
MailHost string
MailPort int
MailUserName string
MailPassword string
MailTo string
MailFrom string
SkipTls bool
}
type NotificationData struct {
File string
BackupSize string
Database string
Duration string
Storage string
BackupLocation string
BackupReference string
}
type ErrorMessage struct {
Database string
EndTime string
Error string
BackupReference string
DatabaseName string
}
// loadMailConfig gets mail environment variables and returns MailConfig
func loadMailConfig() *MailConfig {
return &MailConfig{
MailHost: os.Getenv("MAIL_HOST"),
MailPort: GetIntEnv("MAIL_PORT"),
MailUserName: os.Getenv("MAIL_USERNAME"),
MailPassword: os.Getenv("MAIL_PASSWORD"),
MailTo: os.Getenv("MAIL_TO"),
MailFrom: os.Getenv("MAIL_FROM"),
SkipTls: os.Getenv("MAIL_SKIP_TLS") == "true",
}
}
// TimeFormat returns the format of the time
func TimeFormat() string {
format := os.Getenv("TIME_FORMAT")
if format == "" {
return "2006-01-02 at 15:04:05"
}
return format
}
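Since this is a Go time layout, TIME_FORMAT must be written using the fixed reference time Mon Jan 2 15:04:05 MST 2006 rather than strftime-style verbs. A small illustrative snippet:

// TIME_FORMAT="2006-01-02 15:04" would render, for example, "2025-09-10 05:56"
fmt.Println(time.Now().Format("2006-01-02 15:04"))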
func backupReference() string {
return os.Getenv("BACKUP_REFERENCE")
}
const templatePath = "/config/templates"
var DatabaseName = ""
var vars = []string{
"TG_TOKEN",
"TG_CHAT_ID",
}
var mailVars = []string{
"MAIL_HOST",
"MAIL_PORT",
"MAIL_FROM",
"MAIL_TO",
}
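For completeness: email notifications only fire when the four mailVars above are all set, e.g. MAIL_HOST=smtp.example.com, MAIL_PORT=587, MAIL_FROM=bkup@example.com, MAIL_TO=ops@example.com (placeholder values), while Telegram notifications require TG_TOKEN and TG_CHAT_ID.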


@@ -1,16 +1,35 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
const RestoreExample = "mysql-bkup restore --dbname database --file db_20231219_022941.sql.gz\n" +
const RestoreExample = "restore --dbname database --file db_20231219_022941.sql.gz\n" +
"restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
const BackupExample = "mysql-bkup backup --dbname database --disable-compression\n" +
const BackupExample = "backup --dbname database --disable-compression\n" +
"backup --dbname database --storage s3 --path /custom-path --disable-compression"
const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
"backup --dbname database --storage s3 --path /custom-path\n" +
"restore --dbname database --file db_20231219_022941.sql.gz"
const traceLog = "trace"


@@ -1,67 +1,103 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import (
"fmt"
"log"
"os"
"time"
"runtime"
"strings"
)
// Info returns info log
func Info(msg string, args ...interface{}) {
log.SetOutput(getStd("/dev/stdout"))
logWithCaller("INFO", msg, args...)
}
// Warn returns warning log
func Warn(msg string, args ...interface{}) {
log.SetOutput(getStd("/dev/stdout"))
logWithCaller("WARN", msg, args...)
}
// Error logs error messages
func Error(msg string, args ...interface{}) {
log.SetOutput(getStd("/dev/stderr"))
logWithCaller("ERROR", msg, args...)
}
// Fatal logs an error message and exits the program.
func Fatal(msg string, args ...interface{}) {
log.SetOutput(os.Stdout)
// Format message if there are additional arguments
formattedMessage := msg
if len(args) > 0 {
formattedMessage = fmt.Sprintf(msg, args...)
}
logWithCaller("ERROR", msg, args...)
NotifyError(formattedMessage)
os.Exit(1)
}
// Helper function to format and log messages with file and line number
func logWithCaller(level, msg string, args ...interface{}) {
// Format message if there are additional arguments
formattedMessage := msg
if len(args) > 0 {
formattedMessage = fmt.Sprintf(msg, args...)
}
// Get the caller's file and line number (skip 2 frames)
_, file, line, ok := runtime.Caller(2)
if !ok {
file = "unknown"
line = 0
}
// Log message with caller information when the log level is trace
if strings.ToLower(level) != "off" {
if strings.ToLower(level) == traceLog {
log.Printf("%s: %s (File: %s, Line: %d)\n", level, formattedMessage, file, line)
} else {
log.Printf("%s: %s\n", level, formattedMessage)
}
}
}
func getStd(out string) *os.File {
switch out {
case "/dev/stdout":
return os.Stdout
case "/dev/stderr":
return os.Stderr
case "/dev/stdin":
return os.Stdin
default:
return os.Stdout
}
}

utils/notification.go (new file, 179 lines)

@@ -0,0 +1,179 @@
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/go-mail/mail"
"html/template"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
func parseTemplate[T any](data T, fileName string) (string, error) {
// Parse the template file from the templates directory
tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName))
if err != nil {
return "", err
}
var buf bytes.Buffer
if err = tmpl.Execute(&buf, data); err != nil {
return "", err
}
return buf.String(), nil
}
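A minimal usage sketch of the generic helper above, with placeholder data (error handling mirrors the callers below):

data := NotificationData{Database: "mydb", Duration: "12s", Storage: "s3"}
body, err := parseTemplate(data, "telegram.tmpl")
if err != nil {
    Error("Could not parse template: %v", err)
}
_ = body // would be passed on to sendMessage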
func SendEmail(subject, body string) error {
Info("Start sending email notification....")
config := loadMailConfig()
emails := strings.Split(config.MailTo, ",")
m := mail.NewMessage()
m.SetHeader("From", config.MailFrom)
m.SetHeader("To", emails...)
m.SetHeader("Subject", subject)
m.SetBody("text/html", body)
d := mail.NewDialer(config.MailHost, config.MailPort, config.MailUserName, config.MailPassword)
d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}
if err := d.DialAndSend(m); err != nil {
Error("Error could not send email : %v", err)
return err
}
Info("Email notification has been sent")
return nil
}
func sendMessage(msg string) error {
Info("Sending Telegram notification... ")
chatId := os.Getenv("TG_CHAT_ID")
body, _ := json.Marshal(map[string]string{
"chat_id": chatId,
"text": msg,
})
url := fmt.Sprintf("%s/sendMessage", getTgUrl())
// Create an HTTP post request
request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
if err != nil {
panic(err)
}
request.Header.Add("Content-Type", "application/json")
client := &http.Client{}
response, err := client.Do(request)
if err != nil {
return err
}
code := response.StatusCode
if code == 200 {
Info("Telegram notification has been sent")
return nil
} else {
body, _ := io.ReadAll(response.Body)
Error("Error could not send message, error: %s", string(body))
return fmt.Errorf("error could not send message %s", string(body))
}
}
func NotifySuccess(notificationData *NotificationData) {
notificationData.BackupReference = backupReference()
// Email notification
err := CheckEnvVars(mailVars)
if err == nil {
body, err := parseTemplate(*notificationData, "email.tmpl")
if err != nil {
Error("Could not parse email template: %v", err)
}
err = SendEmail(fmt.Sprintf("✅ Database Backup Notification %s", notificationData.Database), body)
if err != nil {
Error("Could not send email: %v", err)
}
}
// Telegram notification
err = CheckEnvVars(vars)
if err == nil {
message, err := parseTemplate(*notificationData, "telegram.tmpl")
if err != nil {
Error("Could not parse telegram template: %v", err)
}
err = sendMessage(message)
if err != nil {
Error("Could not send Telegram message: %v", err)
}
}
}
func NotifyError(error string) {
// Email notification
err := CheckEnvVars(mailVars)
if err == nil {
body, err := parseTemplate(ErrorMessage{
Error: error,
EndTime: time.Now().Format(TimeFormat()),
BackupReference: os.Getenv("BACKUP_REFERENCE"),
DatabaseName: DatabaseName,
}, "email-error.tmpl")
if err != nil {
Error("Could not parse error template: %v", err)
}
err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
if err != nil {
Error("Could not send email: %v", err)
}
}
// Telegram notification
err = CheckEnvVars(vars)
if err == nil {
message, err := parseTemplate(ErrorMessage{
Error: error,
EndTime: time.Now().Format(TimeFormat()),
BackupReference: os.Getenv("BACKUP_REFERENCE"),
DatabaseName: DatabaseName,
}, "telegram-error.tmpl")
if err != nil {
Error("Could not parse error template: %v", err)
}
err = sendMessage(message)
if err != nil {
Error("Could not send telegram message: %v", err)
}
}
}
func getTgUrl() string {
return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
}


@@ -1,175 +0,0 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package utils
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"log"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
)
// CreateSession creates a new AWS session
func CreateSession() (*session.Session, error) {
// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{
"AWS_S3_ENDPOINT",
"AWS_S3_BUCKET_NAME",
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY",
"AWS_REGION",
"AWS_REGION",
"AWS_REGION",
}
endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
region := os.Getenv("AWS_REGION")
awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil {
Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
}
err = CheckEnvVars(awsVars)
if err != nil {
Fatal("Error checking environment variables\n: %s", err)
}
// S3 Config
s3Config := &aws.Config{
Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
Endpoint: aws.String(endPoint),
Region: aws.String(region),
DisableSSL: aws.Bool(awsDisableSsl),
S3ForcePathStyle: aws.Bool(true),
}
return session.NewSession(s3Config)
}
// UploadFileToS3 uploads a file to S3 with a given prefix
func UploadFileToS3(filePath, key, bucket, prefix string) error {
sess, err := CreateSession()
if err != nil {
return err
}
svc := s3.New(sess)
file, err := os.Open(filepath.Join(filePath, key))
if err != nil {
return err
}
defer file.Close()
fileInfo, err := file.Stat()
if err != nil {
return err
}
objectKey := filepath.Join(prefix, key)
buffer := make([]byte, fileInfo.Size())
file.Read(buffer)
fileBytes := bytes.NewReader(buffer)
fileType := http.DetectContentType(buffer)
_, err = svc.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
Body: fileBytes,
ContentLength: aws.Int64(fileInfo.Size()),
ContentType: aws.String(fileType),
})
if err != nil {
return err
}
return nil
}
func DownloadFile(destinationPath, key, bucket, prefix string) error {
sess, err := CreateSession()
if err != nil {
return err
}
Info("Download backup from S3 storage...")
file, err := os.Create(filepath.Join(destinationPath, key))
if err != nil {
fmt.Println("Failed to create file", err)
return err
}
defer file.Close()
objectKey := filepath.Join(prefix, key)
downloader := s3manager.NewDownloader(sess)
numBytes, err := downloader.Download(file,
&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
})
if err != nil {
fmt.Println("Failed to download file", err)
return err
}
Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
return nil
}
func DeleteOldBackup(bucket, prefix string, retention int) error {
sess, err := CreateSession()
if err != nil {
return err
}
svc := s3.New(sess)
// Get the current time and the time threshold for the retention window
now := time.Now()
backupRetentionDays := now.AddDate(0, 0, -retention)
// List objects in the bucket
listObjectsInput := &s3.ListObjectsV2Input{
Bucket: aws.String(bucket),
Prefix: aws.String(prefix),
}
err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, object := range page.Contents {
if object.LastModified.Before(backupRetentionDays) {
// Object is older than retention days, delete it
_, err := svc.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(bucket),
Key: object.Key,
})
if err != nil {
log.Printf("Failed to delete object %s: %v", *object.Key, err)
} else {
fmt.Printf("Deleted object %s\n", *object.Key)
}
}
}
return !lastPage
})
if err != nil {
log.Fatalf("Failed to list objects: %v", err)
}
fmt.Println("Finished deleting old files.")
return nil
}


@@ -1,25 +1,43 @@
// Package utils /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import (
"bytes"
"encoding/json"
"fmt"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"io"
"io/fs"
"io/ioutil"
"net/http"
"os"
"strconv"
"time"
)
var Version = ""
// FileExists checks if the file does exist
func FileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
@@ -33,7 +51,13 @@ func WriteToFile(filePath, content string) error {
if err != nil {
return err
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
return
}
}(file)
_, err = file.WriteString(content)
return err
@@ -51,14 +75,25 @@ func CopyFile(src, dst string) error {
if err != nil {
return fmt.Errorf("failed to open source file: %v", err)
}
defer func(sourceFile *os.File) {
err := sourceFile.Close()
if err != nil {
return
}
}(sourceFile)
// Create the destination file
destinationFile, err := os.Create(dst)
if err != nil {
return fmt.Errorf("failed to create destination file: %v", err)
}
defer func(destinationFile *os.File) {
err := destinationFile.Close()
if err != nil {
return
}
}(destinationFile)
// Copy the content from source to destination
_, err = io.Copy(destinationFile, sourceFile)
@@ -85,7 +120,12 @@ func IsDirEmpty(name string) (bool, error) {
if err != nil {
return false, err
}
defer func(f *os.File) {
err := f.Close()
if err != nil {
return
}
}(f)
_, err = f.Readdirnames(1)
if err == nil {
@@ -133,14 +173,11 @@ func GetEnvVariable(envName, oldEnvName string) string {
if err != nil {
return value
}
Warn("%s is deprecated, please use %s instead!", oldEnvName, envName)
Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
}
}
return value
}
func ShowHistory() {
}
// CheckEnvVars checks if all the specified environment variables are set
func CheckEnvVars(vars []string) error {
@@ -187,71 +224,49 @@ func GetIntEnv(envName string) int {
}
return ret
}
func EnvWithDefault(envName string, defaultValue string) string {
value := os.Getenv(envName)
if value == "" {
return defaultValue
}
return value
}
// IsValidCronExpression verifies the cron expression and returns a boolean
func IsValidCronExpression(cronExpr string) bool {
// Parse the cron expression
_, err := cron.ParseStandard(cronExpr)
return err == nil
}
// CronNextTime returns the next scheduled time for the cron expression
func CronNextTime(cronExpr string) time.Time {
// Parse the cron expression
schedule, err := cron.ParseStandard(cronExpr)
if err != nil {
Error("Error parsing cron expression: %s", err)
return time.Time{}
}
// Get the current time
now := time.Now()
// Get the next scheduled time
next := schedule.Next(now)
return next
}
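For example, with a standard five-field expression (illustrative value):

expr := "0 1 * * *" // every day at 01:00
if IsValidCronExpression(expr) {
    Info("Next run at %v", CronNextTime(expr))
}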
// ConvertBytes converts bytes to a human-readable string with the appropriate unit (bytes, MiB, or GiB).
func ConvertBytes(bytes uint64) string {
const (
MiB = 1024 * 1024
GiB = MiB * 1024
)
switch {
case bytes >= GiB:
return fmt.Sprintf("%.2f GiB", float64(bytes)/float64(GiB))
case bytes >= MiB:
return fmt.Sprintf("%.2f MiB", float64(bytes)/float64(MiB))
default:
return fmt.Sprintf("%d bytes", bytes)
}
}
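A quick worked example of the thresholds above:

ConvertBytes(512)                    // "512 bytes"
ConvertBytes(5 * 1024 * 1024)        // "5.00 MiB"
ConvertBytes(3 * 1024 * 1024 * 1024) // "3.00 GiB"

Sizes are reported in binary units (MiB/GiB), matching the backup size logged after each upload.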