Mirror of https://github.com/jkaninda/mysql-bkup.git (synced 2025-12-06 13:39:41 +01:00)
Compare commits
95 Commits
Commit SHA1s:
fd444293b4, 1940ceba9a, 07d580a8a9, 9a261b22ec, e7a58f0569, 1b529725d7, d8c73560b8, d5a0adc981, 6df3bae9e2, f7d624fd15,
1e9e1ed951, 917ba8947f, 94a1dcdff7, f70e549b16, 607478fcc6, 2862e504f5, 29420ee13e, f53272ccf0, c360441445, f6916231f7,
afd4afc83b, 9016a9ec7a, 4ecd96e75c, 8a88e4a727, 62f86adea9, eb414d818c, 6721cc430d, 8e20e9595f, 02e3267237, 448ef4d988,
70ac78c2cd, 72f5ef4839, 6a51f591a5, d55ade3c21, cdbd6dcd6a, 307e18d9ff, 8d366f0302, 05e32c3cc1, edd13907d0, 7cb1c50927,
f545704b02, 90f5391b24, ca241b4fef, 3911296921, 8d04d276ba, 221079e0ea, 590b2d8bc6, d2aeb55ebc, 431be36210, ef2c5c80cd,
3a0137d6ea, 8afb5ace40, 5569258a71, f3ec395e37, ba432997c8, dc20ea9635, 40557af437, 1dcb9586a6, 2c6336e84a, c16ee3a492,
3f7d28ea49, cea1ef9c3b, 56c271bc29, 45c30dca5f, b0ae212578, 6e2d3a9f21, dd314aa4cb, 24ccdaa671, 45e3452376, 3527b4cdcd,
dc6fe2f4b9, f0afc0f4e0, 7d7c813bb0, 6b8491cdc0, a1dd6e3f58, 86ba3530c9, e1f3b15003, 1577e92a66, 7b67f88769, 043233dabe,
d6652cfb75, 140ed608ab, 98211a27b8, 4e4d45e555, 01e41acb5c, 3dce2017f8, ed2f1b8d9c, b64875df21, fc90507b3f, df0efd24d3,
e5dd7e76ce, 12fbb67a09, df490af7b6, d930c3e2f6, e4258cb12e
.env.example — new file (69 lines)
@@ -0,0 +1,69 @@
+### Database
+DB_HOST=
+DB_PORT=3306
+DB_USERNAME=
+DB_PASSWORD=
+DB_NAME=
+TZ=Europe/Paris
+
+### Database Migration
+#TARGET_DB_HOST=
+#TARGET_DB_PORT=3306
+#TARGET_DB_NAME=
+#TARGET_DB_USERNAME=
+#TARGET_DB_PASSWORD=
+
+### Backup restoration
+#FILE_NAME=
+### AWS S3 Storage
+#ACCESS_KEY=
+#SECRET_KEY=
+#AWS_S3_BUCKET_NAME=
+#AWS_S3_ENDPOINT=
+#AWS_REGION=
+#AWS_S3_PATH=
+#AWS_DISABLE_SSL=false
+#AWS_FORCE_PATH_STYLE=true
+
+### Backup Cron Expression
+#BACKUP_CRON_EXPRESSION=@midnight
+##Delete old backup created more than specified days ago
+#BACKUP_RETENTION_DAYS=7
+
+####SSH Storage
+#SSH_HOST_NAME=
+#SSH_PORT=22
+#SSH_USER=
+#SSH_PASSWORD=
+#SSH_IDENTIFY_FILE=/tmp/id_ed25519
+
+####FTP Storage
+#FTP_PASSWORD=
+#FTP_HOST_NAME=
+#FTP_USER=
+#FTP_PORT=21
+#REMOTE_PATH=
+#### Backup encryption
+#GPG_PUBLIC_KEY=/config/public_key.asc
+#GPG_PRIVATE_KEY=/config/private_key.asc
+#GPG_PASSPHRASE=Your strong passphrase
+## For multiple database backup on Docker or Docker in Swarm mode
+#BACKUP_CONFIG_FILE=/config/config.yaml
+### Database restoration
+#FILE_NAME=
+### Notification
+#BACKUP_REFERENCE=K8s/Paris cluster
+## Telegram
+#TG_TOKEN=
+#TG_CHAT_ID=
+### Email
+#MAIL_HOST=
+#MAIL_PORT=
+#MAIL_USERNAME=
+#MAIL_PASSWORD=
+#MAIL_FROM=Backup Jobs <backup-jobs@example.com>
+#MAIL_TO=backup@example.com,me@example.com,team@example.com
+#MAIL_SKIP_TLS=false
+
+
+
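The variable names above map directly onto the container's configuration. As a rough illustration only — not the project's actual code — a Go sketch of how such settings are typically pulled from the environment with fallback defaults:

```go
package main

import (
	"fmt"
	"os"
)

// getenv returns the value of key, or def when the variable is unset or empty.
func getenv(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	// Hypothetical config struct; field names mirror the .env.example keys.
	cfg := struct {
		Host, Port, User, Name string
	}{
		Host: getenv("DB_HOST", "localhost"),
		Port: getenv("DB_PORT", "3306"), // same default as the example file
		User: getenv("DB_USERNAME", "root"),
		Name: getenv("DB_NAME", ""),
	}
	fmt.Printf("connecting to %s:%s/%s as %s\n", cfg.Host, cfg.Port, cfg.Name, cfg.User)
}
```

With Docker, the whole file can also be passed as-is via `docker run --env-file .env jkaninda/mysql-bkup backup`.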
.github/dependabot.yml — new file, vendored (10 lines)
@@ -0,0 +1,10 @@
+version: 2
+updates:
+  - package-ecosystem: docker
+    directory: /
+    schedule:
+      interval: weekly
+  - package-ecosystem: gomod
+    directory: /
+    schedule:
+      interval: weekly
.golangci.yml — new file (43 lines)
@@ -0,0 +1,43 @@
+run:
+  timeout: 5m
+  allow-parallel-runners: true
+
+issues:
+  # don't skip warning about doc comments
+  # don't exclude the default set of lint
+  exclude-use-default: false
+  # restore some of the defaults
+  # (fill in the rest as needed)
+  exclude-rules:
+    - path: "internal/*"
+      linters:
+        - dupl
+        - lll
+        - goimports
+linters:
+  disable-all: true
+  enable:
+    - dupl
+    - errcheck
+    - copyloopvar
+    - ginkgolinter
+    - goconst
+    - gocyclo
+    - gofmt
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - nakedret
+    - prealloc
+    - revive
+    - staticcheck
+    - typecheck
+    - unconvert
+    - unparam
+    - unused
+
+linters-settings:
+  revive:
+    rules:
+      - name: comment-spacings
Dockerfile (68 lines changed)
@@ -1,5 +1,6 @@
-FROM golang:1.22.5 AS build
+FROM golang:1.23.3 AS build
 WORKDIR /app
+ARG appVersion=""

 # Copy the source code.
 COPY . .
@@ -7,73 +8,36 @@ COPY . .
 RUN go mod download

 # Build
-RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
+RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-X 'github.com/jkaninda/mysql-bkup/utils.Version=${appVersion}'" -o /app/mysql-bkup

-FROM alpine:3.20.3
+FROM alpine:3.21.0
-ENV DB_HOST=""
-ENV DB_NAME=""
-ENV DB_USERNAME=""
-ENV DB_PASSWORD=""
-ENV DB_PORT=3306
-ENV STORAGE=local
-ENV AWS_S3_ENDPOINT=""
-ENV AWS_S3_BUCKET_NAME=""
-ENV AWS_ACCESS_KEY=""
-ENV AWS_SECRET_KEY=""
-ENV AWS_S3_PATH=""
-ENV AWS_REGION="us-west-2"
-ENV AWS_DISABLE_SSL="false"
-ENV AWS_FORCE_PATH_STYLE="true"
-ENV GPG_PASSPHRASE=""
-ENV SSH_USER=""
-ENV SSH_PASSWORD=""
-ENV SSH_HOST=""
-ENV SSH_IDENTIFY_FILE=""
-ENV SSH_PORT=22
-ENV REMOTE_PATH=""
-ENV FTP_HOST=""
-ENV FTP_PORT=21
-ENV FTP_USER=""
-ENV FTP_PASSWORD=""
-ENV TARGET_DB_HOST=""
-ENV TARGET_DB_PORT=3306
-ENV TARGET_DB_NAME=""
-ENV TARGET_DB_USERNAME=""
-ENV TARGET_DB_PASSWORD=""
-ENV BACKUP_CRON_EXPRESSION=""
-ENV TG_TOKEN=""
-ENV TG_CHAT_ID=""
 ENV TZ=UTC
 ARG WORKDIR="/config"
 ARG BACKUPDIR="/backup"
 ARG BACKUP_TMP_DIR="/tmp/backup"
-ARG appVersion="v1.2.12"
+ARG TEMPLATES_DIR="/config/templates"
+ARG appVersion=""
 ENV VERSION=${appVersion}
 LABEL author="Jonas Kaninda"
 LABEL version=${appVersion}
+LABEL github="github.com/jkaninda/mysql-bkup"

-RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata
+RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata ca-certificates
-RUN mkdir $WORKDIR
-RUN mkdir $BACKUPDIR
-RUN mkdir -p $BACKUP_TMP_DIR
-RUN chmod 777 $WORKDIR
-RUN chmod 777 $BACKUPDIR
-RUN chmod 777 $BACKUP_TMP_DIR
-RUN chmod 777 $WORKDIR
+RUN mkdir -p $WORKDIR $BACKUPDIR $TEMPLATES_DIR $BACKUP_TMP_DIR && \
+    chmod a+rw $WORKDIR $BACKUPDIR $BACKUP_TMP_DIR

 COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
-RUN chmod +x /usr/local/bin/mysql-bkup
-RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
+COPY ./templates/* $TEMPLATES_DIR/
+RUN chmod +x /usr/local/bin/mysql-bkup && \
+    ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup

 # Create backup script and make it executable
-RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
+RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \
     chmod +x /usr/local/bin/backup
 # Create restore script and make it executable
-RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
+RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \
     chmod +x /usr/local/bin/restore
 # Create migrate script and make it executable
-RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
+RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \
     chmod +x /usr/local/bin/migrate

 WORKDIR $WORKDIR
README.md (58 lines changed)
@@ -1,11 +1,7 @@
-# MySQL Backup
+# MYSQL-BKUP
-MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH compatible storage.
-It also supports __encrypting__ your backups using GPG.
-
-The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
+**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
-It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3, FTP or SSH compatible storage.
+It supports a variety of storage options and ensures data security through GPG encryption.

-It also supports database __encryption__ using GPG.

 [](https://github.com/jkaninda/mysql-bkup/actions/workflows/release.yml)
 [](https://goreportcard.com/report/github.com/jkaninda/mysql-bkup)
@@ -13,6 +9,37 @@ It also supports database __encryption__ using GPG.

 <a href="https://ko-fi.com/jkaninda"><img src="https://uploads-ssl.webflow.com/5c14e387dab576fe667689cf/5cbed8a4ae2b88347c06c923_BuyMeACoffee_blue.png" height="20" alt="buy ma a coffee"></a>

+## Features
+
+- **Storage Options:**
+  - Local storage
+  - AWS S3 or any S3-compatible object storage
+  - FTP
+  - SSH-compatible storage
+  - Azure Blob storage
+
+- **Data Security:**
+  - Backups can be encrypted using **GPG** to ensure confidentiality.
+
+- **Deployment Flexibility:**
+  - Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
+  - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
+  - Supports recurring backups of MySQL databases when deployed:
+    - On Docker for automated backup schedules.
+    - As a **Job** or **CronJob** on Kubernetes.
+
+- **Notifications:**
+  - Get real-time updates on backup success or failure via:
+    - **Telegram**
+    - **Email**
+
+## Use Cases
+
+- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
+- **Cross-Environment Migration:** Easily migrate your MySQL databases across different environments using supported storage options.
+- **Secure Backup Management:** Protect your data with GPG encryption.
+
 Successfully tested on:
 - Docker
 - Docker in Swarm mode
@@ -34,8 +61,9 @@ Successfully tested on:
 ## Storage:
 - Local
 - AWS S3 or any S3 Alternatives for Object Storage
-- SSH remote server
+- SSH remote storage server
+- FTP remote storage server
+- Azure Blob storage
 ## Quickstart

 ### Simple backup using Docker CLI
@@ -83,11 +111,10 @@ services:
       - TZ=Europe/Paris
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
 networks:
   web:
 ```

 ### Docker recurring backup

 ```shell
@@ -96,7 +123,7 @@ networks:
   -e "DB_HOST=hostname" \
   -e "DB_USERNAME=user" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
+  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" #@midnight
 ```
 See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules

@@ -159,16 +186,11 @@ docker pull ghcr.io/jkaninda/mysql-bkup

 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.

-## Supported Engines
-
-This image is developed and tested against the Docker CE engine and Kubernetes exclusively.
-While it may work against different implementations, there are no guarantees about support for non-Docker engines.
-
 ## References

 We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:

-- The original image is based on `alpine` and requires additional tools, making it heavy.
+- The original image is based on `Alpine` and requires additional tools, making it heavy.
 - This image is written in Go.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
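The README's recurring-backup examples rely on cron-style expressions such as `@every 15m` and `@midnight`. These follow the `robfig/cron` syntax (the dependency appears in the go.mod diff at the end of this comparison). A self-contained sketch of how such an expression drives a schedule — illustrative only, not the tool's own scheduler code:

```go
package main

import (
	"log"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// The default parser accepts standard 5-field expressions as well as
	// descriptors such as @daily, @midnight and @every <duration>.
	c := cron.New()
	_, err := c.AddFunc("@every 15m", func() {
		log.Println("backup job would run here")
	})
	if err != nil {
		log.Fatal(err)
	}
	c.Start()
	time.Sleep(time.Hour) // keep the process alive so scheduled runs can fire
}
```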
@@ -1,9 +1,27 @@
-// Package cmd /
-/*****
-@author Jonas Kaninda
-@license MIT License <https://opensource.org/licenses/MIT>
-@Copyright © 2024 Jonas Kaninda
-**/
+/*
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
 package cmd

 import (
@@ -20,18 +38,16 @@ var BackupCmd = &cobra.Command{
 		if len(args) == 0 {
 			pkg.StartBackup(cmd)
 		} else {
-			utils.Fatal("Error, no argument required")
+			utils.Fatal(`"backup" accepts no argument %q`, args)
 		}
 	},
 }

 func init() {
 	//Backup
-	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
 	BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
 	BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
-	BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
-	BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")
 	BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")

 }
@@ -1,9 +1,27 @@
-// Package cmd /
-/*****
-@author Jonas Kaninda
-@license MIT License <https://opensource.org/licenses/MIT>
-@Copyright © 2024 Jonas Kaninda
-**/
+/* …the same full MIT license comment block as in the previous file… */
+
 package cmd

 import (
@@ -19,7 +37,7 @@ var MigrateCmd = &cobra.Command{
 		if len(args) == 0 {
 			pkg.StartMigration(cmd)
 		} else {
-			utils.Fatal("Error, no argument required")
+			utils.Fatal(`"migrate" accepts no argument %q`, args)
 		}
 	},
@@ -1,3 +1,27 @@
+/* …the same full MIT license comment block, added at the top of the file… */
+
 package cmd

 import (
@@ -14,7 +38,7 @@ var RestoreCmd = &cobra.Command{
 		if len(args) == 0 {
 			pkg.StartRestore(cmd)
 		} else {
-			utils.Fatal("Error, no argument required")
+			utils.Fatal(`"restore" accepts no argument %q`, args)
 		}
 	},

@@ -24,7 +48,7 @@ var RestoreCmd = &cobra.Command{
 func init() {
 	//Restore
 	RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
-	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
 	RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")

 }
cmd/root.go (30 lines changed)
@@ -1,9 +1,27 @@
-// Package cmd /
-/*****
-@author Jonas Kaninda
-@license MIT License <https://opensource.org/licenses/MIT>
-@Copyright © 2024 Jonas Kaninda
-**/
+/* …the same full MIT license comment block as in the previous files… */
+
 package cmd

 import (
@@ -1,13 +1,32 @@
-// Package cmd /
-/*****
-@author Jonas Kaninda
-@license MIT License <https://opensource.org/licenses/MIT>
-@Copyright © 2024 Jonas Kaninda
-**/
+/* …the same full MIT license comment block as in the previous files… */
+
 package cmd

 import (
 	"fmt"
+	"github.com/jkaninda/mysql-bkup/utils"
 	"github.com/spf13/cobra"
 	"os"
 )
@@ -23,6 +42,6 @@ var VersionCmd = &cobra.Command{
 }

 func Version() {
-	fmt.Printf("Version: %s \n", appVersion)
+	fmt.Printf("Version: %s \n", utils.Version)
 	fmt.Println()
 }
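The switch from a hard-coded `appVersion` to `utils.Version` pairs with the new `-ldflags "-X 'github.com/jkaninda/mysql-bkup/utils.Version=${appVersion}'"` build flag in the Dockerfile diff above. A minimal sketch of that mechanism — the module path and default value below are illustrative, not the project's exact files:

```go
// File: utils/version.go (illustrative)
package utils

// Version is a plain package-level variable that the linker can overwrite
// at build time, e.g.:
//   go build -ldflags "-X 'example.com/app/utils.Version=v1.3.0'"
// When the -X flag is omitted, the in-source default is used.
var Version = "dev"
```

Any caller — here `cmd.Version()` — simply prints `utils.Version`; with the Dockerfile's default `appVersion=""` the injected value would be empty unless the build argument is supplied.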
docs/how-tos/azure-blob.md — new file (44 lines)
@@ -0,0 +1,44 @@
+---
+title: Azure Blob storage
+layout: default
+parent: How Tos
+nav_order: 5
+---
+# Azure Blob storage
+
+{: .note }
+As described on local backup section, to change the storage of you backup and use Azure Blob as storage. You need to add `--storage azure` (-s azure).
+You can also specify a folder where you want to save you data by adding `--path my-custom-path` flag.
+
+
+## Backup to Azure Blob storage
+
+```yml
+services:
+  mysql-bkup:
+    # In production, it is advised to lock your image tag to a proper
+    # release version instead of using `latest`.
+    # Check https://github.com/jkaninda/mysql-bkup/releases
+    # for a list of available releases.
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: backup --storage azure -d database --path my-custom-path
+    environment:
+      - DB_PORT=3306
+      - DB_HOST=mysql
+      - DB_NAME=database
+      - DB_USERNAME=username
+      - DB_PASSWORD=password
+      ## Azure Blob configurations
+      - AZURE_STORAGE_CONTAINER_NAME=backup-container
+      - AZURE_STORAGE_ACCOUNT_NAME=account-name
+      - AZURE_STORAGE_ACCOUNT_KEY=Ppby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
+    # mysql-bkup container must be connected to the same network with your database
+    networks:
+      - web
+networks:
+  web:
+```
+
+
+
@@ -37,6 +37,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio

     # mysql-bkup container must be connected to the same network with your database
     networks:
@@ -73,8 +74,11 @@ services:
       - AWS_ACCESS_KEY=xxxx
       - AWS_SECRET_KEY=xxxxx
       # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
+      #Delete old backup created more than specified days ago
+      #- BACKUP_RETENTION_DAYS=7
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
@@ -82,53 +86,3 @@ networks:
   web:
 ```

-## Deploy on Kubernetes
-
-For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob.
-
-### Simple Kubernetes CronJob usage:
-
-```yaml
-apiVersion: batch/v1
-kind: CronJob
-metadata:
-  name: bkup-job
-spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - backup -s s3 --path /custom_path
-              env:
-                - name: DB_PORT
-                  value: "3306"
-                - name: DB_HOST
-                  value: ""
-                - name: DB_NAME
-                  value: ""
-                - name: DB_USERNAME
-                  value: ""
-                # Please use secret!
-                - name: DB_PASSWORD
-                  value: ""
-                - name: AWS_S3_ENDPOINT
-                  value: "https://s3.amazonaws.com"
-                - name: AWS_S3_BUCKET_NAME
-                  value: "xxx"
-                - name: AWS_REGION
-                  value: "us-west-2"
-                - name: AWS_ACCESS_KEY
-                  value: "xxxx"
-                - name: AWS_SECRET_KEY
-                  value: "xxxx"
-                - name: AWS_DISABLE_SSL
-                  value: "false"
-          restartPolicy: OnFailure
-```
@@ -79,6 +79,8 @@ services:
       - REMOTE_PATH=/home/jkaninda/backups
       - SSH_IDENTIFY_FILE=/tmp/id_ed25519
       # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
+      #Delete old backup created more than specified days ago
+      #- BACKUP_RETENTION_DAYS=7
       ## We advise you to use a private jey instead of password
       #- SSH_PASSWORD=password
     # mysql-bkup container must be connected to the same network with your database
@@ -87,55 +89,3 @@ services:
 networks:
   web:
 ```

-## Deploy on Kubernetes
-
-For Kubernetes, you don't need to run it in scheduled mode.
-You can deploy it as CronJob.
-
-Simple Kubernetes CronJob usage:
-
-```yaml
-apiVersion: batch/v1
-kind: CronJob
-metadata:
-  name: bkup-job
-spec:
-  schedule: "0 1 * * *"
-  jobTemplate:
-    spec:
-      template:
-        spec:
-          containers:
-            - name: mysql-bkup
-              image: jkaninda/mysql-bkup
-              command:
-                - /bin/sh
-                - -c
-                - backup -s ssh
-              env:
-                - name: DB_PORT
-                  value: "3306"
-                - name: DB_HOST
-                  value: ""
-                - name: DB_NAME
-                  value: ""
-                - name: DB_USERNAME
-                  value: ""
-                # Please use secret!
-                - name: DB_PASSWORD
-                  value: ""
-                - name: SSH_HOST
-                  value: ""
-                - name: SSH_PORT
-                  value: "22"
-                - name: SSH_USER
-                  value: "xxx"
-                - name: REMOTE_PATH
-                  value: "/home/jkaninda/backups"
-                - name: AWS_ACCESS_KEY
-                  value: "xxxx"
-                - name: SSH_IDENTIFY_FILE
-                  value: "/tmp/id_ed25519"
-          restartPolicy: Never
-```
@@ -75,6 +75,8 @@ services:
       - DB_USERNAME=username
       - DB_PASSWORD=password
      - BACKUP_CRON_EXPRESSION=0 1 * * *
+      #Delete old backup created more than specified days ago
+      #- BACKUP_RETENTION_DAYS=7
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
@@ -59,6 +59,8 @@ spec:
               value: "xxxx"
             - name: AWS_DISABLE_SSL
               value: "false"
+            - name: AWS_FORCE_PATH_STYLE
+              value: "false"
       restartPolicy: Never
 ```

@@ -81,13 +83,9 @@ spec:
           # for a list of available releases.
           image: jkaninda/mysql-bkup
           command:
             - /bin/sh
             - -c
-            - bkup
+            - backup --storage ssh
-            - backup
-            - --storage
-            - ssh
-            - --disable-compression
           resources:
             limits:
               memory: "128Mi"
@@ -116,7 +114,7 @@ spec:
               value: "/home/toto/backup"
             # Optional, required if you want to encrypt your backup
             - name: GPG_PASSPHRASE
-              value: "xxxx"
+              value: "secure-passphrase"
       restartPolicy: Never
 ```

@@ -139,13 +137,9 @@ spec:
           # for a list of available releases.
           image: jkaninda/mysql-bkup
           command:
             - /bin/sh
             - -c
-            - bkup
+            - backup --storage ssh --file store_20231219_022941.sql.gz
-            - restore
-            - --storage
-            - ssh
-            - --file store_20231219_022941.sql.gz
           resources:
             limits:
               memory: "128Mi"
@@ -238,7 +232,6 @@ spec:

 This image also supports Kubernetes security context, you can run it in Rootless environment.
 It has been tested on Openshift, it works well.
-Deployment on OpenShift is supported, you need to remove `securityContext` section on your yaml file.

 ```yaml
 apiVersion: batch/v1
@@ -301,3 +294,55 @@ spec:
           # value: "xxx"
       restartPolicy: OnFailure
 ```
+
+## Migrate database
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: migrate-db
+spec:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+        - name: mysql-bkup
+          # In production, it is advised to lock your image tag to a proper
+          # release version instead of using `latest`.
+          # Check https://github.com/jkaninda/mysql-bkup/releases
+          # for a list of available releases.
+          image: jkaninda/mysql-bkup
+          command:
+            - /bin/sh
+            - -c
+            - migrate
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          env:
+            ## Source Database
+            - name: DB_HOST
+              value: "mysql"
+            - name: DB_PORT
+              value: "3306"
+            - name: DB_NAME
+              value: "dbname"
+            - name: DB_USERNAME
+              value: "username"
+            - name: DB_PASSWORD
+              value: "password"
+            ## Target Database
+            - name: TARGET_DB_HOST
+              value: "target-mysql"
+            - name: TARGET_DB_PORT
+              value: "3306"
+            - name: TARGET_DB_NAME
+              value: "dbname"
+            - name: TARGET_DB_USERNAME
+              value: "username"
+            - name: TARGET_DB_PASSWORD
+              value: "password"
+      restartPolicy: Never
+```
@@ -78,54 +78,3 @@ TARGET_DB_PASSWORD=password
 jkaninda/mysql-bkup migrate
 ```

-## Kubernetes
-
-```yaml
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: migrate-db
-spec:
-  ttlSecondsAfterFinished: 100
-  template:
-    spec:
-      containers:
-        - name: mysql-bkup
-          # In production, it is advised to lock your image tag to a proper
-          # release version instead of using `latest`.
-          # Check https://github.com/jkaninda/mysql-bkup/releases
-          # for a list of available releases.
-          image: jkaninda/mysql-bkup
-          command:
-            - /bin/sh
-            - -c
-            - migrate
-          resources:
-            limits:
-              memory: "128Mi"
-              cpu: "500m"
-          env:
-            ## Source Database
-            - name: DB_HOST
-              value: "mysql"
-            - name: DB_PORT
-              value: "3306"
-            - name: DB_NAME
-              value: "dbname"
-            - name: DB_USERNAME
-              value: "username"
-            - name: DB_PASSWORD
-              value: "password"
-            ## Target Database
-            - name: TARGET_DB_HOST
-              value: "target-mysql"
-            - name: TARGET_DB_PORT
-              value: "3306"
-            - name: TARGET_DB_NAME
-              value: "dbname"
-            - name: TARGET_DB_USERNAME
-              value: "username"
-            - name: TARGET_DB_PASSWORD
-              value: "password"
-      restartPolicy: Never
-```
docs/how-tos/receive-notification.md — new file (162 lines)
@@ -0,0 +1,162 @@
+---
+title: Receive notifications
+layout: default
+parent: How Tos
+nav_order: 12
+---
+Send Email or Telegram notifications on successfully or failed backup.
+
+### Email
+To send out email notifications on failed or successfully backup runs, provide SMTP credentials, a sender and a recipient:
+
+```yaml
+services:
+  mysql-bkup:
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: backup
+    volumes:
+      - ./backup:/backup
+    environment:
+      - DB_PORT=3306
+      - DB_HOST=mysql
+      - DB_NAME=database
+      - DB_USERNAME=username
+      - DB_PASSWORD=password
+      - MAIL_HOST=
+      - MAIL_PORT=587
+      - MAIL_USERNAME=
+      - MAIL_PASSWORD=!
+      - MAIL_FROM=Backup Jobs <backup@example.com>
+      ## Multiple recipients separated by a comma
+      - MAIL_TO=me@example.com,team@example.com,manager@example.com
+      - MAIL_SKIP_TLS=false
+      ## Time format for notification
+      - TIME_FORMAT=2006-01-02 at 15:04:05
+      ## Backup reference, in case you want to identify every backup instance
+      - BACKUP_REFERENCE=database/Paris cluster
+    networks:
+      - web
+networks:
+  web:
+```
+
+### Telegram
+
+```yaml
+services:
+  mysql-bkup:
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: backup
+    volumes:
+      - ./backup:/backup
+    environment:
+      - DB_PORT=3306
+      - DB_HOST=mysql
+      - DB_NAME=database
+      - DB_USERNAME=username
+      - DB_PASSWORD=password
+      - TG_TOKEN=[BOT ID]:[BOT TOKEN]
+      - TG_CHAT_ID=
+      ## Time format for notification
+      - TIME_FORMAT=2006-01-02 at 15:04:05
+      ## Backup reference, in case you want to identify every backup instance
+      - BACKUP_REFERENCE=database/Paris cluster
+    networks:
+      - web
+networks:
+  web:
+```
+
+### Customize notifications
+
+The title and body of the notifications can be tailored to your needs using Go templates.
+Template sources must be mounted inside the container in /config/templates:
+
+- email.tmpl: Email notification template
+- telegram.tmpl: Telegram notification template
+- email-error.tmpl: Error notification template
+- telegram-error.tmpl: Error notification template
+
+### Data
+
+Here is a list of all data passed to the template:
+- `Database` : Database name
+- `StartTime`: Backup start time process
+- `EndTime`: Backup start time process
+- `Storage`: Backup storage
+- `BackupLocation`: Backup location
+- `BackupSize`: Backup size
+- `BackupReference`: Backup reference(eg: database/cluster name or server name)
+
+> email.template:
+
+```html
+<h2>Hi,</h2>
+<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
+<h3>Backup Details:</h3>
+<ul>
+  <li>Database Name: {{.Database}}</li>
+  <li>Backup Start Time: {{.StartTime}}</li>
+  <li>Backup End Time: {{.EndTime}}</li>
+  <li>Backup Storage: {{.Storage}}</li>
+  <li>Backup Location: {{.BackupLocation}}</li>
+  <li>Backup Size: {{.BackupSize}} bytes</li>
+  <li>Backup Reference: {{.BackupReference}} </li>
+</ul>
+<p>Best regards,</p>
+```
+
+> telegram.template
+
+```html
+✅ Database Backup Notification – {{.Database}}
+Hi,
+Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.
+
+Backup Details:
+- Database Name: {{.Database}}
+- Backup Start Time: {{.StartTime}}
+- Backup EndTime: {{.EndTime}}
+- Backup Storage: {{.Storage}}
+- Backup Location: {{.BackupLocation}}
+- Backup Size: {{.BackupSize}} bytes
+- Backup Reference: {{.BackupReference}}
+```
+
+> email-error.template
+
+```html
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="UTF-8">
+  <title>🔴 Urgent: Database Backup Failure Notification</title>
+</head>
+<body>
+<h2>Hi,</h2>
+<p>An error occurred during database backup.</p>
+<h3>Failure Details:</h3>
+<ul>
+  <li>Error Message: {{.Error}}</li>
+  <li>Date: {{.EndTime}}</li>
+  <li>Backup Reference: {{.BackupReference}} </li>
+</ul>
+</body>
+</html>
+```
+
+> telegram-error.template
+
+```html
+🔴 Urgent: Database Backup Failure Notification
+
+An error occurred during database backup.
+Failure Details:
+
+Error Message: {{.Error}}
+Date: {{.EndTime}}
+```
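A quick way to check a custom template before mounting it into `/config/templates` is to render it locally with Go's standard `text/template` package against the documented fields. The struct and sample values below are an illustrative stand-in, not the project's internal type:

```go
package main

import (
	"os"
	"text/template"
)

// notificationData mirrors the fields listed under "Data" above (hypothetical struct).
type notificationData struct {
	Database        string
	StartTime       string
	EndTime         string
	Storage         string
	BackupLocation  string
	BackupSize      int64
	BackupReference string
}

func main() {
	// Parse a local copy of the template you plan to mount into the container.
	tmpl := template.Must(template.ParseFiles("templates/telegram.tmpl"))
	data := notificationData{
		Database:        "store",
		StartTime:       "2024-12-06 at 01:00:00",
		EndTime:         "2024-12-06 at 01:02:41",
		Storage:         "s3",
		BackupLocation:  "s3://my-bucket/custom_path/store_20241206_010000.sql.gz",
		BackupSize:      52_428_800,
		BackupReference: "database/Paris cluster",
	}
	// Write the rendered notification body to stdout for inspection.
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```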
@@ -10,7 +10,7 @@ nav_order: 6
 To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.

 {: .note }
-It supports __.sql__ and __.sql.gz__ compressed file.
+It supports __.sql__,__.sql.gpg__ and __.sql.gz__,__.sql.gz.gpg__ compressed file.

 ### Restore

@@ -40,56 +40,10 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE="false"
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
 networks:
   web:
 ```

-## Restore on Kubernetes
-
-Simple Kubernetes restore Job:
-
-```yaml
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: restore-db
-spec:
-  template:
-    spec:
-      containers:
-        - name: mysql-bkup
-          image: jkaninda/mysql-bkup
-          command:
-            - /bin/sh
-            - -c
-            - restore -s s3 --path /custom_path -f store_20231219_022941.sql.gz
-          env:
-            - name: DB_PORT
-              value: "3306"
-            - name: DB_HOST
-              value: ""
-            - name: DB_NAME
-              value: ""
-            - name: DB_USERNAME
-              value: ""
-            # Please use secret!
-            - name: DB_PASSWORD
-              value: ""
-            - name: AWS_S3_ENDPOINT
-              value: "https://s3.amazonaws.com"
-            - name: AWS_S3_BUCKET_NAME
-              value: "xxx"
-            - name: AWS_REGION
-              value: "us-west-2"
-            - name: AWS_ACCESS_KEY
-              value: "xxxx"
-            - name: AWS_SECRET_KEY
-              value: "xxxx"
-            - name: AWS_DISABLE_SSL
-              value: "false"
-      restartPolicy: Never
-  backoffLimit: 4
-```
@@ -9,7 +9,7 @@ nav_order: 7
 To restore the database from your remote server, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.

 {: .note }
-It supports __.sql__ and __.sql.gz__ compressed file.
+It supports __.sql__,__.sql.gpg__ and __.sql.gz__,__.sql.gz.gpg__ compressed file.

 ### Restore

@@ -44,50 +44,4 @@ services:
       - web
 networks:
   web:
 ```
-
-## Restore on Kubernetes
-
-Simple Kubernetes restore Job:
-
-```yaml
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: restore-db
-spec:
-  template:
-    spec:
-      containers:
-        - name: mysql-bkup
-          image: jkaninda/mysql-bkup
-          command:
-            - /bin/sh
-            - -c
-            - restore -s ssh -f store_20231219_022941.sql.gz
-          env:
-            - name: DB_PORT
-              value: "3306"
-            - name: DB_HOST
-              value: ""
-            - name: DB_NAME
-              value: ""
-            - name: DB_USERNAME
-              value: ""
-            # Please use secret!
-            - name: DB_PASSWORD
-              value: ""
-            - name: SSH_HOST_NAME
-              value: ""
-            - name: SSH_PORT
-              value: "22"
-            - name: SSH_USER
-              value: "xxx"
-            - name: SSH_REMOTE_PATH
-              value: "/home/jkaninda/backups"
-            - name: AWS_ACCESS_KEY
-              value: "xxxx"
-            - name: SSH_IDENTIFY_FILE
-              value: "/tmp/id_ed25519"
-      restartPolicy: Never
-  backoffLimit: 4
-```
@@ -10,7 +10,7 @@ nav_order: 5
 To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`.

 {: .note }
-It supports __.sql__ and __.sql.gz__ compressed file.
+It supports __.sql__,__.sql.gpg__ and __.sql.gz__,__.sql.gz.gpg__ compressed file.

 ### Restore

@@ -6,20 +6,40 @@ nav_order: 1

 # About mysql-bkup
 {:.no_toc}
-MySQL Backup is a Docker container image that can be used to backup, restore and migrate MySQL database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, FTP and SSH remote storage.
-It also supports __encrypting__ your backups using GPG.
-
-We are open to receiving stars, PRs, and issues!
-
-{: .fs-6 .fw-300 }
-
----
-
-The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
-It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage.
-
-It also supports database __encryption__ using GPG.
+**MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**.
+It supports a variety of storage options and ensures data security through GPG encryption.
+
+## Features
+
+- **Storage Options:**
+  - Local storage
+  - AWS S3 or any S3-compatible object storage
+  - FTP
+  - SSH-compatible storage
+  - Azure Blob storage
+
+- **Data Security:**
+  - Backups can be encrypted using **GPG** to ensure confidentiality.
+
+- **Deployment Flexibility:**
+  - Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image.
+  - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**.
+  - Supports recurring backups of MySQL databases when deployed:
+    - On Docker for automated backup schedules.
+    - As a **Job** or **CronJob** on Kubernetes.
+
+- **Notifications:**
+  - Get real-time updates on backup success or failure via:
+    - **Telegram**
+    - **Email**
+
+## Use Cases
+
+- **Automated Recurring Backups:** Schedule regular backups for MySQL databases.
+- **Cross-Environment Migration:** Easily migrate your MySQL databases across different environments using supported storage options.
+- **Secure Backup Management:** Protect your data with GPG encryption.

 {: .note }
@@ -88,7 +108,7 @@ networks:
   -e "DB_HOST=hostname" \
   -e "DB_USERNAME=user" \
   -e "DB_PASSWORD=password" \
-  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
+  jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" #@midnight
 ```
 See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules

@@ -26,8 +26,6 @@ Backup, restore and migrate targets, schedule and retention are configured using
|
|||||||
| --dbname | -d | Database name |
|
| --dbname | -d | Database name |
|
||||||
| --port | -p | Database port (default: 3306) |
|
| --port | -p | Database port (default: 3306) |
|
||||||
| --disable-compression | | Disable database backup compression |
|
| --disable-compression | | Disable database backup compression |
|
||||||
| --prune | | Delete old backup, default disabled |
|
|
||||||
| --keep-last | | Delete old backup created more than specified days ago, default 7 days |
|
|
||||||
| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily |
|
| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily |
|
||||||
| --help | -h | Print this help message and exit |
|
| --help | -h | Print this help message and exit |
|
||||||
| --version | -V | Print version information and exit |
|
| --version | -V | Print version information and exit |
|
||||||
@@ -52,6 +50,7 @@ Backup, restore and migrate targets, schedule and retention are configured using
 | GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
 | GPG_PUBLIC_KEY | Optional, required to encrypt backup | GPG public key, used to encrypt backup (/config/public_key.asc) |
 | BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode |
+| BACKUP_RETENTION_DAYS | Optional | Delete old backup created more than specified days ago |
 | SSH_HOST | Optional, required for SSH storage | ssh remote hostname or ip |
 | SSH_USER | Optional, required for SSH storage | ssh remote user |
 | SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
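`BACKUP_RETENTION_DAYS` only takes effect together with pruning: backups older than the given number of days are deleted from the configured storage. A stdlib-only sketch of what that retention rule means for a local backup directory; the directory path and the 7-day value are placeholders, not the project's implementation:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// pruneOldBackups removes regular files in dir whose modification time is
// older than retentionDays days.
func pruneOldBackups(dir string, retentionDays int) error {
	cutoff := time.Now().AddDate(0, 0, -retentionDays)
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		info, err := e.Info()
		if err != nil {
			return err
		}
		if !info.IsDir() && info.ModTime().Before(cutoff) {
			if err := os.Remove(filepath.Join(dir, e.Name())); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	if err := pruneOldBackups("/backup", 7); err != nil {
		fmt.Println("prune failed:", err)
	}
}
```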
@@ -21,6 +21,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
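`AWS_DISABLE_SSL` and `AWS_FORCE_PATH_STYLE` correspond to the standard S3 client options of the same meaning. A hedged sketch of the equivalent aws-sdk-go configuration, handy for checking connectivity against a MinIO-style endpoint; the endpoint, credentials and bucket below are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Path-style addressing and plain HTTP are what the two compose
	// variables above toggle; both are commonly needed for MinIO.
	sess, err := session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://minio:9000"), // placeholder endpoint
		Credentials:      credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", ""),
		DisableSSL:       aws.Bool(true), // AWS_DISABLE_SSL=true
		S3ForcePathStyle: aws.Bool(true), // AWS_FORCE_PATH_STYLE=true
	})
	if err != nil {
		panic(err)
	}
	client := s3.New(sess)
	out, err := client.ListObjectsV2(&s3.ListObjectsV2Input{Bucket: aws.String("my-bucket")})
	if err != nil {
		panic(err)
	}
	fmt.Println("objects in bucket:", len(out.Contents))
}
```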
@@ -21,6 +21,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
     # See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
       - BACKUP_CRON_EXPRESSION=@daily #@every 5m|@weekly | @monthly |0 1 * * *
     # mysql-bkup container must be connected to the same network with your database
@@ -44,4 +44,6 @@ spec:
         value: "xxxx"
       - name: AWS_DISABLE_SSL
         value: "false"
+      - name: AWS_FORCE_PATH_STYLE
+        value: "true"
   restartPolicy: Never
go.mod
@@ -1,32 +1,38 @@
 module github.com/jkaninda/mysql-bkup
 
-go 1.22.5
+go 1.23.2
 
-require github.com/spf13/pflag v1.0.5
+require github.com/spf13/pflag v1.0.5 // indirect
 
 require (
-	github.com/ProtonMail/gopenpgp/v2 v2.7.5
-	github.com/aws/aws-sdk-go v1.55.3
-	github.com/bramvdbogaerde/go-scp v1.5.0
-	github.com/hpcloud/tail v1.0.0
-	github.com/jlaffaye/ftp v0.2.0
+	github.com/go-mail/mail v2.3.1+incompatible
+	github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221
+	github.com/jkaninda/go-storage v0.1.2
 	github.com/robfig/cron/v3 v3.0.1
-	github.com/spf13/cobra v1.8.0
-	golang.org/x/crypto v0.18.0
+	github.com/spf13/cobra v1.8.1
 	gopkg.in/yaml.v3 v3.0.1
 )
 
 require (
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 // indirect
 	github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
 	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
+	github.com/ProtonMail/gopenpgp/v2 v2.7.5 // indirect
+	github.com/aws/aws-sdk-go v1.55.5 // indirect
+	github.com/bramvdbogaerde/go-scp v1.5.0 // indirect
 	github.com/cloudflare/circl v1.3.3 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jlaffaye/ftp v0.2.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	golang.org/x/sys v0.22.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
-	gopkg.in/fsnotify.v1 v1.4.7 // indirect
-	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+	golang.org/x/crypto v0.28.0 // indirect
+	golang.org/x/net v0.29.0 // indirect
+	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/text v0.19.0 // indirect
+	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
+	gopkg.in/mail.v2 v2.3.1 // indirect
 )
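Among the dependency changes, github.com/jkaninda/go-storage replaces the hand-rolled upload code and exposes a uniform Copy/CopyFrom/Prune API per backend. A small sketch of the local backend as it is used elsewhere in this changeset; the paths and file name are placeholders:

```go
package main

import (
	"log"

	"github.com/jkaninda/go-storage/pkg/local"
)

func main() {
	// Copy a finished backup from the working directory to the configured
	// storage path, then prune anything older than 7 days. This mirrors the
	// tmpPath/storagePath pattern used in pkg/backup.go.
	localStorage := local.NewStorage(local.Config{
		LocalPath:  "/tmp/backup",
		RemotePath: "/backup",
	})
	if err := localStorage.Copy("database_20240101_000000.sql.gz"); err != nil {
		log.Fatalf("copy failed: %v", err)
	}
	if err := localStorage.Prune(7); err != nil {
		log.Fatalf("prune failed: %v", err)
	}
}
```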
56
go.sum
56
go.sum
@@ -1,11 +1,15 @@
|
|||||||
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
|
||||||
|
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
|
||||||
|
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
|
||||||
|
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
|
||||||
|
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs=
|
||||||
|
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
|
||||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
|
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
|
||||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
|
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
|
||||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
|
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
|
||||||
github.com/ProtonMail/gopenpgp/v2 v2.7.5 h1:STOY3vgES59gNgoOt2w0nyHBjKViB/qSg7NjbQWPJkA=
|
github.com/ProtonMail/gopenpgp/v2 v2.7.5 h1:STOY3vgES59gNgoOt2w0nyHBjKViB/qSg7NjbQWPJkA=
|
||||||
github.com/ProtonMail/gopenpgp/v2 v2.7.5/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
|
github.com/ProtonMail/gopenpgp/v2 v2.7.5/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
|
||||||
github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E=
|
|
||||||
github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
|
||||||
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
|
||||||
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||||
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
|
github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM=
|
||||||
@@ -13,47 +17,55 @@ github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9Hu
|
|||||||
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||||
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
|
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
|
||||||
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=
|
||||||
|
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/go-mail/mail v2.3.1+incompatible h1:UzNOn0k5lpfVtO31cK3hn6I4VEVGhe3lX8AJBAxXExM=
|
||||||
|
github.com/go-mail/mail v2.3.1+incompatible/go.mod h1:VPWjmmNyRsWXQZHVHT3g0YbIINUkSmuKOiLIDkWbL6M=
|
||||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
|
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221 h1:AwkCf7el1kzeCJ89A+gUAK0ero5JYnvLOKsYMzq+rs4=
|
||||||
|
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221/go.mod h1:9F8ZJ+ZXE8DZBo77+aneGj8LMjrYXX6eFUCC/uqZOUo=
|
||||||
|
github.com/jkaninda/go-storage v0.1.1 h1:vjpdD/fh39S5HGyfHvLE5HGYOEPIukINlOX3OnM3GW4=
|
||||||
|
github.com/jkaninda/go-storage v0.1.1/go.mod h1:7VK5gQISQaLxtLfBtc+een8spcgLVSBAKTRuyF1N81I=
|
||||||
|
github.com/jkaninda/go-storage v0.1.2 h1:d7+TRPjmHXdSqO0wne3KAB8zt9ih8lf5D8aL4n7/Dds=
|
||||||
|
github.com/jkaninda/go-storage v0.1.2/go.mod h1:zVRnLprBk/9AUz2+za6Y03MgoNYrqKLy3edVtjqMaps=
|
||||||
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
|
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
|
||||||
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
|
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
|
||||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||||
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
|
||||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
|
||||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||||
|
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||||
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
|
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
|
||||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
|
||||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
|
||||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
@@ -62,6 +74,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
|
|||||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||||
|
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
|
||||||
|
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@@ -74,31 +88,35 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
|
||||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||||
|
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
|
||||||
|
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
|
||||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
|
||||||
|
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
|
||||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
|||||||
main.go
@@ -1,9 +1,27 @@
-// Package main /
-/*****
-@author Jonas Kaninda
-@license MIT License <https://opensource.org/licenses/MIT>
-@Copyright © 2024 Jonas Kaninda
-**/
+/*
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
 package main
 
 import "github.com/jkaninda/mysql-bkup/cmd"
pkg/azure.go (new file, 121 lines)

/*
MIT License

Copyright (c) 2023 Jonas Kaninda

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

package pkg

import (
	"fmt"
	"github.com/jkaninda/go-storage/pkg/azure"
	"github.com/jkaninda/mysql-bkup/utils"

	"os"
	"path/filepath"
	"time"
)

func azureBackup(db *dbConfig, config *BackupConfig) {
	utils.Info("Backup database to the remote FTP server")
	startTime = time.Now().Format(utils.TimeFormat())

	// Backup database
	BackupDatabase(db, config.backupFileName, disableCompression)
	finalFileName := config.backupFileName
	if config.encryption {
		encryptBackup(config)
		finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
	}
	utils.Info("Uploading backup archive to Azure Blob storage ...")
	utils.Info("Backup name is %s", finalFileName)
	azureConfig := loadAzureConfig()
	azureStorage, err := azure.NewStorage(azure.Config{
		ContainerName: azureConfig.containerName,
		AccountName:   azureConfig.accountName,
		AccountKey:    azureConfig.accountKey,
		RemotePath:    config.remotePath,
		LocalPath:     tmpPath,
	})
	if err != nil {
		utils.Fatal("Error creating Azure storage: %s", err)
	}
	err = azureStorage.Copy(finalFileName)
	if err != nil {
		utils.Fatal("Error copying backup file: %s", err)
	}
	utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
	// Get backup info
	fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
	if err != nil {
		utils.Error("Error: %s", err)
	}
	backupSize = fileInfo.Size()
	// Delete backup file from tmp folder
	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
	if err != nil {
		utils.Error("Error deleting file: %v", err)

	}
	if config.prune {
		err := azureStorage.Prune(config.backupRetention)
		if err != nil {
			utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
		}

	}

	utils.Info("Uploading backup archive to Azure Blob storage ... done ")

	// Send notification
	utils.NotifySuccess(&utils.NotificationData{
		File:           finalFileName,
		BackupSize:     backupSize,
		Database:       db.dbName,
		Storage:        config.storage,
		BackupLocation: filepath.Join(config.remotePath, finalFileName),
		StartTime:      startTime,
		EndTime:        time.Now().Format(utils.TimeFormat()),
	})
	// Delete temp
	deleteTemp()
	utils.Info("Backup completed successfully")
}
func azureRestore(db *dbConfig, conf *RestoreConfig) {
	utils.Info("Restore database from Azure Blob storage")
	azureConfig := loadAzureConfig()
	azureStorage, err := azure.NewStorage(azure.Config{
		ContainerName: azureConfig.containerName,
		AccountName:   azureConfig.accountName,
		AccountKey:    azureConfig.accountKey,
		RemotePath:    conf.remotePath,
		LocalPath:     tmpPath,
	})
	if err != nil {
		utils.Fatal("Error creating SSH storage: %s", err)
	}

	err = azureStorage.CopyFrom(conf.file)
	if err != nil {
		utils.Fatal("Error downloading backup file: %s", err)
	}
	RestoreDatabase(db, conf)
}
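azureBackup and azureRestore read their settings through loadAzureConfig, which (per the configuration changes further down in this compare) expects AZURE_STORAGE_CONTAINER_NAME, AZURE_STORAGE_ACCOUNT_NAME and AZURE_STORAGE_ACCOUNT_KEY to be set. A minimal, illustrative way to fail fast when any of them is missing; this helper is a stand-in, not the project's utils.CheckEnvVars:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// requireEnv returns an error naming every variable in vars that is unset or empty.
func requireEnv(vars []string) error {
	var missing []string
	for _, v := range vars {
		if os.Getenv(v) == "" {
			missing = append(missing, v)
		}
	}
	if len(missing) > 0 {
		return fmt.Errorf("missing environment variables: %s", strings.Join(missing, ", "))
	}
	return nil
}

func main() {
	azureVars := []string{
		"AZURE_STORAGE_CONTAINER_NAME",
		"AZURE_STORAGE_ACCOUNT_NAME",
		"AZURE_STORAGE_ACCOUNT_KEY",
	}
	if err := requireEnv(azureVars); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Println("Azure Blob storage configuration looks complete")
}
```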
283
pkg/backup.go
283
pkg/backup.go
@@ -1,13 +1,34 @@
|
|||||||
// Package pkg /
|
// Package internal /
|
||||||
/*****
|
/*
|
||||||
@author Jonas Kaninda
|
MIT License
|
||||||
@license MIT License <https://opensource.org/licenses/MIT>
|
|
||||||
@Copyright © 2024 Jonas Kaninda
|
Copyright (c) 2023 Jonas Kaninda
|
||||||
**/
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/jkaninda/encryptor"
|
||||||
|
"github.com/jkaninda/go-storage/pkg/local"
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
"github.com/robfig/cron/v3"
|
"github.com/robfig/cron/v3"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
@@ -20,9 +41,9 @@ import (
|
|||||||
|
|
||||||
func StartBackup(cmd *cobra.Command) {
|
func StartBackup(cmd *cobra.Command) {
|
||||||
intro()
|
intro()
|
||||||
//Initialize backup configs
|
// Initialize backup configs
|
||||||
config := initBackupConfig(cmd)
|
config := initBackupConfig(cmd)
|
||||||
//Load backup configuration file
|
// Load backup configuration file
|
||||||
configFile, err := loadConfigFile()
|
configFile, err := loadConfigFile()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dbConf = initDbConfig(cmd)
|
dbConf = initDbConfig(cmd)
|
||||||
@@ -41,15 +62,16 @@ func StartBackup(cmd *cobra.Command) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run in scheduled mode
|
// scheduledMode Runs backup in scheduled mode
|
||||||
func scheduledMode(db *dbConfig, config *BackupConfig) {
|
func scheduledMode(db *dbConfig, config *BackupConfig) {
|
||||||
utils.Info("Running in Scheduled mode")
|
utils.Info("Running in Scheduled mode")
|
||||||
utils.Info("Backup cron expression: %s", config.cronExpression)
|
utils.Info("Backup cron expression: %s", config.cronExpression)
|
||||||
|
utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
|
||||||
utils.Info("Storage type %s ", config.storage)
|
utils.Info("Storage type %s ", config.storage)
|
||||||
|
|
||||||
//Test backup
|
// Test backup
|
||||||
utils.Info("Testing backup configurations...")
|
utils.Info("Testing backup configurations...")
|
||||||
BackupTask(db, config)
|
testDatabaseConnection(db)
|
||||||
utils.Info("Testing backup configurations...done")
|
utils.Info("Testing backup configurations...done")
|
||||||
utils.Info("Creating backup job...")
|
utils.Info("Creating backup job...")
|
||||||
// Create a new cron instance
|
// Create a new cron instance
|
||||||
@@ -57,6 +79,8 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
|
|||||||
|
|
||||||
_, err := c.AddFunc(config.cronExpression, func() {
|
_, err := c.AddFunc(config.cronExpression, func() {
|
||||||
BackupTask(db, config)
|
BackupTask(db, config)
|
||||||
|
utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
|
||||||
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
@@ -68,9 +92,22 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
|
|||||||
defer c.Stop()
|
defer c.Stop()
|
||||||
select {}
|
select {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// multiBackupTask backup multi database
|
||||||
|
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
|
||||||
|
for _, db := range databases {
|
||||||
|
// Check if path is defined in config file
|
||||||
|
if db.Path != "" {
|
||||||
|
bkConfig.remotePath = db.Path
|
||||||
|
}
|
||||||
|
BackupTask(getDatabase(db), bkConfig)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BackupTask backups database
|
||||||
func BackupTask(db *dbConfig, config *BackupConfig) {
|
func BackupTask(db *dbConfig, config *BackupConfig) {
|
||||||
utils.Info("Starting backup task...")
|
utils.Info("Starting backup task...")
|
||||||
//Generate file name
|
// Generate file name
|
||||||
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
|
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
|
||||||
if config.disableCompression {
|
if config.disableCompression {
|
||||||
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
|
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
|
||||||
@@ -85,59 +122,57 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
|
|||||||
sshBackup(db, config)
|
sshBackup(db, config)
|
||||||
case "ftp", "FTP":
|
case "ftp", "FTP":
|
||||||
ftpBackup(db, config)
|
ftpBackup(db, config)
|
||||||
//utils.Fatal("Not supported storage type: %s", config.storage)
|
case "azure":
|
||||||
|
azureBackup(db, config)
|
||||||
default:
|
default:
|
||||||
localBackup(db, config)
|
localBackup(db, config)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
|
|
||||||
for _, db := range databases {
|
|
||||||
//Check if path is defined in config file
|
|
||||||
if db.Path != "" {
|
|
||||||
bkConfig.remotePath = db.Path
|
|
||||||
}
|
|
||||||
BackupTask(getDatabase(db), bkConfig)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
|
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
|
||||||
utils.Info("Starting multiple backup jobs...")
|
utils.Info("Starting backup task...")
|
||||||
var conf = &Config{}
|
|
||||||
conf, err := readConf(configFile)
|
conf, err := readConf(configFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error reading config file: %s", err)
|
utils.Fatal("Error reading config file: %s", err)
|
||||||
}
|
}
|
||||||
//Check if cronExpression is defined in config file
|
// Check if cronExpression is defined in config file
|
||||||
if conf.CronExpression != "" {
|
if conf.CronExpression != "" {
|
||||||
bkConfig.cronExpression = conf.CronExpression
|
bkConfig.cronExpression = conf.CronExpression
|
||||||
}
|
}
|
||||||
|
if len(conf.Databases) == 0 {
|
||||||
|
utils.Fatal("No databases found")
|
||||||
|
}
|
||||||
// Check if cronExpression is defined
|
// Check if cronExpression is defined
|
||||||
if bkConfig.cronExpression == "" {
|
if bkConfig.cronExpression == "" {
|
||||||
multiBackupTask(conf.Databases, bkConfig)
|
multiBackupTask(conf.Databases, bkConfig)
|
||||||
} else {
|
} else {
|
||||||
// Check if cronExpression is valid
|
// Check if cronExpression is valid
|
||||||
if utils.IsValidCronExpression(bkConfig.cronExpression) {
|
if utils.IsValidCronExpression(bkConfig.cronExpression) {
|
||||||
utils.Info("Running MultiBackup in Scheduled mode")
|
utils.Info("Running backup in Scheduled mode")
|
||||||
utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
|
utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
|
||||||
|
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
|
||||||
utils.Info("Storage type %s ", bkConfig.storage)
|
utils.Info("Storage type %s ", bkConfig.storage)
|
||||||
|
|
||||||
//Test backup
|
// Test backup
|
||||||
utils.Info("Testing backup configurations...")
|
utils.Info("Testing backup configurations...")
|
||||||
multiBackupTask(conf.Databases, bkConfig)
|
for _, db := range conf.Databases {
|
||||||
|
testDatabaseConnection(getDatabase(db))
|
||||||
|
}
|
||||||
utils.Info("Testing backup configurations...done")
|
utils.Info("Testing backup configurations...done")
|
||||||
utils.Info("Creating multi backup job...")
|
utils.Info("Creating backup job...")
|
||||||
// Create a new cron instance
|
// Create a new cron instance
|
||||||
c := cron.New()
|
c := cron.New()
|
||||||
|
|
||||||
_, err := c.AddFunc(bkConfig.cronExpression, func() {
|
_, err := c.AddFunc(bkConfig.cronExpression, func() {
|
||||||
// Create a channel
|
|
||||||
multiBackupTask(conf.Databases, bkConfig)
|
multiBackupTask(conf.Databases, bkConfig)
|
||||||
|
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
|
||||||
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Start the cron scheduler
|
// Start the cron scheduler
|
||||||
c.Start()
|
c.Start()
|
||||||
utils.Info("Creating multi backup job...done")
|
utils.Info("Creating backup job...done")
|
||||||
utils.Info("Backup job started")
|
utils.Info("Backup job started")
|
||||||
defer c.Stop()
|
defer c.Stop()
|
||||||
select {}
|
select {}
|
||||||
@@ -151,7 +186,6 @@ func startMultiBackup(bkConfig *BackupConfig, configFile string) {
|
|||||||
|
|
||||||
// BackupDatabase backup database
|
// BackupDatabase backup database
|
||||||
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
|
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
|
||||||
|
|
||||||
storagePath = os.Getenv("STORAGE_PATH")
|
storagePath = os.Getenv("STORAGE_PATH")
|
||||||
|
|
||||||
utils.Info("Starting database backup...")
|
utils.Info("Starting database backup...")
|
||||||
@@ -175,21 +209,26 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
|
|||||||
)
|
)
|
||||||
output, err := cmd.Output()
|
output, err := cmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
utils.Fatal(err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
// save output
|
// save output
|
||||||
file, err := os.Create(filepath.Join(tmpPath, backupFileName))
|
file, err := os.Create(filepath.Join(tmpPath, backupFileName))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
utils.Fatal(err.Error())
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer func(file *os.File) {
|
||||||
|
err := file.Close()
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal(err.Error())
|
||||||
|
}
|
||||||
|
}(file)
|
||||||
|
|
||||||
_, err = file.Write(output)
|
_, err = file.Write(output)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
utils.Fatal(err.Error())
|
||||||
}
|
}
|
||||||
utils.Done("Database has been backed up")
|
utils.Info("Database has been backed up")
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
// Execute mysqldump
|
// Execute mysqldump
|
||||||
@@ -201,9 +240,9 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
|
|||||||
gzipCmd := exec.Command("gzip")
|
gzipCmd := exec.Command("gzip")
|
||||||
gzipCmd.Stdin = stdout
|
gzipCmd.Stdin = stdout
|
||||||
gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
|
gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
|
||||||
gzipCmd.Start()
|
err = gzipCmd.Start()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
return
|
||||||
}
|
}
|
||||||
if err := cmd.Run(); err != nil {
|
if err := cmd.Run(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
@@ -211,160 +250,82 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
|
|||||||
if err := gzipCmd.Wait(); err != nil {
|
if err := gzipCmd.Wait(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
utils.Done("Database has been backed up")
|
utils.Info("Database has been backed up")
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
func localBackup(db *dbConfig, config *BackupConfig) {
|
func localBackup(db *dbConfig, config *BackupConfig) {
|
||||||
utils.Info("Backup database to local storage")
|
utils.Info("Backup database to local storage")
|
||||||
|
startTime = time.Now().Format(utils.TimeFormat())
|
||||||
BackupDatabase(db, config.backupFileName, disableCompression)
|
BackupDatabase(db, config.backupFileName, disableCompression)
|
||||||
finalFileName := config.backupFileName
|
finalFileName := config.backupFileName
|
||||||
if config.encryption {
|
if config.encryption {
|
||||||
encryptBackup(config)
|
encryptBackup(config)
|
||||||
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
|
||||||
}
|
}
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
utils.Info("Backup name is %s", finalFileName)
|
|
||||||
moveToBackup(finalFileName, storagePath)
|
|
||||||
//Send notification
|
|
||||||
utils.NotifySuccess(finalFileName)
|
|
||||||
//Delete old backup
|
|
||||||
if config.prune {
|
|
||||||
deleteOldBackup(config.backupRetention)
|
|
||||||
}
|
|
||||||
//Delete temp
|
|
||||||
deleteTemp()
|
|
||||||
utils.Info("Backup completed successfully")
|
|
||||||
}
|
|
||||||
|
|
||||||
func s3Backup(db *dbConfig, config *BackupConfig) {
|
|
||||||
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
|
|
||||||
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
|
|
||||||
utils.Info("Backup database to s3 storage")
|
|
||||||
//Backup database
|
|
||||||
BackupDatabase(db, config.backupFileName, disableCompression)
|
|
||||||
finalFileName := config.backupFileName
|
|
||||||
if config.encryption {
|
|
||||||
encryptBackup(config)
|
|
||||||
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
|
||||||
}
|
|
||||||
utils.Info("Uploading backup archive to remote storage S3 ... ")
|
|
||||||
|
|
||||||
utils.Info("Backup name is %s", finalFileName)
|
|
||||||
err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error uploading backup archive to S3: %s ", err)
|
utils.Error("Error: %s", err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
//Delete backup file from tmp folder
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
|
localStorage := local.NewStorage(local.Config{
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
RemotePath: storagePath,
|
||||||
|
})
|
||||||
|
err = localStorage.Copy(finalFileName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println("Error deleting file: ", err)
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
|
||||||
|
// Send notification
|
||||||
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: backupSize,
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(storagePath, finalFileName),
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: time.Now().Format(utils.TimeFormat()),
|
||||||
|
})
|
||||||
// Delete old backup
|
// Delete old backup
|
||||||
if config.prune {
|
if config.prune {
|
||||||
err := DeleteOldBackup(bucket, s3Path, config.backupRetention)
|
err = localStorage.Prune(config.backupRetention)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error deleting old backup from S3: %s ", err)
|
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
utils.Done("Uploading backup archive to remote storage S3 ... done ")
|
// Delete temp
|
||||||
//Send notification
|
|
||||||
utils.NotifySuccess(finalFileName)
|
|
||||||
//Delete temp
|
|
||||||
deleteTemp()
|
deleteTemp()
|
||||||
utils.Info("Backup completed successfully")
|
utils.Info("Backup completed successfully")
|
||||||
|
|
||||||
}
|
|
||||||
func sshBackup(db *dbConfig, config *BackupConfig) {
|
|
||||||
utils.Info("Backup database to Remote server")
|
|
||||||
//Backup database
|
|
||||||
BackupDatabase(db, config.backupFileName, disableCompression)
|
|
||||||
finalFileName := config.backupFileName
|
|
||||||
if config.encryption {
|
|
||||||
encryptBackup(config)
|
|
||||||
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
|
||||||
}
|
|
||||||
utils.Info("Uploading backup archive to remote storage ... ")
|
|
||||||
utils.Info("Backup name is %s", finalFileName)
|
|
||||||
err := CopyToRemote(finalFileName, config.remotePath)
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatal("Error uploading file to the remote server: %s ", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
//Delete backup file from tmp folder
|
|
||||||
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
|
||||||
if err != nil {
|
|
||||||
utils.Error("Error deleting file: %v", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
if config.prune {
|
|
||||||
//TODO: Delete old backup from remote server
|
|
||||||
utils.Info("Deleting old backup from a remote server is not implemented yet")
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
utils.Done("Uploading backup archive to remote storage ... done ")
|
|
||||||
//Send notification
|
|
||||||
utils.NotifySuccess(finalFileName)
|
|
||||||
//Delete temp
|
|
||||||
deleteTemp()
|
|
||||||
utils.Info("Backup completed successfully")
|
|
||||||
|
|
||||||
}
|
|
||||||
func ftpBackup(db *dbConfig, config *BackupConfig) {
|
|
||||||
utils.Info("Backup database to the remote FTP server")
|
|
||||||
//Backup database
|
|
||||||
BackupDatabase(db, config.backupFileName, disableCompression)
|
|
||||||
finalFileName := config.backupFileName
|
|
||||||
if config.encryption {
|
|
||||||
encryptBackup(config)
|
|
||||||
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
|
||||||
}
|
|
||||||
utils.Info("Uploading backup archive to the remote FTP server ... ")
|
|
||||||
utils.Info("Backup name is %s", finalFileName)
|
|
||||||
err := CopyToFTP(finalFileName, config.remotePath)
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatal("Error uploading file to the remote FTP server: %s ", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
//Delete backup file from tmp folder
|
|
||||||
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
|
||||||
if err != nil {
|
|
||||||
utils.Error("Error deleting file: %v", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
if config.prune {
|
|
||||||
//TODO: Delete old backup from remote server
|
|
||||||
utils.Info("Deleting old backup from a remote server is not implemented yet")
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
utils.Done("Uploading backup archive to the remote FTP server ... done ")
|
|
||||||
//Send notification
|
|
||||||
utils.NotifySuccess(finalFileName)
|
|
||||||
//Delete temp
|
|
||||||
deleteTemp()
|
|
||||||
utils.Info("Backup completed successfully")
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func encryptBackup(config *BackupConfig) {
|
func encryptBackup(config *BackupConfig) {
|
||||||
|
backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
|
||||||
|
outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error reading backup file: %s ", err)
|
||||||
|
}
|
||||||
if config.usingKey {
|
if config.usingKey {
|
||||||
err := encryptWithGPGPublicKey(filepath.Join(tmpPath, config.backupFileName), config.publicKey)
|
utils.Info("Encrypting backup using public key...")
|
||||||
|
pubKey, err := os.ReadFile(config.publicKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("error during encrypting backup %v", err)
|
utils.Fatal("Error reading public key: %s ", err)
|
||||||
}
|
}
|
||||||
|
err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error encrypting backup file: %v ", err)
|
||||||
|
}
|
||||||
|
utils.Info("Encrypting backup using public key...done")
|
||||||
|
|
||||||
} else if config.passphrase != "" {
|
} else if config.passphrase != "" {
|
||||||
err := encryptWithGPG(filepath.Join(tmpPath, config.backupFileName), config.passphrase)
|
utils.Info("Encrypting backup using passphrase...")
|
||||||
|
err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("error during encrypting backup %v", err)
|
utils.Fatal("error during encrypting backup %v", err)
|
||||||
}
|
}
|
||||||
|
utils.Info("Encrypting backup using passphrase...done")
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1,9 +1,27 @@
-// Package pkg /
-/*****
-@author Jonas Kaninda
-@license MIT License <https://opensource.org/licenses/MIT>
-@Copyright © 2024 Jonas Kaninda
-**/
+/*
+MIT License
+
+Copyright (c) 2023 Jonas Kaninda
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
 package pkg
 
 import (
@@ -65,6 +83,11 @@ type FTPConfig struct {
 	port       string
 	remotePath string
 }
+type AzureConfig struct {
+	accountName   string
+	accountKey    string
+	containerName string
+}
 
 // SSHConfig holds the SSH connection details
 type SSHConfig struct {
@@ -80,12 +103,13 @@ type AWSConfig struct {
 	accessKey      string
 	secretKey      string
 	region         string
+	remotePath     string
 	disableSsl     bool
 	forcePathStyle bool
 }
 
 func initDbConfig(cmd *cobra.Command) *dbConfig {
-	//Set env
+	// Set env
 	utils.GetEnv(cmd, "dbname", "DB_NAME")
 	dConf := dbConfig{}
 	dConf.dbHost = os.Getenv("DB_HOST")
@@ -129,8 +153,8 @@ func loadSSHConfig() (*SSHConfig, error) {
 		identifyFile: os.Getenv("SSH_IDENTIFY_FILE"),
 	}, nil
 }
-func initFtpConfig() *FTPConfig {
-	//Initialize data configs
+func loadFtpConfig() *FTPConfig {
+	// Initialize data configs
 	fConfig := FTPConfig{}
 	fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
 	fConfig.user = os.Getenv("FTP_USER")
@@ -144,21 +168,38 @@ func initFtpConfig() *FTPConfig {
 	}
 	return &fConfig
 }
+func loadAzureConfig() *AzureConfig {
+	// Initialize data configs
+	aConfig := AzureConfig{}
+	aConfig.containerName = os.Getenv("AZURE_STORAGE_CONTAINER_NAME")
+	aConfig.accountName = os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
+	aConfig.accountKey = os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")
+
+	err := utils.CheckEnvVars(azureVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for Azure Blob storage are set")
+		utils.Fatal("Error missing environment variables: %s", err)
+	}
+	return &aConfig
+}
+
 func initAWSConfig() *AWSConfig {
-	//Initialize AWS configs
+	// Initialize AWS configs
 	aConfig := AWSConfig{}
 	aConfig.endpoint = utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
 	aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
 	aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
 	aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
+	aConfig.remotePath = utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
+
 	aConfig.region = os.Getenv("AWS_REGION")
 	disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
 	if err != nil {
-		utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
+		disableSsl = false
 	}
 	forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
 	if err != nil {
-		utils.Fatal("Unable to parse AWS_FORCE_PATH_STYLE env var: %s", err)
+		forcePathStyle = false
 	}
 	aConfig.disableSsl = disableSsl
 	aConfig.forcePathStyle = forcePathStyle
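One behavioural note on the hunk above: parsing failures for AWS_DISABLE_SSL and AWS_FORCE_PATH_STYLE no longer abort the run; the values simply fall back to false. A standalone sketch of that fallback pattern (illustrative only, not repository code):

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// Unset or malformed values no longer abort; they default to false.
	disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
	if err != nil {
		disableSsl = false
	}
	forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
	if err != nil {
		forcePathStyle = false
	}
	fmt.Println("disableSsl:", disableSsl, "forcePathStyle:", forcePathStyle)
}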
@@ -172,13 +213,15 @@ func initAWSConfig() *AWSConfig {
 func initBackupConfig(cmd *cobra.Command) *BackupConfig {
 	utils.SetEnv("STORAGE_PATH", storagePath)
 	utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
-	utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
 	utils.GetEnv(cmd, "path", "REMOTE_PATH")
-	//Get flag value and set env
+	// Get flag value and set env
 	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
-	backupRetention, _ := cmd.Flags().GetInt("keep-last")
-	prune, _ := cmd.Flags().GetBool("prune")
+	prune := false
+	backupRetention := utils.GetIntEnv("BACKUP_RETENTION_DAYS")
+	if backupRetention > 0 {
+		prune = true
+	}
 	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
 	_, _ = cmd.Flags().GetString("mode")
 	passphrase := os.Getenv("GPG_PASSPHRASE")
@@ -193,7 +236,7 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
 		encryption = true
 		usingKey = false
 	}
-	//Initialize backup configs
+	// Initialize backup configs
 	config := BackupConfig{}
 	config.backupRetention = backupRetention
 	config.disableCompression = disableCompression
@@ -223,7 +266,7 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
 	utils.SetEnv("STORAGE_PATH", storagePath)
 	utils.GetEnv(cmd, "path", "REMOTE_PATH")
 
-	//Get flag value and set env
+	// Get flag value and set env
 	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
 	remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
@@ -237,7 +280,7 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
 		usingKey = false
 	}
 
-	//Initialize restore configs
+	// Initialize restore configs
 	rConfig := RestoreConfig{}
 	rConfig.s3Path = s3Path
 	rConfig.remotePath = remotePath
@@ -253,7 +296,7 @@ func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
 func initTargetDbConfig() *targetDbConfig {
 	tdbConfig := targetDbConfig{}
 	tdbConfig.targetDbHost = os.Getenv("TARGET_DB_HOST")
-	tdbConfig.targetDbPort = os.Getenv("TARGET_DB_PORT")
+	tdbConfig.targetDbPort = utils.EnvWithDefault("TARGET_DB_PORT", "3306")
 	tdbConfig.targetDbName = os.Getenv("TARGET_DB_NAME")
 	tdbConfig.targetDbUserName = os.Getenv("TARGET_DB_USERNAME")
 	tdbConfig.targetDbPassword = os.Getenv("TARGET_DB_PASSWORD")
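The backup-config hunk above moves retention from the --keep-last flag to the BACKUP_RETENTION_DAYS environment variable, and pruning is switched on whenever that value is positive. A standalone sketch of the same logic (utils.GetIntEnv is the repository helper; here the lookup is inlined for illustration):

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// BACKUP_RETENTION_DAYS > 0 now implies pruning of older backups.
	retention, err := strconv.Atoi(os.Getenv("BACKUP_RETENTION_DAYS"))
	if err != nil {
		retention = 0
	}
	prune := retention > 0
	fmt.Printf("retention=%d days, prune=%v\n", retention, prune)
}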
|||||||
pkg/encrypt.go (182 lines, file removed)
@@ -1,182 +0,0 @@
|
|||||||
// Package pkg /
|
|
||||||
/*****
|
|
||||||
@author Jonas Kaninda
|
|
||||||
@license MIT License <https://opensource.org/licenses/MIT>
|
|
||||||
@Copyright © 2024 Jonas Kaninda
|
|
||||||
**/
|
|
||||||
package pkg
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"github.com/ProtonMail/gopenpgp/v2/crypto"
|
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// decryptWithGPG decrypts backup file using a passphrase
|
|
||||||
func decryptWithGPG(inputFile string, passphrase string) error {
|
|
||||||
utils.Info("Decrypting backup using passphrase...")
|
|
||||||
// Read the encrypted file
|
|
||||||
encFileContent, err := os.ReadFile(inputFile)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error reading encrypted file: %s", err))
|
|
||||||
}
|
|
||||||
// Define the passphrase used to encrypt the file
|
|
||||||
_passphrase := []byte(passphrase)
|
|
||||||
// Create a PGP message object from the encrypted file content
|
|
||||||
encryptedMessage := crypto.NewPGPMessage(encFileContent)
|
|
||||||
// Decrypt the message using the passphrase
|
|
||||||
plainMessage, err := crypto.DecryptMessageWithPassword(encryptedMessage, _passphrase)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error decrypting file: %s", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save the decrypted file (restore it)
|
|
||||||
err = os.WriteFile(RemoveLastExtension(inputFile), plainMessage.GetBinary(), 0644)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error saving decrypted file: %s", err))
|
|
||||||
}
|
|
||||||
utils.Info("Decrypting backup using passphrase...done")
|
|
||||||
utils.Info("Backup file decrypted successful!")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// encryptWithGPG encrypts backup using a passphrase
|
|
||||||
func encryptWithGPG(inputFile string, passphrase string) error {
|
|
||||||
utils.Info("Encrypting backup using passphrase...")
|
|
||||||
// Read the file to be encrypted
|
|
||||||
plainFileContent, err := os.ReadFile(inputFile)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error reading file: %s", err))
|
|
||||||
}
|
|
||||||
// Define the passphrase to encrypt the file
|
|
||||||
_passphrase := []byte(passphrase)
|
|
||||||
|
|
||||||
// Create a message object from the file content
|
|
||||||
message := crypto.NewPlainMessage(plainFileContent)
|
|
||||||
// Encrypt the message using the passphrase
|
|
||||||
encryptedMessage, err := crypto.EncryptMessageWithPassword(message, _passphrase)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error encrypting backup file: %s", err))
|
|
||||||
}
|
|
||||||
// Save the encrypted .tar file
|
|
||||||
err = os.WriteFile(fmt.Sprintf("%s.%s", inputFile, gpgExtension), encryptedMessage.GetBinary(), 0644)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error saving encrypted filee: %s", err))
|
|
||||||
}
|
|
||||||
utils.Info("Encrypting backup using passphrase...done")
|
|
||||||
utils.Info("Backup file encrypted successful!")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// encryptWithGPGPublicKey encrypts backup using a public key
|
|
||||||
func encryptWithGPGPublicKey(inputFile string, publicKey string) error {
|
|
||||||
utils.Info("Encrypting backup using public key...")
|
|
||||||
// Read the public key
|
|
||||||
pubKeyBytes, err := os.ReadFile(publicKey)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error reading public key: %s", err))
|
|
||||||
}
|
|
||||||
// Create a new keyring with the public key
|
|
||||||
publicKeyObj, err := crypto.NewKeyFromArmored(string(pubKeyBytes))
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error parsing public key: %s", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
keyRing, err := crypto.NewKeyRing(publicKeyObj)
|
|
||||||
if err != nil {
|
|
||||||
|
|
||||||
return errors.New(fmt.Sprintf("Error creating key ring: %v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the file to encryptWithGPGPublicKey
|
|
||||||
fileContent, err := os.ReadFile(inputFile)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error reading file: %v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// encryptWithGPG the file
|
|
||||||
message := crypto.NewPlainMessage(fileContent)
|
|
||||||
encMessage, err := keyRing.Encrypt(message, nil)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error encrypting file: %v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save the encrypted file
|
|
||||||
err = os.WriteFile(fmt.Sprintf("%s.%s", inputFile, gpgExtension), encMessage.GetBinary(), 0644)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error saving encrypted file: %v", err))
|
|
||||||
}
|
|
||||||
utils.Info("Encrypting backup using public key...done")
|
|
||||||
utils.Info("Backup file encrypted successful!")
|
|
||||||
return nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// decryptWithGPGPrivateKey decrypts backup file using a private key and passphrase.
|
|
||||||
// privateKey GPG private key
|
|
||||||
// passphrase GPG passphrase
|
|
||||||
func decryptWithGPGPrivateKey(inputFile, privateKey, passphrase string) error {
|
|
||||||
utils.Info("Encrypting backup using private key...")
|
|
||||||
|
|
||||||
// Read the private key
|
|
||||||
priKeyBytes, err := os.ReadFile(privateKey)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error reading private key: %s", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the password for the private key (if it’s password-protected)
|
|
||||||
password := []byte(passphrase)
|
|
||||||
|
|
||||||
// Create a key object from the armored private key
|
|
||||||
privateKeyObj, err := crypto.NewKeyFromArmored(string(priKeyBytes))
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error parsing private key: %s", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unlock the private key with the password
|
|
||||||
if passphrase != "" {
|
|
||||||
// Unlock the private key with the password
|
|
||||||
_, err = privateKeyObj.Unlock(password)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error unlocking private key: %s", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new keyring with the private key
|
|
||||||
keyRing, err := crypto.NewKeyRing(privateKeyObj)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error creating key ring: %v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the encrypted file
|
|
||||||
encFileContent, err := os.ReadFile(inputFile)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error reading encrypted file: %s", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// decryptWithGPG the file
|
|
||||||
encryptedMessage := crypto.NewPGPMessage(encFileContent)
|
|
||||||
message, err := keyRing.Decrypt(encryptedMessage, nil, 0)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error decrypting file: %s", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save the decrypted file
|
|
||||||
err = os.WriteFile(RemoveLastExtension(inputFile), message.GetBinary(), 0644)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New(fmt.Sprintf("Error saving decrypted file: %s", err))
|
|
||||||
}
|
|
||||||
utils.Info("Encrypting backup using public key...done")
|
|
||||||
fmt.Println("File successfully decrypted!")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
func RemoveLastExtension(filename string) string {
|
|
||||||
if idx := strings.LastIndex(filename, "."); idx != -1 {
|
|
||||||
return filename[:idx]
|
|
||||||
}
|
|
||||||
return filename
|
|
||||||
}
|
|
||||||
pkg/ftp.go (81 lines, file removed)
@@ -1,81 +0,0 @@
|
|||||||
package pkg
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/jlaffaye/ftp"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// initFtpClient initializes and authenticates an FTP client
|
|
||||||
func initFtpClient() (*ftp.ServerConn, error) {
|
|
||||||
ftpConfig := initFtpConfig()
|
|
||||||
ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to connect to FTP: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to log in to FTP: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ftpClient, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyToFTP uploads a file to the remote FTP server
|
|
||||||
func CopyToFTP(fileName, remotePath string) (err error) {
|
|
||||||
ftpConfig := initFtpConfig()
|
|
||||||
ftpClient, err := initFtpClient()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer ftpClient.Quit()
|
|
||||||
|
|
||||||
filePath := filepath.Join(tmpPath, fileName)
|
|
||||||
file, err := os.Open(filePath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to open file %s: %w", fileName, err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
|
|
||||||
err = ftpClient.Stor(remoteFilePath, file)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to upload file %s: %w", fileName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyFromFTP downloads a file from the remote FTP server
|
|
||||||
func CopyFromFTP(fileName, remotePath string) (err error) {
|
|
||||||
ftpClient, err := initFtpClient()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer ftpClient.Quit()
|
|
||||||
|
|
||||||
remoteFilePath := filepath.Join(remotePath, fileName)
|
|
||||||
r, err := ftpClient.Retr(remoteFilePath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
|
|
||||||
}
|
|
||||||
defer r.Close()
|
|
||||||
|
|
||||||
localFilePath := filepath.Join(tmpPath, fileName)
|
|
||||||
outFile, err := os.Create(localFilePath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create local file %s: %w", fileName, err)
|
|
||||||
}
|
|
||||||
defer outFile.Close()
|
|
||||||
|
|
||||||
_, err = io.Copy(outFile, r)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
pkg/helper.go (120 lines changed)
@@ -1,9 +1,27 @@
|
|||||||
// Package pkg /
|
/*
|
||||||
/*****
|
MIT License
|
||||||
@author Jonas Kaninda
|
|
||||||
@license MIT License <https://opensource.org/licenses/MIT>
|
Copyright (c) 2023 Jonas Kaninda
|
||||||
@Copyright © 2024 Jonas Kaninda
|
|
||||||
**/
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -14,75 +32,16 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"time"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
func copyToTmp(sourcePath string, backupFileName string) {
|
func intro() {
|
||||||
//Copy backup from storage to /tmp
|
fmt.Println("Starting MySQL Backup...")
|
||||||
err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName))
|
fmt.Printf("Version: %s\n", utils.Version)
|
||||||
if err != nil {
|
fmt.Println("Copyright (c) 2024 Jonas Kaninda")
|
||||||
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
func moveToBackup(backupFileName string, destinationPath string) {
|
|
||||||
//Copy backup from tmp folder to storage destination
|
|
||||||
err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName))
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatal(fmt.Sprintf("Error copying file %s %s", backupFileName, err))
|
|
||||||
|
|
||||||
}
|
// copyToTmp copy file to temporary directory
|
||||||
//Delete backup file from tmp folder
|
|
||||||
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName))
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("Error deleting file:", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
utils.Done("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
|
|
||||||
}
|
|
||||||
func deleteOldBackup(retentionDays int) {
|
|
||||||
utils.Info("Deleting old backups...")
|
|
||||||
storagePath = os.Getenv("STORAGE_PATH")
|
|
||||||
// Define the directory path
|
|
||||||
backupDir := storagePath + "/"
|
|
||||||
// Get current time
|
|
||||||
currentTime := time.Now()
|
|
||||||
// Delete file
|
|
||||||
deleteFile := func(filePath string) error {
|
|
||||||
err := os.Remove(filePath)
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatal(fmt.Sprintf("Error: %s", err))
|
|
||||||
} else {
|
|
||||||
utils.Done("File %s has been deleted successfully", filePath)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Walk through the directory and delete files modified more than specified days ago
|
|
||||||
err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Check if it's a regular file and if it was modified more than specified days ago
|
|
||||||
if fileInfo.Mode().IsRegular() {
|
|
||||||
timeDiff := currentTime.Sub(fileInfo.ModTime())
|
|
||||||
if timeDiff.Hours() > 24*float64(retentionDays) {
|
|
||||||
err := deleteFile(filePath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
utils.Fatal(fmt.Sprintf("Error: %s", err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
utils.Done("Deleting old backups...done")
|
|
||||||
|
|
||||||
}
|
|
||||||
func deleteTemp() {
|
func deleteTemp() {
|
||||||
utils.Info("Deleting %s ...", tmpPath)
|
utils.Info("Deleting %s ...", tmpPath)
|
||||||
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
|
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
|
||||||
@@ -126,10 +85,8 @@ func testDatabaseConnection(db *dbConfig) {
|
|||||||
utils.Info("Successfully connected to %s database", db.dbName)
|
utils.Info("Successfully connected to %s database", db.dbName)
|
||||||
|
|
||||||
}
|
}
|
||||||
func intro() {
|
|
||||||
utils.Info("Starting MySQL Backup...")
|
// checkPubKeyFile checks gpg public key
|
||||||
utils.Info("Copyright (c) 2024 Jonas Kaninda ")
|
|
||||||
}
|
|
||||||
func checkPubKeyFile(pubKey string) (string, error) {
|
func checkPubKeyFile(pubKey string) (string, error) {
|
||||||
// Define possible key file names
|
// Define possible key file names
|
||||||
keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey}
|
keyFiles := []string{filepath.Join(gpgHome, "public_key.asc"), filepath.Join(gpgHome, "public_key.gpg"), pubKey}
|
||||||
@@ -151,6 +108,8 @@ func checkPubKeyFile(pubKey string) (string, error) {
|
|||||||
// Return an error if neither file exists
|
// Return an error if neither file exists
|
||||||
return "", fmt.Errorf("no public key file found")
|
return "", fmt.Errorf("no public key file found")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// checkPrKeyFile checks private key
|
||||||
func checkPrKeyFile(prKey string) (string, error) {
|
func checkPrKeyFile(prKey string) (string, error) {
|
||||||
// Define possible key file names
|
// Define possible key file names
|
||||||
keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey}
|
keyFiles := []string{filepath.Join(gpgHome, "private_key.asc"), filepath.Join(gpgHome, "private_key.gpg"), prKey}
|
||||||
@@ -172,8 +131,9 @@ func checkPrKeyFile(prKey string) (string, error) {
|
|||||||
// Return an error if neither file exists
|
// Return an error if neither file exists
|
||||||
return "", fmt.Errorf("no public key file found")
|
return "", fmt.Errorf("no public key file found")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// readConf reads config file and returns Config
|
||||||
func readConf(configFile string) (*Config, error) {
|
func readConf(configFile string) (*Config, error) {
|
||||||
//configFile := filepath.Join("./", filename)
|
|
||||||
if utils.FileExists(configFile) {
|
if utils.FileExists(configFile) {
|
||||||
buf, err := os.ReadFile(configFile)
|
buf, err := os.ReadFile(configFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -190,6 +150,8 @@ func readConf(configFile string) (*Config, error) {
|
|||||||
}
|
}
|
||||||
return nil, fmt.Errorf("config file %q not found", configFile)
|
return nil, fmt.Errorf("config file %q not found", configFile)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// checkConfigFile checks config files and returns one config file
|
||||||
func checkConfigFile(filePath string) (string, error) {
|
func checkConfigFile(filePath string) (string, error) {
|
||||||
// Define possible config file names
|
// Define possible config file names
|
||||||
configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}
|
configFiles := []string{filepath.Join(workingDir, "config.yaml"), filepath.Join(workingDir, "config.yml"), filePath}
|
||||||
@@ -211,3 +173,9 @@ func checkConfigFile(filePath string) (string, error) {
|
|||||||
// Return an error if neither file exists
|
// Return an error if neither file exists
|
||||||
return "", fmt.Errorf("no config file found")
|
return "", fmt.Errorf("no config file found")
|
||||||
}
|
}
|
||||||
|
func RemoveLastExtension(filename string) string {
|
||||||
|
if idx := strings.LastIndex(filename, "."); idx != -1 {
|
||||||
|
return filename[:idx]
|
||||||
|
}
|
||||||
|
return filename
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,9 +1,27 @@
|
|||||||
// Package pkg /
|
/*
|
||||||
/*****
|
MIT License
|
||||||
@author Jonas Kaninda
|
|
||||||
@license MIT License <https://opensource.org/licenses/MIT>
|
Copyright (c) 2023 Jonas Kaninda
|
||||||
@Copyright © 2024 Jonas Kaninda
|
|
||||||
**/
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -16,11 +34,11 @@ import (
|
|||||||
func StartMigration(cmd *cobra.Command) {
|
func StartMigration(cmd *cobra.Command) {
|
||||||
intro()
|
intro()
|
||||||
utils.Info("Starting database migration...")
|
utils.Info("Starting database migration...")
|
||||||
//Get DB config
|
// Get DB config
|
||||||
dbConf = initDbConfig(cmd)
|
dbConf = initDbConfig(cmd)
|
||||||
targetDbConf = initTargetDbConfig()
|
targetDbConf = initTargetDbConfig()
|
||||||
|
|
||||||
//Defining the target database variables
|
// Defining the target database variables
|
||||||
newDbConfig := dbConfig{}
|
newDbConfig := dbConfig{}
|
||||||
newDbConfig.dbHost = targetDbConf.targetDbHost
|
newDbConfig.dbHost = targetDbConf.targetDbHost
|
||||||
newDbConfig.dbPort = targetDbConf.targetDbPort
|
newDbConfig.dbPort = targetDbConf.targetDbPort
|
||||||
@@ -28,13 +46,13 @@ func StartMigration(cmd *cobra.Command) {
|
|||||||
newDbConfig.dbUserName = targetDbConf.targetDbUserName
|
newDbConfig.dbUserName = targetDbConf.targetDbUserName
|
||||||
newDbConfig.dbPassword = targetDbConf.targetDbPassword
|
newDbConfig.dbPassword = targetDbConf.targetDbPassword
|
||||||
|
|
||||||
//Generate file name
|
// Generate file name
|
||||||
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
|
backupFileName := fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
|
||||||
conf := &RestoreConfig{}
|
conf := &RestoreConfig{}
|
||||||
conf.file = backupFileName
|
conf.file = backupFileName
|
||||||
//Backup source Database
|
// Backup source Database
|
||||||
BackupDatabase(dbConf, backupFileName, true)
|
BackupDatabase(dbConf, backupFileName, true)
|
||||||
//Restore source database into target database
|
// Restore source database into target database
|
||||||
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
|
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
|
||||||
RestoreDatabase(&newDbConfig, conf)
|
RestoreDatabase(&newDbConfig, conf)
|
||||||
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
|
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
|
||||||
|
|||||||
pkg/remote.go (new file, 217 lines)
@@ -0,0 +1,217 @@
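The new pkg/remote.go listed below builds its SSH and FTP transfers on github.com/jkaninda/go-storage. Before the full listing, a minimal sketch of the SSH upload-and-prune pattern as it appears in this file — the Config fields and the Copy/Prune methods are taken from the calls below, while host, credentials, and paths are placeholders and field types are inferred from that usage:

package main

import (
	"fmt"

	"github.com/jkaninda/go-storage/pkg/ssh"
)

func main() {
	sshStorage, err := ssh.NewStorage(ssh.Config{
		Host:       "backup.example.com",
		Port:       "22",
		User:       "backup",
		Password:   "secret",
		RemotePath: "/backups/mysql",
		LocalPath:  "/tmp/backup",
	})
	if err != nil {
		fmt.Println("storage error:", err)
		return
	}
	// Upload a local backup file, then prune copies older than 7 days.
	if err := sshStorage.Copy("mydb_20240101_120000.sql.gz"); err != nil {
		fmt.Println("copy error:", err)
	}
	if err := sshStorage.Prune(7); err != nil {
		fmt.Println("prune error:", err)
	}
}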
|
|||||||
|
/*
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2023 Jonas Kaninda
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/jkaninda/go-storage/pkg/ftp"
|
||||||
|
"github.com/jkaninda/go-storage/pkg/ssh"
|
||||||
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
|
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func sshBackup(db *dbConfig, config *BackupConfig) {
|
||||||
|
utils.Info("Backup database to Remote server")
|
||||||
|
startTime = time.Now().Format(utils.TimeFormat())
|
||||||
|
// Backup database
|
||||||
|
BackupDatabase(db, config.backupFileName, disableCompression)
|
||||||
|
finalFileName := config.backupFileName
|
||||||
|
if config.encryption {
|
||||||
|
encryptBackup(config)
|
||||||
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
||||||
|
}
|
||||||
|
utils.Info("Uploading backup archive to remote storage ... ")
|
||||||
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
|
sshConfig, err := loadSSHConfig()
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error loading ssh config: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sshStorage, err := ssh.NewStorage(ssh.Config{
|
||||||
|
Host: sshConfig.hostName,
|
||||||
|
Port: sshConfig.port,
|
||||||
|
User: sshConfig.user,
|
||||||
|
Password: sshConfig.password,
|
||||||
|
RemotePath: config.remotePath,
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error creating SSH storage: %s", err)
|
||||||
|
}
|
||||||
|
err = sshStorage.Copy(finalFileName)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
|
}
|
||||||
|
// Get backup info
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error: %s", err)
|
||||||
|
}
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
|
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
|
||||||
|
|
||||||
|
// Delete backup file from tmp folder
|
||||||
|
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error deleting file: %v", err)
|
||||||
|
|
||||||
|
}
|
||||||
|
if config.prune {
|
||||||
|
err := sshStorage.Prune(config.backupRetention)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
utils.Info("Uploading backup archive to remote storage ... done ")
|
||||||
|
// Send notification
|
||||||
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: backupSize,
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(config.remotePath, finalFileName),
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: time.Now().Format(utils.TimeFormat()),
|
||||||
|
})
|
||||||
|
// Delete temp
|
||||||
|
deleteTemp()
|
||||||
|
utils.Info("Backup completed successfully")
|
||||||
|
|
||||||
|
}
|
||||||
|
func ftpBackup(db *dbConfig, config *BackupConfig) {
|
||||||
|
utils.Info("Backup database to the remote FTP server")
|
||||||
|
startTime = time.Now().Format(utils.TimeFormat())
|
||||||
|
|
||||||
|
// Backup database
|
||||||
|
BackupDatabase(db, config.backupFileName, disableCompression)
|
||||||
|
finalFileName := config.backupFileName
|
||||||
|
if config.encryption {
|
||||||
|
encryptBackup(config)
|
||||||
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
||||||
|
}
|
||||||
|
utils.Info("Uploading backup archive to the remote FTP server ... ")
|
||||||
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
|
ftpConfig := loadFtpConfig()
|
||||||
|
ftpStorage, err := ftp.NewStorage(ftp.Config{
|
||||||
|
Host: ftpConfig.host,
|
||||||
|
Port: ftpConfig.port,
|
||||||
|
User: ftpConfig.user,
|
||||||
|
Password: ftpConfig.password,
|
||||||
|
RemotePath: config.remotePath,
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error creating SSH storage: %s", err)
|
||||||
|
}
|
||||||
|
err = ftpStorage.Copy(finalFileName)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
|
}
|
||||||
|
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
|
||||||
|
// Get backup info
|
||||||
|
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error: %s", err)
|
||||||
|
}
|
||||||
|
backupSize = fileInfo.Size()
|
||||||
|
// Delete backup file from tmp folder
|
||||||
|
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
|
||||||
|
if err != nil {
|
||||||
|
utils.Error("Error deleting file: %v", err)
|
||||||
|
|
||||||
|
}
|
||||||
|
if config.prune {
|
||||||
|
err := ftpStorage.Prune(config.backupRetention)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
utils.Info("Uploading backup archive to the remote FTP server ... done ")
|
||||||
|
|
||||||
|
// Send notification
|
||||||
|
utils.NotifySuccess(&utils.NotificationData{
|
||||||
|
File: finalFileName,
|
||||||
|
BackupSize: backupSize,
|
||||||
|
Database: db.dbName,
|
||||||
|
Storage: config.storage,
|
||||||
|
BackupLocation: filepath.Join(config.remotePath, finalFileName),
|
||||||
|
StartTime: startTime,
|
||||||
|
EndTime: time.Now().Format(utils.TimeFormat()),
|
||||||
|
})
|
||||||
|
// Delete temp
|
||||||
|
deleteTemp()
|
||||||
|
utils.Info("Backup completed successfully")
|
||||||
|
}
|
||||||
|
func remoteRestore(db *dbConfig, conf *RestoreConfig) {
|
||||||
|
utils.Info("Restore database from remote server")
|
||||||
|
sshConfig, err := loadSSHConfig()
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error loading ssh config: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sshStorage, err := ssh.NewStorage(ssh.Config{
|
||||||
|
Host: sshConfig.hostName,
|
||||||
|
Port: sshConfig.port,
|
||||||
|
User: sshConfig.user,
|
||||||
|
Password: sshConfig.password,
|
||||||
|
IdentifyFile: sshConfig.identifyFile,
|
||||||
|
RemotePath: conf.remotePath,
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error creating SSH storage: %s", err)
|
||||||
|
}
|
||||||
|
err = sshStorage.CopyFrom(conf.file)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
|
}
|
||||||
|
RestoreDatabase(db, conf)
|
||||||
|
}
|
||||||
|
func ftpRestore(db *dbConfig, conf *RestoreConfig) {
|
||||||
|
utils.Info("Restore database from FTP server")
|
||||||
|
ftpConfig := loadFtpConfig()
|
||||||
|
ftpStorage, err := ftp.NewStorage(ftp.Config{
|
||||||
|
Host: ftpConfig.host,
|
||||||
|
Port: ftpConfig.port,
|
||||||
|
User: ftpConfig.user,
|
||||||
|
Password: ftpConfig.password,
|
||||||
|
RemotePath: conf.remotePath,
|
||||||
|
LocalPath: tmpPath,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error creating SSH storage: %s", err)
|
||||||
|
}
|
||||||
|
err = ftpStorage.CopyFrom(conf.file)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatal("Error copying backup file: %s", err)
|
||||||
|
}
|
||||||
|
RestoreDatabase(db, conf)
|
||||||
|
}
|
||||||
pkg/restore.go (114 lines changed)
@@ -1,12 +1,32 @@
|
|||||||
// Package pkg /
|
/*
|
||||||
/*****
|
MIT License
|
||||||
@author Jonas Kaninda
|
|
||||||
@license MIT License <https://opensource.org/licenses/MIT>
|
Copyright (c) 2023 Jonas Kaninda
|
||||||
@Copyright © 2024 Jonas Kaninda
|
|
||||||
**/
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"github.com/jkaninda/encryptor"
|
||||||
|
"github.com/jkaninda/go-storage/pkg/local"
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"os"
|
"os"
|
||||||
@@ -21,45 +41,31 @@ func StartRestore(cmd *cobra.Command) {
 
 	switch restoreConf.storage {
 	case "local":
-		utils.Info("Restore database from local")
-		copyToTmp(storagePath, restoreConf.file)
-		RestoreDatabase(dbConf, restoreConf)
+		localRestore(dbConf, restoreConf)
 	case "s3", "S3":
-		restoreFromS3(dbConf, restoreConf)
+		s3Restore(dbConf, restoreConf)
 	case "ssh", "SSH", "remote":
-		restoreFromRemote(dbConf, restoreConf)
+		remoteRestore(dbConf, restoreConf)
 	case "ftp", "FTP":
-		restoreFromFTP(dbConf, restoreConf)
+		ftpRestore(dbConf, restoreConf)
+	case "azure":
+		azureRestore(dbConf, restoreConf)
 	default:
-		utils.Info("Restore database from local")
-		copyToTmp(storagePath, restoreConf.file)
-		RestoreDatabase(dbConf, restoreConf)
+		localRestore(dbConf, restoreConf)
 	}
 }
-
-func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
-	utils.Info("Restore database from s3")
-	err := DownloadFile(tmpPath, conf.file, conf.bucket, conf.s3Path)
-	if err != nil {
-		utils.Fatal("Error download file from s3 %s %v ", conf.file, err)
-	}
-	RestoreDatabase(db, conf)
-}
-func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
-	utils.Info("Restore database from remote server")
-	err := CopyFromRemote(conf.file, conf.remotePath)
-	if err != nil {
-		utils.Fatal("Error download file from remote server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
-	}
-	RestoreDatabase(db, conf)
-}
-func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
-	utils.Info("Restore database from FTP server")
-	err := CopyFromFTP(conf.file, conf.remotePath)
-	if err != nil {
-		utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(conf.remotePath, conf.file), err)
-	}
-	RestoreDatabase(db, conf)
-}
+func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
+	utils.Info("Restore database from local")
+	localStorage := local.NewStorage(local.Config{
+		RemotePath: storagePath,
+		LocalPath:  tmpPath,
+	})
+	err := localStorage.CopyFrom(restoreConf.file)
+	if err != nil {
+		utils.Fatal("Error copying backup file: %s", err)
+	}
+	RestoreDatabase(dbConf, restoreConf)
+}
 
 // RestoreDatabase restore database
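Local restores now also go through go-storage rather than a direct file copy. A sketch of that CopyFrom pattern as used in localRestore above (paths and the backup file name are placeholders):

package main

import (
	"fmt"

	"github.com/jkaninda/go-storage/pkg/local"
)

func main() {
	localStorage := local.NewStorage(local.Config{
		RemotePath: "/backup",     // directory where backups live
		LocalPath:  "/tmp/backup", // working directory used during restore
	})
	// Copy the backup into the working directory before restoring it.
	if err := localStorage.CopyFrom("mydb_20240101_120000.sql.gz"); err != nil {
		fmt.Println("copy error:", err)
	}
}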
@@ -68,25 +74,39 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
 		utils.Fatal("Error, file required")
 	}
 	extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
+	rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file))
+	outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file))
+	if err != nil {
+		utils.Fatal("Error reading backup file: %s ", err)
+	}
+
 	if extension == ".gpg" {
 		if conf.usingKey {
+			utils.Info("Decrypting backup using private key...")
 			utils.Warn("Backup decryption using a private key is not fully supported")
-			err := decryptWithGPGPrivateKey(filepath.Join(tmpPath, conf.file), conf.privateKey, conf.passphrase)
+			prKey, err := os.ReadFile(conf.privateKey)
+			if err != nil {
+				utils.Fatal("Error reading public key: %s ", err)
+			}
+			err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
 			if err != nil {
 				utils.Fatal("error during decrypting backup %v", err)
 			}
+			utils.Info("Decrypting backup using private key...done")
 		} else {
 			if conf.passphrase == "" {
 				utils.Error("Error, passphrase or private key required")
 				utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
 			} else {
-				//decryptWithGPG file
-				err := decryptWithGPG(filepath.Join(tmpPath, conf.file), conf.passphrase)
+				utils.Info("Decrypting backup using passphrase...")
+				// decryptWithGPG file
+				err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
 				if err != nil {
 					utils.Fatal("Error decrypting file %s %v", file, err)
 				}
-				//Update file name
+				utils.Info("Decrypting backup using passphrase...done")
+				// Update file name
 				conf.file = RemoveLastExtension(file)
 			}
 		}
@@ -110,20 +130,20 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
|
|||||||
utils.Fatal("Error, in restoring the database %v", err)
|
utils.Fatal("Error, in restoring the database %v", err)
|
||||||
}
|
}
|
||||||
utils.Info("Restoring database... done")
|
utils.Info("Restoring database... done")
|
||||||
utils.Done("Database has been restored")
|
utils.Info("Database has been restored")
|
||||||
//Delete temp
|
// Delete temp
|
||||||
deleteTemp()
|
deleteTemp()
|
||||||
|
|
||||||
} else if extension == ".sql" {
|
} else if extension == ".sql" {
|
||||||
//Restore from sql file
|
// Restore from sql file
|
||||||
str := "cat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
|
str := "cat " + filepath.Join(tmpPath, conf.file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
|
||||||
_, err := exec.Command("sh", "-c", str).Output()
|
_, err := exec.Command("sh", "-c", str).Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatal("Error in restoring the database %v", err)
|
utils.Fatal("Error in restoring the database %v", err)
|
||||||
}
|
}
|
||||||
utils.Info("Restoring database... done")
|
utils.Info("Restoring database... done")
|
||||||
utils.Done("Database has been restored")
|
utils.Info("Database has been restored")
|
||||||
//Delete temp
|
// Delete temp
|
||||||
deleteTemp()
|
deleteTemp()
|
||||||
} else {
|
} else {
|
||||||
utils.Fatal("Unknown file extension %s", extension)
|
utils.Fatal("Unknown file extension %s", extension)
|
||||||
|
|||||||
pkg/s3.go (230 lines changed)
@@ -1,148 +1,134 @@
|
|||||||
// Package pkg
|
/*
|
||||||
/*****
|
MIT License
|
||||||
@author Jonas Kaninda
|
|
||||||
@license MIT License <https://opensource.org/licenses/MIT>
|
Copyright (c) 2023 Jonas Kaninda
|
||||||
@Copyright © 2024 Jonas Kaninda
|
|
||||||
**/
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"fmt"
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/jkaninda/go-storage/pkg/s3"
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
|
||||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
|
||||||
"github.com/jkaninda/mysql-bkup/utils"
|
"github.com/jkaninda/mysql-bkup/utils"
|
||||||
"net/http"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CreateSession creates a new AWS session
|
func s3Backup(db *dbConfig, config *BackupConfig) {
|
||||||
func CreateSession() (*session.Session, error) {
|
|
||||||
|
utils.Info("Backup database to s3 storage")
|
||||||
|
startTime = time.Now().Format(utils.TimeFormat())
|
||||||
|
// Backup database
|
||||||
|
BackupDatabase(db, config.backupFileName, disableCompression)
|
||||||
|
finalFileName := config.backupFileName
|
||||||
|
if config.encryption {
|
||||||
|
encryptBackup(config)
|
||||||
|
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
|
||||||
|
}
|
||||||
|
utils.Info("Uploading backup archive to remote storage S3 ... ")
|
||||||
awsConfig := initAWSConfig()
|
awsConfig := initAWSConfig()
|
||||||
// Configure to use MinIO Server
|
if config.remotePath == "" {
|
||||||
s3Config := &aws.Config{
|
config.remotePath = awsConfig.remotePath
|
||||||
Credentials: credentials.NewStaticCredentials(awsConfig.accessKey, awsConfig.secretKey, ""),
|
|
||||||
Endpoint: aws.String(awsConfig.endpoint),
|
|
||||||
Region: aws.String(awsConfig.region),
|
|
||||||
DisableSSL: aws.Bool(awsConfig.disableSsl),
|
|
||||||
S3ForcePathStyle: aws.Bool(awsConfig.forcePathStyle),
|
|
||||||
}
|
}
|
||||||
return session.NewSession(s3Config)
|
utils.Info("Backup name is %s", finalFileName)
|
||||||
|
s3Storage, err := s3.NewStorage(s3.Config{
|
||||||
}
|
Endpoint: awsConfig.endpoint,
|
||||||
|
Bucket: awsConfig.bucket,
|
||||||
// UploadFileToS3 uploads a file to S3 with a given prefix
|
AccessKey: awsConfig.accessKey,
|
||||||
func UploadFileToS3(filePath, key, bucket, prefix string) error {
|
SecretKey: awsConfig.secretKey,
|
||||||
sess, err := CreateSession()
|
Region: awsConfig.region,
|
||||||
if err != nil {
|
DisableSsl: awsConfig.disableSsl,
|
||||||
return err
|
ForcePathStyle: awsConfig.forcePathStyle,
|
||||||
}
|
RemotePath: config.remotePath,
|
||||||
|
LocalPath: tmpPath,
|
||||||
svc := s3.New(sess)
|
|
||||||
|
|
||||||
file, err := os.Open(filepath.Join(filePath, key))
|
|
||||||
Old pkg/s3.go (continuation of the hunk above): the aws-sdk-go based helpers that this change removes.

    if err != nil {
        return err
    }
    defer file.Close()

    fileInfo, err := file.Stat()
    if err != nil {
        return err
    }

    objectKey := filepath.Join(prefix, key)

    buffer := make([]byte, fileInfo.Size())
    file.Read(buffer)
    fileBytes := bytes.NewReader(buffer)
    fileType := http.DetectContentType(buffer)

    _, err = svc.PutObject(&s3.PutObjectInput{
        Bucket:        aws.String(bucket),
        Key:           aws.String(objectKey),
        Body:          fileBytes,
        ContentLength: aws.Int64(fileInfo.Size()),
        ContentType:   aws.String(fileType),
    })
    if err != nil {
        return err
    }
    return nil
}

func DownloadFile(destinationPath, key, bucket, prefix string) error {
    sess, err := CreateSession()
    if err != nil {
        return err
    }
    utils.Info("Download data from S3 storage...")
    file, err := os.Create(filepath.Join(destinationPath, key))
    if err != nil {
        utils.Error("Failed to create file", err)
        return err
    }
    defer file.Close()

    objectKey := filepath.Join(prefix, key)

    downloader := s3manager.NewDownloader(sess)
    numBytes, err := downloader.Download(file,
        &s3.GetObjectInput{
            Bucket: aws.String(bucket),
            Key:    aws.String(objectKey),
        })
    if err != nil {
        utils.Error("Failed to download file %s", key)
        return err
    }
    utils.Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
    return nil
}

func DeleteOldBackup(bucket, prefix string, retention int) error {
    sess, err := CreateSession()
    if err != nil {
        return err
    }

    svc := s3.New(sess)

    // Get the current time and the time threshold for 7 days ago
    now := time.Now()
    backupRetentionDays := now.AddDate(0, 0, -retention)

    // List objects in the bucket
    listObjectsInput := &s3.ListObjectsV2Input{
        Bucket: aws.String(bucket),
        Prefix: aws.String(prefix),
    }
    err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
        for _, object := range page.Contents {
            if object.LastModified.Before(backupRetentionDays) {
                // Object is older than retention days, delete it
                _, err := svc.DeleteObject(&s3.DeleteObjectInput{
                    Bucket: aws.String(bucket),
                    Key:    object.Key,
                })
                if err != nil {
                    utils.Info("Failed to delete object %s: %v", *object.Key, err)
                } else {
                    utils.Info("Deleted object %s\n", *object.Key)
                }
            }
        }
        return !lastPage
    })
    if err != nil {
        utils.Error("Failed to list objects: %v", err)
    }
    utils.Info("Finished deleting old files.")
    return nil
}

New pkg/s3.go (continuation): upload, pruning and notification are now delegated to the shared s3 storage package (tail of s3Backup, followed by s3Restore).

    if err != nil {
        utils.Fatal("Error creating s3 storage: %s", err)
    }
    err = s3Storage.Copy(finalFileName)
    if err != nil {
        utils.Fatal("Error copying backup file: %s", err)
    }
    // Get backup info
    fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        utils.Error("Error: %s", err)
    }
    backupSize = fileInfo.Size()
    // Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
    if err != nil {
        fmt.Println("Error deleting file: ", err)
    }
    // Delete old backup
    if config.prune {
        err := s3Storage.Prune(config.backupRetention)
        if err != nil {
            utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
        }
    }
    utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
    utils.Info("Uploading backup archive to remote storage S3 ... done ")
    // Send notification
    utils.NotifySuccess(&utils.NotificationData{
        File:           finalFileName,
        BackupSize:     backupSize,
        Database:       db.dbName,
        Storage:        config.storage,
        BackupLocation: filepath.Join(config.remotePath, finalFileName),
        StartTime:      startTime,
        EndTime:        time.Now().Format(utils.TimeFormat()),
    })
    // Delete temp
    deleteTemp()
    utils.Info("Backup completed successfully")
}

func s3Restore(db *dbConfig, conf *RestoreConfig) {
    utils.Info("Restore database from s3")
    awsConfig := initAWSConfig()
    if conf.remotePath == "" {
        conf.remotePath = awsConfig.remotePath
    }
    s3Storage, err := s3.NewStorage(s3.Config{
        Endpoint:       awsConfig.endpoint,
        Bucket:         awsConfig.bucket,
        AccessKey:      awsConfig.accessKey,
        SecretKey:      awsConfig.secretKey,
        Region:         awsConfig.region,
        DisableSsl:     awsConfig.disableSsl,
        ForcePathStyle: awsConfig.forcePathStyle,
        RemotePath:     conf.remotePath,
        LocalPath:      tmpPath,
    })
    if err != nil {
        utils.Fatal("Error creating s3 storage: %s", err)
    }
    err = s3Storage.CopyFrom(conf.file)
    if err != nil {
        utils.Fatal("Error download file from S3 storage: %s", err)
    }
    RestoreDatabase(db, conf)
}
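Both the removed DeleteOldBackup helper and the new s3Storage.Prune(config.backupRetention) call apply the same retention rule: any object whose LastModified timestamp is older than now minus the configured number of retention days gets deleted. A minimal, self-contained sketch of that cutoff check (illustrative values, not code from the repository):

package main

import (
    "fmt"
    "time"
)

func main() {
    retention := 7 // days; illustrative value
    now := time.Now()
    cutoff := now.AddDate(0, 0, -retention) // same computation as backupRetentionDays above

    lastModified := now.AddDate(0, 0, -10) // example object timestamp
    if lastModified.Before(cutoff) {
        fmt.Println("object is older than the retention window: delete it")
    } else {
        fmt.Println("object is still within the retention window: keep it")
    }
}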
pkg/scp.go (111 lines, file deleted)
@@ -1,111 +0,0 @@
// Package pkg /
/*****
@author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda
**/
package pkg

import (
    "context"
    "errors"
    "fmt"
    "github.com/bramvdbogaerde/go-scp"
    "github.com/bramvdbogaerde/go-scp/auth"
    "github.com/jkaninda/mysql-bkup/utils"
    "golang.org/x/crypto/ssh"
    "os"
    "path/filepath"
)

// createSSHClientConfig sets up the SSH client configuration based on the provided SSHConfig
func createSSHClientConfig(sshConfig *SSHConfig) (ssh.ClientConfig, error) {
    if sshConfig.identifyFile != "" && utils.FileExists(sshConfig.identifyFile) {
        return auth.PrivateKey(sshConfig.user, sshConfig.identifyFile, ssh.InsecureIgnoreHostKey())
    } else {
        if sshConfig.password == "" {
            return ssh.ClientConfig{}, errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
        }
        utils.Warn("Accessing the remote server using password, which is not recommended.")
        return auth.PasswordKey(sshConfig.user, sshConfig.password, ssh.InsecureIgnoreHostKey())
    }
}

// CopyToRemote copies a file to a remote server via SCP
func CopyToRemote(fileName, remotePath string) error {
    // Load environment variables
    sshConfig, err := loadSSHConfig()
    if err != nil {
        return fmt.Errorf("failed to load SSH configuration: %w", err)
    }

    // Initialize SSH client config
    clientConfig, err := createSSHClientConfig(sshConfig)
    if err != nil {
        return fmt.Errorf("failed to create SSH client config: %w", err)
    }

    // Create a new SCP client
    client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)

    // Connect to the remote server
    err = client.Connect()
    if err != nil {
        return errors.New("Couldn't establish a connection to the remote server\n")
    }

    // Open the local file
    filePath := filepath.Join(tmpPath, fileName)
    file, err := os.Open(filePath)
    if err != nil {
        return fmt.Errorf("failed to open file %s: %w", filePath, err)
    }
    defer client.Close()
    // Copy file to the remote server
    err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
    if err != nil {
        return fmt.Errorf("failed to copy file to remote server: %w", err)
    }

    return nil
}

func CopyFromRemote(fileName, remotePath string) error {
    // Load environment variables
    sshConfig, err := loadSSHConfig()
    if err != nil {
        return fmt.Errorf("failed to load SSH configuration: %w", err)
    }

    // Initialize SSH client config
    clientConfig, err := createSSHClientConfig(sshConfig)
    if err != nil {
        return fmt.Errorf("failed to create SSH client config: %w", err)
    }

    // Create a new SCP client
    client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)

    // Connect to the remote server
    err = client.Connect()
    if err != nil {
        return errors.New("Couldn't establish a connection to the remote server\n")
    }
    // Close client connection after the file has been copied
    defer client.Close()
    file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
    if err != nil {
        fmt.Println("Couldn't open the output file")
    }
    defer file.Close()

    // the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
    err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))

    if err != nil {
        utils.Error("Error while copying file %s ", err)
        return err
    }
    return nil
}
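Both SCP helpers in this deleted file connected with ssh.InsecureIgnoreHostKey(), so the remote host key was never verified. As a hedged aside (not part of the repository's code), the usual stricter alternative is a host-key callback built from a known_hosts file; the helper name and path below are illustrative only:

package main

import (
    "fmt"

    "golang.org/x/crypto/ssh"
    "golang.org/x/crypto/ssh/knownhosts"
)

// buildHostKeyCallback is a hypothetical helper: it loads an OpenSSH
// known_hosts file and returns a verifier that could replace
// ssh.InsecureIgnoreHostKey() when building the ssh.ClientConfig.
func buildHostKeyCallback(path string) (ssh.HostKeyCallback, error) {
    return knownhosts.New(path)
}

func main() {
    cb, err := buildHostKeyCallback("/root/.ssh/known_hosts") // example path
    if err != nil {
        fmt.Println("could not load known_hosts:", err)
        return
    }
    _ = cb // would be passed as the host-key callback instead of InsecureIgnoreHostKey
}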
pkg/var.go (62 changed lines)
@@ -1,30 +1,51 @@
The new pkg/var.go replaces the short author header with the project's standard MIT license header, drops the cronLogFile and algorithm constants, moves workingDir into the var block, adds a timeFormat constant, requires DB_PORT, renames sshHVars to sshVars (REMOTE_PATH now listed last), adds the azureVars list, and introduces backupSize and startTime:

/*
MIT License

Copyright (c) 2023 Jonas Kaninda
... (standard MIT license text) ...
*/

package pkg

const tmpPath = "/tmp/backup"
const gpgHome = "/config/gnupg"
const gpgExtension = "gpg"
const timeFormat = "2006-01-02 at 15:04:05"

var (
    storage            = "local"
    file               = ""
    storagePath        = "/backup"
    workingDir         = "/config"
    disableCompression = false
    encryption         = false
    usingKey           = false
    backupSize   int64 = 0
    startTime    string
)

// dbHVars Required environment variables for database
var dbHVars = []string{
    "DB_HOST",
    "DB_PORT",
    "DB_PASSWORD",
    "DB_USERNAME",
    "DB_NAME",

@@ -40,12 +61,12 @@ var tdbRVars = []string{
var dbConf *dbConfig
var targetDbConf *targetDbConfig

// sshVars Required environment variables for SSH remote server storage
var sshVars = []string{
    "SSH_USER",
    "SSH_HOST_NAME",
    "SSH_PORT",
    "REMOTE_PATH",
}
var ftpVars = []string{
    "FTP_HOST_NAME",

@@ -53,6 +74,11 @@ var ftpVars = []string{
    "FTP_PASSWORD",
    "FTP_PORT",
}
var azureVars = []string{
    "AZURE_STORAGE_CONTAINER_NAME",
    "AZURE_STORAGE_ACCOUNT_NAME",
    "AZURE_STORAGE_ACCOUNT_KEY",
}

// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{
templates/email-error.tmpl (18 lines, new file)
@@ -0,0 +1,18 @@
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <title>🔴 Urgent: Database Backup Failure Notification</title>
</head>
<body>
<h2>Hi,</h2>
<p>An error occurred during database backup.</p>
<h3>Failure Details:</h3>
<ul>
    <li>Error Message: {{.Error}}</li>
    <li>Date: {{.EndTime}}</li>
    <li>Backup Reference: {{.BackupReference}} </li>
</ul>
<p>©2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
</body>
</html>
templates/email.tmpl (24 lines, new file)
@@ -0,0 +1,24 @@
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <title>✅ Database Backup Notification – {{.Database}}</title>
</head>
<body>
<h2>Hi,</h2>
<p>Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.</p>
<h3>Backup Details:</h3>
<ul>
    <li>Database Name: {{.Database}}</li>
    <li>Backup Start Time: {{.StartTime}}</li>
    <li>Backup End Time: {{.EndTime}}</li>
    <li>Backup Storage: {{.Storage}}</li>
    <li>Backup Location: {{.BackupLocation}}</li>
    <li>Backup Size: {{.BackupSize}} bytes</li>
    <li>Backup Reference: {{.BackupReference}} </li>
</ul>
<p>Best regards,</p>
<p>©2024 <a href="https://github.com/jkaninda/mysql-bkup">mysql-bkup</a></p>
</body>
</html>
templates/telegram-error.tmpl (8 lines, new file)
@@ -0,0 +1,8 @@
🔴 Urgent: Database Backup Failure Notification
Hi,
An error occurred during database backup.
Failure Details:
- Date: {{.EndTime}}
- Backup Reference: {{.BackupReference}}
- Error Message: {{.Error}}
templates/telegram.tmpl (12 lines, new file)
@@ -0,0 +1,12 @@
✅ Database Backup Notification – {{.Database}}
Hi,
Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}.

Backup Details:
- Database Name: {{.Database}}
- Backup Start Time: {{.StartTime}}
- Backup End Time: {{.EndTime}}
- Backup Storage: {{.Storage}}
- Backup Location: {{.BackupLocation}}
- Backup Size: {{.BackupSize}} bytes
- Backup Reference: {{.BackupReference}}
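The four templates above are standard Go templates; the placeholders they reference ({{.Database}}, {{.EndTime}} and so on) map onto the NotificationData and ErrorMessage structs declared in utils/config.go below and are rendered by the parseTemplate helper in utils/notification.go. A minimal, self-contained sketch of that rendering (the struct here is a stand-in and the template path is only an example, not code from the repository):

package main

import (
    "bytes"
    "fmt"
    "html/template"
)

// Minimal stand-in for utils.NotificationData; field names follow the templates above.
type NotificationData struct {
    Database, StartTime, EndTime, Storage, BackupLocation, BackupReference string
    BackupSize                                                             int64
}

func main() {
    // Parse one of the templates (example path; the project reads them from templatePath).
    tmpl, err := template.ParseFiles("templates/telegram.tmpl")
    if err != nil {
        panic(err)
    }
    var buf bytes.Buffer
    // Execute fills the {{.Field}} placeholders from the struct.
    if err := tmpl.Execute(&buf, NotificationData{
        Database:   "appdb",
        EndTime:    "2024-01-01 at 00:05:00",
        BackupSize: 1048576,
    }); err != nil {
        panic(err)
    }
    fmt.Println(buf.String())
}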
utils/config.go (83 lines, new file)
@@ -0,0 +1,83 @@
/*
MIT License

Copyright (c) 2023 Jonas Kaninda
... (standard MIT license text) ...
*/

package utils

import "os"

type MailConfig struct {
    MailHost     string
    MailPort     int
    MailUserName string
    MailPassword string
    MailTo       string
    MailFrom     string
    SkipTls      bool
}
type NotificationData struct {
    File            string
    BackupSize      int64
    Database        string
    StartTime       string
    EndTime         string
    Storage         string
    BackupLocation  string
    BackupReference string
}
type ErrorMessage struct {
    Database        string
    EndTime         string
    Error           string
    BackupReference string
}

// loadMailConfig gets mail environment variables and returns MailConfig
func loadMailConfig() *MailConfig {
    return &MailConfig{
        MailHost:     os.Getenv("MAIL_HOST"),
        MailPort:     GetIntEnv("MAIL_PORT"),
        MailUserName: os.Getenv("MAIL_USERNAME"),
        MailPassword: os.Getenv("MAIL_PASSWORD"),
        MailTo:       os.Getenv("MAIL_TO"),
        MailFrom:     os.Getenv("MAIL_FROM"),
        SkipTls:      os.Getenv("MAIL_SKIP_TLS") == "false",
    }
}

// TimeFormat returns the format of the time
func TimeFormat() string {
    format := os.Getenv("TIME_FORMAT")
    if format == "" {
        return "2006-01-02 at 15:04:05"
    }
    return format
}

func backupReference() string {
    return os.Getenv("BACKUP_REFERENCE")
}

const templatePath = "/config/templates"
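A side note on the TimeFormat helper above: TIME_FORMAT is interpreted as a Go reference-time layout, so a custom value has to be written against the reference date. A small illustration (the layout value below is only an example, not a project default):

package main

import (
    "fmt"
    "os"
    "time"
)

func main() {
    // The default layout above is "2006-01-02 at 15:04:05"; any override
    // must also use Go's reference time (Mon Jan 2 15:04:05 MST 2006).
    if err := os.Setenv("TIME_FORMAT", "02 Jan 2006 15:04"); err != nil {
        panic(err)
    }
    layout := os.Getenv("TIME_FORMAT")
    fmt.Println(time.Now().Format(layout)) // e.g. "01 Dec 2024 13:45"
}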
@@ -1,16 +1,35 @@
The new version of this utils constants file again swaps the short author header for the full MIT license header, drops the "mysql-bkup " prefix from the restore and backup usage examples, and adds the traceLog constant:

/*
MIT License

Copyright (c) 2023 Jonas Kaninda
... (standard MIT license text) ...
*/

package utils

const RestoreExample = "restore --dbname database --file db_20231219_022941.sql.gz\n" +
    "restore --dbname database --storage s3 --path /custom-path --file db_20231219_022941.sql.gz"
const BackupExample = "backup --dbname database --disable-compression\n" +
    "backup --dbname database --storage s3 --path /custom-path --disable-compression"

const MainExample = "mysql-bkup backup --dbname database --disable-compression\n" +
    "backup --dbname database --storage s3 --path /custom-path\n" +
    "restore --dbname database --file db_20231219_022941.sql.gz"
const traceLog = "trace"
utils/logger.go (142 changed lines)
@@ -1,69 +1,103 @@
The rewritten logger routes everything through the standard log package and a logWithCaller helper; the old fmt.Printf-based Info, Warn, Error, Done and Fatal functions, which each formatted their own "2006/01/02 15:04:05" timestamp prefix, are removed, and Fatal still notifies before exiting:

/*
MIT License

Copyright (c) 2023 Jonas Kaninda
... (standard MIT license text) ...
*/

package utils

import (
    "fmt"
    "log"
    "os"
    "runtime"
    "strings"
)

// Info returns info log
func Info(msg string, args ...interface{}) {
    log.SetOutput(getStd("/dev/stdout"))
    logWithCaller("INFO", msg, args...)
}

// Warn returns warning log
func Warn(msg string, args ...interface{}) {
    log.SetOutput(getStd("/dev/stdout"))
    logWithCaller("WARN", msg, args...)
}

// Error logs error messages
func Error(msg string, args ...interface{}) {
    log.SetOutput(getStd("/dev/stderr"))
    logWithCaller("ERROR", msg, args...)
}

// Fatal logs an error message, sends an error notification and exits the program
func Fatal(msg string, args ...interface{}) {
    log.SetOutput(os.Stdout)
    // Format message if there are additional arguments
    formattedMessage := msg
    if len(args) > 0 {
        formattedMessage = fmt.Sprintf(msg, args...)
    }
    logWithCaller("ERROR", msg, args...)
    NotifyError(formattedMessage)
    os.Exit(1)
}

// Helper function to format and log messages with file and line number
func logWithCaller(level, msg string, args ...interface{}) {
    // Format message if there are additional arguments
    formattedMessage := msg
    if len(args) > 0 {
        formattedMessage = fmt.Sprintf(msg, args...)
    }

    // Get the caller's file and line number (skip 2 frames)
    _, file, line, ok := runtime.Caller(2)
    if !ok {
        file = "unknown"
        line = 0
    }
    // Include caller information only when the trace level is active
    if strings.ToLower(level) != "off" {
        if strings.ToLower(level) == traceLog {
            log.Printf("%s: %s (File: %s, Line: %d)\n", level, formattedMessage, file, line)
        } else {
            log.Printf("%s: %s\n", level, formattedMessage)
        }
    }
}

func getStd(out string) *os.File {
    switch out {
    case "/dev/stdout":
        return os.Stdout
    case "/dev/stderr":
        return os.Stderr
    case "/dev/stdin":
        return os.Stdin
    default:
        return os.Stdout
    }
}
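A brief aside on the skip depth used above (illustration only, not repository code): runtime.Caller(2) is what makes the log line point at the code that called Info or Error rather than at the logger itself.

package main

import (
    "fmt"
    "runtime"
)

// caller reports its caller's caller, mirroring why logWithCaller asks
// runtime.Caller for 2 frames: frame 0 would be this function itself,
// frame 1 the logging wrapper (Info, Warn, ...), frame 2 the user code.
func caller() {
    _, file, line, ok := runtime.Caller(2)
    if !ok {
        file, line = "unknown", 0
    }
    fmt.Printf("log call originated at %s:%d\n", file, line)
}

func wrapper() { caller() }

func main() {
    wrapper() // prints the file and line of this call site
}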
utils/notification.go (202 lines, new file)
@@ -0,0 +1,202 @@
/*
MIT License

Copyright (c) 2023 Jonas Kaninda
... (standard MIT license text) ...
*/

package utils

import (
    "bytes"
    "crypto/tls"
    "encoding/json"
    "fmt"
    "github.com/go-mail/mail"
    "html/template"
    "io"
    "net/http"
    "os"
    "path/filepath"
    "strings"
    "time"
)

func parseTemplate[T any](data T, fileName string) (string, error) {
    // Open the file
    tmpl, err := template.ParseFiles(filepath.Join(templatePath, fileName))
    if err != nil {
        return "", err
    }

    var buf bytes.Buffer
    if err = tmpl.Execute(&buf, data); err != nil {
        return "", err
    }

    return buf.String(), nil
}

func SendEmail(subject, body string) error {
    Info("Start sending email notification....")
    config := loadMailConfig()
    emails := strings.Split(config.MailTo, ",")
    m := mail.NewMessage()
    m.SetHeader("From", config.MailFrom)
    m.SetHeader("To", emails...)
    m.SetHeader("Subject", subject)
    m.SetBody("text/html", body)
    d := mail.NewDialer(config.MailHost, config.MailPort, config.MailUserName, config.MailPassword)
    d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}

    if err := d.DialAndSend(m); err != nil {
        Error("Error could not send email : %v", err)
        return err
    }
    Info("Email notification has been sent")
    return nil
}

func sendMessage(msg string) error {
    Info("Sending Telegram notification... ")
    chatId := os.Getenv("TG_CHAT_ID")
    body, _ := json.Marshal(map[string]string{
        "chat_id": chatId,
        "text":    msg,
    })
    url := fmt.Sprintf("%s/sendMessage", getTgUrl())
    // Create an HTTP post request
    request, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
    if err != nil {
        panic(err)
    }
    request.Header.Add("Content-Type", "application/json")
    client := &http.Client{}
    response, err := client.Do(request)
    if err != nil {
        return err
    }
    code := response.StatusCode
    if code == 200 {
        Info("Telegram notification has been sent")
        return nil
    } else {
        body, _ := io.ReadAll(response.Body)
        Error("Error could not send message, error: %s", string(body))
        return fmt.Errorf("error could not send message %s", string(body))
    }
}

func NotifySuccess(notificationData *NotificationData) {
    notificationData.BackupReference = backupReference()
    var vars = []string{
        "TG_TOKEN",
        "TG_CHAT_ID",
    }
    var mailVars = []string{
        "MAIL_HOST",
        "MAIL_PORT",
        "MAIL_USERNAME",
        "MAIL_PASSWORD",
        "MAIL_FROM",
        "MAIL_TO",
    }

    // Email notification
    err := CheckEnvVars(mailVars)
    if err == nil {
        body, err := parseTemplate(*notificationData, "email.tmpl")
        if err != nil {
            Error("Could not parse email template: %v", err)
        }
        err = SendEmail(fmt.Sprintf("✅ Database Backup Notification – %s", notificationData.Database), body)
        if err != nil {
            Error("Could not send email: %v", err)
        }
    }
    // Telegram notification
    err = CheckEnvVars(vars)
    if err == nil {
        message, err := parseTemplate(*notificationData, "telegram.tmpl")
        if err != nil {
            Error("Could not parse telegram template: %v", err)
        }

        err = sendMessage(message)
        if err != nil {
            Error("Could not send Telegram message: %v", err)
        }
    }
}

func NotifyError(error string) {
    var vars = []string{
        "TG_TOKEN",
        "TG_CHAT_ID",
    }
    var mailVars = []string{
        "MAIL_HOST",
        "MAIL_PORT",
        "MAIL_USERNAME",
        "MAIL_PASSWORD",
        "MAIL_FROM",
        "MAIL_TO",
    }

    // Email notification
    err := CheckEnvVars(mailVars)
    if err == nil {
        body, err := parseTemplate(ErrorMessage{
            Error:           error,
            EndTime:         time.Now().Format(TimeFormat()),
            BackupReference: os.Getenv("BACKUP_REFERENCE"),
        }, "email-error.tmpl")
        if err != nil {
            Error("Could not parse error template: %v", err)
        }
        err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
        if err != nil {
            Error("Could not send email: %v", err)
        }
    }
    // Telegram notification
    err = CheckEnvVars(vars)
    if err == nil {
        message, err := parseTemplate(ErrorMessage{
            Error:           error,
            EndTime:         time.Now().Format(TimeFormat()),
            BackupReference: os.Getenv("BACKUP_REFERENCE"),
        }, "telegram-error.tmpl")
        if err != nil {
            Error("Could not parse error template: %v", err)
        }

        err = sendMessage(message)
        if err != nil {
            Error("Could not send telegram message: %v", err)
        }
    }
}

func getTgUrl() string {
    return fmt.Sprintf("https://api.telegram.org/bot%s", os.Getenv("TG_TOKEN"))
}
utils/utils.go (155 changed lines)
@@ -1,25 +1,43 @@
The new utils/utils.go swaps the short author header for the MIT license header, trims the import list ("bytes", "encoding/json", "io/ioutil" and "net/http" are no longer needed), adds "time", introduces the exported Version variable, and replaces every bare defer x.Close() with a closure that checks the Close error. The Telegram helpers that used to live here (sendMessage, NotifySuccess(fileName string), NotifyError, getTgUrl) are removed in favour of the reworked versions in utils/notification.go above, as is the empty ShowHistory stub; EnvWithDefault and CronNextTime are new:

/*
MIT License

Copyright (c) 2023 Jonas Kaninda
... (standard MIT license text) ...
*/

package utils

import (
    "fmt"
    "github.com/robfig/cron/v3"
    "github.com/spf13/cobra"
    "io"
    "io/fs"
    "os"
    "strconv"
    "time"
)

var Version = ""

// FileExists checks if the file does exist
func FileExists(filename string) bool {
    info, err := os.Stat(filename)
    if os.IsNotExist(err) {

@@ -33,7 +51,13 @@ func WriteToFile(filePath, content string) error {
    if err != nil {
        return err
    }
    defer func(file *os.File) {
        err := file.Close()
        if err != nil {
            return
        }
    }(file)

    _, err = file.WriteString(content)
    return err

@@ -51,14 +75,25 @@ func CopyFile(src, dst string) error {
    if err != nil {
        return fmt.Errorf("failed to open source file: %v", err)
    }
    defer func(sourceFile *os.File) {
        err := sourceFile.Close()
        if err != nil {
            return
        }
    }(sourceFile)

    // Create the destination file
    destinationFile, err := os.Create(dst)
    if err != nil {
        return fmt.Errorf("failed to create destination file: %v", err)
    }
    defer func(destinationFile *os.File) {
        err := destinationFile.Close()
        if err != nil {
            return
        }
    }(destinationFile)

    // Copy the content from source to destination
    _, err = io.Copy(destinationFile, sourceFile)

@@ -85,7 +120,12 @@ func IsDirEmpty(name string) (bool, error) {
    if err != nil {
        return false, err
    }
    defer func(f *os.File) {
        err := f.Close()
        if err != nil {
            return
        }
    }(f)

    _, err = f.Readdirnames(1)
    if err == nil {

@@ -133,14 +173,11 @@ func GetEnvVariable(envName, oldEnvName string) string {
        if err != nil {
            return value
        }
        Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
        }
    }
    return value
}

// CheckEnvVars checks if all the specified environment variables are set
func CheckEnvVars(vars []string) error {

@@ -187,71 +224,33 @@ func GetIntEnv(envName string) int {
    }
    return ret
}

func EnvWithDefault(envName string, defaultValue string) string {
    value := os.Getenv(envName)
    if value == "" {
        return defaultValue
    }
    return value
}

// IsValidCronExpression verifies cronExpression and returns a boolean
func IsValidCronExpression(cronExpr string) bool {
    // Parse the cron expression
    _, err := cron.ParseStandard(cronExpr)
    return err == nil
}

// CronNextTime returns the cronExpression's next run time
func CronNextTime(cronExpr string) time.Time {
    // Parse the cron expression
    schedule, err := cron.ParseStandard(cronExpr)
    if err != nil {
        Error("Error parsing cron expression: %s", err)
        return time.Time{}
    }
    // Get the current time
    now := time.Now()
    // Get the next scheduled time
    next := schedule.Next(now)
    return next
}
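As a quick illustration of the two cron helpers above (the expression is an example value; the calls mirror those shown in the diff):

package main

import (
    "fmt"
    "time"

    "github.com/robfig/cron/v3"
)

func main() {
    expr := "0 1 * * *" // every day at 01:00 (illustrative value)

    // Same call IsValidCronExpression wraps: a nil error means the expression is valid.
    schedule, err := cron.ParseStandard(expr)
    if err != nil {
        fmt.Println("invalid cron expression:", err)
        return
    }

    // Same logic as CronNextTime: the next scheduled run after "now".
    next := schedule.Next(time.Now())
    fmt.Println("next run at:", next.Format("2006-01-02 15:04:05"))
}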