Compare commits

...

28 Commits

Author SHA1 Message Date
96927cd57e Merge pull request #106 from jkaninda/refactor
Refactor
2024-10-02 04:13:20 +02:00
Jonas Kaninda
ceacfa1d9d docs: update ssh and ftp deployment example 2024-10-02 04:09:42 +02:00
Jonas Kaninda
9380a18b45 refactor: remove old arguments, refactor aws and ssh configuration 2024-10-02 04:07:14 +02:00
Jonas Kaninda
d186071df9 Merge pull request #105 from jkaninda/refactor
chore: update app version
2024-09-30 17:49:21 +02:00
Jonas Kaninda
71429b0e1a chore: update app version 2024-09-30 17:48:56 +02:00
Jonas Kaninda
0bed86ded4 Merge pull request #104 from jkaninda/refactor
chore: add Time Zone
2024-09-30 17:45:38 +02:00
Jonas Kaninda
e891801125 chore: add Time Zone 2024-09-30 17:44:45 +02:00
Jonas Kaninda
01cf8a3392 Merge pull request #103 from jkaninda/refactor
fix: MySQL 8.x -Plugin caching_sha2_password could not be loaded
2024-09-30 07:58:39 +02:00
Jonas Kaninda
efea81833a fix: MySQL 8.x -Plugin caching_sha2_password could not be loaded 2024-09-30 07:57:42 +02:00
Jonas Kaninda
1cbf65d686 Merge pull request #102 from jkaninda/refactor
fix: backup date and time
2024-09-30 02:03:08 +02:00
Jonas Kaninda
73d19913f8 fix: backup date and time 2024-09-30 02:02:37 +02:00
Jonas Kaninda
b0224e43ef Merge pull request #101 from jkaninda/docs
docs: add FTP storage
2024-09-30 00:58:42 +02:00
Jonas Kaninda
fa0485bb5a docs: add FTP storage 2024-09-30 00:58:20 +02:00
Jonas Kaninda
65ef6d3e8f Merge pull request #100 from jkaninda/develop
Merge develop
2024-09-30 00:55:42 +02:00
Jonas Kaninda
a7b6abb101 feat: add ftp backup storage 2024-09-30 00:40:35 +02:00
Jonas Kaninda
3b21c109bc chore: migrate baseos from Ubuntu to Alpine 2024-09-29 20:44:11 +02:00
Jonas Kaninda
a50a1ef6f9 Merge pull request #99 from jkaninda/refactor
refactor: replace function params by config struct
2024-09-29 20:09:02 +02:00
Jonas Kaninda
76bbfa35c4 refactor: replace function params by config struct 2024-09-29 20:08:36 +02:00
Jonas Kaninda
599d93bef4 Merge pull request #98 from jkaninda/refactor
refactoring of code
2024-09-29 19:51:07 +02:00
Jonas Kaninda
247e90f73e refactoring of code 2024-09-29 19:50:26 +02:00
Jonas Kaninda
7d544aca68 Merge pull request #97 from jkaninda/docs
chore: add test configurations before running in scheduled mode
2024-09-29 07:35:45 +02:00
Jonas Kaninda
1722ee0eeb chore: add test configurations before running in scheduled mode 2024-09-29 07:35:27 +02:00
Jonas Kaninda
726fd14831 Merge pull request #96 from jkaninda/docs
docs: add docker recurring backup examples
2024-09-29 07:01:27 +02:00
Jonas Kaninda
fdc88e6064 docs: add docker recurring backup examples 2024-09-29 07:00:55 +02:00
Jonas Kaninda
2ba1b516e9 Merge pull request #95 from jkaninda/docs
docs: fix environment variables table
2024-09-28 21:23:43 +02:00
Jonas Kaninda
301594676b docs: fix environment variables table 2024-09-28 21:23:03 +02:00
Jonas Kaninda
d06f2f2d7e Merge pull request #94 from jkaninda/docs
docs: update deployment example
2024-09-28 21:18:37 +02:00
Jonas Kaninda
2f06bd1c3a docs: update deployment example 2024-09-28 21:17:34 +02:00
29 changed files with 526 additions and 255 deletions

View File

@@ -27,6 +27,8 @@ jobs:
push: true push: true
file: "./docker/Dockerfile" file: "./docker/Dockerfile"
platforms: linux/amd64,linux/arm64,linux/arm/v7 platforms: linux/amd64,linux/arm64,linux/arm/v7
build-args: |
appVersion=develop-${{ github.sha }}
tags: | tags: |
"${{env.BUILDKIT_IMAGE}}:develop-${{ github.sha }}" "${{vars.BUILDKIT_IMAGE}}:develop-${{ github.sha }}"

View File

@@ -41,9 +41,11 @@ jobs:
push: true push: true
file: "./docker/Dockerfile" file: "./docker/Dockerfile"
platforms: linux/amd64,linux/arm64,linux/arm/v7 platforms: linux/amd64,linux/arm64,linux/arm/v7
build-args: |
appVersion=${{ env.TAG_NAME }}
tags: | tags: |
"${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}" "${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
"${{env.BUILDKIT_IMAGE}}:latest" "${{vars.BUILDKIT_IMAGE}}:latest"
"ghcr.io/${{env.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}" "ghcr.io/${{vars.BUILDKIT_IMAGE}}:${{ env.TAG_NAME }}"
"ghcr.io/${{env.BUILDKIT_IMAGE}}:latest" "ghcr.io/${{vars.BUILDKIT_IMAGE}}:latest"

View File

@@ -1,9 +1,9 @@
# MySQL Backup # MySQL Backup
MySQL Backup is a Docker container image that can be used to back up, restore and migrate MySQL databases. It supports local storage, AWS S3 or any S3-compatible object storage, and SSH compatible storage. MySQL Backup is a Docker container image that can be used to back up, restore and migrate MySQL databases. It supports local storage, AWS S3 or any S3-compatible object storage, FTP and SSH compatible storage.
It also supports __encrypting__ your backups using GPG. It also supports __encrypting__ your backups using GPG.
The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes. The [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes.
It handles __recurring__ backups of MySQL databases on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage. It handles __recurring__ backups of MySQL databases on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3, FTP or SSH compatible storage.
It also supports database __encryption__ using GPG. It also supports database __encryption__ using GPG.
@@ -80,12 +80,26 @@ services:
- DB_NAME=foo - DB_NAME=foo
- DB_USERNAME=bar - DB_USERNAME=bar
- DB_PASSWORD=password - DB_PASSWORD=password
- TZ=Europe/Paris
# mysql-bkup container must be connected to the same network as your database # mysql-bkup container must be connected to the same network as your database
networks: networks:
- web - web
networks: networks:
web: web:
``` ```
### Docker recurring backup
```shell
docker run --rm --network network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
```
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
## Deploy on Kubernetes ## Deploy on Kubernetes
For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob. For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as Job or CronJob.
@@ -102,7 +116,7 @@ spec:
template: template:
spec: spec:
containers: containers:
- name: pg-bkup - name: mysql-bkup
# In production, it is advised to lock your image tag to a proper # In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`. # release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases # Check https://github.com/jkaninda/mysql-bkup/releases
@@ -154,7 +168,7 @@ While it may work against different implementations, there are no guarantees abo
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements: We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
- The original image is based on `ubuntu` and requires additional tools, making it heavy. - The original image is based on `alpine` and requires additional tools, making it heavy.
- This image is written in Go. - This image is written in Go.
- `arm64` and `arm/v7` architectures are supported. - `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported. - Docker in Swarm mode is supported.

View File

@@ -29,8 +29,6 @@ func init() {
//Backup //Backup
BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3") BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`") BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. | Deprecated")
BackupCmd.PersistentFlags().StringP("period", "", "", "Schedule period time | Deprecated")
BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression") BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled") BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled")
BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days") BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days")

View File

@@ -9,8 +9,8 @@ RUN go mod download
# Build # Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup RUN CGO_ENABLED=0 GOOS=linux go build -o /app/mysql-bkup
FROM ubuntu:24.04 FROM alpine:3.20.3
ENV DB_HOST="localhost" ENV DB_HOST=""
ENV DB_NAME="" ENV DB_NAME=""
ENV DB_USERNAME="" ENV DB_USERNAME=""
ENV DB_PASSWORD="" ENV DB_PASSWORD=""
@@ -20,49 +20,46 @@ ENV AWS_S3_ENDPOINT=""
ENV AWS_S3_BUCKET_NAME="" ENV AWS_S3_BUCKET_NAME=""
ENV AWS_ACCESS_KEY="" ENV AWS_ACCESS_KEY=""
ENV AWS_SECRET_KEY="" ENV AWS_SECRET_KEY=""
ENV AWS_REGION="us-west-2"
ENV AWS_S3_PATH="" ENV AWS_S3_PATH=""
ENV AWS_REGION="us-west-2"
ENV AWS_DISABLE_SSL="false" ENV AWS_DISABLE_SSL="false"
ENV AWS_FORCE_PATH_STYLE="true"
ENV GPG_PASSPHRASE="" ENV GPG_PASSPHRASE=""
ENV SSH_USER="" ENV SSH_USER=""
ENV SSH_REMOTE_PATH=""
ENV SSH_PASSWORD="" ENV SSH_PASSWORD=""
ENV SSH_HOST_NAME="" ENV SSH_HOST=""
ENV SSH_IDENTIFY_FILE="" ENV SSH_IDENTIFY_FILE=""
ENV SSH_PORT="22" ENV SSH_PORT=22
ENV REMOTE_PATH=""
ENV FTP_HOST=""
ENV FTP_PORT=21
ENV FTP_USER=""
ENV FTP_PASSWORD=""
ENV TARGET_DB_HOST="" ENV TARGET_DB_HOST=""
ENV TARGET_DB_PORT=3306 ENV TARGET_DB_PORT=3306
ENV TARGET_DB_NAME="localhost" ENV TARGET_DB_NAME=""
ENV TARGET_DB_USERNAME="" ENV TARGET_DB_USERNAME=""
ENV TARGET_DB_PASSWORD="" ENV TARGET_DB_PASSWORD=""
ARG DEBIAN_FRONTEND=noninteractive
ENV VERSION="v1.2.8"
ENV BACKUP_CRON_EXPRESSION="" ENV BACKUP_CRON_EXPRESSION=""
ENV TG_TOKEN="" ENV TG_TOKEN=""
ENV TG_CHAT_ID="" ENV TG_CHAT_ID=""
ENV TZ=UTC
ARG WORKDIR="/config" ARG WORKDIR="/config"
ARG BACKUPDIR="/backup" ARG BACKUPDIR="/backup"
ARG BACKUP_TMP_DIR="/tmp/backup" ARG BACKUP_TMP_DIR="/tmp/backup"
ARG BACKUP_CRON="/etc/cron.d/backup_cron" ARG appVersion="v1.2.12"
ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh" ENV VERSION=${appVersion}
LABEL author="Jonas Kaninda" LABEL author="Jonas Kaninda"
LABEL version=${appVersion}
RUN apt-get update -qq RUN apk --update add --no-cache mysql-client mariadb-connector-c gnupg tzdata
RUN apt install mysql-client cron gnupg -y
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
RUN mkdir $WORKDIR RUN mkdir $WORKDIR
RUN mkdir $BACKUPDIR RUN mkdir $BACKUPDIR
RUN mkdir -p $BACKUP_TMP_DIR RUN mkdir -p $BACKUP_TMP_DIR
RUN chmod 777 $WORKDIR RUN chmod 777 $WORKDIR
RUN chmod 777 $BACKUPDIR RUN chmod 777 $BACKUPDIR
RUN chmod 777 $BACKUP_TMP_DIR RUN chmod 777 $BACKUP_TMP_DIR
RUN touch $BACKUP_CRON && \ RUN chmod 777 $WORKDIR
touch $BACKUP_CRON_SCRIPT && \
chmod 777 $BACKUP_CRON && \
chmod 777 $BACKUP_CRON_SCRIPT
COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
RUN chmod +x /usr/local/bin/mysql-bkup RUN chmod +x /usr/local/bin/mysql-bkup

View File

@@ -1,13 +0,0 @@
[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisor/supervisord.log
pidfile=/var/run/supervisord.pid
[program:cron]
command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
autostart=true
autorestart=true
user = root
stderr_logfile=/var/log/cron.err.log
stdout_logfile=/var/log/cron.out.log

docs/favicon.ico: new binary file (4.2 KiB), not shown.

View File

@@ -0,0 +1,44 @@
---
title: Backup to FTP remote server
layout: default
parent: How Tos
nav_order: 4
---
# Backup to FTP remote server
As described in the SSH backup section, to store your backups on a remote FTP server you need to add `--storage ftp`.
You also need to set the full remote path, either with the `--path /home/jkaninda/backups` flag or the `REMOTE_PATH` environment variable.
{: .note }
These environment variables are required for FTP backup: `FTP_HOST`, `FTP_USER`, `FTP_PASSWORD`, `FTP_PORT`, and `REMOTE_PATH`.
```yml
services:
mysql-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/mysql-bkup/releases
# for a list of available releases.
image: jkaninda/mysql-bkup
container_name: mysql-bkup
command: backup --storage ftp -d database
environment:
- DB_PORT=3306
- DB_HOST=mysql
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## FTP config
- FTP_HOST="hostname"
- FTP_PORT=21
- FTP_USER=user
- FTP_PASSWORD=password
- REMOTE_PATH=/home/jkaninda/backups
# The mysql-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```

View File

@@ -8,10 +8,10 @@ nav_order: 3
As described in the s3 backup section, to store your backups on a remote SSH server you need to add `--storage ssh` or `--storage remote`. As described in the s3 backup section, to store your backups on a remote SSH server you need to add `--storage ssh` or `--storage remote`.
You also need to set the full remote path, either with the `--path /home/jkaninda/backups` flag or the `SSH_REMOTE_PATH` environment variable. You also need to set the full remote path, either with the `--path /home/jkaninda/backups` flag or the `REMOTE_PATH` environment variable.
{: .note } {: .note }
These environment variables are required for SSH backup: `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT`, or `SSH_PASSWORD` if you don't use a private key to access your server. These environment variables are required for SSH backup: `SSH_HOST`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT`, or `SSH_PASSWORD` if you don't use a private key to access your server.
Accessing the remote server with a password is not recommended; use a private key instead. Accessing the remote server with a password is not recommended; use a private key instead.
```yml ```yml
@@ -33,10 +33,10 @@ services:
- DB_USERNAME=username - DB_USERNAME=username
- DB_PASSWORD=password - DB_PASSWORD=password
## SSH config ## SSH config
- SSH_HOST_NAME="hostname" - SSH_HOST="hostname"
- SSH_PORT=22 - SSH_PORT=22
- SSH_USER=user - SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups - REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519 - SSH_IDENTIFY_FILE=/tmp/id_ed25519
## We advise you to use a private key instead of a password ## We advise you to use a private key instead of a password
#- SSH_PASSWORD=password #- SSH_PASSWORD=password
@@ -73,10 +73,10 @@ services:
- DB_USERNAME=username - DB_USERNAME=username
- DB_PASSWORD=password - DB_PASSWORD=password
## SSH config ## SSH config
- SSH_HOST_NAME="hostname" - SSH_HOST="hostname"
- SSH_PORT=22 - SSH_PORT=22
- SSH_USER=user - SSH_USER=user
- SSH_REMOTE_PATH=/home/jkaninda/backups - REMOTE_PATH=/home/jkaninda/backups
- SSH_IDENTIFY_FILE=/tmp/id_ed25519 - SSH_IDENTIFY_FILE=/tmp/id_ed25519
# - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional
## We advise you to use a private key instead of a password ## We advise you to use a private key instead of a password
@@ -125,13 +125,13 @@ spec:
# Please use secret! # Please use secret!
- name: DB_PASSWORD - name: DB_PASSWORD
value: "" value: ""
- name: SSH_HOST_NAME - name: SSH_HOST
value: "" value: ""
- name: SSH_PORT - name: SSH_PORT
value: "22" value: "22"
- name: SSH_USER - name: SSH_USER
value: "xxx" value: "xxx"
- name: SSH_REMOTE_PATH - name: REMOTE_PATH
value: "/home/jkaninda/backups" value: "/home/jkaninda/backups"
- name: AWS_ACCESS_KEY - name: AWS_ACCESS_KEY
value: "xxxx" value: "xxxx"

View File

@@ -2,7 +2,7 @@
title: Deploy on Kubernetes title: Deploy on Kubernetes
layout: default layout: default
parent: How Tos parent: How Tos
nav_order: 8 nav_order: 9
--- ---
## Deploy on Kubernetes ## Deploy on Kubernetes

View File

@@ -2,7 +2,7 @@
title: Encrypt backups using GPG title: Encrypt backups using GPG
layout: default layout: default
parent: How Tos parent: How Tos
nav_order: 7 nav_order: 8
--- ---
# Encrypt backup # Encrypt backup

View File

@@ -2,7 +2,7 @@
title: Migrate database title: Migrate database
layout: default layout: default
parent: How Tos parent: How Tos
nav_order: 9 nav_order: 10
--- ---
# Migrate database # Migrate database

View File

@@ -2,7 +2,7 @@
title: Restore database from AWS S3 title: Restore database from AWS S3
layout: default layout: default
parent: How Tos parent: How Tos
nav_order: 5 nav_order: 6
--- ---
# Restore database from S3 storage # Restore database from S3 storage

View File

@@ -2,7 +2,7 @@
title: Restore database from SSH title: Restore database from SSH
layout: default layout: default
parent: How Tos parent: How Tos
nav_order: 6 nav_order: 7
--- ---
# Restore database from SSH remote server # Restore database from SSH remote server

View File

@@ -2,7 +2,7 @@
title: Restore database title: Restore database
layout: default layout: default
parent: How Tos parent: How Tos
nav_order: 4 nav_order: 5
--- ---
# Restore database # Restore database

View File

@@ -6,7 +6,7 @@ nav_order: 1
# About mysql-bkup # About mysql-bkup
{:.no_toc} {:.no_toc}
MySQL Backup is a Docker container image that can be used to back up, restore and migrate MySQL databases. It supports local storage, AWS S3 or any S3-compatible object storage, and SSH remote storage. MySQL Backup is a Docker container image that can be used to back up, restore and migrate MySQL databases. It supports local storage, AWS S3 or any S3-compatible object storage, FTP and SSH remote storage.
It also supports __encrypting__ your backups using GPG. It also supports __encrypting__ your backups using GPG.
We are open to receiving stars, PRs, and issues! We are open to receiving stars, PRs, and issues!
@@ -73,12 +73,25 @@ services:
- DB_NAME=foo - DB_NAME=foo
- DB_USERNAME=bar - DB_USERNAME=bar
- DB_PASSWORD=password - DB_PASSWORD=password
- TZ=Europe/Paris
# mysql-bkup container must be connected to the same network as your database # mysql-bkup container must be connected to the same network as your database
networks: networks:
- web - web
networks: networks:
web: web:
``` ```
### Docker recurring backup
```shell
docker run --rm --network network_name \
-v $PWD/backup:/backup/ \
-e "DB_HOST=hostname" \
-e "DB_USERNAME=user" \
-e "DB_PASSWORD=password" \
jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 1m"
```
See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
## Kubernetes ## Kubernetes
```yaml ```yaml
@@ -144,7 +157,7 @@ While it may work against different implementations, there are no guarantees abo
We decided to publish this image as a simpler and more lightweight alternative because of the following requirements: We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
- The original image is based on `ubuntu` and requires additional tools, making it heavy. - The original image is based on `alpine` and requires additional tools, making it heavy.
- This image is written in Go. - This image is written in Go.
- `arm64` and `arm/v7` architectures are supported. - `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported. - Docker in Swarm mode is supported.

View File

@@ -34,35 +34,41 @@ Backup, restore and migrate targets, schedule and retention are configured using
## Environment variables ## Environment variables
| Name | Requirement | Description | | Name | Requirement | Description |
|------------------------|--------------------------------------------------------------|------------------------------------------------------| |------------------------|---------------------------------------------------------------|------------------------------------------------------|
| DB_PORT | Optional, default 3306 | Database port number | | DB_PORT | Optional, default 3306 | Database port number |
| DB_HOST | Required | Database host | | DB_HOST | Required | Database host |
| DB_NAME | Optional if it was provided from the -d flag | Database name | | DB_NAME | Optional if it was provided from the -d flag | Database name |
| DB_USERNAME | Required | Database user name | | DB_USERNAME | Required | Database user name |
| DB_PASSWORD | Required | Database password | | DB_PASSWORD | Required | Database password |
| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key | | AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key | | AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name | | AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
| AWS_REGION | Optional, required for S3 storage | AWS Region | | AWS_REGION | Optional, required for S3 storage | AWS Region |
| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL | | AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) | | FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --cron-expression flag | Backup cron expression for docker in scheduled mode | | GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase | | BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode |
| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip | | SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
| SSH_USER | Optional, required for SSH storage | ssh remote user | | SSH_USER | Optional, required for SSH storage | ssh remote user |
| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password | | SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key | | SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
| SSH_PORT | Optional, required for SSH storage | ssh remote server port | | SSH_PORT | Optional, required for SSH storage | ssh remote server port |
| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) | | REMOTE_PATH | Optional, required for SSH or FTP storage | remote path (/home/toto/backup) |
| TARGET_DB_HOST | Optional, required for database migration | Target database host | | FTP_HOST_NAME | Optional, required for FTP storage | FTP host name |
| TARGET_DB_PORT | Optional, required for database migration | Target database port | | FTP_PORT | Optional, required for FTP storage | FTP server port number |
| TARGET_DB_NAME | Optional, required for database migration | Target database name | | FTP_USER | Optional, required for FTP storage | FTP user |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username | | FTP_PASSWORD | Optional, required for FTP storage | FTP user password |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password | | TARGET_DB_HOST | Optional, required for database migration | Target database host |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token | | TARGET_DB_PORT | Optional, required for database migration | Target database port |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID | | TARGET_DB_NAME | Optional, required for database migration | Target database name |
| TARGET_DB_USERNAME | Optional, required for database migration | Target database username |
| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password |
| TG_TOKEN | Optional, required for Telegram notification | Telegram token (`BOT-ID:BOT-TOKEN`) |
| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID |
| TZ | Optional | Time Zone |
--- ---
## Run in Scheduled mode ## Run in Scheduled mode
@@ -122,7 +128,7 @@ You may use one of several pre-defined schedules in place of a cron expression.
| @hourly | Run once an hour, beginning of hour | 0 * * * * | | @hourly | Run once an hour, beginning of hour | 0 * * * * |
### Intervals ### Intervals
You may also schedule a job to execute at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this: You may also schedule a backup task at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this:
@every <duration> @every <duration>
where "duration" is a string accepted by time. where "duration" is a string accepted by time.

View File

@@ -5,11 +5,13 @@ services:
# release version instead of using `latest`. # release version instead of using `latest`.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: backup --dbname database_name --mode scheduled --period "0 1 * * *" command: backup --dbname database_name
volumes: volumes:
- ./backup:/backup - ./backup:/backup
environment: environment:
- DB_PORT=3306 - DB_PORT=3306
- DB_HOST=mysql - DB_HOST=mysql
- DB_USERNAME=userName - DB_USERNAME=userName
- DB_PASSWORD=${DB_PASSWORD} - DB_PASSWORD=${DB_PASSWORD}
# See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
- BACKUP_CRON_EXPRESSION=@daily # @every 5m | @weekly | @monthly | 0 1 * * *

View File

@@ -6,7 +6,7 @@ services:
# for a list of available releases. # for a list of available releases.
image: jkaninda/mysql-bkup image: jkaninda/mysql-bkup
container_name: mysql-bkup container_name: mysql-bkup
command: backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *" command: backup --storage s3 -d my-database
environment: environment:
- DB_PORT=3306 - DB_PORT=3306
- DB_HOST=mysql - DB_HOST=mysql
@@ -21,6 +21,8 @@ services:
- AWS_SECRET_KEY=xxxxx - AWS_SECRET_KEY=xxxxx
## In case you are using an S3 alternative such as MinIO and your instance is not secured, change it to true ## In case you are using an S3 alternative such as MinIO and your instance is not secured, change it to true
- AWS_DISABLE_SSL="false" - AWS_DISABLE_SSL="false"
# See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
- BACKUP_CRON_EXPRESSION=@daily # @every 5m | @weekly | @monthly | 0 1 * * *
# mysql-bkup container must be connected to the same network with your database # mysql-bkup container must be connected to the same network with your database
networks: networks:
- web - web

go.mod
View File

@@ -13,7 +13,10 @@ require (
) )
require ( require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jlaffaye/ftp v0.2.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect
golang.org/x/sys v0.22.0 // indirect golang.org/x/sys v0.22.0 // indirect

go.sum
View File

@@ -7,10 +7,17 @@ github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9Hu
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=

View File

@@ -44,7 +44,10 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
//Test database connection //Test database connection
testDatabaseConnection(db) testDatabaseConnection(db)
//Test backup
utils.Info("Testing backup configurations...")
BackupTask(db, config)
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...") utils.Info("Creating backup job...")
// Create a new cron instance // Create a new cron instance
c := cron.New() c := cron.New()
@@ -64,22 +67,22 @@ func scheduledMode(db *dbConfig, config *BackupConfig) {
} }
func BackupTask(db *dbConfig, config *BackupConfig) { func BackupTask(db *dbConfig, config *BackupConfig) {
//Generate backup file name //Generate backup file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20240102_150405")) backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
if config.disableCompression { if config.disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20240102_150405")) backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
} }
config.backupFileName = backupFileName config.backupFileName = backupFileName
switch config.storage { switch config.storage {
case "s3":
s3Backup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption)
case "local": case "local":
localBackup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption) localBackup(db, config)
case "s3":
s3Backup(db, config)
case "ssh", "remote": case "ssh", "remote":
sshBackup(db, config.backupFileName, config.remotePath, config.disableCompression, config.prune, config.backupRetention, config.encryption) sshBackup(db, config)
case "ftp": case "ftp":
utils.Fatal("Not supported storage type: %s", config.storage) ftpBackup(db, config)
default: default:
localBackup(db, config.backupFileName, config.disableCompression, config.prune, config.backupRetention, config.encryption) localBackup(db, config)
} }
} }
@@ -154,54 +157,54 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
} }
} }
func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to local storage") utils.Info("Backup database to local storage")
BackupDatabase(db, backupFileName, disableCompression) BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := backupFileName finalFileName := config.backupFileName
if encrypt { if config.encryption {
encryptBackup(backupFileName) encryptBackup(config.backupFileName, config.passphrase)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension) finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
} }
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
moveToBackup(finalFileName, storagePath) moveToBackup(finalFileName, storagePath)
//Send notification //Send notification
utils.NotifySuccess(finalFileName) utils.NotifySuccess(finalFileName)
//Delete old backup //Delete old backup
if prune { if config.prune {
deleteOldBackup(backupRetention) deleteOldBackup(config.backupRetention)
} }
//Delete temp //Delete temp
deleteTemp() deleteTemp()
} }
func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { func s3Backup(db *dbConfig, config *BackupConfig) {
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH") s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
utils.Info("Backup database to s3 storage") utils.Info("Backup database to s3 storage")
//Backup database //Backup database
BackupDatabase(db, backupFileName, disableCompression) BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := backupFileName finalFileName := config.backupFileName
if encrypt { if config.encryption {
encryptBackup(backupFileName) encryptBackup(config.backupFileName, config.passphrase)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg") finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
} }
utils.Info("Uploading backup archive to remote storage S3 ... ") utils.Info("Uploading backup archive to remote storage S3 ... ")
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path) err := UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
if err != nil { if err != nil {
utils.Fatal("Error uploading file to S3: %s ", err) utils.Fatal("Error uploading file to S3: %s ", err)
} }
//Delete backup file from tmp folder //Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName)) err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
if err != nil { if err != nil {
fmt.Println("Error deleting file: ", err) fmt.Println("Error deleting file: ", err)
} }
// Delete old backup // Delete old backup
if prune { if config.prune {
err := utils.DeleteOldBackup(bucket, s3Path, backupRetention) err := DeleteOldBackup(bucket, s3Path, config.backupRetention)
if err != nil { if err != nil {
utils.Fatal("Error deleting old backup from S3: %s ", err) utils.Fatal("Error deleting old backup from S3: %s ", err)
} }
@@ -214,18 +217,18 @@ func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prun
} }
// sshBackup backup database to SSH remote server // sshBackup backup database to SSH remote server
func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Remote server") utils.Info("Backup database to Remote server")
//Backup database //Backup database
BackupDatabase(db, backupFileName, disableCompression) BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := backupFileName finalFileName := config.backupFileName
if encrypt { if config.encryption {
encryptBackup(backupFileName) encryptBackup(config.backupFileName, config.passphrase)
finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg") finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
} }
utils.Info("Uploading backup archive to remote storage ... ") utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName) utils.Info("Backup name is %s", finalFileName)
err := CopyToRemote(finalFileName, remotePath) err := CopyToRemote(finalFileName, config.remotePath)
if err != nil { if err != nil {
utils.Fatal("Error uploading file to the remote server: %s ", err) utils.Fatal("Error uploading file to the remote server: %s ", err)
@@ -237,7 +240,7 @@ func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompressi
fmt.Println("Error deleting file: ", err) fmt.Println("Error deleting file: ", err)
} }
if prune { if config.prune {
//TODO: Delete old backup from remote server //TODO: Delete old backup from remote server
utils.Info("Deleting old backup from a remote server is not implemented yet") utils.Info("Deleting old backup from a remote server is not implemented yet")
@@ -249,11 +252,45 @@ func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompressi
//Delete temp //Delete temp
deleteTemp() deleteTemp()
} }
func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server")
//Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config.backupFileName, config.passphrase)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName)
err := CopyToFTP(finalFileName, config.remotePath)
if err != nil {
utils.Fatal("Error uploading file to the remote FTP server: %s ", err)
}
//Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
if config.prune {
//TODO: Delete old backup from remote server
utils.Info("Deleting old backup from a remote server is not implemented yet")
}
utils.Done("Uploading backup archive to the remote FTP server ... done ")
//Send notification
utils.NotifySuccess(finalFileName)
//Delete temp
deleteTemp()
}
// encryptBackup encrypt backup // encryptBackup encrypt backup
func encryptBackup(backupFileName string) { func encryptBackup(backupFileName, passphrase string) {
gpgPassphrase := os.Getenv("GPG_PASSPHRASE") err := Encrypt(filepath.Join(tmpPath, backupFileName), passphrase)
err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
if err != nil { if err != nil {
utils.Fatal("Error during encrypting backup %s", err) utils.Fatal("Error during encrypting backup %s", err)
} }
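The timestamp change in `BackupTask` above is worth a note: Go layout strings are patterns written against the fixed reference time `Mon Jan 2 15:04:05 MST 2006`, not against an example date. A small sketch of the difference (the date below is an arbitrary example):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2024, 9, 30, 2, 2, 37, 0, time.UTC)

	// Correct: every token maps to a field of the reference time.
	fmt.Println(t.Format("20060102_150405")) // 20240930_020237

	// The old layout "20240102_150405" is not a valid reference layout,
	// so Format produced mangled file names instead of the backup date,
	// which is exactly what the fix above addresses.
}
```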

View File

@@ -7,9 +7,11 @@
package pkg package pkg
import ( import (
"fmt"
"github.com/jkaninda/mysql-bkup/utils" "github.com/jkaninda/mysql-bkup/utils"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"os" "os"
"strconv"
) )
type Config struct { type Config struct {
@@ -29,7 +31,10 @@ type targetDbConfig struct {
targetDbPassword string targetDbPassword string
targetDbName string targetDbName string
} }
type TgConfig struct {
Token string
ChatId string
}
type BackupConfig struct { type BackupConfig struct {
backupFileName string backupFileName string
backupRetention int backupRetention int
@@ -37,17 +42,34 @@ type BackupConfig struct {
prune bool prune bool
encryption bool encryption bool
remotePath string remotePath string
gpqPassphrase string passphrase string
storage string storage string
cronExpression string cronExpression string
} }
type RestoreConfig struct { type FTPConfig struct {
s3Path string host string
remotePath string user string
storage string password string
file string port string
bucket string remotePath string
gpqPassphrase string }
// SSHConfig holds the SSH connection details
type SSHConfig struct {
user string
password string
hostName string
port string
identifyFile string
}
type AWSConfig struct {
endpoint string
bucket string
accessKey string
secretKey string
region string
disableSsl bool
forcePathStyle bool
} }
func initDbConfig(cmd *cobra.Command) *dbConfig { func initDbConfig(cmd *cobra.Command) *dbConfig {
@@ -67,25 +89,84 @@ func initDbConfig(cmd *cobra.Command) *dbConfig {
} }
return &dConf return &dConf
} }
// loadSSHConfig loads the SSH configuration from environment variables
func loadSSHConfig() (*SSHConfig, error) {
utils.GetEnvVariable("SSH_HOST", "SSH_HOST_NAME")
sshVars := []string{"SSH_USER", "SSH_HOST", "SSH_PORT", "REMOTE_PATH"}
err := utils.CheckEnvVars(sshVars)
if err != nil {
return nil, fmt.Errorf("error missing environment variables: %w", err)
}
return &SSHConfig{
user: os.Getenv("SSH_USER"),
password: os.Getenv("SSH_PASSWORD"),
hostName: os.Getenv("SSH_HOST"),
port: os.Getenv("SSH_PORT"),
identifyFile: os.Getenv("SSH_IDENTIFY_FILE"),
}, nil
}
func initFtpConfig() *FTPConfig {
//Initialize data configs
fConfig := FTPConfig{}
fConfig.host = utils.GetEnvVariable("FTP_HOST", "FTP_HOST_NAME")
fConfig.user = os.Getenv("FTP_USER")
fConfig.password = os.Getenv("FTP_PASSWORD")
fConfig.port = os.Getenv("FTP_PORT")
fConfig.remotePath = os.Getenv("REMOTE_PATH")
err := utils.CheckEnvVars(ftpVars)
if err != nil {
utils.Error("Please make sure all required environment variables for FTP are set")
utils.Fatal("Error missing environment variables: %s", err)
}
return &fConfig
}
func initAWSConfig() *AWSConfig {
//Initialize AWS configs
aConfig := AWSConfig{}
aConfig.endpoint = utils.GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
aConfig.accessKey = utils.GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
aConfig.secretKey = utils.GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
aConfig.bucket = utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
aConfig.region = os.Getenv("AWS_REGION")
disableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil {
utils.Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
}
forcePathStyle, err := strconv.ParseBool(os.Getenv("AWS_FORCE_PATH_STYLE"))
if err != nil {
utils.Fatal("Unable to parse AWS_FORCE_PATH_STYLE env var: %s", err)
}
aConfig.disableSsl = disableSsl
aConfig.forcePathStyle = forcePathStyle
err = utils.CheckEnvVars(awsVars)
if err != nil {
utils.Error("Please make sure all required environment variables for AWS S3 are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &aConfig
}
func initBackupConfig(cmd *cobra.Command) *BackupConfig { func initBackupConfig(cmd *cobra.Command) *BackupConfig {
utils.SetEnv("STORAGE_PATH", storagePath) utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION") utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION") utils.GetEnv(cmd, "period", "BACKUP_CRON_EXPRESSION")
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env //Get flag value and set env
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH") remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE") storage = utils.GetEnv(cmd, "storage", "STORAGE")
backupRetention, _ := cmd.Flags().GetInt("keep-last") backupRetention, _ := cmd.Flags().GetInt("keep-last")
prune, _ := cmd.Flags().GetBool("prune") prune, _ := cmd.Flags().GetBool("prune")
disableCompression, _ = cmd.Flags().GetBool("disable-compression") disableCompression, _ = cmd.Flags().GetBool("disable-compression")
_, _ = cmd.Flags().GetString("mode") _, _ = cmd.Flags().GetString("mode")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE") passphrase := os.Getenv("GPG_PASSPHRASE")
_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH") _ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION") cronExpression := os.Getenv("BACKUP_CRON_EXPRESSION")
if gpqPassphrase != "" { if passphrase != "" {
encryption = true encryption = true
} }
//Initialize backup configs //Initialize backup configs
config := BackupConfig{} config := BackupConfig{}
config.backupRetention = backupRetention config.backupRetention = backupRetention
@@ -94,19 +175,29 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig {
config.storage = storage config.storage = storage
config.encryption = encryption config.encryption = encryption
config.remotePath = remotePath config.remotePath = remotePath
config.gpqPassphrase = gpqPassphrase config.passphrase = passphrase
config.cronExpression = cronExpression config.cronExpression = cronExpression
return &config return &config
} }
type RestoreConfig struct {
s3Path string
remotePath string
storage string
file string
bucket string
gpqPassphrase string
}
func initRestoreConfig(cmd *cobra.Command) *RestoreConfig { func initRestoreConfig(cmd *cobra.Command) *RestoreConfig {
utils.SetEnv("STORAGE_PATH", storagePath) utils.SetEnv("STORAGE_PATH", storagePath)
utils.GetEnv(cmd, "path", "REMOTE_PATH")
//Get flag value and set env //Get flag value and set env
s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH") s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH") remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH")
storage = utils.GetEnv(cmd, "storage", "STORAGE") storage = utils.GetEnv(cmd, "storage", "STORAGE")
file = utils.GetEnv(cmd, "file", "FILE_NAME") file = utils.GetEnv(cmd, "file", "FILE_NAME")
_, _ = cmd.Flags().GetString("mode")
bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
gpqPassphrase := os.Getenv("GPG_PASSPHRASE") gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
//Initialize restore configs //Initialize restore configs

pkg/ftp.go (new file)
View File

@@ -0,0 +1,81 @@
package pkg
import (
"fmt"
"github.com/jlaffaye/ftp"
"io"
"os"
"path/filepath"
"time"
)
// initFtpClient initializes and authenticates an FTP client
func initFtpClient() (*ftp.ServerConn, error) {
ftpConfig := initFtpConfig()
ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", ftpConfig.host, ftpConfig.port), ftp.DialWithTimeout(5*time.Second))
if err != nil {
return nil, fmt.Errorf("failed to connect to FTP: %w", err)
}
err = ftpClient.Login(ftpConfig.user, ftpConfig.password)
if err != nil {
return nil, fmt.Errorf("failed to log in to FTP: %w", err)
}
return ftpClient, nil
}
// CopyToFTP uploads a file to the remote FTP server
func CopyToFTP(fileName, remotePath string) (err error) {
ftpConfig := initFtpConfig()
ftpClient, err := initFtpClient()
if err != nil {
return err
}
defer ftpClient.Quit()
filePath := filepath.Join(tmpPath, fileName)
file, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", fileName, err)
}
defer file.Close()
remoteFilePath := filepath.Join(ftpConfig.remotePath, fileName)
err = ftpClient.Stor(remoteFilePath, file)
if err != nil {
return fmt.Errorf("failed to upload file %s: %w", fileName, err)
}
return nil
}
// CopyFromFTP downloads a file from the remote FTP server
func CopyFromFTP(fileName, remotePath string) (err error) {
ftpClient, err := initFtpClient()
if err != nil {
return err
}
defer ftpClient.Quit()
remoteFilePath := filepath.Join(remotePath, fileName)
r, err := ftpClient.Retr(remoteFilePath)
if err != nil {
return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
}
defer r.Close()
localFilePath := filepath.Join(tmpPath, fileName)
outFile, err := os.Create(localFilePath)
if err != nil {
return fmt.Errorf("failed to create local file %s: %w", fileName, err)
}
defer outFile.Close()
_, err = io.Copy(outFile, r)
if err != nil {
return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
}
return nil
}
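A hedged usage sketch for the new helpers, written as a hypothetical integration test in the same `pkg` package; the host, credentials and file name below are illustrative fixtures, and the archive is assumed to be staged under `tmpPath` before the upload:

```go
package pkg

import (
	"os"
	"testing"
)

// TestFTPRoundTrip assumes a reachable FTP server and a backup archive
// already present in tmpPath; it is a sketch, not part of this change set.
func TestFTPRoundTrip(t *testing.T) {
	for k, v := range map[string]string{
		"FTP_HOST":     "ftp.example.com", // illustrative values only
		"FTP_PORT":     "21",
		"FTP_USER":     "user",
		"FTP_PASSWORD": "password",
		"REMOTE_PATH":  "/backups",
	} {
		os.Setenv(k, v)
	}
	if err := CopyToFTP("db_20240930_020237.sql.gz", "/backups"); err != nil {
		t.Fatalf("upload failed: %v", err)
	}
	if err := CopyFromFTP("db_20240930_020237.sql.gz", "/backups"); err != nil {
		t.Fatalf("download failed: %v", err)
	}
}
```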

View File

@@ -127,5 +127,5 @@ func testDatabaseConnection(db *dbConfig) {
} }
func intro() { func intro() {
utils.Info("Starting MySQL Backup...") utils.Info("Starting MySQL Backup...")
utils.Info("Copyright © 2024 Jonas Kaninda ") utils.Info("Copyright (c) 2024 Jonas Kaninda ")
} }

View File

@@ -30,7 +30,7 @@ func StartRestore(cmd *cobra.Command) {
case "ssh": case "ssh":
restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath) restoreFromRemote(dbConf, restoreConf.file, restoreConf.remotePath)
case "ftp": case "ftp":
utils.Fatal("Restore from FTP is not yet supported") restoreFromFTP(dbConf, restoreConf.file, restoreConf.remotePath)
default: default:
utils.Info("Restore database from local") utils.Info("Restore database from local")
copyToTmp(storagePath, restoreConf.file) copyToTmp(storagePath, restoreConf.file)
@@ -40,7 +40,7 @@ func StartRestore(cmd *cobra.Command) {
func restoreFromS3(db *dbConfig, file, bucket, s3Path string) { func restoreFromS3(db *dbConfig, file, bucket, s3Path string) {
utils.Info("Restore database from s3") utils.Info("Restore database from s3")
err := utils.DownloadFile(tmpPath, file, bucket, s3Path) err := DownloadFile(tmpPath, file, bucket, s3Path)
if err != nil { if err != nil {
utils.Fatal("Error download file from s3 %s %v", file, err) utils.Fatal("Error download file from s3 %s %v", file, err)
} }
@@ -54,6 +54,14 @@ func restoreFromRemote(db *dbConfig, file, remotePath string) {
} }
RestoreDatabase(db, file) RestoreDatabase(db, file)
} }
func restoreFromFTP(db *dbConfig, file, remotePath string) {
utils.Info("Restore database from FTP server")
err := CopyFromFTP(file, remotePath)
if err != nil {
utils.Fatal("Error download file from FTP server: %s %v", filepath.Join(remotePath, file), err)
}
RestoreDatabase(db, file)
}
// RestoreDatabase restore database // RestoreDatabase restore database
func RestoreDatabase(db *dbConfig, file string) { func RestoreDatabase(db *dbConfig, file string) {
@@ -97,7 +105,7 @@ func RestoreDatabase(db *dbConfig, file string) {
// Restore from compressed file / .sql.gz // Restore from compressed file / .sql.gz
if extension == ".gz" { if extension == ".gz" {
str := "zcat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName str := "zcat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
_, err := exec.Command("bash", "-c", str).Output() _, err := exec.Command("sh", "-c", str).Output()
if err != nil { if err != nil {
utils.Fatal("Error, in restoring the database %v", err) utils.Fatal("Error, in restoring the database %v", err)
} }
@@ -109,7 +117,7 @@ func RestoreDatabase(db *dbConfig, file string) {
} else if extension == ".sql" { } else if extension == ".sql" {
//Restore from sql file //Restore from sql file
str := "cat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName str := "cat " + filepath.Join(tmpPath, file) + " | mysql -h " + db.dbHost + " -P " + db.dbPort + " -u " + db.dbUserName + " " + db.dbName
_, err := exec.Command("bash", "-c", str).Output() _, err := exec.Command("sh", "-c", str).Output()
if err != nil { if err != nil {
utils.Fatal("Error in restoring the database %v", err) utils.Fatal("Error in restoring the database %v", err)
} }
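Since the restore path still shells out to `zcat ... | mysql` (now via `sh -c`), a shell-free alternative is possible. This is only a sketch under the same assumptions as the shipped code: `tmpPath` holds the dump, and the MySQL password reaches the client through the environment (e.g. `MYSQL_PWD`), since no password flag is passed:

```go
package pkg

import (
	"compress/gzip"
	"os"
	"os/exec"
	"path/filepath"
)

// restoreGzip streams a .sql.gz dump straight into the mysql client's
// stdin, avoiding the intermediate shell string entirely.
func restoreGzip(db *dbConfig, file string) error {
	f, err := os.Open(filepath.Join(tmpPath, file))
	if err != nil {
		return err
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		return err
	}
	defer gz.Close()

	cmd := exec.Command("mysql",
		"-h", db.dbHost, "-P", db.dbPort,
		"-u", db.dbUserName, db.dbName)
	cmd.Stdin = gz
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
```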

View File

@@ -1,62 +1,35 @@
// Package utils / // Package pkg
/***** /*****
@author Jonas Kaninda @author Jonas Kaninda
@license MIT License <https://opensource.org/licenses/MIT> @license MIT License <https://opensource.org/licenses/MIT>
@Copyright © 2024 Jonas Kaninda @Copyright © 2024 Jonas Kaninda
**/ **/
package utils package pkg
import ( import (
"bytes" "bytes"
"fmt"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/aws/aws-sdk-go/service/s3/s3manager"
"log" "github.com/jkaninda/mysql-bkup/utils"
"net/http" "net/http"
"os" "os"
"path/filepath" "path/filepath"
"strconv"
"time" "time"
) )
// CreateSession creates a new AWS session // CreateSession creates a new AWS session
func CreateSession() (*session.Session, error) { func CreateSession() (*session.Session, error) {
// AwsVars Required environment variables for AWS S3 storage awsConfig := initAWSConfig()
var awsVars = []string{ // Configure to use MinIO Server
"AWS_S3_ENDPOINT",
"AWS_S3_BUCKET_NAME",
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY",
"AWS_REGION",
"AWS_REGION",
"AWS_REGION",
}
endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT")
accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY")
secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY")
_ = GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
region := os.Getenv("AWS_REGION")
awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL"))
if err != nil {
Fatal("Unable to parse AWS_DISABLE_SSL env var: %s", err)
}
err = CheckEnvVars(awsVars)
if err != nil {
Fatal("Error checking environment variables\n: %s", err)
}
// S3 Config
s3Config := &aws.Config{ s3Config := &aws.Config{
Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""), Credentials: credentials.NewStaticCredentials(awsConfig.accessKey, awsConfig.secretKey, ""),
Endpoint: aws.String(endPoint), Endpoint: aws.String(awsConfig.endpoint),
Region: aws.String(region), Region: aws.String(awsConfig.region),
DisableSSL: aws.Bool(awsDisableSsl), DisableSSL: aws.Bool(awsConfig.disableSsl),
S3ForcePathStyle: aws.Bool(true), S3ForcePathStyle: aws.Bool(awsConfig.forcePathStyle),
} }
return session.NewSession(s3Config) return session.NewSession(s3Config)
@@ -108,10 +81,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
if err != nil { if err != nil {
return err return err
} }
Info("Download backup from S3 storage...") utils.Info("Download data from S3 storage...")
file, err := os.Create(filepath.Join(destinationPath, key)) file, err := os.Create(filepath.Join(destinationPath, key))
if err != nil { if err != nil {
fmt.Println("Failed to create file", err) utils.Error("Failed to create file", err)
return err return err
} }
defer file.Close() defer file.Close()
@@ -125,10 +98,10 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
Key: aws.String(objectKey), Key: aws.String(objectKey),
}) })
if err != nil { if err != nil {
fmt.Println("Failed to download file", err) utils.Error("Failed to download file %s", key)
return err return err
} }
Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes) utils.Info("Backup downloaded: %s bytes size %s ", file.Name(), numBytes)
return nil return nil
} }
@@ -158,18 +131,18 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
Key: object.Key, Key: object.Key,
}) })
if err != nil { if err != nil {
log.Printf("Failed to delete object %s: %v", *object.Key, err) utils.Info("Failed to delete object %s: %v", *object.Key, err)
} else { } else {
fmt.Printf("Deleted object %s\n", *object.Key) utils.Info("Deleted object %s\n", *object.Key)
} }
} }
} }
return !lastPage return !lastPage
}) })
if err != nil { if err != nil {
log.Fatalf("Failed to list objects: %v", err) utils.Error("Failed to list objects: %v", err)
} }
fmt.Println("Finished deleting old files.") utils.Info("Finished deleting old files.")
return nil return nil
} }

View File

@@ -18,83 +18,73 @@ import (
"path/filepath" "path/filepath"
) )
func CopyToRemote(fileName, remotePath string) error { // createSSHClientConfig sets up the SSH client configuration based on the provided SSHConfig
sshUser := os.Getenv("SSH_USER") func createSSHClientConfig(sshConfig *SSHConfig) (ssh.ClientConfig, error) {
sshPassword := os.Getenv("SSH_PASSWORD") if sshConfig.identifyFile != "" && utils.FileExists(sshConfig.identifyFile) {
sshHostName := os.Getenv("SSH_HOST_NAME") return auth.PrivateKey(sshConfig.user, sshConfig.identifyFile, ssh.InsecureIgnoreHostKey())
sshPort := os.Getenv("SSH_PORT")
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
err := utils.CheckEnvVars(sshHVars)
if err != nil {
utils.Error("Error checking environment variables: %s", err)
os.Exit(1)
}
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) {
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey())
} else { } else {
if sshPassword == "" { if sshConfig.password == "" {
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty") return ssh.ClientConfig{}, errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty")
} }
utils.Warn("Accessing the remote server using password, password is not recommended") utils.Warn("Accessing the remote server using password, which is not recommended.")
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) return auth.PasswordKey(sshConfig.user, sshConfig.password, ssh.InsecureIgnoreHostKey())
} }
}
// CopyToRemote copies a file to a remote server via SCP
func CopyToRemote(fileName, remotePath string) error {
// Load environment variables
sshConfig, err := loadSSHConfig()
if err != nil {
return fmt.Errorf("failed to load SSH configuration: %w", err)
}
// Initialize SSH client config
clientConfig, err := createSSHClientConfig(sshConfig)
if err != nil {
return fmt.Errorf("failed to create SSH client config: %w", err)
}
// Create a new SCP client // Create a new SCP client
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig) client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)
// Connect to the remote server // Connect to the remote server
err = client.Connect() err = client.Connect()
if err != nil { if err != nil {
return errors.New("Couldn't establish a connection to the remote server") return errors.New("Couldn't establish a connection to the remote server\n")
} }
// Open a file // Open the local file
file, _ := os.Open(filepath.Join(tmpPath, fileName)) filePath := filepath.Join(tmpPath, fileName)
file, err := os.Open(filePath)
// Close client connection after the file has been copied if err != nil {
return fmt.Errorf("failed to open file %s: %w", filePath, err)
}
defer client.Close() defer client.Close()
// Close the file after it has been copied // Copy file to the remote server
defer file.Close()
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655") err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
if err != nil { if err != nil {
fmt.Println("Error while copying file ") return fmt.Errorf("failed to copy file to remote server: %w", err)
return err
} }
return nil return nil
} }
func CopyFromRemote(fileName, remotePath string) error { func CopyFromRemote(fileName, remotePath string) error {
sshUser := os.Getenv("SSH_USER") // Load environment variables
sshPassword := os.Getenv("SSH_PASSWORD") sshConfig, err := loadSSHConfig()
sshHostName := os.Getenv("SSH_HOST_NAME")
sshPort := os.Getenv("SSH_PORT")
sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE")
err := utils.CheckEnvVars(sshHVars)
if err != nil { if err != nil {
utils.Error("Error checking environment variables\n: %s", err) return fmt.Errorf("failed to load SSH configuration: %w", err)
os.Exit(1)
} }
clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) // Initialize SSH client config
if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) { clientConfig, err := createSSHClientConfig(sshConfig)
clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey()) if err != nil {
return fmt.Errorf("failed to create SSH client config: %w", err)
} else {
if sshPassword == "" {
return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
}
utils.Warn("Accessing the remote server using password, password is not recommended")
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
} }
// Create a new SCP client // Create a new SCP client
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig) client := scp.NewClient(fmt.Sprintf("%s:%s", sshConfig.hostName, sshConfig.port), &clientConfig)
// Connect to the remote server // Connect to the remote server
err = client.Connect() err = client.Connect()
@@ -113,7 +103,7 @@ func CopyFromRemote(fileName, remotePath string) error {
err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName)) err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
if err != nil { if err != nil {
fmt.Println("Error while copying file ", err) utils.Error("Error while copying file %s ", err)
return err return err
} }
return nil return nil
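Both SCP paths still pin `ssh.InsecureIgnoreHostKey()`, which skips host key verification entirely. A hardened alternative sketch, assuming `golang.org/x/crypto/ssh/knownhosts` and a known_hosts file mounted into the container (not something this change set implements):

```go
package pkg

import (
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

// hostKeyCallback builds a callback that rejects unknown or changed host
// keys based on an OpenSSH known_hosts file.
func hostKeyCallback(knownHostsFile string) (ssh.HostKeyCallback, error) {
	return knownhosts.New(knownHostsFile)
}
```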

View File

@@ -8,7 +8,6 @@ package pkg
const cronLogFile = "/var/log/mysql-bkup.log" const cronLogFile = "/var/log/mysql-bkup.log"
const tmpPath = "/tmp/backup" const tmpPath = "/tmp/backup"
const backupCronFile = "/usr/local/bin/backup_cron.sh"
const algorithm = "aes256" const algorithm = "aes256"
const gpgHome = "/config/gnupg" const gpgHome = "/config/gnupg"
const gpgExtension = "gpg" const gpgExtension = "gpg"
@@ -42,7 +41,22 @@ var targetDbConf *targetDbConfig
// sshHVars Required environment variables for SSH remote server storage // sshHVars Required environment variables for SSH remote server storage
var sshHVars = []string{ var sshHVars = []string{
"SSH_USER", "SSH_USER",
"SSH_REMOTE_PATH", "REMOTE_PATH",
"SSH_HOST_NAME", "SSH_HOST_NAME",
"SSH_PORT", "SSH_PORT",
} }
var ftpVars = []string{
"FTP_HOST_NAME",
"FTP_USER",
"FTP_PASSWORD",
"FTP_PORT",
}
// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{
"AWS_S3_ENDPOINT",
"AWS_S3_BUCKET_NAME",
"AWS_ACCESS_KEY",
"AWS_SECRET_KEY",
"AWS_REGION",
}