diff --git a/cmd/backup.go b/cmd/backup.go index a17b7e2..f1545a4 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -45,9 +45,10 @@ var BackupCmd = &cobra.Command{ func init() { //Backup - BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp") - BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`") - BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression") + BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp, azure") + BackupCmd.PersistentFlags().StringP("path", "P", "", "Storage path without file name. e.g: /custom_path or ssh remote path `/home/foo/backup`") + BackupCmd.PersistentFlags().StringP("cron-expression", "e", "", "Backup cron expression (e.g., `0 0 * * *` or `@daily`)") + BackupCmd.PersistentFlags().StringP("config", "c", "", "Configuration file for multi database backup. (e.g: `/backup/config.yaml`)") BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression") } diff --git a/docs/_config.yml b/docs/_config.yml index 917554c..3bccb4f 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -20,7 +20,7 @@ description: >- # this means to ignore newlines until "baseurl:" It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage. baseurl: "" # the subpath of your site, e.g. /blog -url: "jkaninda.github.io/mysql-bkup/" # the base hostname & protocol for your site, e.g. http://example.com +url: "" # the base hostname & protocol for your site, e.g. 
http://example.com twitter_username: jonaskaninda github_username: jkaninda diff --git a/docs/how-tos/azure-blob.md b/docs/how-tos/azure-blob.md index e3e5bcc..3ded41f 100644 --- a/docs/how-tos/azure-blob.md +++ b/docs/how-tos/azure-blob.md @@ -4,22 +4,43 @@ layout: default parent: How Tos nav_order: 5 --- -# Azure Blob storage -{: .note } -As described on local backup section, to change the storage of you backup and use Azure Blob as storage. You need to add `--storage azure` (-s azure). -You can also specify a folder where you want to save you data by adding `--path my-custom-path` flag. +# Backup to Azure Blob Storage +To store your backups on Azure Blob Storage, you can configure the backup process to use the `--storage azure` option. -## Backup to Azure Blob storage +This section explains how to set up and configure Azure Blob-based backups. -```yml +--- + +## Configuration Steps + +1. **Specify the Storage Type** + Add the `--storage azure` flag to your backup command. + +2. **Set the Blob Path** + Optionally, specify a custom folder within your Azure Blob container where backups will be stored using the `--path` flag. + Example: `--path my-custom-path`. + +3. **Required Environment Variables** + The following environment variables are mandatory for Azure Blob-based backups: + + - `AZURE_STORAGE_CONTAINER_NAME`: The name of the Azure Blob container where backups will be stored. + - `AZURE_STORAGE_ACCOUNT_NAME`: The name of your Azure Storage account. + - `AZURE_STORAGE_ACCOUNT_KEY`: The access key for your Azure Storage account. + +--- + +## Example Configuration + +Below is an example `docker-compose.yml` configuration for backing up to Azure Blob Storage: + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. 
+ # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup command: backup --storage azure -d database --path my-custom-path @@ -29,16 +50,23 @@ services: - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - ## Azure Blob configurations + ## Azure Blob Configuration - AZURE_STORAGE_CONTAINER_NAME=backup-container - AZURE_STORAGE_ACCOUNT_NAME=account-name - AZURE_STORAGE_ACCOUNT_KEY=Ppby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw== - # mysql-bkup container must be connected to the same network with your database + + # Ensure the mysql-bkup container is connected to the same network as your database networks: - web + networks: web: ``` +--- +## Key Notes +- **Custom Path**: Use the `--path` flag to specify a folder within your Azure Blob container for organizing backups. +- **Security**: Ensure your `AZURE_STORAGE_ACCOUNT_KEY` is kept secure and not exposed in public repositories. +- **Compatibility**: This configuration works with Azure Blob Storage and other compatible storage solutions. diff --git a/docs/how-tos/backup-to-ftp.md b/docs/how-tos/backup-to-ftp.md index a1ff18a..a70a95c 100644 --- a/docs/how-tos/backup-to-ftp.md +++ b/docs/how-tos/backup-to-ftp.md @@ -4,41 +4,72 @@ layout: default parent: How Tos nav_order: 4 --- -# Backup to FTP remote server +# Backup to FTP Remote Server -As described for SSH backup section, to change the storage of your backup and use FTP Remote server as storage. You need to add `--storage ftp`. -You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable. +To store your backups on an FTP remote server, you can configure the backup process to use the `--storage ftp` option. 
-{: .note } -These environment variables are required for SSH backup `FTP_HOST`, `FTP_USER`, `REMOTE_PATH`, `FTP_PORT` or `FTP_PASSWORD`. +This section explains how to set up and configure FTP-based backups. -```yml +--- + +## Configuration Steps + +1. **Specify the Storage Type** + Add the `--storage ftp` flag to your backup command. + +2. **Set the Remote Path** + Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable. + Example: `--path /home/jkaninda/backups`. + +3. **Required Environment Variables** + The following environment variables are mandatory for FTP-based backups: + + - `FTP_HOST`: The hostname or IP address of the FTP server. + - `FTP_PORT`: The FTP port (default is `21`). + - `FTP_USER`: The username for FTP authentication. + - `FTP_PASSWORD`: The password for FTP authentication. + - `REMOTE_PATH`: The directory on the FTP server where backups will be stored. + +--- + +## Example Configuration + +Below is an example `docker-compose.yml` configuration for backing up to an FTP remote server: + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. 
image: jkaninda/mysql-bkup container_name: mysql-bkup command: backup --storage ftp -d database environment: - DB_PORT=3306 - - DB_HOST=postgres + - DB_HOST=mysql - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - ## FTP config + ## FTP Configuration - FTP_HOST="hostname" - FTP_PORT=21 - FTP_USER=user - FTP_PASSWORD=password - REMOTE_PATH=/home/jkaninda/backups - # pg-bkup container must be connected to the same network with your database + # Ensure the mysql-bkup container is connected to the same network as your database networks: - web + networks: web: -``` \ No newline at end of file +``` + +--- + +## Key Notes + +- **Security**: FTP transmits data, including passwords, in plaintext. For better security, consider using SFTP (SSH File Transfer Protocol) or FTPS (FTP Secure) if supported by your server. +- **Remote Path**: Ensure the `REMOTE_PATH` directory exists on the FTP server and is writable by the specified `FTP_USER`. \ No newline at end of file diff --git a/docs/how-tos/backup-to-s3.md b/docs/how-tos/backup-to-s3.md index 5bb266b..93b4c9c 100644 --- a/docs/how-tos/backup-to-s3.md +++ b/docs/how-tos/backup-to-s3.md @@ -4,85 +4,123 @@ layout: default parent: How Tos nav_order: 2 --- -# Backup to AWS S3 +# Backup to AWS S3 -{: .note } -As described on local backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage s3` (-s s3). -You can also specify a specify folder where you want to save you data by adding `--path /my-custom-path` flag. +To store your backups on AWS S3, you can configure the backup process to use the `--storage s3` option. This section explains how to set up and configure S3-based backups. +--- -## Backup to S3 +## Configuration Steps -```yml +1. **Specify the Storage Type** + Add the `--storage s3` flag to your backup command. + +2. **Set the S3 Path** + Optionally, specify a custom folder within your S3 bucket where backups will be stored using the `--path` flag. 
+ Example: `--path /my-custom-path`. + +3. **Required Environment Variables** + The following environment variables are mandatory for S3-based backups: + + - `AWS_S3_ENDPOINT`: The S3 endpoint URL (e.g., `https://s3.amazonaws.com`). + - `AWS_S3_BUCKET_NAME`: The name of the S3 bucket where backups will be stored. + - `AWS_REGION`: The AWS region where the bucket is located (e.g., `us-west-2`). + - `AWS_ACCESS_KEY`: Your AWS access key. + - `AWS_SECRET_KEY`: Your AWS secret key. + - `AWS_DISABLE_SSL`: Set to `"true"` if using an S3 alternative like Minio without SSL (default is `"false"`). + - `AWS_FORCE_PATH_STYLE`: Set to `"true"` if using an S3 alternative like Minio (default is `"false"`). + +--- + +## Example Configuration + +Below is an example `docker-compose.yml` configuration for backing up to AWS S3: + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. - image: jkaninda/mysql-bkup - container_name: mysql-bkup + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. 
+ image: jkaninda/mysql-bkup + container_name: mysql-bkup command: backup --storage s3 -d database --path /my-custom-path environment: - - DB_PORT=3306 - - DB_HOST=mysql + - DB_PORT=3306 + - DB_HOST=mysql - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - ## AWS configurations + ## AWS Configuration - AWS_S3_ENDPOINT=https://s3.amazonaws.com - AWS_S3_BUCKET_NAME=backup - - AWS_REGION="us-west-2" + - AWS_REGION=us-west-2 - AWS_ACCESS_KEY=xxxx - AWS_SECRET_KEY=xxxxx - ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true + ## Optional: Disable SSL for S3 alternatives like Minio - AWS_DISABLE_SSL="false" - - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio - - # mysql-bkup container must be connected to the same network with your database + ## Optional: Enable path-style access for S3 alternatives like Minio + - AWS_FORCE_PATH_STYLE=false + + # Ensure the mysql-bkup container is connected to the same network as your database networks: - web + networks: web: ``` -### Recurring backups to S3 +--- -As explained above, you need just to add AWS environment variables and specify the storage type `--storage s3`. -In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below. +## Recurring Backups to S3 -```yml +To schedule recurring backups to S3, use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups. + +### Example: Recurring Backup Configuration + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. 
Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup - command: backup --storage s3 -d my-database --cron-expression "0 1 * * *" + command: backup --storage s3 -d database --cron-expression "0 1 * * *" environment: - DB_PORT=3306 - DB_HOST=mysql - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - ## AWS configurations + ## AWS Configuration - AWS_S3_ENDPOINT=https://s3.amazonaws.com - AWS_S3_BUCKET_NAME=backup - - AWS_REGION="us-west-2" + - AWS_REGION=us-west-2 - AWS_ACCESS_KEY=xxxx - AWS_SECRET_KEY=xxxxx - # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional - #Delete old backup created more than specified days ago + ## Optional: Define a cron schedule for recurring backups + #- BACKUP_CRON_EXPRESSION=0 1 * * * + ## Optional: Delete old backups after a specified number of days #- BACKUP_RETENTION_DAYS=7 - ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true + ## Optional: Disable SSL for S3 alternatives like Minio - AWS_DISABLE_SSL="false" - - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio - # mysql-bkup container must be connected to the same network with your database + ## Optional: Enable path-style access for S3 alternatives like Minio + - AWS_FORCE_PATH_STYLE=false + + # Ensure the pg-bkup container is connected to the same network as your database networks: - web + networks: web: ``` +--- + +## Key Notes + +- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM. +- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days. 
+- **S3 Alternatives**: If using an S3 alternative like Minio, set `AWS_DISABLE_SSL="true"` and `AWS_FORCE_PATH_STYLE="true"` as needed. + diff --git a/docs/how-tos/backup-to-ssh.md b/docs/how-tos/backup-to-ssh.md index 5311e08..b7a469f 100644 --- a/docs/how-tos/backup-to-ssh.md +++ b/docs/how-tos/backup-to-ssh.md @@ -1,91 +1,129 @@ --- -title: Backup to SSH +title: Backup to SSH or SFTP layout: default parent: How Tos nav_order: 3 --- -# Backup to SSH remote server +# Backup to SFTP or SSH Remote Server +To store your backups on an `SFTP` or `SSH` remote server instead of the default storage, you can configure the backup process to use the `--storage ssh` or `--storage remote` option. +This section explains how to set up and configure SSH-based backups. -As described for s3 backup section, to change the storage of your backup and use SSH Remote server as storage. You need to add `--storage ssh` or `--storage remote`. -You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `REMOTE_PATH` environment variable. +--- -{: .note } -These environment variables are required for SSH backup `SSH_HOST`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server. -Accessing the remote server using password is not recommended, use private key instead. +## Configuration Steps -```yml +1. **Specify the Storage Type** + Add the `--storage ssh` or `--storage remote` flag to your backup command. + +2. **Set the Remote Path** + Define the full remote path where backups will be stored using the `--path` flag or the `REMOTE_PATH` environment variable. + Example: `--path /home/jkaninda/backups`. + +3. **Required Environment Variables** + The following environment variables are mandatory for SSH-based backups: + + - `SSH_HOST`: The hostname or IP address of the remote server. + - `SSH_USER`: The username for SSH authentication. 
+ - `REMOTE_PATH`: The directory on the remote server where backups will be stored. + - `SSH_IDENTIFY_FILE`: The path to the private key file for SSH authentication. + - `SSH_PORT`: The SSH port (default is `22`). + - `SSH_PASSWORD`: (Optional) Use this only if you are not using a private key for authentication. + + {: .note } + **Security Recommendation**: Using a private key (`SSH_IDENTIFY_FILE`) is strongly recommended over password-based authentication (`SSH_PASSWORD`) for better security. + +--- + +## Example Configuration + +Below is an example `docker-compose.yml` configuration for backing up to an SSH remote server: + +```yaml services: - mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. - image: jkaninda/mysql-bkup - container_name: mysql-bkup - command: backup --storage remote -d database - volumes: - - ./id_ed25519:/tmp/id_ed25519" - environment: - - DB_PORT=3306 - - DB_HOST=mysql - #- DB_NAME=database - - DB_USERNAME=username - - DB_PASSWORD=password - ## SSH config - - SSH_HOST="hostname" - - SSH_PORT=22 - - SSH_USER=user - - REMOTE_PATH=/home/jkaninda/backups - - SSH_IDENTIFY_FILE=/tmp/id_ed25519 - ## We advise you to use a private jey instead of password - #- SSH_PASSWORD=password + mysql-bkup: + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. 
+ image: jkaninda/mysql-bkup + container_name: mysql-bkup + command: backup --storage remote -d database + volumes: + - ./id_ed25519:/tmp/id_ed25519 + environment: + - DB_PORT=3306 + - DB_HOST=mysql + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## SSH Configuration + - SSH_HOST="hostname" + - SSH_PORT=22 + - SSH_USER=user + - REMOTE_PATH=/home/jkaninda/backups + - SSH_IDENTIFY_FILE=/tmp/id_ed25519 + ## Optional: Use password instead of private key (not recommended) + #- SSH_PASSWORD=password + + # Ensure the mysql-bkup container is connected to the same network as your database + networks: + - web - # mysql-bkup container must be connected to the same network with your database - networks: - - web networks: - web: + web: ``` +--- -### Recurring backups to SSH remote server +## Recurring Backups to SSH Remote Server -As explained above, you need just to add required environment variables and specify the storage type `--storage ssh`. -You can use `--cron-expression "* * * * *"` or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below. +To schedule recurring backups, you can use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable. +This allows you to define a cron schedule for automated backups. -```yml +### Example: Recurring Backup Configuration + +```yaml services: - mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + mysql-bkup: + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. 
image: jkaninda/mysql-bkup container_name: mysql-bkup - command: backup -d database --storage ssh --cron-expression "0 1 * * *" + command: backup -d database --storage ssh --cron-expression "@daily" volumes: - - ./id_ed25519:/tmp/id_ed25519" + - ./id_ed25519:/tmp/id_ed25519 environment: - DB_PORT=3306 - - DB_HOST=mysql + - DB_HOST=mysql - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - ## SSH config + ## SSH Configuration - SSH_HOST="hostname" - SSH_PORT=22 - SSH_USER=user - REMOTE_PATH=/home/jkaninda/backups - SSH_IDENTIFY_FILE=/tmp/id_ed25519 - # - BACKUP_CRON_EXPRESSION=0 1 * * * # Optional - #Delete old backup created more than specified days ago + ## Optional: Delete old backups after a specified number of days #- BACKUP_RETENTION_DAYS=7 - ## We advise you to use a private jey instead of password + ## Optional: Use password instead of private key (not recommended) #- SSH_PASSWORD=password - # mysql-bkup container must be connected to the same network with your database + + # Ensure the mysql-bkup container is connected to the same network as your database networks: - web + networks: web: ``` + +--- + +## Key Notes + +- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example, `0 1 * * *` runs the backup daily at 1:00 AM. +- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days. +- **Security**: Always prefer private key authentication (`SSH_IDENTIFY_FILE`) over password-based authentication (`SSH_PASSWORD`) for enhanced security. 
+ +--- \ No newline at end of file diff --git a/docs/how-tos/backup.md b/docs/how-tos/backup.md index 75e2bdf..e513ac0 100644 --- a/docs/how-tos/backup.md +++ b/docs/how-tos/backup.md @@ -5,28 +5,37 @@ parent: How Tos nav_order: 1 --- -# Backup database +# Backup Database -To backup the database, you need to add `backup` command. +To back up your database, use the `backup` command. + +This section explains how to configure and run backups, including recurring backups, using Docker or Kubernetes. + +--- + +## Default Configuration + +- **Storage**: By default, backups are stored locally in the `/backup` directory. +- **Compression**: Backups are compressed using `gzip` by default. Use the `--disable-compression` flag to disable compression. +- **Security**: It is recommended to create a dedicated user with read-only access for backup tasks. {: .note } -The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression. +The backup process supports recurring backups on Docker or Docker Swarm. On Kubernetes, it can be deployed as a CronJob. -{: .warning } -Creating a user for backup tasks who has read-only access is recommended! +--- -The backup process can be run in scheduled mode for the recurring backups. -It handles __recurring__ backups of mysql database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage. +## Example: Basic Backup Configuration -```yml +Below is an example `docker-compose.yml` configuration for backing up a database: + +```yaml services: - mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. 
- image: jkaninda/mysql-bkup - container_name: mysql-bkup + mysql-bkup: + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. + image: jkaninda/mysql-bkup + container_name: mysql-bkup command: backup -d database volumes: - ./backup:/backup @@ -36,36 +45,47 @@ services: - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - # mysql-bkup container must be connected to the same network with your database + + # Ensure the mysql-bkup container is connected to the same network as your database networks: - web + networks: web: ``` -### Backup using Docker CLI +--- -```shell - docker run --rm --network your_network_name \ - -v $PWD/backup:/backup/ \ - -e "DB_HOST=dbhost" \ - -e "DB_USERNAME=username" \ - -e "DB_PASSWORD=password" \ - jkaninda/mysql-bkup backup -d database_name +## Backup Using Docker CLI + +You can also run backups directly using the Docker CLI: + +```bash +docker run --rm --network your_network_name \ + -v $PWD/backup:/backup/ \ + -e "DB_HOST=dbhost" \ + -e "DB_USERNAME=username" \ + -e "DB_PASSWORD=password" \ + jkaninda/mysql-bkup backup -d database_name ``` -In case you need to use recurring backups, you can use `--cron-expression "0 1 * * *"` flag or `BACKUP_CRON_EXPRESSION=0 1 * * *` as described below. +--- -```yml +## Recurring Backups + +To schedule recurring backups, use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable. This allows you to define a cron schedule for automated backups. + +### Example: Recurring Backup Configuration + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. 
Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup - command: backup -d database --cron-expression "0 1 * * *" + command: backup -d database --cron-expression @midnight volumes: - ./backup:/backup environment: @@ -74,13 +94,24 @@ services: - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - - BACKUP_CRON_EXPRESSION=0 1 * * * - #Delete old backup created more than specified days ago + ## Optional: Define a cron schedule for recurring backups + - BACKUP_CRON_EXPRESSION=@midnight + ## Optional: Delete old backups after a specified number of days #- BACKUP_RETENTION_DAYS=7 - # mysql-bkup container must be connected to the same network with your database + + # Ensure the mysql-bkup container is connected to the same network as your database networks: - web + networks: web: ``` +--- + +## Key Notes + +- **Cron Expression**: Use the `--cron-expression` flag or `BACKUP_CRON_EXPRESSION` environment variable to define the backup schedule. For example: + - `@midnight`: Runs the backup daily at midnight. + - `0 1 * * *`: Runs the backup daily at 1:00 AM. +- **Backup Retention**: Optionally, use the `BACKUP_RETENTION_DAYS` environment variable to automatically delete backups older than a specified number of days. diff --git a/docs/how-tos/deploy-on-kubernetes.md b/docs/how-tos/deploy-on-kubernetes.md index 97d9bee..290f971 100644 --- a/docs/how-tos/deploy-on-kubernetes.md +++ b/docs/how-tos/deploy-on-kubernetes.md @@ -5,12 +5,17 @@ parent: How Tos nav_order: 9 --- -## Deploy on Kubernetes +# Deploy on Kubernetes -To deploy MySQL Backup on Kubernetes, you can use Job to backup or Restore your database. -For recurring backup you can use CronJob, you don't need to run it in scheduled mode. as described bellow. +To deploy MySQL Backup on Kubernetes, you can use a `Job` for one-time backups or restores, and a `CronJob` for recurring backups. 
-## Backup to S3 storage +Below are examples for different use cases. + +--- + +## Backup Job to S3 Storage + +This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an S3-compatible storage. ```yaml apiVersion: batch/v1 @@ -21,50 +26,53 @@ spec: template: spec: containers: - - name: mysql-bkup - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. - image: jkaninda/mysql-bkup - command: - - /bin/sh - - -c - - backup --storage s3 - resources: - limits: - memory: "128Mi" - cpu: "500m" - env: - - name: DB_PORT - value: "3306" - - name: DB_HOST - value: "" - - name: DB_NAME - value: "dbname" - - name: DB_USERNAME - value: "username" - # Please use secret! - - name: DB_PASSWORD - value: "" - - name: AWS_S3_ENDPOINT - value: "https://s3.amazonaws.com" - - name: AWS_S3_BUCKET_NAME - value: "xxx" - - name: AWS_REGION - value: "us-west-2" - - name: AWS_ACCESS_KEY - value: "xxxx" - - name: AWS_SECRET_KEY - value: "xxxx" - - name: AWS_DISABLE_SSL - value: "false" - - name: AWS_FORCE_PATH_STYLE - value: "false" + - name: mysql-bkup + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. 
+ image: jkaninda/mysql-bkup + command: + - /bin/sh + - -c + - backup --storage s3 + resources: + limits: + memory: "128Mi" + cpu: "500m" + env: + - name: DB_PORT + value: "3306" + - name: DB_HOST + value: "" + - name: DB_NAME + value: "" + - name: DB_USERNAME + value: "" + # Use Kubernetes Secrets for sensitive data like passwords + - name: DB_PASSWORD + value: "" + - name: AWS_S3_ENDPOINT + value: "https://s3.amazonaws.com" + - name: AWS_S3_BUCKET_NAME + value: "xxx" + - name: AWS_REGION + value: "us-west-2" + - name: AWS_ACCESS_KEY + value: "xxxx" + - name: AWS_SECRET_KEY + value: "xxxx" + - name: AWS_DISABLE_SSL + value: "false" + - name: AWS_FORCE_PATH_STYLE + value: "false" restartPolicy: Never ``` -## Backup Job to SSH remote server +--- + +## Backup Job to SSH Remote Server + +This example demonstrates how to configure a Kubernetes `Job` to back up a MySQL database to an SSH remote server. ```yaml apiVersion: batch/v1 @@ -77,15 +85,14 @@ spec: spec: containers: - name: mysql-bkup - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup command: - - /bin/sh - - -c - - backup --storage ssh + - /bin/sh + - -c + - backup --storage ssh --disable-compression resources: limits: memory: "128Mi" @@ -98,8 +105,8 @@ spec: - name: DB_NAME value: "dbname" - name: DB_USERNAME - value: "username" - # Please use secret! 
+ value: "postgres" + # Use Kubernetes Secrets for sensitive data like passwords - name: DB_PASSWORD value: "" - name: SSH_HOST_NAME @@ -112,14 +119,18 @@ spec: value: "xxxx" - name: SSH_REMOTE_PATH value: "/home/toto/backup" - # Optional, required if you want to encrypt your backup + # Optional: Required if you want to encrypt your backup - name: GPG_PASSPHRASE - value: "secure-passphrase" + value: "xxxx" restartPolicy: Never ``` +--- + ## Restore Job +This example demonstrates how to configure a Kubernetes `Job` to restore a MySQL database from a backup stored on an SSH remote server. + ```yaml apiVersion: batch/v1 kind: Job @@ -131,48 +142,51 @@ spec: spec: containers: - name: mysql-bkup - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup command: - - /bin/sh - - -c - - backup --storage ssh --file store_20231219_022941.sql.gz + - /bin/sh + - -c + - restore --storage ssh --file store_20231219_022941.sql.gz resources: limits: memory: "128Mi" cpu: "500m" env: - - name: DB_PORT - value: "3306" - - name: DB_HOST - value: "" - - name: DB_NAME - value: "dbname" - - name: DB_USERNAME - value: "username" - # Please use secret! 
- - name: DB_PASSWORD - value: "" - - name: SSH_HOST_NAME - value: "xxx" - - name: SSH_PORT - value: "22" - - name: SSH_USER - value: "xxx" - - name: SSH_PASSWORD - value: "xxxx" - - name: SSH_REMOTE_PATH - value: "/home/xxxx/backup" - # Optional, required if your backup was encrypted - #- name: GPG_PASSPHRASE - # value: "xxxx" + - name: DB_PORT + value: "3306" + - name: DB_HOST + value: "" + - name: DB_NAME + value: "dbname" + - name: DB_USERNAME + value: "postgres" + # Use Kubernetes Secrets for sensitive data like passwords + - name: DB_PASSWORD + value: "" + - name: SSH_HOST_NAME + value: "xxx" + - name: SSH_PORT + value: "22" + - name: SSH_USER + value: "xxx" + - name: SSH_PASSWORD + value: "xxxx" + - name: SSH_REMOTE_PATH + value: "/home/toto/backup" + # Optional: Required if your backup was encrypted + #- name: GPG_PASSPHRASE + # value: "xxxx" restartPolicy: Never ``` -## Recurring backup +--- + +## Recurring Backup with CronJob + +This example demonstrates how to configure a Kubernetes `CronJob` for recurring backups to an SSH remote server. ```yaml apiVersion: batch/v1 @@ -187,51 +201,51 @@ spec: spec: containers: - name: mysql-bkup + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup command: - /bin/sh - -c - - bkup - - backup - - --storage - - ssh - - --disable-compression + - backup --storage ssh --disable-compression resources: limits: memory: "128Mi" cpu: "500m" env: - - name: DB_PORT - value: "3306" - - name: DB_HOST - value: "" - - name: DB_NAME - value: "username" - - name: DB_USERNAME - value: "username" - # Please use secret! 
- - name: DB_PASSWORD - value: "" - - name: SSH_HOST_NAME - value: "xxx" - - name: SSH_PORT - value: "xxx" - - name: SSH_USER - value: "jkaninda" - - name: SSH_REMOTE_PATH - value: "/home/jkaninda/backup" - - name: SSH_PASSWORD - value: "password" - # Optional, required if you want to encrypt your backup - #- name: GPG_PASSPHRASE - # value: "xxx" + - name: DB_PORT + value: "3306" + - name: DB_HOST + value: "" + - name: DB_NAME + value: "test" + - name: DB_USERNAME + value: "postgres" + # Use Kubernetes Secrets for sensitive data like passwords + - name: DB_PASSWORD + value: "" + - name: SSH_HOST_NAME + value: "192.168.1.16" + - name: SSH_PORT + value: "2222" + - name: SSH_USER + value: "jkaninda" + - name: SSH_REMOTE_PATH + value: "/config/backup" + - name: SSH_PASSWORD + value: "password" + # Optional: Required if you want to encrypt your backup + #- name: GPG_PASSPHRASE + # value: "xxx" restartPolicy: Never ``` -## Kubernetes Rootless +--- -This image also supports Kubernetes security context, you can run it in Rootless environment. -It has been tested on Openshift, it works well. +## Kubernetes Rootless Deployment + +This example demonstrates how to run the backup container in a rootless environment, suitable for platforms like OpenShift. ```yaml apiVersion: batch/v1 @@ -249,53 +263,52 @@ spec: runAsGroup: 3000 fsGroup: 2000 containers: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. - - name: mysql-bkup - image: jkaninda/mysql-bkup - command: - - /bin/sh - - -c - - bkup - - backup - - --storage - - ssh - - --disable-compression - resources: - limits: - memory: "128Mi" - cpu: "500m" - env: - - name: DB_PORT - value: "3306" - - name: DB_HOST - value: "" - - name: DB_NAME - value: "xxx" - - name: DB_USERNAME - value: "xxx" - # Please use secret! 
- - name: DB_PASSWORD - value: "" - - name: SSH_HOST_NAME - value: "xxx" - - name: SSH_PORT - value: "22" - - name: SSH_USER - value: "jkaninda" - - name: SSH_REMOTE_PATH - value: "/home/jkaninda/backup" - - name: SSH_PASSWORD - value: "password" - # Optional, required if you want to encrypt your backup + - name: mysql-bkup + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. + image: jkaninda/mysql-bkup + command: + - /bin/sh + - -c + - backup --storage ssh --disable-compression + resources: + limits: + memory: "128Mi" + cpu: "500m" + env: + - name: DB_PORT + value: "3306" + - name: DB_HOST + value: "" + - name: DB_NAME + value: "test" + - name: DB_USERNAME + value: "postgres" + # Use Kubernetes Secrets for sensitive data like passwords + - name: DB_PASSWORD + value: "" + - name: SSH_HOST_NAME + value: "192.168.1.16" + - name: SSH_PORT + value: "2222" + - name: SSH_USER + value: "jkaninda" + - name: SSH_REMOTE_PATH + value: "/config/backup" + - name: SSH_PASSWORD + value: "password" + # Optional: Required if you want to encrypt your backup #- name: GPG_PASSPHRASE # value: "xxx" restartPolicy: OnFailure ``` -## Migrate database +--- + +## Migrate Database + +This example demonstrates how to configure a Kubernetes `Job` to migrate a MySQL database from one server to another. ```yaml apiVersion: batch/v1 @@ -308,10 +321,9 @@ spec: spec: containers: - name: mysql-bkup - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. 
image: jkaninda/mysql-bkup command: - /bin/sh @@ -322,11 +334,11 @@ spec: memory: "128Mi" cpu: "500m" env: - ## Source Database + ## Source Database - name: DB_HOST - value: "mysql" + value: "postgres" - name: DB_PORT - value: "3306" + value: "3306" - name: DB_NAME value: "dbname" - name: DB_USERNAME @@ -335,7 +347,7 @@ spec: value: "password" ## Target Database - name: TARGET_DB_HOST - value: "target-mysql" + value: "target-postgres" - name: TARGET_DB_PORT value: "3306" - name: TARGET_DB_NAME @@ -345,4 +357,13 @@ spec: - name: TARGET_DB_PASSWORD value: "password" restartPolicy: Never -``` \ No newline at end of file +``` + +--- + +## Key Notes + +- **Security**: Always use Kubernetes Secrets for sensitive data like passwords and access keys. +- **Resource Limits**: Adjust resource limits (`memory` and `cpu`) based on your workload requirements. +- **Cron Schedule**: Use standard cron expressions for scheduling recurring backups. +- **Rootless Deployment**: The image supports running in rootless environments, making it suitable for platforms like OpenShift. diff --git a/docs/how-tos/encrypt-backup.md b/docs/how-tos/encrypt-backup.md index 3d19ec1..71199a6 100644 --- a/docs/how-tos/encrypt-backup.md +++ b/docs/how-tos/encrypt-backup.md @@ -1,47 +1,38 @@ --- -title: Encrypt backups +title: Encrypt backups using GPG layout: default parent: How Tos nav_order: 8 --- -# Encrypt backup +# Encrypt Backup -The image supports encrypting backups using one of two available methods: GPG with passphrase or GPG with a public key. - - -The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted using the given key and saved as a sql.gpg file instead or sql.gz.gpg. +The image supports encrypting backups using one of two methods: **GPG with a passphrase** or **GPG with a public key**. 
When a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY` environment variable is set, the backup archive will be encrypted and saved as a `.sql.gpg` or `.sql.gz.gpg` file. {: .warning } -To restore an encrypted backup, you need to provide the same GPG passphrase used during backup process. +To restore an encrypted backup, you must provide the same GPG passphrase or private key used during the backup process. -- GPG home directory `/config/gnupg` -- Cipher algorithm `aes256` +--- -{: .note } -The backup encrypted using `GPG passphrase` method can be restored automatically, no need to decrypt it before restoration. -Suppose you used a GPG public key during the backup process. In that case, you need to decrypt your backup before restoration because decryption using a `GPG private` key is not fully supported. +## Key Features -To decrypt manually, you need to install `gnupg` +- **Cipher Algorithm**: `aes256` +- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored automatically without manual decryption. +- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption before restoration. -```shell -gpg --batch --passphrase "my-passphrase" \ ---output database_20240730_044201.sql.gz \ ---decrypt database_20240730_044201.sql.gz.gpg -``` -Using your private key +--- -```shell -gpg --output database_20240730_044201.sql.gz --decrypt database_20240730_044201.sql.gz.gpg -``` -## Using GPG passphrase +## Using GPG Passphrase -```yml +To encrypt backups using a GPG passphrase, set the `GPG_PASSPHRASE` environment variable. The backup will be encrypted and can be restored automatically. + +### Example Configuration + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. 
+ # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup command: backup -d database @@ -55,26 +46,34 @@ services: - DB_PASSWORD=password ## Required to encrypt backup - GPG_PASSPHRASE=my-secure-passphrase - # mysql-bkup container must be connected to the same network with your database + # Ensure the pg-bkup container is connected to the same network as your database networks: - web + networks: web: ``` + +--- + ## Using GPG Public Key -```yml +To encrypt backups using a GPG public key, set the `GPG_PUBLIC_KEY` environment variable to the path of your public key file. Backups encrypted with a public key require manual decryption before restoration. + +### Example Configuration + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup command: backup -d database volumes: - ./backup:/backup + - ./public_key.asc:/config/public_key.asc environment: - DB_PORT=3306 - DB_HOST=mysql @@ -83,9 +82,39 @@ services: - DB_PASSWORD=password ## Required to encrypt backup - GPG_PUBLIC_KEY=/config/public_key.asc - # mysql-bkup container must be connected to the same network with your database + # Ensure the pg-bkup container is connected to the same network as your database networks: - web + networks: web: -``` \ No newline at end of file +``` + +--- + +## Manual Decryption + +If you encrypted your backup using a GPG public key, you must manually decrypt it before restoration. 
Use the `gnupg` tool for decryption. + +### Decrypt Using a Passphrase + +```bash +gpg --batch --passphrase "my-passphrase" \ + --output database_20240730_044201.sql.gz \ + --decrypt database_20240730_044201.sql.gz.gpg +``` + +### Decrypt Using a Private Key + +```bash +gpg --output database_20240730_044201.sql.gz \ + --decrypt database_20240730_044201.sql.gz.gpg +``` + +--- + +## Key Notes + +- **Automatic Restoration**: Backups encrypted with a GPG passphrase can be restored directly without manual decryption. +- **Manual Decryption**: Backups encrypted with a GPG public key require manual decryption using the corresponding private key. +- **Security**: Always keep your GPG passphrase and private key secure. Use Kubernetes Secrets or other secure methods to manage sensitive data. diff --git a/docs/how-tos/migrate.md b/docs/how-tos/migrate.md index 189e516..b4e316c 100644 --- a/docs/how-tos/migrate.md +++ b/docs/how-tos/migrate.md @@ -5,76 +5,102 @@ parent: How Tos nav_order: 10 --- -# Migrate database +# Migrate Database -To migrate the database, you need to add `migrate` command. +To migrate a MySQL database from a source to a target database, you can use the `migrate` command. This feature simplifies the process by combining the backup and restore operations into a single step. {: .note } -The Mysql backup has another great feature: migrating your database from a source database to a target. - -As you know, to restore a database from a source to a target database, you need 2 operations: which is to start by backing up the source database and then restoring the source backed database to the target database. -Instead of proceeding like that, you can use the integrated feature `(migrate)`, which will help you migrate your database by doing only one operation. +The `migrate` command eliminates the need for separate backup and restore operations. It directly transfers data from the source database to the target database. 
{: .warning } -The `migrate` operation is irreversible, please backup your target database before this action. +The `migrate` operation is **irreversible**. Always back up your target database before performing this action. -### Docker compose -```yml +--- + +## Configuration Steps + +1. **Source Database**: Provide connection details for the source database. +2. **Target Database**: Provide connection details for the target database. +3. **Run the Migration**: Use the `migrate` command to initiate the migration. + +--- + +## Example: Docker Compose Configuration + +Below is an example `docker-compose.yml` configuration for migrating a database: + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysqlbkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup command: migrate volumes: - ./backup:/backup environment: - ## Source database + ## Source Database - DB_PORT=3306 - DB_HOST=mysql - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - ## Target database - - TARGET_DB_HOST=target-mysql + + ## Target Database + - TARGET_DB_HOST=target-postgres - TARGET_DB_PORT=3306 - TARGET_DB_NAME=dbname - TARGET_DB_USERNAME=username - TARGET_DB_PASSWORD=password - # mysql-bkup container must be connected to the same network with your database + + # Ensure the mysql-bkup container is connected to the same network as your database networks: - web + networks: web: ``` +--- -### Migrate database using Docker CLI +## Migrate Database Using Docker CLI +You can also run the migration directly using the Docker CLI. 
Below is an example: -``` -## Source database -DB_HOST=mysql +### Environment Variables + +Save your source and target database connection details in an environment file (e.g., `your-env`): + +```bash +## Source Database +DB_HOST=postgres DB_PORT=3306 DB_NAME=dbname DB_USERNAME=username DB_PASSWORD=password -## Taget database -TARGET_DB_HOST=target-mysql +## Target Database +TARGET_DB_HOST=target-postgres TARGET_DB_PORT=3306 TARGET_DB_NAME=dbname TARGET_DB_USERNAME=username TARGET_DB_PASSWORD=password ``` -```shell - docker run --rm --network your_network_name \ - --env-file your-env - -v $PWD/backup:/backup/ \ - jkaninda/mysql-bkup migrate +### Run the Migration + +```bash +docker run --rm --network your_network_name \ + --env-file your-env \ + -v $PWD/backup:/backup/ \ + jkaninda/pg-bkup migrate ``` +--- + +## Key Notes + +- **Irreversible Operation**: The `migrate` command directly transfers data from the source to the target database. Ensure you have a backup of the target database before proceeding. +- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your source and target databases. diff --git a/docs/how-tos/mutli-backup.md b/docs/how-tos/mutli-backup.md index 89d50e0..e7b67e4 100644 --- a/docs/how-tos/mutli-backup.md +++ b/docs/how-tos/mutli-backup.md @@ -1,63 +1,96 @@ --- -title: Run multiple backup schedules in the same container +title: Run multiple database backup schedules in the same container layout: default parent: How Tos nav_order: 11 --- -Multiple backup schedules with different configuration can be configured by mounting a configuration file into `/config/config.yaml` `/config/config.yml` or by defining an environment variable `BACKUP_CONFIG_FILE=/backup/config.yaml`. -## Configuration file +# Multiple Backup Schedules + +You can configure multiple backup schedules with different configurations by using a configuration file. 
+ +This file can be mounted into the container at `/config/config.yaml`, `/config/config.yml`, or specified via the `BACKUP_CONFIG_FILE` environment variable. + +--- + +## Configuration File + +The configuration file allows you to define multiple databases and their respective backup settings. + +Below is an example configuration file: ```yaml -#cronExpression: "@every 20m" //Optional for scheduled backups -cronExpression: "" +# Optional: Define a global cron expression for scheduled backups +# cronExpression: "@every 20m" +cronExpression: "" + databases: - host: mysql1 port: 3306 name: database1 user: database1 password: password - path: /s3-path/database1 #For SSH or FTP you need to define the full path (/home/toto/backup/) + path: /s3-path/database1 # For SSH or FTP, define the full path (e.g., /home/toto/backup/) + - host: mysql2 port: 3306 name: lldap user: lldap password: password - path: /s3-path/lldap #For SSH or FTP you need to define the full path (/home/toto/backup/) + path: /s3-path/lldap # For SSH or FTP, define the full path (e.g., /home/toto/backup/) + - host: mysql3 port: 3306 name: keycloak user: keycloak password: password - path: /s3-path/keycloak #For SSH or FTP you need to define the full path (/home/toto/backup/) + path: /s3-path/keycloak # For SSH or FTP, define the full path (e.g., /home/toto/backup/) + - host: mysql4 port: 3306 name: joplin user: joplin password: password - path: /s3-path/joplin #For SSH or FTP you need to define the full path (/home/toto/backup/) + path: /s3-path/joplin # For SSH or FTP, define the full path (e.g., /home/toto/backup/) ``` -## Docker compose file + +--- + +## Docker Compose Configuration + +To use the configuration file in a Docker Compose setup, mount the file and specify its path using the `BACKUP_CONFIG_FILE` environment variable. 
+ +### Example: Docker Compose File ```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup command: backup volumes: - - ./backup:/backup + - ./backup:/backup # Mount the backup directory + - ./config.yaml:/backup/config.yaml # Mount the configuration file environment: - ## Multi backup config file + ## Specify the path to the configuration file - BACKUP_CONFIG_FILE=/backup/config.yaml - # mysql-bkup container must be connected to the same network with your database + # Ensure the pg-bkup container is connected to the same network as your database networks: - web + networks: web: -``` \ No newline at end of file +``` + +--- + +## Key Notes + +- **Global Cron Expression**: You can define a global `cronExpression` in the configuration file to schedule backups for all databases. If omitted, backups will run immediately. +- **Database-Specific Paths**: For SSH or FTP storage, ensure the `path` field contains the full remote path (e.g., `/home/toto/backup/`). +- **Environment Variables**: Use the `BACKUP_CONFIG_FILE` environment variable to specify the path to the configuration file. +- **Security**: Avoid hardcoding sensitive information like passwords in the configuration file. Use environment variables or secrets management tools instead. 
diff --git a/docs/how-tos/receive-notification.md b/docs/how-tos/receive-notification.md index fac6a5c..3067c92 100644 --- a/docs/how-tos/receive-notification.md +++ b/docs/how-tos/receive-notification.md @@ -4,10 +4,20 @@ layout: default parent: How Tos nav_order: 12 --- -Send Email or Telegram notifications on successfully or failed backup. -### Email -To send out email notifications on failed or successfully backup runs, provide SMTP credentials, a sender and a recipient: +# Receive Notifications + +You can configure the system to send email or Telegram notifications when a backup succeeds or fails. + +This section explains how to set up and customize notifications. + +--- + +## Email Notifications + +To send email notifications, provide SMTP credentials, a sender address, and recipient addresses. Notifications will be sent for both successful and failed backup runs. + +### Example: Email Notification Configuration ```yaml services: @@ -23,25 +33,33 @@ services: - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - - MAIL_HOST= + ## SMTP Configuration + - MAIL_HOST=smtp.example.com - MAIL_PORT=587 - - MAIL_USERNAME= - - MAIL_PASSWORD=! + - MAIL_USERNAME=your-email@example.com + - MAIL_PASSWORD=your-email-password - MAIL_FROM=Backup Jobs ## Multiple recipients separated by a comma - MAIL_TO=me@example.com,team@example.com,manager@example.com - MAIL_SKIP_TLS=false - ## Time format for notification + ## Time format for notifications - TIME_FORMAT=2006-01-02 at 15:04:05 - ## Backup reference, in case you want to identify every backup instance + ## Backup reference (e.g., database/cluster name or server name) - BACKUP_REFERENCE=database/Paris cluster networks: - web + networks: web: ``` -### Telegram +--- + +## Telegram Notifications + +To send Telegram notifications, provide your bot token and chat ID. Notifications will be sent for both successful and failed backup runs. 
+ +### Example: Telegram Notification Configuration ```yaml services: @@ -57,41 +75,49 @@ services: - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password + ## Telegram Configuration - TG_TOKEN=[BOT ID]:[BOT TOKEN] - - TG_CHAT_ID= - ## Time format for notification + - TG_CHAT_ID=your-chat-id + ## Time format for notifications - TIME_FORMAT=2006-01-02 at 15:04:05 - ## Backup reference, in case you want to identify every backup instance + ## Backup reference (e.g., database/cluster name or server name) - BACKUP_REFERENCE=database/Paris cluster networks: - web + networks: web: ``` -### Customize notifications +--- -The title and body of the notifications can be tailored to your needs using Go templates. -Template sources must be mounted inside the container in /config/templates: +## Customize Notifications -- email.tmpl: Email notification template -- telegram.tmpl: Telegram notification template -- email-error.tmpl: Error notification template -- telegram-error.tmpl: Error notification template +You can customize the title and body of notifications using Go templates. Template files must be mounted inside the container at `/config/templates`. The following templates are supported: -### Data +- `email.tmpl`: Template for successful email notifications. +- `telegram.tmpl`: Template for successful Telegram notifications. +- `email-error.tmpl`: Template for failed email notifications. +- `telegram-error.tmpl`: Template for failed Telegram notifications. -Here is a list of all data passed to the template: -- `Database` : Database name -- `StartTime`: Backup start time process -- `EndTime`: Backup start time process -- `Storage`: Backup storage -- `BackupLocation`: Backup location -- `BackupSize`: Backup size -- `BackupReference`: Backup reference(eg: database/cluster name or server name) +### Template Data -> email.template: +The following data is passed to the templates: +- `Database`: Database name. +- `StartTime`: Backup start time. 
+- `EndTime`: Backup end time. +- `Storage`: Backup storage type (e.g., local, S3, SSH). +- `BackupLocation`: Backup file location. +- `BackupSize`: Backup file size in bytes. +- `BackupReference`: Backup reference (e.g., database/cluster name or server name). +- `Error`: Error message (only for error templates). + +--- + +### Example Templates + +#### `email.tmpl` (Successful Backup) ```html

Hi,

@@ -104,29 +130,29 @@ Here is a list of all data passed to the template:
  • Backup Storage: {{.Storage}}
  • Backup Location: {{.BackupLocation}}
  • Backup Size: {{.BackupSize}} bytes
  • -
  • Backup Reference: {{.BackupReference}}
  • +
  • Backup Reference: {{.BackupReference}}
  • Best regards,

    ``` -> telegram.template +#### `telegram.tmpl` (Successful Backup) ```html -βœ… Database Backup Notification – {{.Database}} +βœ… Database Backup Notification – {{.Database}} Hi, Backup of the {{.Database}} database has been successfully completed on {{.EndTime}}. Backup Details: - Database Name: {{.Database}} - Backup Start Time: {{.StartTime}} -- Backup EndTime: {{.EndTime}} +- Backup End Time: {{.EndTime}} - Backup Storage: {{.Storage}} - Backup Location: {{.BackupLocation}} - Backup Size: {{.BackupSize}} bytes - Backup Reference: {{.BackupReference}} ``` -> email-error.template +#### `email-error.tmpl` (Failed Backup) ```html @@ -140,16 +166,15 @@ Backup Details:

    An error occurred during database backup.

    Failure Details:

    ``` -> telegram-error.template - +#### `telegram-error.tmpl` (Failed Backup) ```html πŸ”΄ Urgent: Database Backup Failure Notification @@ -159,4 +184,14 @@ Failure Details: Error Message: {{.Error}} Date: {{.EndTime}} -``` \ No newline at end of file +Backup Reference: {{.BackupReference}} +``` + +--- + +## Key Notes + +- **SMTP Configuration**: Ensure your SMTP server supports TLS unless `MAIL_SKIP_TLS` is set to `true`. +- **Telegram Configuration**: Obtain your bot token and chat ID from Telegram. +- **Custom Templates**: Mount custom templates to `/config/templates` to override default notifications. +- **Time Format**: Use the `TIME_FORMAT` environment variable to customize the timestamp format in notifications. \ No newline at end of file diff --git a/docs/how-tos/restore-from-s3.md b/docs/how-tos/restore-from-s3.md index c6ff46e..947104b 100644 --- a/docs/how-tos/restore-from-s3.md +++ b/docs/how-tos/restore-from-s3.md @@ -5,45 +5,71 @@ parent: How Tos nav_order: 6 --- -# Restore database from S3 storage +# Restore Database from S3 Storage -To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`. +To restore a MySQL database from a backup stored in S3, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats: -{: .note } -It supports __.sql__,__.sql.gpg__ and __.sql.gz__,__.sql.gz.gpg__ compressed file. +- `.sql` (uncompressed SQL dump) +- `.sql.gz` (gzip-compressed SQL dump) +- `.sql.gpg` (GPG-encrypted SQL dump) +- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump) -### Restore +--- -```yml +## Configuration Steps + +1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore. +2. **Set the Storage Type**: Add the `--storage s3` flag to indicate that the backup is stored in S3. +3. 
**Provide S3 Configuration**: Include the necessary AWS S3 credentials and configuration. +4. **Provide Database Credentials**: Ensure the correct database connection details are provided. + +--- + +## Example: Restore from S3 Configuration + +Below is an example `docker-compose.yml` configuration for restoring a database from S3 storage: + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup command: restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path volumes: - - ./backup:/backup + - ./backup:/backup # Mount the directory for local operations (if needed) environment: - DB_PORT=3306 - DB_HOST=mysql - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - ## AWS configurations + ## AWS S3 Configuration - AWS_S3_ENDPOINT=https://s3.amazonaws.com - AWS_S3_BUCKET_NAME=backup - - AWS_REGION="us-west-2" + - AWS_REGION=us-west-2 - AWS_ACCESS_KEY=xxxx - AWS_SECRET_KEY=xxxxx - ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true - - AWS_DISABLE_SSL="false" - - AWS_FORCE_PATH_STYLE="false" - # mysql-bkup container must be connected to the same network with your database + ## Optional: Disable SSL for S3 alternatives like Minio + - AWS_DISABLE_SSL=false + ## Optional: Enable path-style access for S3 alternatives like Minio + - AWS_FORCE_PATH_STYLE=false + # Ensure the pg-bkup container is connected to the same network as your database networks: - web + networks: web: ``` + +--- + +## Key Notes + +- **Supported File Formats**: The restore process 
supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files. +- **S3 Path**: Use the `--path` flag to specify the folder within the S3 bucket where the backup file is located. +- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption. +- **S3 Alternatives**: For S3-compatible storage like Minio, set `AWS_DISABLE_SSL` and `AWS_FORCE_PATH_STYLE` as needed. +- **Network Configuration**: Ensure the `pg-bkup` container is connected to the same network as your database. \ No newline at end of file diff --git a/docs/how-tos/restore-from-ssh.md b/docs/how-tos/restore-from-ssh.md index 70c905c..880bdba 100644 --- a/docs/how-tos/restore-from-ssh.md +++ b/docs/how-tos/restore-from-ssh.md @@ -4,44 +4,71 @@ layout: default parent: How Tos nav_order: 7 --- -# Restore database from SSH remote server -To restore the database from your remote server, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`. +# Restore Database from SSH Remote Server -{: .note } -It supports __.sql__,__.sql.gpg__ and __.sql.gz__,__.sql.gz.gpg__ compressed file. +To restore a MySQL database from a backup stored on an SSH remote server, use the `restore` command and specify the backup file with the `--file` flag. The system supports the following file formats: -### Restore +- `.sql` (uncompressed SQL dump) +- `.sql.gz` (gzip-compressed SQL dump) +- `.sql.gpg` (GPG-encrypted SQL dump) +- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump) -```yml +--- + +## Configuration Steps + +1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore. +2. **Set the Storage Type**: Add the `--storage ssh` flag to indicate that the backup is stored on an SSH remote server. +3. **Provide SSH Configuration**: Include the necessary SSH credentials and configuration. +4. 
**Provide Database Credentials**: Ensure the correct database connection details are provided. + +--- + +## Example: Restore from SSH Remote Server Configuration + +Below is an example `docker-compose.yml` configuration for restoring a database from an SSH remote server: + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. + # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup command: restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups volumes: - - ./backup:/backup + - ./backup:/backup # Mount the directory for local operations (if needed) + - ./id_ed25519:/tmp/id_ed25519 # Mount the SSH private key file environment: - DB_PORT=3306 - - DB_HOST=postgres + - DB_HOST=mysql - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - ## SSH config - - SSH_HOST_NAME="hostname" + ## SSH Configuration + - SSH_HOST_NAME=hostname - SSH_PORT=22 - SSH_USER=user - SSH_REMOTE_PATH=/home/jkaninda/backups - SSH_IDENTIFY_FILE=/tmp/id_ed25519 - ## We advise you to use a private jey instead of password + ## Optional: Use password instead of private key (not recommended) #- SSH_PASSWORD=password - # mysql-bkup container must be connected to the same network with your database + # Ensure the mysql-bkup container is connected to the same network as your database networks: - web + networks: web: -``` \ No newline at end of file +``` + +--- + +## Key Notes + +- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files. 
+- **SSH Path**: Use the `--path` flag to specify the folder on the SSH remote server where the backup file is located. +- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption. +- **SSH Authentication**: Use a private key (`SSH_IDENTIFY_FILE`) for SSH authentication instead of a password for better security. +- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database. \ No newline at end of file diff --git a/docs/how-tos/restore.md b/docs/how-tos/restore.md index 8eceba0..5a4e9cf 100644 --- a/docs/how-tos/restore.md +++ b/docs/how-tos/restore.md @@ -5,36 +5,60 @@ parent: How Tos nav_order: 5 --- -# Restore database -To restore the database, you need to add `restore` command and specify the file to restore by adding `--file store_20231219_022941.sql.gz`. +# Restore Database -{: .note } -It supports __.sql__,__.sql.gpg__ and __.sql.gz__,__.sql.gz.gpg__ compressed file. +To restore a MySQL database, use the `restore` command and specify the backup file to restore with the `--file` flag. -### Restore +The system supports the following file formats: -```yml +- `.sql` (uncompressed SQL dump) +- `.sql.gz` (gzip-compressed SQL dump) +- `.sql.gpg` (GPG-encrypted SQL dump) +- `.sql.gz.gpg` (GPG-encrypted and gzip-compressed SQL dump) + +--- + +## Configuration Steps + +1. **Specify the Backup File**: Use the `--file` flag to specify the backup file to restore. +2. **Provide Database Credentials**: Ensure the correct database connection details are provided. + +--- + +## Example: Restore Configuration + +Below is an example `docker-compose.yml` configuration for restoring a database: + +```yaml services: mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. 
+ # In production, lock your image tag to a specific release version + # instead of using `latest`. Check https://github.com/jkaninda/mysql-bkup/releases + # for available releases. image: jkaninda/mysql-bkup container_name: mysql-bkup command: restore -d database -f store_20231219_022941.sql.gz volumes: - - ./backup:/backup + - ./backup:/backup # Mount the directory containing the backup file environment: - DB_PORT=3306 - - DB_HOST=mysql + - DB_HOST=mysql - DB_NAME=database - DB_USERNAME=username - DB_PASSWORD=password - # mysql-bkup container must be connected to the same network with your database + # Ensure the mysql-bkup container is connected to the same network as your database networks: - web + networks: web: -``` \ No newline at end of file +``` + +--- + +## Key Notes + +- **Supported File Formats**: The restore process supports `.sql`, `.sql.gz`, `.sql.gpg`, and `.sql.gz.gpg` files. +- **Encrypted Backups**: If the backup is encrypted with GPG, ensure the `GPG_PASSPHRASE` environment variable is set for automatic decryption. +- **Network Configuration**: Ensure the `mysql-bkup` container is connected to the same network as your database. diff --git a/docs/index.md b/docs/index.md index 37576d0..f3b6051 100644 --- a/docs/index.md +++ b/docs/index.md @@ -10,175 +10,76 @@ nav_order: 1 **MYSQL-BKUP** is a Docker container image designed to **backup, restore, and migrate MySQL databases**. It supports a variety of storage options and ensures data security through GPG encryption. -## Features +--- -- **Storage Options:** - - Local storage - - AWS S3 or any S3-compatible object storage - - FTP - - SSH-compatible storage - - Azure Blob storage +## Key Features -- **Data Security:** - - Backups can be encrypted using **GPG** to ensure confidentiality. 
+### Storage Options +- **Local storage** +- **AWS S3** or any S3-compatible object storage +- **FTP** +- **SFTP** +- **SSH-compatible storage** +- **Azure Blob storage** -- **Deployment Flexibility:** - - Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image. - - Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**. - - Supports recurring backups of MySQL databases when deployed: - - On Docker for automated backup schedules. - - As a **Job** or **CronJob** on Kubernetes. +### Data Security +- Backups can be encrypted using **GPG** to ensure data confidentiality. -- **Notifications:** - - Get real-time updates on backup success or failure via: - - **Telegram** - - **Email** +### Deployment Flexibility +- Available as the [jkaninda/mysql-bkup](https://hub.docker.com/r/jkaninda/mysql-bkup) Docker image. +- Deployable on **Docker**, **Docker Swarm**, and **Kubernetes**. +- Supports recurring backups of MySQL databases: + - On Docker for automated backup schedules. + - As a **Job** or **CronJob** on Kubernetes. + +### Notifications +- Receive real-time updates on backup success or failure via: + - **Telegram** + - **Email** + +--- ## Use Cases - **Automated Recurring Backups:** Schedule regular backups for MySQL databases. -- **Cross-Environment Migration:** Easily migrate your MySQL databases across different environments using supported storage options. +- **Cross-Environment Migration:** Easily migrate MySQL databases across different environments using supported storage options. - **Secure Backup Management:** Protect your data with GPG encryption. +--- +## Get Involved + +We welcome contributions! Feel free to give us a ⭐, submit PRs, or open issues on our [GitHub repository](https://github.com/jkaninda/mysql-bkup). + +{: .fs-6 .fw-300 } + +--- {: .note } -Code and documentation for `v1` version on [this branch][v1-branch]. +Code and documentation for the `v1` version are available on [this branch][v1-branch]. 
[v1-branch]: https://github.com/jkaninda/mysql-bkup --- -## Quickstart +## Available Image Registries -### Simple backup using Docker CLI +The Docker image is published to both **Docker Hub** and the **GitHub Container Registry**. You can use either of the following: -To run a one time backup, bind your local volume to `/backup` in the container and run the `backup` command: - -```shell - docker run --rm --network your_network_name \ - -v $PWD/backup:/backup/ \ - -e "DB_HOST=dbhost" \ - -e "DB_USERNAME=username" \ - -e "DB_PASSWORD=password" \ - jkaninda/mysql-bkup backup -d database_name -``` - -Alternatively, pass a `--env-file` in order to use a full config as described below. - -```yaml - docker run --rm --network your_network_name \ - --env-file your-env-file \ - -v $PWD/backup:/backup/ \ - jkaninda/mysql-bkup backup -d database_name -``` - -### Simple backup in docker compose file - -```yaml -services: - mysql-bkup: - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. 
- image: jkaninda/mysql-bkup - container_name: mysql-bkup - command: backup - volumes: - - ./backup:/backup - environment: - - DB_PORT=3306 - - DB_HOST=mysql - - DB_NAME=foo - - DB_USERNAME=bar - - DB_PASSWORD=password - - TZ=Europe/Paris - # mysql-bkup container must be connected to the same network with your database - networks: - - web -networks: - web: -``` -### Docker recurring backup - -```shell - docker run --rm --network network_name \ - -v $PWD/backup:/backup/ \ - -e "DB_HOST=hostname" \ - -e "DB_USERNAME=user" \ - -e "DB_PASSWORD=password" \ - jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" #@midnight -``` -See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules - -## Kubernetes - -```yaml -apiVersion: batch/v1 -kind: Job -metadata: - name: backup-job -spec: - ttlSecondsAfterFinished: 100 - template: - spec: - containers: - - name: mysql-bkup - # In production, it is advised to lock your image tag to a proper - # release version instead of using `latest`. - # Check https://github.com/jkaninda/mysql-bkup/releases - # for a list of available releases. - image: jkaninda/mysql-bkup - command: - - /bin/sh - - -c - - backup -d dbname - resources: - limits: - memory: "128Mi" - cpu: "500m" - env: - - name: DB_HOST - value: "mysql" - - name: DB_USERNAME - value: "user" - - name: DB_PASSWORD - value: "password" - volumeMounts: - - mountPath: /backup - name: backup - volumes: - - name: backup - hostPath: - path: /home/toto/backup # directory location on host - type: Directory # this field is optional - restartPolicy: Never -``` - -## Available image registries - -This Docker image is published to both Docker Hub and the GitHub container registry. 
-Depending on your preferences and needs, you can reference both `jkaninda/mysql-bkup` as well as `ghcr.io/jkaninda/mysql-bkup`: - -``` +```bash docker pull jkaninda/mysql-bkup docker pull ghcr.io/jkaninda/mysql-bkup ``` -Documentation references Docker Hub, but all examples will work using ghcr.io just as well. +While the documentation references Docker Hub, all examples work seamlessly with `ghcr.io`. -## Supported Engines - -This image is developed and tested against the Docker CE engine and Kubernetes exclusively. -While it may work against different implementations, there are no guarantees about support for non-Docker engines. +--- ## References -We decided to publish this image as a simpler and more lightweight alternative because of the following requirements: +We created this image as a simpler and more lightweight alternative to existing solutions. Here’s why: -- The original image is based on `alpine` and requires additional tools, making it heavy. -- This image is written in Go. -- `arm64` and `arm/v7` architectures are supported. -- Docker in Swarm mode is supported. -- Kubernetes is supported. +- **Lightweight:** Written in Go, the image is optimized for performance and minimal resource usage. +- **Multi-Architecture Support:** Supports `arm64` and `arm/v7` architectures. +- **Docker Swarm Support:** Fully compatible with Docker in Swarm mode. +- **Kubernetes Support:** Designed to work seamlessly with Kubernetes. diff --git a/docs/quickstart/index.md b/docs/quickstart/index.md new file mode 100644 index 0000000..92393b8 --- /dev/null +++ b/docs/quickstart/index.md @@ -0,0 +1,138 @@ +--- +title: Quickstart +layout: home +nav_order: 2 +--- + +# Quickstart + +This guide provides quick examples for running backups using Docker CLI, Docker Compose, and Kubernetes. 
+ +--- + +## Simple Backup Using Docker CLI + +To run a one-time backup, bind your local volume to `/backup` in the container and execute the `backup` command: + +```bash +docker run --rm --network your_network_name \ + -v $PWD/backup:/backup/ \ + -e "DB_HOST=dbhost" \ + -e "DB_USERNAME=username" \ + -e "DB_PASSWORD=password" \ + jkaninda/mysql-bkup backup -d database_name +``` + +### Using an Environment File + +Alternatively, you can use an `--env-file` to pass a full configuration: + +```bash +docker run --rm --network your_network_name \ + --env-file your-env-file \ + -v $PWD/backup:/backup/ \ + jkaninda/mysql-bkup backup -d database_name +``` + +--- + +## Simple Backup Using Docker Compose + +Below is an example `docker-compose.yml` configuration for running a backup: + +```yaml +services: + mysql-bkup: + # In production, lock the image tag to a specific release version. + # Check https://github.com/jkaninda/mysql-bkup/releases for available releases. + image: jkaninda/mysql-bkup + container_name: mysql-bkup + command: backup + volumes: + - ./backup:/backup + environment: + - DB_PORT=3306 + - DB_HOST=mysql + - DB_NAME=foo + - DB_USERNAME=bar + - DB_PASSWORD=password + - TZ=Europe/Paris + # Ensure the mysql-bkup container is connected to the same network as your database. + networks: + - web + +networks: + web: +``` + +--- + +## Recurring Backup with Docker + +To schedule recurring backups, use the `--cron-expression` flag: + +```bash +docker run --rm --network network_name \ + -v $PWD/backup:/backup/ \ + -e "DB_HOST=hostname" \ + -e "DB_USERNAME=user" \ + -e "DB_PASSWORD=password" \ + jkaninda/mysql-bkup backup -d dbName --cron-expression "@every 15m" +``` + +For predefined schedules, refer to the [documentation](https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules). 
+ +--- + +## Backup Using Kubernetes + +Below is an example Kubernetes `Job` configuration for running a backup: + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: backup-job +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + containers: + - name: mysql-bkup + # In production, lock the image tag to a specific release version. + # Check https://github.com/jkaninda/mysql-bkup/releases for available releases. + image: jkaninda/mysql-bkup + command: + - /bin/sh + - -c + - backup -d dbname + resources: + limits: + memory: "128Mi" + cpu: "500m" + env: + - name: DB_HOST + value: "mysql" + - name: DB_USERNAME + value: "user" + - name: DB_PASSWORD + value: "password" + volumeMounts: + - mountPath: /backup + name: backup + volumes: + - name: backup + hostPath: + path: /home/toto/backup # Directory location on the host + type: Directory # Optional field + restartPolicy: Never +``` + +--- + +## Key Notes + +- **Volume Binding**: Ensure the `/backup` directory is mounted to persist backup files. +- **Environment Variables**: Use environment variables or an `--env-file` to pass database credentials and other configurations. +- **Cron Expressions**: Use standard cron expressions or predefined schedules for recurring backups. +- **Kubernetes Jobs**: Use Kubernetes `Job` or `CronJob` for running backups in a Kubernetes cluster. \ No newline at end of file diff --git a/docs/reference/index.md b/docs/reference/index.md index 559f7a0..aa3c81d 100644 --- a/docs/reference/index.md +++ b/docs/reference/index.md @@ -1,139 +1,127 @@ --- title: Configuration Reference layout: default -nav_order: 2 +nav_order: 3 --- -# Configuration reference +# Configuration Reference -Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags. +Backup, restore, and migration targets, schedules, and retention policies are configured using **environment variables** or **CLI flags**. 
- - - - -### CLI utility Usage - -| Options | Shorts | Usage | -|-----------------------|--------|----------------------------------------------------------------------------------------| -| mysql-bkup | bkup | CLI utility | -| backup | | Backup database operation | -| restore | | Restore database operation | -| migrate | | Migrate database from one instance to another one | -| --storage | -s | Storage. local or s3 (default: local) | -| --file | -f | File name for restoration | -| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` | -| --dbname | -d | Database name | -| --port | -p | Database port (default: 3306) | -| --disable-compression | | Disable database backup compression | -| --cron-expression | | Backup cron expression, eg: (* * * * *) or @daily | -| --help | -h | Print this help message and exit | -| --version | -V | Print version information and exit | - -## Environment variables - -| Name | Requirement | Description | -|------------------------------|---------------------------------------------------------------|-----------------------------------------------------------------| -| DB_PORT | Optional, default 3306 | Database port number | -| DB_HOST | Required | Database host | -| DB_NAME | Optional if it was provided from the -d flag | Database name | -| DB_USERNAME | Required | Database user name | -| DB_PASSWORD | Required | Database password | -| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key | -| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key | -| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name | -| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name | -| AWS_REGION | Optional, required for S3 storage | AWS Region | -| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL | -| AWS_FORCE_PATH_STYLE | Optional, required for S3 storage | Force path style | -| FILE_NAME | Optional if it was provided 
from the --file flag | Database file to restore (extensions: .sql, .sql.gz) | -| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase | -| GPG_PUBLIC_KEY | Optional, required to encrypt backup | GPG public key, used to encrypt backup (/config/public_key.asc) | -| BACKUP_CRON_EXPRESSION | Optional if it was provided from the `--cron-expression` flag | Backup cron expression for docker in scheduled mode | -| BACKUP_RETENTION_DAYS | Optional | Delete old backup created more than specified days ago | -| SSH_HOST | Optional, required for SSH storage | ssh remote hostname or ip | -| SSH_USER | Optional, required for SSH storage | ssh remote user | -| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password | -| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key | -| SSH_PORT | Optional, required for SSH storage | ssh remote server port | -| REMOTE_PATH | Optional, required for SSH or FTP storage | remote path (/home/toto/backup) | -| FTP_HOST | Optional, required for FTP storage | FTP host name | -| FTP_PORT | Optional, required for FTP storage | FTP server port number | -| FTP_USER | Optional, required for FTP storage | FTP user | -| FTP_PASSWORD | Optional, required for FTP storage | FTP user password | -| TARGET_DB_HOST | Optional, required for database migration | Target database host | -| TARGET_DB_PORT | Optional, required for database migration | Target database port | -| TARGET_DB_NAME | Optional, required for database migration | Target database name | -| TARGET_DB_USERNAME | Optional, required for database migration | Target database username | -| TARGET_DB_PASSWORD | Optional, required for database migration | Target database password | -| TG_TOKEN | Optional, required for Telegram notification | Telegram token (`BOT-ID:BOT-TOKEN`) | -| TG_CHAT_ID | Optional, required for Telegram notification | Telegram Chat ID | -| TZ | Optional | Time Zone | -| 
AZURE_STORAGE_CONTAINER_NAME | Optional, required for Azure Blob Storage storage | Azure storage container name | -| AZURE_STORAGE_ACCOUNT_NAME | Optional, required for Azure Blob Storage storage | Azure storage account name | -| AZURE_STORAGE_ACCOUNT_KEY | Optional, required for Azure Blob Storage storage | Azure storage account key | --- -## Run in Scheduled mode -This image can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources. -For Docker, you need to run it in scheduled mode by adding `--cron-expression "* * * * *"` flag or by defining `BACKUP_CRON_EXPRESSION=0 1 * * *` environment variable. +## CLI Utility Usage -## Syntax of crontab (field description) +| Option | Short Flag | Description | +|-------------------------|------------|-------------------------------------------------------------------------------| +| `mysql-bkup` | `bkup` | CLI utility for managing MySQL backups. | +| `backup` | | Perform a backup operation. | +| `restore` | | Perform a restore operation. | +| `migrate` | | Migrate a database from one instance to another. | +| `--storage` | `-s` | Storage type (`local`, `s3`, `ssh`, etc.). Default: `local`. | +| `--file` | `-f` | File name for restoration. | +| `--path` | | Path for storage (e.g., `/custom_path` for S3 or `/home/foo/backup` for SSH). | +| `--config` | `-c` | Configuration file for multi database backup. (e.g: `/backup/config.yaml`). | +| `--dbname` | `-d` | Database name. | +| `--port` | `-p` | Database port. Default: `3306`. | +| `--disable-compression` | | Disable compression for database backups. | +| `--cron-expression` | `-e` | Cron expression for scheduled backups (e.g., `0 0 * * *` or `@daily`). | +| `--help` | `-h` | Display help message and exit. | +| `--version` | `-V` | Display version information and exit. 
| -The syntax is: +--- -- 1: Minute (0-59) -- 2: Hours (0-23) -- 3: Day (0-31) -- 4: Month (0-12 [12 == December]) -- 5: Day of the week(0-7 [7 or 0 == sunday]) +## Environment Variables -Easy to remember format: +| Name | Requirement | Description | +|--------------------------------|--------------------------------------|----------------------------------------------------------------------------| +| `DB_PORT` | Optional (default: `3306`) | Database port number. | +| `DB_HOST` | Required | Database host. | +| `DB_NAME` | Optional (if provided via `-d` flag) | Database name. | +| `DB_USERNAME` | Required | Database username. | +| `DB_PASSWORD` | Required | Database password. | +| `AWS_ACCESS_KEY` | Required for S3 storage | AWS S3 Access Key. | +| `AWS_SECRET_KEY` | Required for S3 storage | AWS S3 Secret Key. | +| `AWS_BUCKET_NAME` | Required for S3 storage | AWS S3 Bucket Name. | +| `AWS_REGION` | Required for S3 storage | AWS Region. | +| `AWS_DISABLE_SSL` | Optional | Disable SSL for S3 storage. | +| `AWS_FORCE_PATH_STYLE` | Optional | Force path-style access for S3 storage. | +| `FILE_NAME` | Optional (if provided via `--file`) | File name for restoration (e.g., `.sql`, `.sql.gz`). | +| `GPG_PASSPHRASE` | Optional | GPG passphrase for encrypting/decrypting backups. | +| `GPG_PUBLIC_KEY` | Optional | GPG public key for encrypting backups (e.g., `/config/public_key.asc`). | +| `BACKUP_CRON_EXPRESSION` | Optional (flag `-e`) | Cron expression for scheduled backups. | +| `BACKUP_RETENTION_DAYS` | Optional | Delete backups older than the specified number of days. | +| `BACKUP_CONFIG_FILE` | Optional (flag `-c`) | Configuration file for multi database backup. (e.g: `/backup/config.yaml`) | +| `SSH_HOST` | Required for SSH storage | SSH remote hostname or IP. | +| `SSH_USER` | Required for SSH storage | SSH remote username. | +| `SSH_PASSWORD` | Optional | SSH remote user's password. | +| `SSH_IDENTIFY_FILE` | Optional | SSH remote user's private key. 
| +| `SSH_PORT` | Optional (default: `22`) | SSH remote server port. | +| `REMOTE_PATH` | Required for SSH/FTP storage | Remote path (e.g., `/home/toto/backup`). | +| `FTP_HOST` | Required for FTP storage | FTP hostname. | +| `FTP_PORT` | Optional (default: `21`) | FTP server port. | +| `FTP_USER` | Required for FTP storage | FTP username. | +| `FTP_PASSWORD` | Required for FTP storage | FTP user password. | +| `TARGET_DB_HOST` | Required for migration | Target database host. | +| `TARGET_DB_PORT` | Optional (default: `3306`) | Target database port. | +| `TARGET_DB_NAME` | Required for migration | Target database name. | +| `TARGET_DB_USERNAME` | Required for migration | Target database username. | +| `TARGET_DB_PASSWORD` | Required for migration | Target database password. | +| `TARGET_DB_URL` | Optional | Target database URL in JDBC URI format. | +| `TG_TOKEN` | Required for Telegram notifications | Telegram token (`BOT-ID:BOT-TOKEN`). | +| `TG_CHAT_ID` | Required for Telegram notifications | Telegram Chat ID. | +| `TZ` | Optional | Time zone for scheduling. | +| `AZURE_STORAGE_CONTAINER_NAME` | Required for Azure Blob Storage | Azure storage container name. | +| `AZURE_STORAGE_ACCOUNT_NAME` | Required for Azure Blob Storage | Azure storage account name. | +| `AZURE_STORAGE_ACCOUNT_KEY` | Required for Azure Blob Storage | Azure storage account key. | + +--- + +## Scheduled Backups + +### Running in Scheduled Mode + +- **Docker**: Use the `--cron-expression` flag or the `BACKUP_CRON_EXPRESSION` environment variable to schedule backups. +- **Kubernetes**: Use a `CronJob` resource for scheduled backups. 
+ +### Cron Syntax + +The cron syntax consists of five fields: ```conf -* * * * * command to be executed +* * * * * command ``` +| Field | Description | Values | +|---------------|------------------------------|----------------| +| Minute | Minute of the hour | `0-59` | +| Hour | Hour of the day | `0-23` | +| Day of Month | Day of the month | `1-31` | +| Month | Month of the year | `1-12` | +| Day of Week | Day of the week (0 = Sunday) | `0-7` | + +#### Examples + +- **Every 30 minutes**: `*/30 * * * *` +- **Every hour at minute 0**: `0 * * * *` +- **Every day at 1:00 AM**: `0 1 * * *` + +### Predefined Schedules + +| Entry | Description | Equivalent To | +|----------------------------|--------------------------------------------|---------------| +| `@yearly` (or `@annually`) | Run once a year, midnight, Jan. 1st | `0 0 1 1 *` | +| `@monthly` | Run once a month, midnight, first of month | `0 0 1 * *` | +| `@weekly` | Run once a week, midnight between Sat/Sun | `0 0 * * 0` | +| `@daily` (or `@midnight`) | Run once a day, midnight | `0 0 * * *` | +| `@hourly` | Run once an hour, beginning of hour | `0 * * * *` | + +### Intervals + +You can also schedule backups at fixed intervals using the format: + ```conf -- - - - - -| | | | | -| | | | ----- Day of week (0 - 7) (Sunday=0 or 7) -| | | ------- Month (1 - 12) -| | --------- Day of month (1 - 31) -| ----------- Hour (0 - 23) -------------- Minute (0 - 59) -``` - -> At every 30th minute - -```conf -*/30 * * * * -``` -> β€œAt minute 0.” every hour -```conf -0 * * * * -``` - -> β€œAt 01:00.” every day - -```conf -0 1 * * * -``` -## Predefined schedules -You may use one of several pre-defined schedules in place of a cron expression. - -| Entry | Description | Equivalent To | -|------------------------|--------------------------------------------|---------------| -| @yearly (or @annually) | Run once a year, midnight, Jan. 
1st | 0 0 1 1 * | -| @monthly | Run once a month, midnight, first of month | 0 0 1 * * | -| @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 | -| @daily (or @midnight) | Run once a day, midnight | 0 0 * * * | -| @hourly | Run once an hour, beginning of hour | 0 * * * * | - -### Intervals -You may also schedule backup task at fixed intervals, starting at the time it's added or cron is run. This is supported by formatting the cron spec like this: - @every -where "duration" is a string accepted by time. +``` -For example, "@every 1h30m10s" would indicate a schedule that activates after 1 hour, 30 minutes, 10 seconds, and then every interval after that. \ No newline at end of file +- Example: `@every 1h30m10s` runs the backup every 1 hour, 30 minutes, and 10 seconds. diff --git a/pkg/config.go b/pkg/config.go index fbd99f3..1b95ec0 100644 --- a/pkg/config.go +++ b/pkg/config.go @@ -214,6 +214,7 @@ func initBackupConfig(cmd *cobra.Command) *BackupConfig { utils.SetEnv("STORAGE_PATH", storagePath) utils.GetEnv(cmd, "cron-expression", "BACKUP_CRON_EXPRESSION") utils.GetEnv(cmd, "path", "REMOTE_PATH") + utils.GetEnv(cmd, "config", "BACKUP_CONFIG_FILE") // Get flag value and set env remotePath := utils.GetEnvVariable("REMOTE_PATH", "SSH_REMOTE_PATH") storage = utils.GetEnv(cmd, "storage", "STORAGE")