diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..5c1db16
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,50 @@
+BINARY_NAME=mysql-bkup
+IMAGE_NAME=jkaninda/mysql-bkup
+
+include .env
+export
+run:
+	go run . backup
+
+build:
+	go build -o bin/${BINARY_NAME} .
+
+compile:
+	GOOS=darwin GOARCH=arm64 go build -o bin/${BINARY_NAME}-darwin-arm64 .
+	GOOS=darwin GOARCH=amd64 go build -o bin/${BINARY_NAME}-darwin-amd64 .
+	GOOS=linux GOARCH=arm64 go build -o bin/${BINARY_NAME}-linux-arm64 .
+	GOOS=linux GOARCH=amd64 go build -o bin/${BINARY_NAME}-linux-amd64 .
+
+docker-build:
+	docker build -f docker/Dockerfile -t jkaninda/mysql-bkup:latest .
+
+docker-run: docker-build
+	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --prune --keep-last 2
+docker-restore: docker-build
+	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore -f ${FILE_NAME}
+
+docker-run-migrate: docker-build
+	docker run --rm --network web --name mysql-bkup --env-file .env -v "./backup:/backup" ${IMAGE_NAME} migrate
+
+docker-run-scheduled: #docker-build
+	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --mode scheduled --period "* * * * *"
+
+
+docker-run-scheduled-s3: docker-build
+	docker run --rm --network web --name mysql-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
+
+docker-run-s3: docker-build
+	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage s3 --path /custom-path
+
+
+docker-restore-s3: docker-build
+	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} restore --storage s3 -f ${FILE_NAME} --path /custom-path
+
+docker-run-ssh: docker-build
+	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" ${IMAGE_NAME} backup --storage ssh
+
+docker-restore-ssh: docker-build
+	docker run --rm --network web --name mysql-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" ${IMAGE_NAME} restore --storage ssh -f ${FILE_NAME}
+
+run-docs:
+	cd docs && bundle exec jekyll serve -H 0.0.0.0 -t
\ No newline at end of file
diff --git a/README.md b/README.md
index fce36d2..95e0832 100644
--- a/README.md
+++ b/README.md
@@ -95,12 +95,13 @@ For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as
 apiVersion: batch/v1
 kind: Job
 metadata:
-  name: backup
+  name: backup-job
 spec:
+  ttlSecondsAfterFinished: 100
   template:
     spec:
       containers:
-      - name: mysql-bkup
+      - name: pg-bkup
         # In production, it is advised to lock your image tag to a proper
         # release version instead of using `latest`.
         # Check https://github.com/jkaninda/mysql-bkup/releases
@@ -109,38 +110,26 @@ spec:
         command:
         - /bin/sh
         - -c
-        - bkup
-        - backup
-        - --storage
-        - s3
+        - backup -d dbname
         resources:
           limits:
             memory: "128Mi"
             cpu: "500m"
         env:
-        - name: DB_PORT
-          value: "3306"
         - name: DB_HOST
-          value: ""
-        - name: DB_NAME
-          value: "dbname"
+          value: "mysql"
         - name: DB_USERNAME
-          value: "username"
-        # Please use secret!
+          value: "user"
         - name: DB_PASSWORD
-          value: ""
-        - name: AWS_S3_ENDPOINT
-          value: "https://s3.amazonaws.com"
-        - name: AWS_S3_BUCKET_NAME
-          value: "xxx"
-        - name: AWS_REGION
-          value: "us-west-2"
-        - name: AWS_ACCESS_KEY
-          value: "xxxx"
-        - name: AWS_SECRET_KEY
-          value: "xxxx"
-        - name: AWS_DISABLE_SSL
-          value: "false"
+          value: "password"
+        volumeMounts:
+        - mountPath: /backup
+          name: backup
+      volumes:
+      - name: backup
+        hostPath:
+          path: /home/toto/backup # directory location on host
+          type: Directory # this field is optional
       restartPolicy: Never
 ```
 ## Available image registries
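The next hunks move the `--storage` and `--path` flags from the root command onto the `backup` and `restore` subcommands, so each subcommand owns the options it actually uses. A minimal, self-contained sketch of the cobra pattern these hunks rely on (command names and output here are illustrative, not taken from the repository):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Subcommand that declares its own persistent flags, mirroring how
	// BackupCmd declares --storage and --path in this change.
	backupCmd := &cobra.Command{
		Use: "backup",
		Run: func(cmd *cobra.Command, args []string) {
			// Read the flag back at run time; the default keeps local storage.
			storage, _ := cmd.Flags().GetString("storage")
			fmt.Println("selected storage:", storage)
		},
	}
	backupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")

	rootCmd := &cobra.Command{Use: "mysql-bkup"}
	rootCmd.AddCommand(backupCmd)
	_ = rootCmd.Execute()
}
```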
default or scheduled") BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time") BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled") diff --git a/cmd/migrate.go b/cmd/migrate.go new file mode 100644 index 0000000..cc8c1d4 --- /dev/null +++ b/cmd/migrate.go @@ -0,0 +1,21 @@ +package cmd + +import ( + "github.com/jkaninda/mysql-bkup/pkg" + "github.com/jkaninda/mysql-bkup/utils" + "github.com/spf13/cobra" +) + +var MigrateCmd = &cobra.Command{ + Use: "migrate", + Short: "Migrate database from a source database to a target database", + Run: func(cmd *cobra.Command, args []string) { + if len(args) == 0 { + pkg.StartMigration(cmd) + } else { + utils.Fatal("Error, no argument required") + + } + + }, +} diff --git a/cmd/restore.go b/cmd/restore.go index 53f57d0..c1e76ff 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -24,5 +24,7 @@ var RestoreCmd = &cobra.Command{ func init() { //Restore RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database") + RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3") + RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`") } diff --git a/cmd/root.go b/cmd/root.go index 2237b0a..5a08ee6 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -30,13 +30,12 @@ func Execute() { } func init() { - rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3") - rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`") rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name") rootCmd.PersistentFlags().IntP("port", "p", 3306, "Database port") rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only") - rootCmd.AddCommand(VersionCmd) rootCmd.AddCommand(BackupCmd) rootCmd.AddCommand(RestoreCmd) + rootCmd.AddCommand(MigrateCmd) + } diff --git a/docker/Dockerfile b/docker/Dockerfile index e34fccd..dd1641a 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -14,7 +14,7 @@ ENV DB_HOST="" ENV DB_NAME="" ENV DB_USERNAME="" ENV DB_PASSWORD="" -ENV DB_PORT="3306" +ENV DB_PORT=3306 ENV STORAGE=local ENV AWS_S3_ENDPOINT="" ENV AWS_S3_BUCKET_NAME="" @@ -30,11 +30,15 @@ ENV SSH_PASSWORD="" ENV SSH_HOST_NAME="" ENV SSH_IDENTIFY_FILE="" ENV SSH_PORT="22" +ENV SOURCE_DB_HOST="" +ENV SOURCE_DB_PORT=3306 +ENV SOURCE_DB_NAME="" +ENV SOURCE_DB_USERNAME="" +ENV SOURCE_DB_PASSWORD="" ARG DEBIAN_FRONTEND=noninteractive -ENV VERSION="v1.2.2" +ENV VERSION="v1.2.3" ENV BACKUP_CRON_EXPRESSION="" -ENV GNUPGHOME="/tmp/gnupg" -ARG WORKDIR="/app" +ARG WORKDIR="/config" ARG BACKUPDIR="/backup" ARG BACKUP_TMP_DIR="/tmp/backup" ARG BACKUP_CRON="/etc/cron.d/backup_cron" @@ -49,16 +53,14 @@ RUN apt-get clean && rm -rf /var/lib/apt/lists/* RUN mkdir $WORKDIR RUN mkdir $BACKUPDIR -RUN mkdir -p $BACKUP_TMP_DIR && \ - mkdir -p $GNUPGHOME +RUN mkdir -p $BACKUP_TMP_DIR RUN chmod 777 $WORKDIR RUN chmod 777 $BACKUPDIR RUN chmod 777 $BACKUP_TMP_DIR RUN touch $BACKUP_CRON && \ touch $BACKUP_CRON_SCRIPT && \ chmod 777 $BACKUP_CRON && \ - chmod 777 $BACKUP_CRON_SCRIPT && \ - chmod 777 $GNUPGHOME + chmod 777 $BACKUP_CRON_SCRIPT COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup RUN chmod +x /usr/local/bin/mysql-bkup @@ -67,19 +69,15 @@ RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup ADD docker/supervisord.conf 
/etc/supervisor/supervisord.conf -WORKDIR $WORKDIR -# Create backup shell script -COPY < /usr/local/bin/backup && \ + chmod +x /usr/local/bin/backup +# Create restore script and make it executable +RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup restore "$@"' > /usr/local/bin/restore && \ chmod +x /usr/local/bin/restore +# Create migrate script and make it executable +RUN echo '#!/bin/sh\n/usr/local/bin/mysql-bkup migrate "$@"' > /usr/local/bin/migrate && \ + chmod +x /usr/local/bin/migrate + +WORKDIR $WORKDIR ENTRYPOINT ["/usr/local/bin/mysql-bkup"] diff --git a/docs/how-tos/migrate.md b/docs/how-tos/migrate.md new file mode 100644 index 0000000..9d553ba --- /dev/null +++ b/docs/how-tos/migrate.md @@ -0,0 +1,123 @@ +--- +title: Migrate database +layout: default +parent: How Tos +nav_order: 9 +--- + +# Migrate database + +To migrate the database, you need to add `migrate` command. + +{: .note } +The Mysql backup has another great feature: migrating your database from a source database to another. + +As you know, to restore a database from a source to a target database, you need 2 operations: which is to start by backing up the source database and then restoring the source backed database to the target database. +Instead of proceeding like that, you can use the integrated feature `(migrate)`, which will help you migrate your database by doing only one operation. + + +### Docker compose +```yml +services: + mysql-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/mysql-bkup/releases + # for a list of available releases. + image: jkaninda/mysql-bkup + container_name: mysql-bkup + command: migrate + volumes: + - ./backup:/backup + environment: + ## Target database + - DB_PORT=3306 + - DB_HOST=mysql + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## Source database + - SOURCE_DB_HOST=mysql2 + - SOURCE_DB_PORT=3306 + - SOURCE_DB_NAME=sourcedb + - SOURCE_DB_USERNAME=jonas + - SOURCE_DB_PASSWORD=password + # mysql-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` + +### Migrate database using Docker CLI + + +``` +## Target database +DB_PORT=3306 +DB_HOST=mysql +DB_NAME=targetdb +DB_USERNAME=targetuser +DB_PASSWORD=password + +## Source database +SOURCE_DB_HOST=mysql2 +SOURCE_DB_PORT=3306 +SOURCE_DB_NAME=sourcedb +SOURCE_DB_USERNAME=sourceuser +SOURCE_DB_PASSWORD=password +``` + +```shell + docker run --rm --network your_network_name \ + --env-file your-env + -v $PWD/backup:/backup/ \ + jkaninda/mysql-bkup migrate -d database_name +``` + +## Kubernetes + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: migrate-db +spec: + ttlSecondsAfterFinished: 100 + template: + spec: + containers: + - name: mysql-bkup + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/mysql-bkup/releases + # for a list of available releases. + image: jkaninda/mysql-bkup + command: + - /bin/sh + - -c + - migrate -d targetdb + resources: + limits: + memory: "128Mi" + cpu: "500m" + env: + ## Target DB + - name: DB_HOST + value: "postgres-target" + - name: DB_USERNAME + value: "mysql" + - name: DB_PASSWORD + value: "password" + ## Source DB + - name: SOURCE_DB_HOST + value: "postgres-source" + - name: SOURCE_DB_NAME + value: "sourcedb" + - name: SOURCE_DB_USERNAME + value: "postgres" + # Please use secret! 
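The new `migrate` documentation below describes a flow that pkg/migrate.go (later in this diff) implements: dump the source database to an intermediate file, then restore that file into the target. A simplified sketch of that two-step flow, with hypothetical `dump` and `restore` callbacks standing in for the package's real BackupDatabase/RestoreDatabase helpers:

```go
package pkg

import (
	"fmt"
	"time"
)

// migrateSketch outlines the migrate flow: back up the source, restore into the target.
// The dump and restore parameters are hypothetical stand-ins for the real helpers.
func migrateSketch(source, target dbConfig,
	dump func(dbConfig, string) error,
	restore func(dbConfig, string) error) error {

	// Name the intermediate dump after the source database and a timestamp.
	file := fmt.Sprintf("%s_%s.sql", source.dbName, time.Now().Format("20060102_150405"))

	// Step 1: dump the source database to the intermediate file.
	if err := dump(source, file); err != nil {
		return fmt.Errorf("backup of source failed: %w", err)
	}
	// Step 2: restore the dump into the target database.
	if err := restore(target, file); err != nil {
		return fmt.Errorf("restore into target failed: %w", err)
	}
	return nil
}
```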
diff --git a/docs/how-tos/migrate.md b/docs/how-tos/migrate.md
new file mode 100644
index 0000000..9d553ba
--- /dev/null
+++ b/docs/how-tos/migrate.md
@@ -0,0 +1,123 @@
+---
+title: Migrate database
+layout: default
+parent: How Tos
+nav_order: 9
+---
+
+# Migrate database
+
+To migrate a database, use the `migrate` command.
+
+{: .note }
+The MySQL backup tool has another useful feature: migrating a database from a source database to a target database.
+
+Moving a database from a source to a target normally takes two operations: back up the source database, then restore that backup into the target database.
+The integrated `migrate` feature performs both steps in a single operation.
+
+
+### Docker compose
+```yml
+services:
+  mysql-bkup:
+    # In production, it is advised to lock your image tag to a proper
+    # release version instead of using `latest`.
+    # Check https://github.com/jkaninda/mysql-bkup/releases
+    # for a list of available releases.
+    image: jkaninda/mysql-bkup
+    container_name: mysql-bkup
+    command: migrate
+    volumes:
+      - ./backup:/backup
+    environment:
+      ## Target database
+      - DB_PORT=3306
+      - DB_HOST=mysql
+      - DB_NAME=database
+      - DB_USERNAME=username
+      - DB_PASSWORD=password
+      ## Source database
+      - SOURCE_DB_HOST=mysql2
+      - SOURCE_DB_PORT=3306
+      - SOURCE_DB_NAME=sourcedb
+      - SOURCE_DB_USERNAME=jonas
+      - SOURCE_DB_PASSWORD=password
+    # mysql-bkup container must be connected to the same network with your database
+    networks:
+      - web
+networks:
+  web:
+```
+
+### Migrate database using Docker CLI
+
+
+```
+## Target database
+DB_PORT=3306
+DB_HOST=mysql
+DB_NAME=targetdb
+DB_USERNAME=targetuser
+DB_PASSWORD=password
+
+## Source database
+SOURCE_DB_HOST=mysql2
+SOURCE_DB_PORT=3306
+SOURCE_DB_NAME=sourcedb
+SOURCE_DB_USERNAME=sourceuser
+SOURCE_DB_PASSWORD=password
+```
+
+```shell
+ docker run --rm --network your_network_name \
+ --env-file your-env \
+ -v $PWD/backup:/backup/ \
+ jkaninda/mysql-bkup migrate -d database_name
+```
+
+## Kubernetes
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: migrate-db
+spec:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+      - name: mysql-bkup
+        # In production, it is advised to lock your image tag to a proper
+        # release version instead of using `latest`.
+        # Check https://github.com/jkaninda/mysql-bkup/releases
+        # for a list of available releases.
+        image: jkaninda/mysql-bkup
+        command:
+        - /bin/sh
+        - -c
+        - migrate -d targetdb
+        resources:
+          limits:
+            memory: "128Mi"
+            cpu: "500m"
+        env:
+          ## Target DB
+          - name: DB_HOST
+            value: "mysql-target"
+          - name: DB_USERNAME
+            value: "mysql"
+          - name: DB_PASSWORD
+            value: "password"
+          ## Source DB
+          - name: SOURCE_DB_HOST
+            value: "mysql-source"
+          - name: SOURCE_DB_NAME
+            value: "sourcedb"
+          - name: SOURCE_DB_USERNAME
+            value: "mysql"
+          # Please use secret!
+          - name: SOURCE_DB_PASSWORD
+            value: "password"
+      restartPolicy: Never
+```
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
index b7eed94..d1a25db 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -78,6 +78,49 @@ services:
 networks:
   web:
 ```
+## Kubernetes
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: backup-job
+spec:
+  ttlSecondsAfterFinished: 100
+  template:
+    spec:
+      containers:
+      - name: mysql-bkup
+        # In production, it is advised to lock your image tag to a proper
+        # release version instead of using `latest`.
+        # Check https://github.com/jkaninda/mysql-bkup/releases
+        # for a list of available releases.
+        image: jkaninda/mysql-bkup
+        command:
+        - /bin/sh
+        - -c
+        - backup -d dbname
+        resources:
+          limits:
+            memory: "128Mi"
+            cpu: "500m"
+        env:
+        - name: DB_HOST
+          value: "mysql"
+        - name: DB_USERNAME
+          value: "user"
+        - name: DB_PASSWORD
+          value: "password"
+        volumeMounts:
+        - mountPath: /backup
+          name: backup
+      volumes:
+      - name: backup
+        hostPath:
+          path: /home/toto/backup # directory location on host
+          type: Directory # this field is optional
+      restartPolicy: Never
+```
 
 ## Available image registries
 
diff --git a/docs/reference/index.md b/docs/reference/index.md
index ab7175f..bec4272 100644
--- a/docs/reference/index.md
+++ b/docs/reference/index.md
@@ -6,7 +6,7 @@ nav_order: 2
 
 # Configuration reference
 
-Backup and restore targets, schedule and retention are configured using environment variables or flags.
+Backup, restore and migrate targets, schedule and retention are configured using environment variables or flags.
 
 
 
@@ -19,6 +19,7 @@ Backup and restore targets, schedule and retention are configured using environm
 | mysql-bkup | bkup | CLI utility |
 | backup     |      | Backup database operation |
 | restore    |      | Restore database operation |
+| migrate    |      | Migrate database from one instance to another one |
 | --storage  | -s   | Storage. local or s3 (default: local) |
 | --file     | -f   | File name for restoration |
 | --path     |      | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` |
@@ -34,28 +35,33 @@ Backup and restore targets, schedule and retention are configured using environm
 
 ## Environment variables
 
-| Name | Requirement | Description |
-|------------------------|-----------------------------------------------------|------------------------------------------------------|
-| DB_PORT | Optional, default 3306 | Database port number |
-| DB_HOST | Required | Database host |
-| DB_NAME | Optional if it was provided from the -d flag | Database name |
-| DB_USERNAME | Required | Database user name |
-| DB_PASSWORD | Required | Database password |
-| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
-| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
-| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
-| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
-| AWS_REGION | Optional, required for S3 storage | AWS Region |
-| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
-| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
-| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Cron expression |
-| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
-| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
-| SSH_USER | Optional, required for SSH storage | ssh remote user |
-| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
-| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
-| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
-| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
+| Name | Requirement | Description |
+|------------------------|----------------------------------------------------|------------------------------------------------------|
+| DB_PORT | Optional, default 3306 | Database port number |
+| DB_HOST | Required | Database host |
+| DB_NAME | Optional if it was provided from the -d flag | Database name |
+| DB_USERNAME | Required | Database user name |
+| DB_PASSWORD | Required | Database password |
+| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key |
+| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key |
+| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name |
+| AWS_REGION | Optional, required for S3 storage | AWS Region |
+| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL |
+| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) |
+| BACKUP_CRON_EXPRESSION | Optional if it was provided from the --period flag | Backup cron expression for docker in scheduled mode |
+| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase |
+| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip |
+| SSH_USER | Optional, required for SSH storage | ssh remote user |
+| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password |
+| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key |
+| SSH_PORT | Optional, required for SSH storage | ssh remote server port |
+| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) |
+| SOURCE_DB_HOST | Optional, required for database migration | Source database host |
+| SOURCE_DB_PORT | Optional, required for database migration | Source database port |
+| SOURCE_DB_NAME | Optional, required for database migration | Source database name |
+| SOURCE_DB_USERNAME | Optional, required for database migration | Source database username |
+| SOURCE_DB_PASSWORD | Optional, required for database migration | Source database password |
 
 ---
 ## Run in Scheduled mode
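The SOURCE_DB_* variables documented above are validated the same way as the existing DB_* group: a plain list of required names checked against the environment (see utils.CheckEnvVars and the sdbRVars list further down in this diff). A minimal sketch of that validation pattern, assuming nothing beyond the standard library:

```go
package main

import (
	"fmt"
	"os"
)

// requiredSourceVars mirrors the sdbRVars list used for database migration.
var requiredSourceVars = []string{
	"SOURCE_DB_HOST",
	"SOURCE_DB_PORT",
	"SOURCE_DB_NAME",
	"SOURCE_DB_USERNAME",
	"SOURCE_DB_PASSWORD",
}

// checkEnvVars returns an error listing every required variable that is unset or empty.
func checkEnvVars(vars []string) error {
	var missing []string
	for _, v := range vars {
		if os.Getenv(v) == "" {
			missing = append(missing, v)
		}
	}
	if len(missing) > 0 {
		return fmt.Errorf("missing required environment variables: %v", missing)
	}
	return nil
}

func main() {
	if err := checkEnvVars(requiredSourceVars); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Println("source database configuration looks complete")
}
```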
diff --git a/pkg/backup.go b/pkg/backup.go
index a86b8f7..da35765 100644
--- a/pkg/backup.go
+++ b/pkg/backup.go
@@ -32,37 +32,38 @@ func StartBackup(cmd *cobra.Command) {
 	prune, _ := cmd.Flags().GetBool("prune")
 	disableCompression, _ = cmd.Flags().GetBool("disable-compression")
 	executionMode, _ = cmd.Flags().GetString("mode")
-	dbName = os.Getenv("DB_NAME")
 	gpqPassphrase := os.Getenv("GPG_PASSPHRASE")
 	_ = utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	dbConf = getDbConfig(cmd)
+	//
 	if gpqPassphrase != "" {
 		encryption = true
 	}
 	//Generate file name
-	backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
+	backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbConf.dbName, time.Now().Format("20060102_150405"))
 	if disableCompression {
-		backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
+		backupFileName = fmt.Sprintf("%s_%s.sql", dbConf.dbName, time.Now().Format("20060102_150405"))
 	}
 	if executionMode == "default" {
 		switch storage {
 		case "s3":
-			s3Backup(backupFileName, disableCompression, prune, backupRetention, encryption)
+			s3Backup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
 		case "local":
-			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
+			localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
 		case "ssh", "remote":
-			sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
+			sshBackup(dbConf, backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
 		case "ftp":
 			utils.Fatal("Not supported storage type: %s", storage)
 		default:
-			localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
+			localBackup(dbConf, backupFileName, disableCompression, prune, backupRetention, encryption)
 		}
 	} else if executionMode == "scheduled" {
-		scheduledMode(storage)
+		scheduledMode(dbConf, storage)
 	} else {
 		utils.Fatal("Error, unknown execution mode!")
 	}
@@ -70,7 +71,7 @@ func StartBackup(cmd *cobra.Command) {
 }
 
 // Run in scheduled mode
-func scheduledMode(storage string) {
+func scheduledMode(db *dbConfig, storage string) {
 
 	fmt.Println()
 	fmt.Println("**********************************")
@@ -81,7 +82,7 @@ func scheduledMode(storage string) {
 	utils.Info("Storage type %s ", storage)
 
 	//Test database connexion
-	utils.TestDatabaseConnection()
+	testDatabaseConnection(db)
 
 	utils.Info("Creating backup job...")
 	CreateCrontabScript(disableCompression, storage)
@@ -117,12 +118,7 @@ func scheduledMode(storage string) {
 }
 
 // BackupDatabase backup database
-func BackupDatabase(backupFileName string, disableCompression bool) {
-	dbHost = os.Getenv("DB_HOST")
-	dbPassword = os.Getenv("DB_PASSWORD")
-	dbUserName = os.Getenv("DB_USERNAME")
-	dbName = os.Getenv("DB_NAME")
-	dbPort = os.Getenv("DB_PORT")
+func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
 	storagePath = os.Getenv("STORAGE_PATH")
 
 	err := utils.CheckEnvVars(dbHVars)
@@ -132,7 +128,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 	}
 
 	utils.Info("Starting database backup...")
-	utils.TestDatabaseConnection()
+	testDatabaseConnection(db)
 	// Backup Database database
 	utils.Info("Backing up database...")
 
@@ -140,11 +136,11 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 	if disableCompression {
 		// Execute mysqldump
 		cmd := exec.Command("mysqldump",
-			"-h", dbHost,
-			"-P", dbPort,
-			"-u", dbUserName,
-			"--password="+dbPassword,
-			dbName,
+			"-h", db.dbHost,
+			"-P", db.dbPort,
+			"-u", db.dbUserName,
+			"--password="+db.dbPassword,
+			db.dbName,
 		)
 		output, err := cmd.Output()
 		if err != nil {
@@ -166,7 +162,7 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 
 	} else {
 		// Execute mysqldump
-		cmd := exec.Command("mysqldump", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName)
+		cmd := exec.Command("mysqldump", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName)
 		stdout, err := cmd.StdoutPipe()
 		if err != nil {
 			log.Fatal(err)
@@ -189,9 +185,9 @@ func BackupDatabase(backupFileName string, disableCompression bool) {
 	}
 }
 
-func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func localBackup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	utils.Info("Backup database to local storage")
-	BackupDatabase(backupFileName, disableCompression)
+	BackupDatabase(db, backupFileName, disableCompression)
 	finalFileName := backupFileName
 	if encrypt {
 		encryptBackup(backupFileName)
@@ -207,12 +203,12 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac
 	deleteTemp()
 }
 
-func s3Backup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func s3Backup(db *dbConfig, backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
 	s3Path := utils.GetEnvVariable("AWS_S3_PATH", "S3_PATH")
 	utils.Info("Backup database to s3 storage")
 	//Backup database
-	BackupDatabase(backupFileName, disableCompression)
+	BackupDatabase(db, backupFileName, disableCompression)
 	finalFileName := backupFileName
 	if encrypt {
 		encryptBackup(backupFileName)
@@ -243,10 +239,10 @@ func s3Backup(backupFileName string, disableCompression bool, prune bool, backup
 	//Delete temp
 	deleteTemp()
 }
-func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func sshBackup(db *dbConfig, backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	utils.Info("Backup database to Remote server")
 	//Backup database
-	BackupDatabase(backupFileName, disableCompression)
+	BackupDatabase(db, backupFileName, disableCompression)
 	finalFileName := backupFileName
 	if encrypt {
 		encryptBackup(backupFileName)
diff --git a/pkg/config.go b/pkg/config.go
index d0b5e01..59478b3 100644
--- a/pkg/config.go
+++ b/pkg/config.go
@@ -1,4 +1,59 @@
 package pkg
 
+import (
+	"github.com/jkaninda/mysql-bkup/utils"
+	"github.com/spf13/cobra"
+	"os"
+)
+
 type Config struct {
 }
+
+type dbConfig struct {
+	dbHost     string
+	dbPort     string
+	dbName     string
+	dbUserName string
+	dbPassword string
+}
+type dbSourceConfig struct {
+	sourceDbHost     string
+	sourceDbPort     string
+	sourceDbUserName string
+	sourceDbPassword string
+	sourceDbName     string
+}
+
+func getDbConfig(cmd *cobra.Command) *dbConfig {
+	//Set env
+	utils.GetEnv(cmd, "dbname", "DB_NAME")
+	utils.GetEnv(cmd, "port", "DB_PORT")
+	dConf := dbConfig{}
+	dConf.dbHost = os.Getenv("DB_HOST")
+	dConf.dbPort = os.Getenv("DB_PORT")
+	dConf.dbName = os.Getenv("DB_NAME")
+	dConf.dbUserName = os.Getenv("DB_USERNAME")
+	dConf.dbPassword = os.Getenv("DB_PASSWORD")
+
+	err := utils.CheckEnvVars(dbHVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for database are set")
+		utils.Fatal("Error checking environment variables: %s", err)
+	}
+	return &dConf
+}
+func getSourceDbConfig() *dbSourceConfig {
+	sdbConfig := dbSourceConfig{}
+	sdbConfig.sourceDbHost = os.Getenv("SOURCE_DB_HOST")
+	sdbConfig.sourceDbPort = os.Getenv("SOURCE_DB_PORT")
+	sdbConfig.sourceDbName = os.Getenv("SOURCE_DB_NAME")
+	sdbConfig.sourceDbUserName = os.Getenv("SOURCE_DB_USERNAME")
+	sdbConfig.sourceDbPassword = os.Getenv("SOURCE_DB_PASSWORD")
+
+	err := utils.CheckEnvVars(sdbRVars)
+	if err != nil {
+		utils.Error("Please make sure all required environment variables for source database are set")
+		utils.Fatal("Error checking environment variables: %s", err)
+	}
+	return &sdbConfig
+}
diff --git a/pkg/encrypt.go b/pkg/encrypt.go
index 5425ac0..01ee5ad 100644
--- a/pkg/encrypt.go
+++ b/pkg/encrypt.go
@@ -9,11 +9,17 @@ import (
 
 func Decrypt(inputFile string, passphrase string) error {
 	utils.Info("Decrypting backup file: " + inputFile + " ...")
+	//Create gpg home dir
+	err := utils.MakeDir(gpgHome)
+	if err != nil {
+		return err
+	}
+	utils.SetEnv("GNUPGHOME", gpgHome)
 	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
-	err := cmd.Run()
+	err = cmd.Run()
 	if err != nil {
 		return err
 	}
@@ -24,11 +30,17 @@ func Decrypt(inputFile string, passphrase string) error {
 
 func Encrypt(inputFile string, passphrase string) error {
 	utils.Info("Encrypting backup...")
+	//Create gpg home dir
+	err := utils.MakeDir(gpgHome)
+	if err != nil {
+		return err
+	}
+	utils.SetEnv("GNUPGHOME", gpgHome)
 	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
-	err := cmd.Run()
+	err = cmd.Run()
 	if err != nil {
 		return err
 	}
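The encrypt.go change above stops relying on a GNUPGHOME baked into the image and instead creates the directory at run time before shelling out to gpg. A standalone sketch of that pattern, assuming only the standard library (the paths and passphrase are placeholders):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// encryptWithGPG symmetrically encrypts inputFile with the given passphrase,
// using a private GNUPGHOME so gpg never touches the invoking user's keyring.
func encryptWithGPG(inputFile, passphrase, gpgHome string) error {
	// Create the GPG home directory with restrictive permissions.
	if err := os.MkdirAll(gpgHome, 0700); err != nil {
		return err
	}
	if err := os.Setenv("GNUPGHOME", gpgHome); err != nil {
		return err
	}
	// gpg writes <inputFile>.gpg next to the input by default.
	cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase,
		"--symmetric", "--cipher-algo", "aes256", inputFile)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	// Placeholder values for illustration only.
	if err := encryptWithGPG("/tmp/backup/db.sql.gz", "example-passphrase", "/tmp/backup/gnupg"); err != nil {
		fmt.Println("encryption failed:", err)
		os.Exit(1)
	}
}
```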
done", tmpPath) } } + +// TestDatabaseConnection tests the database connection +func testDatabaseConnection(db *dbConfig) { + + utils.Info("Connecting to %s database ...", db.dbName) + + cmd := exec.Command("mysql", "-h", db.dbHost, "-P", db.dbPort, "-u", db.dbUserName, "--password="+db.dbPassword, db.dbName, "-e", "quit") + + // Capture the output + var out bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &out + err := cmd.Run() + if err != nil { + utils.Error("Error testing database connection: %v\nOutput: %s", err, out.String()) + os.Exit(1) + + } + utils.Info("Successfully connected to %s database", db.dbName) + +} diff --git a/pkg/migrate.go b/pkg/migrate.go new file mode 100644 index 0000000..fd80703 --- /dev/null +++ b/pkg/migrate.go @@ -0,0 +1,31 @@ +package pkg + +import ( + "fmt" + "github.com/jkaninda/mysql-bkup/utils" + "github.com/spf13/cobra" + "time" +) + +func StartMigration(cmd *cobra.Command) { + utils.Info("Starting database migration...") + //Get DB config + dbConf = getDbConfig(cmd) + sDbConf = getSourceDbConfig() + + //Generate file name + backupFileName := fmt.Sprintf("%s_%s.sql", sDbConf.sourceDbName, time.Now().Format("20060102_150405")) + //Backup Source Database + newDbConfig := dbConfig{} + newDbConfig.dbHost = sDbConf.sourceDbHost + newDbConfig.dbPort = sDbConf.sourceDbPort + newDbConfig.dbName = sDbConf.sourceDbName + newDbConfig.dbUserName = sDbConf.sourceDbUserName + newDbConfig.dbPassword = sDbConf.sourceDbPassword + BackupDatabase(&newDbConfig, backupFileName, true) + //Restore source database into target database + utils.Info("Restoring [%s] database into [%s] database...", sDbConf.sourceDbName, dbConf.dbName) + RestoreDatabase(dbConf, backupFileName) + utils.Info("[%s] database has been restored into [%s] database", sDbConf.sourceDbName, dbConf.dbName) + utils.Info("Database migration completed!") +} diff --git a/pkg/restore.go b/pkg/restore.go index 592cddd..bb40d4a 100644 --- a/pkg/restore.go +++ b/pkg/restore.go @@ -13,8 +13,6 @@ func StartRestore(cmd *cobra.Command) { //Set env utils.SetEnv("STORAGE_PATH", storagePath) - utils.GetEnv(cmd, "dbname", "DB_NAME") - utils.GetEnv(cmd, "port", "DB_PORT") //Get flag value and set env s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH") @@ -23,47 +21,45 @@ func StartRestore(cmd *cobra.Command) { file = utils.GetEnv(cmd, "file", "FILE_NAME") executionMode, _ = cmd.Flags().GetString("mode") bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") + dbConf = getDbConfig(cmd) + switch storage { case "s3": - restoreFromS3(file, bucket, s3Path) + restoreFromS3(dbConf, file, bucket, s3Path) case "local": utils.Info("Restore database from local") copyToTmp(storagePath, file) - RestoreDatabase(file) + RestoreDatabase(dbConf, file) case "ssh": - restoreFromRemote(file, remotePath) + restoreFromRemote(dbConf, file, remotePath) case "ftp": utils.Fatal("Restore from FTP is not yet supported") default: utils.Info("Restore database from local") - RestoreDatabase(file) + copyToTmp(storagePath, file) + RestoreDatabase(dbConf, file) } } -func restoreFromS3(file, bucket, s3Path string) { +func restoreFromS3(db *dbConfig, file, bucket, s3Path string) { utils.Info("Restore database from s3") err := utils.DownloadFile(tmpPath, file, bucket, s3Path) if err != nil { utils.Fatal("Error download file from s3 %s %v", file, err) } - RestoreDatabase(file) + RestoreDatabase(db, file) } -func restoreFromRemote(file, remotePath string) { +func restoreFromRemote(db *dbConfig, file, remotePath string) { utils.Info("Restore database 
from remote server") err := CopyFromRemote(file, remotePath) if err != nil { utils.Fatal("Error download file from remote server: %s %v ", filepath.Join(remotePath, file), err) } - RestoreDatabase(file) + RestoreDatabase(db, file) } // RestoreDatabase restore database -func RestoreDatabase(file string) { - dbHost = os.Getenv("DB_HOST") - dbPassword = os.Getenv("DB_PASSWORD") - dbUserName = os.Getenv("DB_USERNAME") - dbName = os.Getenv("DB_NAME") - dbPort = os.Getenv("DB_PORT") +func RestoreDatabase(db *dbConfig, file string) { gpgPassphrase := os.Getenv("GPG_PASSPHRASE") if file == "" { utils.Fatal("Error, file required") @@ -93,7 +89,7 @@ func RestoreDatabase(file string) { } if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) { - utils.TestDatabaseConnection() + testDatabaseConnection(db) utils.Info("Restoring database...") extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file)) diff --git a/pkg/var.go b/pkg/var.go index 7fd1b58..a7afd00 100644 --- a/pkg/var.go +++ b/pkg/var.go @@ -4,16 +4,12 @@ const cronLogFile = "/var/log/mysql-bkup.log" const tmpPath = "/tmp/backup" const backupCronFile = "/usr/local/bin/backup_cron.sh" const algorithm = "aes256" +const gpgHome = "gnupg" const gpgExtension = "gpg" var ( storage = "local" file = "" - dbPassword = "" - dbUserName = "" - dbName = "" - dbHost = "" - dbPort = "3306" executionMode = "default" storagePath = "/backup" disableCompression = false @@ -27,6 +23,16 @@ var dbHVars = []string{ "DB_USERNAME", "DB_NAME", } +var sdbRVars = []string{ + "SOURCE_DB_HOST", + "SOURCE_DB_PORT", + "SOURCE_DB_NAME", + "SOURCE_DB_USERNAME", + "SOURCE_DB_PASSWORD", +} + +var dbConf *dbConfig +var sDbConf *dbSourceConfig // sshHVars Required environment variables for SSH remote server storage var sshHVars = []string{ diff --git a/utils/utils.go b/utils/utils.go index e53abcd..6b839c4 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -7,13 +7,11 @@ package utils * @link https://github.com/jkaninda/mysql-bkup **/ import ( - "bytes" "fmt" "github.com/spf13/cobra" "io" "io/fs" "os" - "os/exec" ) func FileExists(filename string) bool { @@ -90,34 +88,6 @@ func IsDirEmpty(name string) (bool, error) { return true, nil } -// TestDatabaseConnection tests the database connection -func TestDatabaseConnection() { - dbHost := os.Getenv("DB_HOST") - dbPassword := os.Getenv("DB_PASSWORD") - dbUserName := os.Getenv("DB_USERNAME") - dbName := os.Getenv("DB_NAME") - dbPort := os.Getenv("DB_PORT") - - if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" { - Fatal("Please make sure all required database environment variables are set") - } else { - Info("Connecting to database ...") - - cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit") - - // Capture the output - var out bytes.Buffer - cmd.Stdout = &out - cmd.Stderr = &out - err := cmd.Run() - if err != nil { - Error("Error testing database connection: %v\nOutput: %s", err, out.String()) - os.Exit(1) - - } - Info("Successfully connected to database") - } -} func GetEnv(cmd *cobra.Command, flagName, envName string) string { value, _ := cmd.Flags().GetString(flagName) if value != "" { @@ -182,3 +152,21 @@ func CheckEnvVars(vars []string) error { return nil } + +// MakeDir create directory +func MakeDir(dirPath string) error { + err := os.Mkdir(dirPath, 0700) + if err != nil { + return err + } + return nil +} + +// MakeDirAll create directory +func MakeDirAll(dirPath string) 
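For reference, the compressed branch of BackupDatabase (in pkg/backup.go above) streams mysqldump output through a pipe instead of buffering the whole dump in memory. A compact, self-contained sketch of that technique using the standard library's gzip writer (connection details are placeholders):

```go
package main

import (
	"compress/gzip"
	"io"
	"log"
	"os"
	"os/exec"
)

// dumpCompressed streams `mysqldump` output into a gzip-compressed file,
// so the full dump never has to fit in memory.
func dumpCompressed(host, port, user, password, dbName, outPath string) error {
	cmd := exec.Command("mysqldump", "-h", host, "-P", port, "-u", user, "--password="+password, dbName)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	gz := gzip.NewWriter(out)
	defer gz.Close()

	if err := cmd.Start(); err != nil {
		return err
	}
	// Copy the dump stream into the gzip writer as it is produced.
	if _, err := io.Copy(gz, stdout); err != nil {
		return err
	}
	return cmd.Wait()
}

func main() {
	// Placeholder connection values for illustration only.
	if err := dumpCompressed("mysql", "3306", "user", "password", "dbname", "/tmp/backup/dbname.sql.gz"); err != nil {
		log.Fatal(err)
	}
}
```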
diff --git a/utils/utils.go b/utils/utils.go
index e53abcd..6b839c4 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -7,13 +7,11 @@ package utils
 * @link https://github.com/jkaninda/mysql-bkup
 **/
 import (
-	"bytes"
 	"fmt"
 	"github.com/spf13/cobra"
 	"io"
 	"io/fs"
 	"os"
-	"os/exec"
 )
 
 func FileExists(filename string) bool {
@@ -90,34 +88,6 @@ func IsDirEmpty(name string) (bool, error) {
 	return true, nil
 }
 
-// TestDatabaseConnection tests the database connection
-func TestDatabaseConnection() {
-	dbHost := os.Getenv("DB_HOST")
-	dbPassword := os.Getenv("DB_PASSWORD")
-	dbUserName := os.Getenv("DB_USERNAME")
-	dbName := os.Getenv("DB_NAME")
-	dbPort := os.Getenv("DB_PORT")
-
-	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
-		Fatal("Please make sure all required database environment variables are set")
-	} else {
-		Info("Connecting to database ...")
-
-		cmd := exec.Command("mysql", "-h", dbHost, "-P", dbPort, "-u", dbUserName, "--password="+dbPassword, dbName, "-e", "quit")
-
-		// Capture the output
-		var out bytes.Buffer
-		cmd.Stdout = &out
-		cmd.Stderr = &out
-		err := cmd.Run()
-		if err != nil {
-			Error("Error testing database connection: %v\nOutput: %s", err, out.String())
-			os.Exit(1)
-
-		}
-		Info("Successfully connected to database")
-	}
-}
 func GetEnv(cmd *cobra.Command, flagName, envName string) string {
 	value, _ := cmd.Flags().GetString(flagName)
 	if value != "" {
@@ -182,3 +152,21 @@ func CheckEnvVars(vars []string) error {
 
 	return nil
 }
+
+// MakeDir creates a directory
+func MakeDir(dirPath string) error {
+	err := os.Mkdir(dirPath, 0700)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// MakeDirAll creates a directory along with any necessary parents
+func MakeDirAll(dirPath string) error {
+	err := os.MkdirAll(dirPath, 0700)
+	if err != nil {
+		return err
+	}
+	return nil
+}
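One behavioural note on the helpers added above: MakeDir wraps os.Mkdir, which fails when the directory already exists or its parent is missing, while MakeDirAll wraps os.MkdirAll, which succeeds in both cases. A small illustrative sketch (paths are placeholders):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.Mkdir creates exactly one level; it errors if /tmp/demo is missing
	// or if /tmp/demo/gnupg already exists.
	if err := os.Mkdir("/tmp/demo/gnupg", 0700); err != nil {
		fmt.Println("Mkdir:", err)
	}

	// os.MkdirAll creates missing parents and is a no-op when the
	// directory already exists, so repeated calls are safe.
	if err := os.MkdirAll("/tmp/demo/gnupg", 0700); err != nil {
		fmt.Println("MkdirAll:", err)
	}
	fmt.Println("directory ready")
}
```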