Merge pull request #132 from jkaninda/refactor

Refactor
2024-10-21 08:41:50 +02:00
committed by GitHub
12 changed files with 27 additions and 38 deletions

View File

@@ -22,20 +22,12 @@ LABEL version=${appVersion}
 LABEL github="github.com/jkaninda/mysql-bkup"
 RUN apk --update add --no-cache mysql-client mariadb-connector-c tzdata ca-certificates
-RUN mkdir $WORKDIR
-RUN mkdir $BACKUPDIR
-RUN mkdir $TEMPLATES_DIR
-RUN mkdir -p $BACKUP_TMP_DIR
-RUN chmod 777 $WORKDIR
-RUN chmod 777 $BACKUPDIR
-RUN chmod 777 $BACKUP_TMP_DIR
-RUN chmod 777 $WORKDIR
+RUN mkdir -p $WORKDIR $BACKUPDIR $TEMPLATES_DIR $BACKUP_TMP_DIR && \
+    chmod a+rw $WORKDIR $BACKUPDIR $BACKUP_TMP_DIR
 COPY --from=build /app/mysql-bkup /usr/local/bin/mysql-bkup
 COPY ./templates/* $TEMPLATES_DIR/
-RUN chmod +x /usr/local/bin/mysql-bkup
-RUN ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
+RUN chmod +x /usr/local/bin/mysql-bkup && \
+    ln -s /usr/local/bin/mysql-bkup /usr/local/bin/bkup
 # Create backup script and make it executable
 RUN printf '#!/bin/sh\n/usr/local/bin/mysql-bkup backup "$@"' > /usr/local/bin/backup && \

View File

@@ -27,7 +27,7 @@ var BackupCmd = &cobra.Command{
 func init() {
 	//Backup
-	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
 	BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
 	BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
 	BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
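
For context, a minimal sketch (not part of this PR) of how a cobra flag declared with StringP, like the updated --storage flag, is typically read back inside a command's Run function; the readStorage helper and the standalone command are hypothetical.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// readStorage returns the value of --storage / -s, falling back to the
// declared default ("local") if the flag lookup fails.
func readStorage(cmd *cobra.Command) string {
	storage, err := cmd.Flags().GetString("storage")
	if err != nil {
		return "local"
	}
	return storage
}

func main() {
	cmd := &cobra.Command{
		Use: "backup",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("storage:", readStorage(cmd))
		},
	}
	// Same declaration style as the flags above: name, shorthand, default, usage.
	cmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
	_ = cmd.Execute()
}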

View File

@@ -24,7 +24,7 @@ var RestoreCmd = &cobra.Command{
 func init() {
 	//Restore
 	RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
-	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
 	RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
 }

View File

@@ -37,7 +37,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
-      - AWS_FORCE_PATH_STYLE="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
     # mysql-bkup container must be connected to the same network with your database
     networks:
@@ -78,6 +78,7 @@ services:
       #- BACKUP_RETENTION_DAYS=7
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web
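
Side note (not from this diff): with aws-sdk-go v1, an AWS_FORCE_PATH_STYLE-style setting is usually wired into the S3 session as S3ForcePathStyle, which MinIO needs because it serves buckets under the URL path rather than a bucket subdomain. A hedged sketch, with placeholder environment variable names:

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

// newS3Session builds a session suitable for an S3 alternative such as MinIO.
// The environment variable names here are illustrative, not the project's own.
func newS3Session() (*session.Session, error) {
	return session.NewSession(&aws.Config{
		Endpoint:         aws.String(os.Getenv("AWS_S3_ENDPOINT")), // e.g. http://minio:9000
		Region:           aws.String(os.Getenv("AWS_REGION")),
		S3ForcePathStyle: aws.Bool(os.Getenv("AWS_FORCE_PATH_STYLE") == "true"),
		DisableSSL:       aws.Bool(os.Getenv("AWS_DISABLE_SSL") == "true"),
	})
}

func main() {
	if _, err := newS3Session(); err != nil {
		log.Fatal(err)
	}
	log.Println("session created")
}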

View File

@@ -21,6 +21,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
     # mysql-bkup container must be connected to the same network with your database
     networks:
       - web

View File

@@ -21,6 +21,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
       # See: https://jkaninda.github.io/mysql-bkup/reference/#predefined-schedules
       - BACKUP_CRON_EXPRESSION=@daily #@every 5m|@weekly | @monthly |0 1 * * *
     # mysql-bkup container must be connected to the same network with your database

View File

@@ -45,5 +45,5 @@ spec:
         - name: AWS_DISABLE_SSL
           value: "false"
         - name: AWS_FORCE_PATH_STYLE
-          value: "false"
+          value: "true"
       restartPolicy: Never

View File

@@ -195,7 +195,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
 	if err != nil {
 		log.Fatal(err)
 	}
-	utils.Done("Database has been backed up")
+	utils.Info("Database has been backed up")
 } else {
 	// Execute mysqldump
@@ -217,7 +217,7 @@ func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool
 	if err := gzipCmd.Wait(); err != nil {
 		log.Fatal(err)
 	}
-	utils.Done("Database has been backed up")
+	utils.Info("Database has been backed up")
 }
@@ -301,7 +301,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
 			utils.Fatal("Error deleting old backup from S3: %s ", err)
 		}
 	}
-	utils.Done("Uploading backup archive to remote storage S3 ... done ")
+	utils.Info("Uploading backup archive to remote storage S3 ... done ")
 	//Send notification
 	utils.NotifySuccess(&utils.NotificationData{
 		File: finalFileName,
@@ -353,7 +353,7 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
 	}
-	utils.Done("Uploading backup archive to remote storage ... done ")
+	utils.Info("Uploading backup archive to remote storage ... done ")
 	//Send notification
 	utils.NotifySuccess(&utils.NotificationData{
 		File: finalFileName,
@@ -405,7 +405,7 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
 	}
-	utils.Done("Uploading backup archive to the remote FTP server ... done ")
+	utils.Info("Uploading backup archive to the remote FTP server ... done ")
 	//Send notification
 	utils.NotifySuccess(&utils.NotificationData{
 		File: finalFileName,

View File

@@ -39,7 +39,7 @@ func moveToBackup(backupFileName string, destinationPath string) {
 		fmt.Println("Error deleting file:", err)
 	}
-	utils.Done("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
+	utils.Info("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
 }
 func deleteOldBackup(retentionDays int) {
 	utils.Info("Deleting old backups...")
@@ -54,7 +54,7 @@ func deleteOldBackup(retentionDays int) {
 		if err != nil {
 			utils.Fatal(fmt.Sprintf("Error: %s", err))
 		} else {
-			utils.Done("File %s has been deleted successfully", filePath)
+			utils.Info("File %s has been deleted successfully", filePath)
 		}
 		return err
 	}
@@ -81,7 +81,7 @@ func deleteOldBackup(retentionDays int) {
 		utils.Fatal(fmt.Sprintf("Error: %s", err))
 		return
 	}
-	utils.Done("Deleting old backups...done")
+	utils.Info("Deleting old backups...done")
 }
 func deleteTemp() {
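
For reference, a minimal sketch of the kind of retention sweep deleteOldBackup appears to perform locally: walk the backup directory and remove files older than the cutoff. The directory path, helper name, and details are assumptions, not the function's actual body.

package main

import (
	"log"
	"os"
	"path/filepath"
	"time"
)

// pruneBackups removes regular files under dir whose modification time is
// older than retentionDays ago. dir is a placeholder path.
func pruneBackups(dir string, retentionDays int) error {
	cutoff := time.Now().AddDate(0, 0, -retentionDays)
	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() && info.ModTime().Before(cutoff) {
			log.Printf("deleting old backup: %s", path)
			return os.Remove(path)
		}
		return nil
	})
}

func main() {
	if err := pruneBackups("/backup", 7); err != nil {
		log.Fatal(err)
	}
}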

View File

@@ -125,7 +125,7 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
utils.Fatal("Error, in restoring the database %v", err) utils.Fatal("Error, in restoring the database %v", err)
} }
utils.Info("Restoring database... done") utils.Info("Restoring database... done")
utils.Done("Database has been restored") utils.Info("Database has been restored")
//Delete temp //Delete temp
deleteTemp() deleteTemp()
@@ -137,7 +137,7 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
utils.Fatal("Error in restoring the database %v", err) utils.Fatal("Error in restoring the database %v", err)
} }
utils.Info("Restoring database... done") utils.Info("Restoring database... done")
utils.Done("Database has been restored") utils.Info("Database has been restored")
//Delete temp //Delete temp
deleteTemp() deleteTemp()
} else { } else {

View File

@@ -106,6 +106,8 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 	return nil
 }
 func DeleteOldBackup(bucket, prefix string, retention int) error {
+	utils.Info("Deleting old backups...")
+	utils.Info("Bucket %s Prefix: %s Retention: %d", bucket, prefix, retention)
 	sess, err := CreateSession()
 	if err != nil {
 		return err
@@ -113,7 +115,7 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 	svc := s3.New(sess)
-	// Get the current time and the time threshold for 7 days ago
+	// Get the current time
 	now := time.Now()
 	backupRetentionDays := now.AddDate(0, 0, -retention)
@@ -125,6 +127,7 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 	err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
 		for _, object := range page.Contents {
 			if object.LastModified.Before(backupRetentionDays) {
+				utils.Info("Deleting old backup: %s", *object.Key)
 				// Object is older than retention days, delete it
 				_, err := svc.DeleteObject(&s3.DeleteObjectInput{
 					Bucket: aws.String(bucket),
@@ -133,7 +136,7 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 				if err != nil {
 					utils.Info("Failed to delete object %s: %v", *object.Key, err)
 				} else {
-					utils.Info("Deleted object %s\n", *object.Key)
+					utils.Info("Deleted object %s", *object.Key)
 				}
 			}
 		}
@@ -143,6 +146,6 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 		utils.Error("Failed to list objects: %v", err)
 	}
-	utils.Info("Finished deleting old files.")
+	utils.Info("Deleting old backups...done")
 	return nil
 }

View File

@@ -41,15 +41,6 @@ func Error(msg string, args ...any) {
 		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
 	}
 }
-func Done(msg string, args ...any) {
-	var currentTime = time.Now().Format("2006/01/02 15:04:05")
-	formattedMessage := fmt.Sprintf(msg, args...)
-	if len(args) == 0 {
-		fmt.Printf("%s INFO: %s\n", currentTime, msg)
-	} else {
-		fmt.Printf("%s INFO: %s\n", currentTime, formattedMessage)
-	}
-}
 // Fatal logs an error message and exits the program
 func Fatal(msg string, args ...any) {
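
The removed Done helper printed at the INFO level with the same timestamp format, so it duplicated Info; a minimal runnable sketch of the consolidated behaviour, assuming the retained Info mirrors the removed body shown above:

package main

import (
	"fmt"
	"time"
)

// Info logs a timestamped INFO message; with no args the message is printed
// as-is, otherwise it is treated as a Printf-style format string.
func Info(msg string, args ...any) {
	currentTime := time.Now().Format("2006/01/02 15:04:05")
	if len(args) == 0 {
		fmt.Printf("%s INFO: %s\n", currentTime, msg)
		return
	}
	fmt.Printf("%s INFO: %s\n", currentTime, fmt.Sprintf(msg, args...))
}

func main() {
	Info("Database has been backed up")
	Info("File %s has been deleted successfully", "/backup/db.sql.gz")
}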