Merge pull request #120 from jkaninda/refactor

Refactor
Committed by GitHub on 2024-10-21 08:40:57 +02:00
11 changed files with 24 additions and 35 deletions

View File

@@ -22,19 +22,12 @@ LABEL version=${appVersion}
 LABEL github="github.com/jkaninda/pg-bkup"
 RUN apk --update add --no-cache postgresql-client tzdata ca-certificates
-RUN mkdir $WORKDIR
-RUN mkdir $BACKUPDIR
-RUN mkdir $TEMPLATES_DIR
-RUN mkdir -p $BACKUP_TMP_DIR
-RUN chmod 777 $WORKDIR
-RUN chmod 777 $BACKUPDIR
-RUN chmod 777 $BACKUP_TMP_DIR
-RUN chmod 777 $WORKDIR
+RUN mkdir -p $WORKDIR $BACKUPDIR $TEMPLATES_DIR $BACKUP_TMP_DIR && \
+    chmod a+rw $WORKDIR $BACKUPDIR $BACKUP_TMP_DIR
 COPY --from=build /app/pg-bkup /usr/local/bin/pg-bkup
 COPY ./templates/* $TEMPLATES_DIR/
-RUN chmod +x /usr/local/bin/pg-bkup
-RUN ln -s /usr/local/bin/pg-bkup /usr/local/bin/bkup
+RUN chmod +x /usr/local/bin/pg-bkup && \
+    ln -s /usr/local/bin/pg-bkup /usr/local/bin/bkup
 # Create the backup script and make it executable
 RUN printf '#!/bin/sh\n/usr/local/bin/pg-bkup backup "$@"' > /usr/local/bin/backup && \
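Side note: folding the eight single-purpose RUN instructions into two chained ones drops six image layers, and chmod a+rw grants read/write without the execute bit that the old chmod 777 handed out (the duplicated chmod 777 $WORKDIR also goes away).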

View File

@@ -28,7 +28,7 @@ var BackupCmd = &cobra.Command{
 func init() {
 	//Backup
-	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	BackupCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
 	BackupCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
 	BackupCmd.PersistentFlags().StringP("cron-expression", "", "", "Backup cron expression")
 	BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
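For context, a minimal sketch of how the storage flag declared above can be read back in a Run handler. The validation switch is an illustration built from the values the new help text advertises, not code from this PR:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "backup",
		Run: func(cmd *cobra.Command, args []string) {
			// Read the flag back; cobra returns the default "local" when unset.
			storage, _ := cmd.Flags().GetString("storage")
			switch storage {
			case "local", "s3", "ssh", "ftp": // values named in the new help text
				fmt.Println("using storage:", storage)
			default:
				fmt.Fprintf(os.Stderr, "unknown storage type: %s\n", storage)
				os.Exit(1)
			}
		},
	}
	cmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
	_ = cmd.Execute()
}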

View File

@@ -30,7 +30,7 @@ var RestoreCmd = &cobra.Command{
 func init() {
 	//Restore
 	RestoreCmd.PersistentFlags().StringP("file", "f", "", "File name of database")
-	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3")
+	RestoreCmd.PersistentFlags().StringP("storage", "s", "local", "Define storage: local, s3, ssh, ftp")
 	RestoreCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`")
 }

View File

@@ -37,7 +37,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
-      - AWS_FORCE_PATH_STYLE="false"
+      - AWS_FORCE_PATH_STYLE=false # true for S3 alternative such as Minio
     # pg-bkup container must be connected to the same network with your database
     networks:
@@ -78,6 +78,7 @@ services:
       #- BACKUP_RETENTION_DAYS=7
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=true # true for S3 alternative such as Minio
     # pg-bkup container must be connected to the same network with your database
     networks:
       - web
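The new AWS_FORCE_PATH_STYLE toggle matters because S3 alternatives such as Minio typically require path-style addressing (http://host/bucket/key) rather than AWS's virtual-hosted style. A hedged sketch of how these two env vars usually map onto an aws-sdk-go session follows; the wiring is illustrative rather than pg-bkup's actual code, and AWS_S3_ENDPOINT/AWS_REGION are assumed variable names:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// DisableSSL and S3ForcePathStyle mirror AWS_DISABLE_SSL and
	// AWS_FORCE_PATH_STYLE from the compose examples above.
	sess, err := session.NewSession(&aws.Config{
		Endpoint:         aws.String(os.Getenv("AWS_S3_ENDPOINT")), // assumed name
		Region:           aws.String(os.Getenv("AWS_REGION")),      // assumed name
		DisableSSL:       aws.Bool(os.Getenv("AWS_DISABLE_SSL") == "true"),
		S3ForcePathStyle: aws.Bool(os.Getenv("AWS_FORCE_PATH_STYLE") == "true"),
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	_ = sess // hand the session to s3.New(sess) as the backup code does
}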

View File

@@ -21,6 +21,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=false # true for S3 alternative such as Minio
     # pg-bkup container must be connected to the same network with your database
     networks:
       - web

View File

@@ -21,6 +21,7 @@ services:
       - AWS_SECRET_KEY=xxxxx
       ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true
       - AWS_DISABLE_SSL="false"
+      - AWS_FORCE_PATH_STYLE=false # true for S3 alternative such as Minio
       # Check https://jkaninda.github.io/pg-bkup/reference/#predefined-schedules
       - BACKUP_CRON_EXPRESSION=@daily #@every 5m|@weekly | @monthly |0 1 * * *
     # pg-bkup container must be connected to the same network with your database

View File

@@ -308,7 +308,7 @@ func s3Backup(db *dbConfig, config *BackupConfig) {
 			utils.Fatal("Error deleting old backup from S3: %s ", err)
 		}
 	}
-	utils.Done("Uploading backup archive to remote storage S3 ... done ")
+	utils.Info("Uploading backup archive to remote storage S3 ... done ")
 	//Send notification
 	utils.NotifySuccess(&utils.NotificationData{
 		File: finalFileName,
@@ -360,7 +360,7 @@ func sshBackup(db *dbConfig, config *BackupConfig) {
 	}
-	utils.Done("Uploading backup archive to remote storage ... done ")
+	utils.Info("Uploading backup archive to remote storage ... done ")
 	//Send notification
 	utils.NotifySuccess(&utils.NotificationData{
 		File: finalFileName,
@@ -412,7 +412,7 @@ func ftpBackup(db *dbConfig, config *BackupConfig) {
 	}
-	utils.Done("Uploading backup archive to the remote FTP server ... done ")
+	utils.Info("Uploading backup archive to the remote FTP server ... done ")
 	//Send notification
 	utils.NotifySuccess(&utils.NotificationData{

View File

@@ -45,7 +45,7 @@ func moveToBackup(backupFileName string, destinationPath string) {
 		utils.Error("Error deleting file: %s", err)
 	}
-	utils.Done("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
+	utils.Info("Database has been backed up and copied to %s", filepath.Join(destinationPath, backupFileName))
 }
 func deleteOldBackup(retentionDays int) {
 	utils.Info("Deleting old backups...")
@@ -60,7 +60,7 @@ func deleteOldBackup(retentionDays int) {
 		if err != nil {
 			utils.Fatal("Error:", err)
 		} else {
-			utils.Done("File %s deleted successfully", filePath)
+			utils.Info("File %s deleted successfully", filePath)
 		}
 		return err
 	}
@@ -87,7 +87,7 @@ func deleteOldBackup(retentionDays int) {
 		utils.Fatal("Error:", err)
 		return
 	}
-	utils.Done("Deleting old backups...done")
+	utils.Info("Deleting old backups...done")
 }
 func deleteTemp() {
 	utils.Info("Deleting %s ...", tmpPath)

View File

@@ -127,7 +127,7 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
 		utils.Fatal("Error, in restoring the database %v", err)
 	}
 	utils.Info("Restoring database... done")
-	utils.Done("Database has been restored")
+	utils.Info("Database has been restored")
 	//Delete temp
 	deleteTemp()
@@ -139,7 +139,7 @@ func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
 		utils.Fatal("Error in restoring the database %v", err)
 	}
 	utils.Info("Restoring database... done")
-	utils.Done("Database has been restored")
+	utils.Info("Database has been restored")
 	//Delete temp
 	deleteTemp()
 } else {

View File

@@ -106,6 +106,8 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 	return nil
 }
 func DeleteOldBackup(bucket, prefix string, retention int) error {
+	utils.Info("Deleting old backups...")
+	utils.Info("Bucket %s Prefix: %s Retention: %d", bucket, prefix, retention)
 	sess, err := CreateSession()
 	if err != nil {
 		return err
@@ -113,7 +115,7 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 	svc := s3.New(sess)
-	// Get the current time and the time threshold for 7 days ago
+	// Get the current time
 	now := time.Now()
 	backupRetentionDays := now.AddDate(0, 0, -retention)
@@ -125,6 +127,7 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 	err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
 		for _, object := range page.Contents {
 			if object.LastModified.Before(backupRetentionDays) {
+				utils.Info("Deleting old backup: %s", *object.Key)
 				// Object is older than retention days, delete it
 				_, err := svc.DeleteObject(&s3.DeleteObjectInput{
 					Bucket: aws.String(bucket),
@@ -133,7 +136,7 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 				if err != nil {
 					utils.Info("Failed to delete object %s: %v", *object.Key, err)
 				} else {
-					utils.Info("Deleted object %s\n", *object.Key)
+					utils.Info("Deleted object %s", *object.Key)
 				}
 			}
 		}
@@ -143,6 +146,6 @@ func DeleteOldBackup(bucket, prefix string, retention int) error {
 		utils.Error("Failed to list objects: %v", err)
 	}
-	utils.Info("Finished deleting old files.")
+	utils.Info("Deleting old backups...done")
 	return nil
 }
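The retention logic in this hunk reduces to a single time comparison; here is a minimal, self-contained sketch of that cutoff computation (the example object age is made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	retention := 7 // days, e.g. BACKUP_RETENTION_DAYS
	cutoff := time.Now().AddDate(0, 0, -retention)

	lastModified := time.Now().AddDate(0, 0, -10) // a 10-day-old backup
	if lastModified.Before(cutoff) {
		fmt.Println("older than the retention window: delete it")
	}
}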

View File

@@ -44,16 +44,6 @@ func Error(msg string, args ...any) {
 		fmt.Printf("%s ERROR: %s\n", currentTime, formattedMessage)
 	}
 }
-func Done(msg string, args ...any) {
-	var currentTime = time.Now().Format("2006/01/02 15:04:05")
-	formattedMessage := fmt.Sprintf(msg, args...)
-	if len(args) == 0 {
-		fmt.Printf("%s INFO: %s\n", currentTime, msg)
-	} else {
-		fmt.Printf("%s INFO: %s\n", currentTime, formattedMessage)
-	}
-}
 func Fatal(msg string, args ...any) {
 	var currentTime = time.Now().Format("2006/01/02 15:04:05")
 	// Fatal logs an error message and exits the program.