From 53e8bfed350a8114c9bece4371c80119777b19ad Mon Sep 17 00:00:00 2001
From: Jonas Kaninda
Date: Fri, 19 Jan 2024 06:32:30 +0100
Subject: [PATCH] refactor: move backup, restore, s3fs tasks in pkg folder

---
 build.sh                                     |   2 +-
 cmd/root.go                                  |  54 +++
 {example => examples}/docker-compose.s3.yaml |   2 +-
 .../docker-compose.scheduled.local.yaml      |   0
 .../docker-compose.scheduled.s3.yaml         |   2 +-
 {example => examples}/docker-compose.yaml    |   0
 {example => examples}/k8s-job.yaml           |   0
 go.mod                                       |   8 +-
 go.sum                                       |   6 +
 main.go                                      | 347 +++----------------
 pkg/backup.go                                | 110 ++++++
 pkg/restore.go                               |  58 +++
 pkg/s3fs.go                                  |  76 ++++
 pkg/scripts.go                               |  79 ++++
 utils/utils.go                               |  11 +-
 15 files changed, 456 insertions(+), 299 deletions(-)
 create mode 100644 cmd/root.go
 rename {example => examples}/docker-compose.s3.yaml (94%)
 rename {example => examples}/docker-compose.scheduled.local.yaml (100%)
 rename {example => examples}/docker-compose.scheduled.s3.yaml (88%)
 rename {example => examples}/docker-compose.yaml (100%)
 rename {example => examples}/k8s-job.yaml (100%)
 create mode 100644 pkg/backup.go
 create mode 100644 pkg/restore.go
 create mode 100644 pkg/s3fs.go
 create mode 100644 pkg/scripts.go

diff --git a/build.sh b/build.sh
index 3e7bf7f..3ffb8f1 100755
--- a/build.sh
+++ b/build.sh
@@ -7,7 +7,7 @@ if [ $# -eq 0 ]
   fi

 #go build
-#CGO_ENABLED=0 GOOS=linux go build
+CGO_ENABLED=0 GOOS=linux go build

 docker build -f docker/Dockerfile -t jkaninda/pg-bkup:$tag .
diff --git a/cmd/root.go b/cmd/root.go
new file mode 100644
index 0000000..d2db072
--- /dev/null
+++ b/cmd/root.go
@@ -0,0 +1,54 @@
+// Package cmd /*
+/*
+Copyright © 2024 Jonas Kaninda
+*/
+package cmd
+
+import (
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+// rootCmd represents the base command when called without any subcommands
+var rootCmd = &cobra.Command{
+	Use:   "pg-bkup",
+	Short: "PostgreSQL backup tool: back up databases to S3 or other object storage",
+	Long:  `PostgreSQL backup and restoration tool. Back up databases to AWS S3 or any S3-compatible object storage.`,
+	// Uncomment the following line if your bare application
+	// has an action associated with it:
+	// Run: func(cmd *cobra.Command, args []string) { },
+}
+
+// Execute adds all child commands to the root command and sets flags appropriately.
+// This is called by main.main(). It only needs to happen once to the rootCmd.
+func Execute() {
+	err := rootCmd.Execute()
+	if err != nil {
+		os.Exit(1)
+	}
+}
+
+func init() {
+	// Here you will define your flags and configuration settings.
+	// Cobra supports persistent flags, which, if defined here,
+	// will be global for your application.
+
+	// rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.pg-bkup.yaml)")
+
+	// Cobra also supports local flags, which will only run
+	// when this action is called directly.
+	rootCmd.PersistentFlags().StringP("operation", "o", "backup", "Set operation")
+	rootCmd.PersistentFlags().StringP("storage", "s", "local", "Set storage. local or s3")
+	rootCmd.PersistentFlags().StringP("file", "f", "", "Set file name")
+	rootCmd.PersistentFlags().StringP("path", "P", "/pg-bkup", "Set s3 path, without file name")
+	rootCmd.PersistentFlags().StringP("dbname", "d", "", "Set database name")
+	rootCmd.PersistentFlags().StringP("mode", "m", "default", "Set execution mode. default or scheduled")
+	rootCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Set schedule period time")
+	rootCmd.PersistentFlags().IntP("timeout", "t", 30, "Set timeout")
+	rootCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression")
+	rootCmd.PersistentFlags().IntP("port", "p", 5432, "Set database port")
+	rootCmd.PersistentFlags().BoolP("help", "h", false, "Print this help message")
+	rootCmd.PersistentFlags().BoolP("version", "v", false, "Show version information")
+
+}
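
Note: this commit only adds the Cobra root command; main.go still parses flags with pflag and the call to cmd.Execute() stays commented out. As a rough, hypothetical sketch only (the "backup" subcommand below is not part of this patch), a follow-up could route a Cobra subcommand to the new pkg entry points:

// Hypothetical sketch, not code from this commit: a "backup" subcommand
// that delegates to the relocated pkg.Backup entry point.
package cmd

import (
	"github.com/jkaninda/pg-bkup/pkg"
	"github.com/spf13/cobra"
)

var backupCmd = &cobra.Command{
	Use:   "backup",
	Short: "Back up a PostgreSQL database",
	Run: func(cmd *cobra.Command, args []string) {
		// --disable-compression is declared on rootCmd as a persistent flag.
		disableCompression, _ := cmd.Flags().GetBool("disable-compression")
		pkg.Backup(disableCompression)
	},
}

func init() {
	rootCmd.AddCommand(backupCmd)
}
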
diff --git a/example/docker-compose.s3.yaml b/examples/docker-compose.s3.yaml
similarity index 94%
rename from example/docker-compose.s3.yaml
rename to examples/docker-compose.s3.yaml
index f4247b0..96e3032 100644
--- a/example/docker-compose.s3.yaml
+++ b/examples/docker-compose.s3.yaml
@@ -9,7 +9,7 @@ services:
     command:
       - /bin/sh
       - -c
-      - bkup --operation backup --storage s3 --path /mys3_custome_path --dbname database_name
+      - bkup --operation backup --storage s3 --path /mys3_custom_path --dbname database_name
     environment:
       - DB_PORT=5432
       - DB_HOST=postgress
diff --git a/example/docker-compose.scheduled.local.yaml b/examples/docker-compose.scheduled.local.yaml
similarity index 100%
rename from example/docker-compose.scheduled.local.yaml
rename to examples/docker-compose.scheduled.local.yaml
diff --git a/example/docker-compose.scheduled.s3.yaml b/examples/docker-compose.scheduled.s3.yaml
similarity index 88%
rename from example/docker-compose.scheduled.s3.yaml
rename to examples/docker-compose.scheduled.s3.yaml
index 0bc3a83..15cb628 100644
--- a/example/docker-compose.scheduled.s3.yaml
+++ b/examples/docker-compose.scheduled.s3.yaml
@@ -9,7 +9,7 @@ services:
     command:
       - /bin/sh
       - -c
-      - bkup --operation backup --storage s3 --path /mys3_custome_path --dbname database_name --mode scheduled --period "0 1 * * *"
+      - bkup --operation backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "0 1 * * *"
     environment:
       - DB_PORT=5432
       - DB_HOST=postgress
diff --git a/example/docker-compose.yaml b/examples/docker-compose.yaml
similarity index 100%
rename from example/docker-compose.yaml
rename to examples/docker-compose.yaml
diff --git a/example/k8s-job.yaml b/examples/k8s-job.yaml
similarity index 100%
rename from example/k8s-job.yaml
rename to examples/k8s-job.yaml
diff --git a/go.mod b/go.mod
index 647a54d..21ff690 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,10 @@
 module github.com/jkaninda/pg-bkup

 go 1.21.0
-require(
-github.com/spf13/pflag v1.0.5
+
+require github.com/spf13/pflag v1.0.5
+
+require (
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/spf13/cobra v1.8.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 287f6fa..aab9122 100644
--- a/go.sum
+++ b/go.sum
@@ -1,2 +1,8 @@
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/main.go b/main.go
index c735787..34fd4d3 100644
--- a/main.go
+++ b/main.go
@@ -8,14 +8,11 @@ package main
 **/
 import (
 	"fmt"
-	"log"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"time"
-
+	"github.com/jkaninda/pg-bkup/pkg"
 	"github.com/jkaninda/pg-bkup/utils"
 	flag "github.com/spf13/pflag"
+	"os"
+	"os/exec"
 )

 var appVersion string = os.Getenv("VERSION")
@@ -23,43 +20,43 @@ var appVersion string = os.Getenv("VERSION")
 const s3MountPath string = "/s3mnt"

 var (
-	operation          string = "backup"
-	storage            string = "local"
-	file               string = ""
-	s3Path             string = "/pg-bkup"
-	dbName             string = ""
-	dbHost             string = ""
-	dbPort             string = "5432"
-	dbPassword         string = ""
-	dbUserName         string = ""
-	executionMode      string = "default"
-	storagePath        string = "/backup"
-	accessKey          string = ""
-	secretKey          string = ""
-	bucketName         string = ""
-	s3Endpoint         string = ""
-	s3fsPasswdFile     string = "/etc/passwd-s3fs"
-	disableCompression bool   = false
-	startBackup        bool   = true
-	outputContent      string = ""
-	timeout            int    = 30
-	period             string = "0 1 * * *"
+	operation          = "backup"
+	storage            = "local"
+	file               = ""
+	s3Path             = "/pg-bkup"
+	dbName             = ""
+	dbHost             = ""
+	dbPort             = "5432"
+	dbPassword         = ""
+	dbUserName         = ""
+	executionMode      = "default"
+	storagePath        = "/backup"
+	accessKey          = ""
+	secretKey          = ""
+	bucketName         = ""
+	s3Endpoint         = ""
+	s3fsPasswdFile     = "/etc/passwd-s3fs"
+	disableCompression = false
+	startBackup        = true
+
+	timeout int    = 30
+	period  string = "0 1 * * *"
 )

 func init() {
 	var (
-		operationFlag          = flag.StringP("operation", "o", "backup", "Set operation")
-		storageFlag            = flag.StringP("storage", "s", "local", "Set storage. local or s3")
-		fileFlag               = flag.StringP("file", "f", "", "Set file name")
-		pathFlag               = flag.StringP("path", "P", "/mysql-bkup", "Set s3 path, without file name")
-		dbnameFlag             = flag.StringP("dbname", "d", "", "Set database name")
-		modeFlag               = flag.StringP("mode", "m", "default", "Set execution mode. default or scheduled")
-		periodFlag             = flag.StringP("period", "", "0 1 * * *", "Set schedule period time")
-		timeoutFlag            = flag.IntP("timeout", "t", 30, "Set timeout")
+		operationFlag          = flag.StringP("operation", "o", "backup", "Operation")
+		storageFlag            = flag.StringP("storage", "s", "local", "Storage, local or s3")
+		fileFlag               = flag.StringP("file", "f", "", "File name")
+		pathFlag               = flag.StringP("path", "P", "/pg-bkup", "S3 path, without file name")
+		dbnameFlag             = flag.StringP("dbname", "d", "", "Database name")
+		modeFlag               = flag.StringP("mode", "m", "default", "Execution mode. default or scheduled")
+		periodFlag             = flag.StringP("period", "", "0 1 * * *", "Schedule period time")
+		timeoutFlag            = flag.IntP("timeout", "t", 30, "Timeout (in seconds) to stop the database connection")
 		disableCompressionFlag = flag.BoolP("disable-compression", "", false, "Disable backup compression")
-		portFlag               = flag.IntP("port", "p", 5432, "Set database port")
+		portFlag               = flag.IntP("port", "p", 5432, "Database port")
 		helpFlag               = flag.BoolP("help", "h", false, "Print this help message")
-		versionFlag            = flag.BoolP("version", "v", false, "shows version information")
+		versionFlag            = flag.BoolP("version", "v", false, "Version information")
 	)
 	flag.Parse()
@@ -75,7 +72,8 @@ func init() {
 	disableCompression = *disableCompressionFlag

 	flag.Usage = func() {
-		fmt.Print("Usage: bkup -o backup -s s3 -d databasename --path /my_path ...\n")
+		fmt.Print("PostgreSQL backup and restoration tool. Back up databases to AWS S3 or any S3-compatible object storage.\n\n")
+		fmt.Print("Usage: bkup --operation backup --storage s3 --dbname databasename --path /my_path ...\n")
 		fmt.Print(" bkup -o backup -d databasename --disable-compression ...\n")
 		fmt.Print(" Restore: bkup -o restore -d databasename -f db_20231217_051339.sql.gz ...\n\n")
 		flag.PrintDefaults()
@@ -113,7 +111,7 @@ func init() {
 		}
 	}

-	if *portFlag != 5432 {
+	if *portFlag != 5432 {
 		err := os.Setenv("DB_PORT", fmt.Sprint(*portFlag))
 		if err != nil {
 			return
@@ -131,18 +129,11 @@ func init() {
 			return
 		}
 	}
-	dbHost = os.Getenv("DB_HOST")
-	dbPassword = os.Getenv("DB_PASSWORD")
-	dbUserName = os.Getenv("DB_USERNAME")
-	dbName = os.Getenv("DB_NAME")
-	dbPort = os.Getenv("DB_PORT")
-	period = os.Getenv("SCHEDULE_PERIOD")
 	storage = os.Getenv("STORAGE")
-
-	accessKey = os.Getenv("ACCESS_KEY")
-	secretKey = os.Getenv("SECRET_KEY")
-	bucketName = os.Getenv("BUCKETNAME")
-	s3Endpoint = os.Getenv("S3_ENDPOINT")
+	err := os.Setenv("STORAGE_PATH", storagePath)
+	if err != nil {
+		return
+	}

 }
@@ -151,6 +142,8 @@ func version() {
 	fmt.Print()
 }
 func main() {
+	//cmd.Execute()
+
 	err := os.Setenv("STORAGE_PATH", storagePath)
 	if err != nil {
 		return
@@ -167,7 +160,7 @@ func start() {
 	if operation != "backup" {
 		if storage != "s3" {
 			utils.Info("Restore from local")
-			restore()
+			pkg.Restore(file)
 		} else {
 			utils.Info("Restore from s3")
 			s3Restore()
@@ -175,7 +168,7 @@ func start() {
 	} else {
 		if storage != "s3" {
 			utils.Info("Backup to local storage")
-			backup()
+			pkg.Backup(disableCompression)
 		} else {
 			utils.Info("Backup to s3 storage")
 			s3Backup()
@@ -187,128 +180,9 @@ func start() {
 		utils.Fatal("Error, unknown execution mode!")
 	}
 }
-func backup() {
-	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
-		utils.Fatal("Please make sure all required environment variables for database are set")
-	} else {
-		testDatabaseConnection()
-		// Backup database
-		utils.Info("Backing up database...")
-		bkFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
-
-		err := os.Setenv("PGPASSWORD", dbPassword)
-		if err != nil {
-			return
-		}
-		if disableCompression {
-			bkFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
-			cmd := exec.Command("pg_dump",
-				"-h", dbHost,
-				"-p", dbPort,
-				"-U", dbUserName,
-				"-d", dbName,
-			)
-			output, err := cmd.Output()
-			if err != nil {
-				log.Fatal(err)
-			}
-
-			file, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
-			if err != nil {
-				log.Fatal(err)
-			}
-			defer file.Close()
-
-			_, err = file.Write(output)
-			if err != nil {
-				log.Fatal(err)
-			}
-			utils.Info("Database has been backed up")
-
-		} else {
-			cmd := exec.Command("pg_dump",
-				"-h", dbHost,
-				"-p", dbPort,
-				"-U", dbUserName,
-				"-d", dbName,
-			)
-			stdout, err := cmd.StdoutPipe()
-			if err != nil {
-				log.Fatal(err)
-			}
-			gzipCmd := exec.Command("gzip")
-			gzipCmd.Stdin = stdout
-			gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
-			gzipCmd.Start()
-			if err != nil {
-				log.Fatal(err)
-			}
-			if err := cmd.Run(); err != nil {
-				log.Fatal(err)
-			}
-			if err := gzipCmd.Wait(); err != nil {
-				log.Fatal(err)
-			}
-			utils.Info("Database has been backed up")
-
-		}
-
-		historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", storagePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer historyFile.Close()
-		if _, err := historyFile.WriteString(bkFileName + "\n"); err != nil {
-			log.Fatal(err)
-		}
-	}
-}
-func restore() {
-	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" || file == "" {
-		utils.Fatal("Please make sure all required environment variables are set")
-	} else {
-
-		if utils.FileExists(fmt.Sprintf("%s/%s", storagePath, file)) {
-			testDatabaseConnection()
-			err := os.Setenv("PGPASSWORD", dbPassword)
-			if err != nil {
-				return
-			}
-
-			extension := filepath.Ext(fmt.Sprintf("%s/%s", storagePath, file))
-			// GZ compressed file
-			if extension == ".gz" {
-				str := "zcat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME")
-				output, err := exec.Command("bash", "-c", str).Output()
-				if err != nil {
-					utils.Fatal("Error, in restoring the database")
-				}
-				outputContent = string(output)
-				utils.Info("Database has been restored")
-
-			} else if extension == ".sql" {
-				//SQL file
-				str := "cat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME")
-				output, err := exec.Command("bash", "-c", str).Output()
-				if err != nil {
-					utils.Fatalf("Error in restoring the database", err)
-				}
-				outputContent = string(output)
-				utils.Info("Database has been restored")
-			} else {
-				utils.Fatal("Unknown file extension ", extension)
-			}
-
-		} else {
-			utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", storagePath, file))
-		}
-
-	}
-}
 func s3Backup() {
-	// Implement S3 backup logic
-	s3Mount()
-	backup()
+	pkg.MountS3Storage(s3Path)
+	pkg.Backup(disableCompression)
 }

 // Run in scheduled mode
@@ -323,9 +197,12 @@ func scheduledMode() {
 	utils.Info("Running in Scheduled mode")
 	utils.Info("Log file in /var/log/pg-bkup.log")
 	utils.Info("Execution period ", os.Getenv("SCHEDULE_PERIOD"))
-	testDatabaseConnection()
+	//Test database connection
+	utils.TestDatabaseConnection()
+
 	utils.Info("Creating backup job...")
-	createCrontabScript()
+	pkg.CreateCrontabScript(disableCompression, storage)
+
 	supervisordCmd := exec.Command("supervisord", "-c", "/etc/supervisor/supervisord.conf")
 	if err := supervisordCmd.Run(); err != nil {
 		utils.Fatalf("Error starting supervisord: %v\n", err)
@@ -335,122 +212,8 @@ func scheduledMode() {
 	}
 }

-// Mount s3 using s3fs
-func s3Mount() {
-	if accessKey == "" || secretKey == "" || bucketName == "" {
-		utils.Fatal("Please make sure all environment variables are set")
-	} else {
-		storagePath = fmt.Sprintf("%s%s", s3MountPath, s3Path)
-		err := os.Setenv("STORAGE_PATH", storagePath)
-		if err != nil {
-			return
-		}
-
-		//Write file
-		err = utils.WriteToFile(s3fsPasswdFile, fmt.Sprintf("%s:%s", accessKey, secretKey))
-		if err != nil {
-			utils.Fatal("Error creating file")
-		}
-		//Change file permission
-		utils.ChangePermission(s3fsPasswdFile, 0600)
-		utils.Info("Mounting Object storage in ", s3MountPath)
-		if isEmpty, _ := utils.IsDirEmpty(s3MountPath); isEmpty {
-			cmd := exec.Command("s3fs", bucketName, s3MountPath,
-				"-o", "passwd_file="+s3fsPasswdFile,
-				"-o", "use_cache=/tmp/s3cache",
-				"-o", "allow_other",
-				"-o", "url="+s3Endpoint,
-				"-o", "use_path_request_style",
-			)
-
-			if err := cmd.Run(); err != nil {
-				utils.Fatal("Error mounting Object storage:", err)
-			}
-
-			if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
-				utils.Fatalf("Error creating directory %v %v", storagePath, err)
-			}
-
-		} else {
-			utils.Info("Object storage already mounted in " + s3MountPath)
-			if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
-				utils.Fatal("Error creating directory "+storagePath, err)
-			}
-
-		}
-
-	}
-}
 func s3Restore() {
-	// Implement S3 restore logic\
-	s3Mount()
-	restore()
-}
-
-func createCrontabScript() {
-	task := "/usr/local/bin/backup_cron.sh"
-	touchCmd := exec.Command("touch", task)
-	if err := touchCmd.Run(); err != nil {
-		utils.Fatalf("Error creating file %s: %v\n", task, err)
-	}
-	var disableC = ""
-	if disableCompression {
-		disableC = "--disable-compression"
-	}
-
-	var scriptContent string
-
-	if storage == "s3" {
-		scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
-set -e
-bkup --operation backup --dbname %s --port %s --storage s3 --path %s %v
-`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), os.Getenv("S3_PATH"), disableC)
-	} else {
-		scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
-set -e
-bkup --operation backup --dbname %s --port %s %v
-`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), disableC)
-	}
-
-	if err := utils.WriteToFile(task, scriptContent); err != nil {
-		utils.Fatalf("Error writing to %s: %v\n", task, err)
-	}
-
-	chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
-	if err := chmodCmd.Run(); err != nil {
-		utils.Fatalf("Error changing permissions of %s: %v\n", task, err)
-	}
-
-	lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
-	if err := lnCmd.Run(); err != nil {
-		utils.Fatalf("Error creating symbolic link: %v\n", err)
-
-	}
-
-	cronJob := "/etc/cron.d/backup_cron"
-	touchCronCmd := exec.Command("touch", cronJob)
-	if err := touchCronCmd.Run(); err != nil {
-		utils.Fatalf("Error creating file %s: %v\n", cronJob, err)
-	}
-
-	cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> /var/log/mysql-bkup.log"
-`, os.Getenv("SCHEDULE_PERIOD"))
-
-	if err := utils.WriteToFile(cronJob, cronContent); err != nil {
-		utils.Fatalf("Error writing to %s: %v\n", cronJob, err)
-	}
-	utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
-
-	crontabCmd := exec.Command("crontab", "/etc/cron.d/backup_cron")
-	if err := crontabCmd.Run(); err != nil {
-		utils.Fatal("Error updating crontab: ", err)
-	}
-	utils.Info("Starting backup in scheduled mode")
-}
-
-// testDatabaseConnection tests the database connection
-func testDatabaseConnection() {
-	utils.Info("Testing database connection...")
-	// Test database connection
-
+	// Restore database from S3
+	pkg.MountS3Storage(s3Path)
+	pkg.Restore(file)
 }
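
With this hunk, main.go is reduced to dispatch; the real work lives in pkg. A condensed, illustrative sketch of the four call paths (the dispatch helper itself is hypothetical, the pkg calls are the ones introduced by this diff):

// Illustrative only: how main.go now delegates to the pkg entry points.
package main

import "github.com/jkaninda/pg-bkup/pkg"

func dispatch(operation, storage, s3Path, file string, disableCompression bool) {
	if storage == "s3" {
		// Mounting the bucket also re-points STORAGE_PATH at the mounted sub-path.
		pkg.MountS3Storage(s3Path)
	}
	if operation == "backup" {
		pkg.Backup(disableCompression)
	} else {
		pkg.Restore(file)
	}
}
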
diff --git a/pkg/backup.go b/pkg/backup.go
new file mode 100644
index 0000000..c70cbe1
--- /dev/null
+++ b/pkg/backup.go
@@ -0,0 +1,110 @@
+// Package pkg /*
+/*
+Copyright © 2024 Jonas Kaninda
+*/
+package pkg
+
+import (
+	"fmt"
+	"github.com/jkaninda/pg-bkup/utils"
+	"log"
+	"os"
+	"os/exec"
+	"time"
+)
+
+var (
+	dbName      = ""
+	dbHost      = ""
+	dbPort      = ""
+	dbPassword  = ""
+	dbUserName  = ""
+	storagePath = "/backup"
+)
+
+// Backup backs up the database
+func Backup(disableCompression bool) {
+	dbHost = os.Getenv("DB_HOST")
+	dbPassword = os.Getenv("DB_PASSWORD")
+	dbUserName = os.Getenv("DB_USERNAME")
+	dbName = os.Getenv("DB_NAME")
+	dbPort = os.Getenv("DB_PORT")
+	storagePath = os.Getenv("STORAGE_PATH")
+
+	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" {
+		utils.Fatal("Please make sure all required environment variables for database are set")
+	} else {
+		err := os.Setenv("PGPASSWORD", dbPassword)
+		if err != nil {
+			return
+		}
+		// Test database connection
+		utils.TestDatabaseConnection()
+		// Backup database
+		utils.Info("Backing up database...")
+		bkFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405"))
+
+		if disableCompression {
+			bkFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405"))
+			cmd := exec.Command("pg_dump",
+				"-h", dbHost,
+				"-p", dbPort,
+				"-U", dbUserName,
+				"-d", dbName,
+			)
+			output, err := cmd.Output()
+			if err != nil {
+				log.Fatal(err)
+			}
+
+			file, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
+			if err != nil {
+				log.Fatal(err)
+			}
+			defer file.Close()
+
+			_, err = file.Write(output)
+			if err != nil {
+				log.Fatal(err)
+			}
+			utils.Info("Database has been backed up")
+
+		} else {
+			cmd := exec.Command("pg_dump",
+				"-h", dbHost,
+				"-p", dbPort,
+				"-U", dbUserName,
+				"-d", dbName,
+			)
+			stdout, err := cmd.StdoutPipe()
+			if err != nil {
+				log.Fatal(err)
+			}
+			gzipCmd := exec.Command("gzip")
+			gzipCmd.Stdin = stdout
+			gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName))
+			if err != nil {
+				log.Fatal(err)
+			}
+			if err := gzipCmd.Start(); err != nil {
+				log.Fatal(err)
+			}
+			if err := cmd.Run(); err != nil {
+				log.Fatal(err)
+			}
+			if err := gzipCmd.Wait(); err != nil {
+				log.Fatal(err)
+			}
+			utils.Info("Database has been backed up")
+
+		}
+
+		historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", storagePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer historyFile.Close()
+		if _, err := historyFile.WriteString(bkFileName + "\n"); err != nil {
+			log.Fatal(err)
+		}
+	}
+
+}
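
pkg.Backup reads its configuration entirely from the environment (DB_HOST, DB_NAME, DB_USERNAME, DB_PASSWORD, DB_PORT, STORAGE_PATH). A minimal caller might look like the sketch below; it assumes the DB_* variables are already exported, for example by docker-compose:

// Sketch only: invoking the relocated backup entry point directly.
package main

import (
	"os"

	"github.com/jkaninda/pg-bkup/pkg"
)

func main() {
	// STORAGE_PATH is where the .sql/.sql.gz dump and history.txt are written.
	if os.Getenv("STORAGE_PATH") == "" {
		os.Setenv("STORAGE_PATH", "/backup")
	}
	pkg.Backup(false) // false = keep gzip compression enabled
}
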
+	"fmt"
+	"github.com/jkaninda/pg-bkup/utils"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+// Restore restores the database from a backup file
+func Restore(file string) {
+	dbHost = os.Getenv("DB_HOST")
+	dbPassword = os.Getenv("DB_PASSWORD")
+	dbUserName = os.Getenv("DB_USERNAME")
+	dbName = os.Getenv("DB_NAME")
+	dbPort = os.Getenv("DB_PORT")
+	storagePath = os.Getenv("STORAGE_PATH")
+
+	if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" || file == "" {
+		utils.Fatal("Please make sure all required environment variables are set")
+	} else {
+
+		if utils.FileExists(fmt.Sprintf("%s/%s", storagePath, file)) {
+
+			err := os.Setenv("PGPASSWORD", dbPassword)
+			if err != nil {
+				return
+			}
+			utils.TestDatabaseConnection()
+
+			extension := filepath.Ext(fmt.Sprintf("%s/%s", storagePath, file))
+			// GZ compressed file
+			if extension == ".gz" {
+				str := "zcat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME")
+				_, err := exec.Command("bash", "-c", str).Output()
+				if err != nil {
+					utils.Fatal("Error restoring the database")
+				}
+				utils.Info("Database has been restored")
+
+			} else if extension == ".sql" {
+				//SQL file
+				str := "cat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME")
+				_, err := exec.Command("bash", "-c", str).Output()
+				if err != nil {
+					utils.Fatal("Error restoring the database", err)
+				}
+				utils.Info("Database has been restored")
+			} else {
+				utils.Fatal("Unknown file extension ", extension)
+			}
+
+		} else {
+			utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", storagePath, file))
+		}
+	}
+}
diff --git a/pkg/s3fs.go b/pkg/s3fs.go
new file mode 100644
index 0000000..42ffa26
--- /dev/null
+++ b/pkg/s3fs.go
@@ -0,0 +1,76 @@
+// Package pkg /*
+/*
+Copyright © 2024 Jonas Kaninda
+*/
+package pkg
+
+import (
+	"fmt"
+	"github.com/jkaninda/pg-bkup/utils"
+	"os"
+	"os/exec"
+)
+
+const s3MountPath string = "/s3mnt"
+const s3fsPasswdFile string = "/etc/passwd-s3fs"
+
+var (
+	accessKey  = ""
+	secretKey  = ""
+	bucketName = ""
+	s3Endpoint = ""
+)
+
+func init() {
+	accessKey = os.Getenv("ACCESS_KEY")
+	secretKey = os.Getenv("SECRET_KEY")
+	bucketName = os.Getenv("BUCKETNAME")
+	s3Endpoint = os.Getenv("S3_ENDPOINT")
+}
+
+// MountS3Storage mounts S3 storage using s3fs
+func MountS3Storage(s3Path string) {
+	if accessKey == "" || secretKey == "" || bucketName == "" {
+		utils.Fatal("Please make sure all environment variables are set")
+	} else {
+		storagePath := fmt.Sprintf("%s%s", s3MountPath, s3Path)
+		err := os.Setenv("STORAGE_PATH", storagePath)
+		if err != nil {
+			return
+		}
+
+		//Write file
+		err = utils.WriteToFile(s3fsPasswdFile, fmt.Sprintf("%s:%s", accessKey, secretKey))
+		if err != nil {
+			utils.Fatal("Error creating file")
+		}
+		//Change file permission
+		utils.ChangePermission(s3fsPasswdFile, 0600)
+		utils.Info("Mounting Object storage in", s3MountPath)
+		if isEmpty, _ := utils.IsDirEmpty(s3MountPath); isEmpty {
+			cmd := exec.Command("s3fs", bucketName, s3MountPath,
+				"-o", "passwd_file="+s3fsPasswdFile,
+				"-o", "use_cache=/tmp/s3cache",
+				"-o", "allow_other",
+				"-o", "url="+s3Endpoint,
+				"-o", "use_path_request_style",
+			)
+
+			if err := cmd.Run(); err != nil {
+				utils.Fatal("Error mounting Object storage:", err)
+			}
+
+			if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
+				utils.Fatalf("Error creating directory %v %v", storagePath, err)
+			}
+
+		} else {
+			utils.Info("Object storage already mounted in " + s3MountPath)
+			if err := os.MkdirAll(storagePath, os.ModePerm); err != nil {
+				utils.Fatal("Error creating directory "+storagePath, err)
+			}
+
+		}
+
+	}
+}
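
MountS3Storage shells out to s3fs and expects ACCESS_KEY, SECRET_KEY, BUCKETNAME and S3_ENDPOINT to be present; they are read in the package init, so they must be set before the process starts. A hedged sketch of an S3-backed restore, assuming those variables and the s3fs binary are available in the container (path and file name are illustrative values taken from the docs in this patch):

// Sketch only: S3-backed restore using the relocated pkg entry points.
package main

import "github.com/jkaninda/pg-bkup/pkg"

func main() {
	// Mounts the bucket under /s3mnt/<path> and re-points STORAGE_PATH there.
	pkg.MountS3Storage("/pg-bkup")
	// Restores a previously created dump found under STORAGE_PATH.
	pkg.Restore("db_20231217_051339.sql.gz")
}
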
diff --git a/pkg/scripts.go b/pkg/scripts.go
new file mode 100644
index 0000000..68c9122
--- /dev/null
+++ b/pkg/scripts.go
@@ -0,0 +1,79 @@
+package pkg
+
+// Package pkg /*
+/*
+Copyright © 2024 Jonas Kaninda
+*/
+import (
+	"fmt"
+	"github.com/jkaninda/pg-bkup/utils"
+	"os"
+	"os/exec"
+)
+
+const cronLogFile = "/var/log/pg-bkup.log"
+const backupCronFile = "/usr/local/bin/backup_cron.sh"
+
+func init() {
+
+}
+func CreateCrontabScript(disableCompression bool, storage string) {
+	//task := "/usr/local/bin/backup_cron.sh"
+	touchCmd := exec.Command("touch", backupCronFile)
+	if err := touchCmd.Run(); err != nil {
+		utils.Fatalf("Error creating file %s: %v\n", backupCronFile, err)
+	}
+	var disableC = ""
+	if disableCompression {
+		disableC = "--disable-compression"
+	}
+
+	var scriptContent string
+
+	if storage == "s3" {
+		scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
+set -e
+bkup --operation backup --dbname %s --port %s --storage s3 --path %s %v
+`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), os.Getenv("S3_PATH"), disableC)
+	} else {
+		scriptContent = fmt.Sprintf(`#!/usr/bin/env bash
+set -e
+bkup --operation backup --dbname %s --port %s %v
+`, os.Getenv("DB_NAME"), os.Getenv("DB_PORT"), disableC)
+	}
+
+	if err := utils.WriteToFile(backupCronFile, scriptContent); err != nil {
+		utils.Fatalf("Error writing to %s: %v\n", backupCronFile, err)
+	}
+
+	chmodCmd := exec.Command("chmod", "+x", "/usr/local/bin/backup_cron.sh")
+	if err := chmodCmd.Run(); err != nil {
+		utils.Fatalf("Error changing permissions of %s: %v\n", backupCronFile, err)
+	}
+
+	lnCmd := exec.Command("ln", "-s", "/usr/local/bin/backup_cron.sh", "/usr/local/bin/backup_cron")
+	if err := lnCmd.Run(); err != nil {
+		utils.Fatalf("Error creating symbolic link: %v\n", err)
+
+	}
+
+	cronJob := "/etc/cron.d/backup_cron"
+	touchCronCmd := exec.Command("touch", cronJob)
+	if err := touchCronCmd.Run(); err != nil {
+		utils.Fatalf("Error creating file %s: %v\n", cronJob, err)
+	}
+
+	cronContent := fmt.Sprintf(`%s root exec /bin/bash -c ". /run/supervisord.env; /usr/local/bin/backup_cron.sh >> %s"
+`, os.Getenv("SCHEDULE_PERIOD"), cronLogFile)
+
+	if err := utils.WriteToFile(cronJob, cronContent); err != nil {
+		utils.Fatalf("Error writing to %s: %v\n", cronJob, err)
+	}
+	utils.ChangePermission("/etc/cron.d/backup_cron", 0644)
+
+	crontabCmd := exec.Command("crontab", "/etc/cron.d/backup_cron")
+	if err := crontabCmd.Run(); err != nil {
+		utils.Fatal("Error updating crontab: ", err)
+	}
+	utils.Info("Starting backup in scheduled mode")
+}
diff --git a/utils/utils.go b/utils/utils.go
index e2877f5..9e04dfe 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -1,10 +1,10 @@
 package utils

 /*****
-* PostgreSQL Backup & Restore
+* PostgreSQL Backup & Restore
 * @author Jonas Kaninda
 * @license MIT License
-* @link https://github.com/jkaninda/pg-bkup
+* @link https://github.com/jkaninda/pg-bkup
 **/
 import (
 	"fmt"
@@ -70,3 +70,10 @@ func IsDirEmpty(name string) (bool, error) {
 	}
 	return true, nil
 }
+
+// TestDatabaseConnection tests the database connection
+func TestDatabaseConnection() {
+	Info("Testing database connection...")
+	// Test database connection
+
+}
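
The new utils.TestDatabaseConnection is still a stub: it only logs. One possible implementation, an assumption on my part and not part of this patch, is to probe the server with PostgreSQL's pg_isready using the same environment variables the backup and restore code already relies on:

// Hypothetical sketch for utils.TestDatabaseConnection, not code from this commit.
// pg_isready ships with the postgresql client tools already needed for pg_dump/psql.
package utils

import (
	"os"
	"os/exec"
)

// TestDatabaseConnection probes the database server with pg_isready.
func TestDatabaseConnection() {
	Info("Testing database connection...")
	cmd := exec.Command("pg_isready",
		"-h", os.Getenv("DB_HOST"),
		"-p", os.Getenv("DB_PORT"),
		"-U", os.Getenv("DB_USERNAME"),
		"-d", os.Getenv("DB_NAME"),
	)
	if err := cmd.Run(); err != nil {
		Fatal("Database connection failed: ", err)
	}
	Info("Database connection is OK")
}
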