Add restore from SSH

2024-07-31 22:32:07 +02:00
parent 6976bf7597
commit 5b0d450740
9 changed files with 47 additions and 27 deletions

View File

@@ -30,9 +30,14 @@ docker-run-scheduled-s3: docker-build
 	docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
 docker-run-s3: docker-build
-	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path
+	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 ##--path /custom-path
 docker-restore-s3: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path -f $FILE_NAME
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 -f ${FILE_NAME} #--path /custom-path
+docker-run-ssh: docker-build
+	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage ssh
+docker-restore-ssh: docker-build
+	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage ssh -f uzaraka_20240731_200104.sql.gz.gpg

View File

@@ -19,7 +19,6 @@ var rootCmd = &cobra.Command{
 	Version: appVersion,
 }
 var operation = ""
-var s3Path = "/pg-bkup"
 
 // Execute adds all child commands to the root command and sets flags appropriately.
 // This is called by main.main(). It only needs to happen once to the rootCmd.
@@ -32,7 +31,7 @@ func Execute() {
 func init() {
 	rootCmd.PersistentFlags().StringP("storage", "s", "local", "Set storage. local or s3")
-	rootCmd.PersistentFlags().StringP("path", "P", s3Path, "Set s3 path, without file name. for S3 storage only")
+	rootCmd.PersistentFlags().StringP("path", "P", "", "Set s3 path, without file name. for S3 storage only")
 	rootCmd.PersistentFlags().StringP("dbname", "d", "", "Set database name")
 	rootCmd.PersistentFlags().IntP("timeout", "t", 30, "Set timeout")
 	rootCmd.PersistentFlags().IntP("port", "p", 5432, "Set database port")

View File

@@ -1,7 +1,6 @@
 package cmd
 
 import (
-	"github.com/jkaninda/pg-bkup/pkg"
 	"github.com/spf13/cobra"
 )
@@ -9,6 +8,6 @@ var S3MountCmd = &cobra.Command{
 	Use:   "s3mount",
 	Short: "Mount AWS S3 storage",
 	Run: func(cmd *cobra.Command, args []string) {
-		pkg.S3Mount()
+		//pkg.S3Mount()
 	},
 }

View File

@@ -25,7 +25,8 @@ func StartBackup(cmd *cobra.Command) {
 	utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")
 	//Get flag value and set env
-	s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
+	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
 	backupRetention, _ := cmd.Flags().GetInt("keep-last")
@@ -52,7 +53,7 @@ func StartBackup(cmd *cobra.Command) {
 	case "local":
 		localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
 	case "ssh":
-		sshBackup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
+		sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
 	case "ftp":
 		utils.Fatalf("Not supported storage type: %s", storage)
 	default:
@@ -241,7 +242,7 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru
 	}
 	utils.Done("Database has been backed up and uploaded to s3 ")
 }
-func sshBackup(backupFileName string, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	utils.Info("Backup database to Remote server")
 	//Backup database
 	BackupDatabase(backupFileName, disableCompression)
@@ -250,9 +251,9 @@ func sshBackup(backupFileName string, remotePath string, disableCompression bool
 		encryptBackup(backupFileName)
 		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
 	}
-	utils.Info("Uploading backup file to S3 storage...")
+	utils.Info("Uploading backup file to remote server...")
 	utils.Info("Backup name is ", backupFileName)
-	err := CopyToRemote(filepath.Join(tmpPath, finalFileName), remotePath)
+	err := CopyToRemote(finalFileName, remotePath)
 	if err != nil {
 		utils.Fatalf("Error uploading file to S3: %s ", err)
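For context, StartBackup (and StartRestore below) resolves the shared --path flag twice, once against AWS_S3_PATH and once against SSH_REMOTE_PATH, so each storage backend picks up its own value. A minimal sketch of what a helper like utils.GetEnv is assumed to do (flag value first, then environment fallback, then export the result); this is illustrative, not the repository's actual implementation, and assumes the os and github.com/spf13/cobra imports:

// getEnvSketch mirrors the assumed behavior of utils.GetEnv: prefer the
// CLI flag, fall back to the named environment variable, and export the
// result so later code can read it from the environment.
func getEnvSketch(cmd *cobra.Command, flagName, envName string) string {
	value, _ := cmd.Flags().GetString(flagName) // flag error ignored for brevity
	if value == "" {
		value = os.Getenv(envName)
	}
	if value != "" {
		os.Setenv(envName, value)
	}
	return value
}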

View File

@@ -17,31 +17,44 @@ func StartRestore(cmd *cobra.Command) {
 	utils.GetEnv(cmd, "port", "DB_PORT")
 	//Get flag value and set env
-	s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
+	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
 	executionMode, _ = cmd.Flags().GetString("mode")
 	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
 	switch storage {
 	case "s3":
+		restoreFromS3(file, bucket, s3Path)
+	case "local":
+		utils.Info("Restore database from local")
+		copyToTmp(storagePath, file)
+		RestoreDatabase(file)
+	case "ssh":
+		restoreFromRemote(file, remotePath)
+	case "ftp":
+		utils.Fatalf("Restore from FTP is not yet supported")
+	default:
+		utils.Info("Restore database from local")
+		RestoreDatabase(file)
+	}
+}
+func restoreFromS3(file, bucket, s3Path string) {
 	utils.Info("Restore database from s3")
 	err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
 	if err != nil {
 		utils.Fatal("Error download file from s3 ", file, err)
 	}
 	RestoreDatabase(file)
-	case "local":
-		utils.Info("Restore database from local")
-		copyToTmp(storagePath, file)
-		RestoreDatabase(file)
-	case "ssh":
-		fmt.Println("x is 2")
-	case "ftp":
-		fmt.Println("x is 3")
-	default:
-		utils.Info("Restore database from local")
-		RestoreDatabase(file)
+}
+func restoreFromRemote(file, remotePath string) {
+	utils.Info("Restore database from remote server")
+	err := CopyFromRemote(file, remotePath)
+	if err != nil {
+		utils.Fatal("Error download file from remote server: ", filepath.Join(remotePath, file), err)
 	}
+	RestoreDatabase(file)
 }
 // RestoreDatabase restore database
@@ -52,7 +65,6 @@ func RestoreDatabase(file string) {
 	dbName = os.Getenv("DB_NAME")
 	dbPort = os.Getenv("DB_PORT")
 	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
-	//storagePath = os.Getenv("STORAGE_PATH")
 	if file == "" {
 		utils.Fatal("Error, file required")
 	}
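The docker-restore-ssh target above restores a .sql.gz.gpg file, and RestoreDatabase reads GPG_PASSPHRASE, so a decrypt step presumably runs before the dump is applied. A hedged sketch of such a step, assuming the gpg CLI is present in the image; decryptBackup is a hypothetical name, not the repository's code:

// decryptBackup strips the ".gpg" suffix and decrypts the archive with a
// passphrase, leaving e.g. backup.sql.gz ready for the restore pipeline.
// Assumes imports: os, os/exec, strings.
func decryptBackup(file, passphrase string) (string, error) {
	out := strings.TrimSuffix(file, ".gpg")
	cmd := exec.Command("gpg", "--batch", "--yes",
		"--passphrase", passphrase,
		"--output", out, "--decrypt", file)
	cmd.Stderr = os.Stderr // surface gpg diagnostics
	return out, cmd.Run()
}

On GnuPG 2.1+, a non-interactive passphrase may additionally require --pinentry-mode loopback.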

View File

@@ -4,6 +4,7 @@ Copyright © 2024 Jonas Kaninda
 */
 package pkg
 
+/*
 import (
 	"fmt"
 	"github.com/jkaninda/pg-bkup/utils"
@@ -78,3 +79,5 @@ func MountS3Storage(s3Path string) {
 	}
 }
+*/

View File

@@ -9,6 +9,7 @@ import (
"github.com/jkaninda/pg-bkup/utils" "github.com/jkaninda/pg-bkup/utils"
"golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh"
"os" "os"
"path/filepath"
) )
func CopyToRemote(fileName, remotePath string) error { func CopyToRemote(fileName, remotePath string) error {
@@ -39,14 +40,14 @@ func CopyToRemote(fileName, remotePath string) error {
} }
// Open a file // Open a file
file, _ := os.Open(fileName) file, _ := os.Open(filepath.Join(tmpPath, fileName))
// Close client connection after the file has been copied // Close client connection after the file has been copied
defer client.Close() defer client.Close()
// Close the file after it has been copied // Close the file after it has been copied
defer file.Close() defer file.Close()
// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application. // the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
err = client.CopyFromFile(context.Background(), *file, remotePath, "0655") err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
if err != nil { if err != nil {
fmt.Println("Error while copying file ") fmt.Println("Error while copying file ")
return err return err
@@ -72,9 +73,9 @@ func CopyFromRemote(fileName, remotePath string) error {
clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
} }
// Create a new SCP client // Create a new SCP client
client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig) client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
// Connect to the remote server // Connect to the remote server
err := client.Connect() err := client.Connect()
if err != nil { if err != nil {
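The hunk above shows only the connection setup for CopyFromRemote; the download body is cut off. A minimal sketch of how it can finish after client.Connect(), assuming the library is bramvdbogaerde/go-scp (which the scp.NewClient and auth.PasswordKey calls suggest); the function name and layout are illustrative, not the exact repository code:

// copyFromRemoteBody creates the local file under tmpPath and streams the
// remote file into it, mirroring CopyToRemote's remotePath/fileName layout.
// Assumes imports: context, os, path/filepath, and the go-scp package.
func copyFromRemoteBody(client scp.Client, fileName, remotePath string) error {
	defer client.Close()
	file, err := os.Create(filepath.Join(tmpPath, fileName))
	if err != nil {
		return err
	}
	defer file.Close()
	return client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
}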

View File

@@ -11,7 +11,6 @@ const gpgExtension = "gpg"
 var (
 	storage = "local"
 	file = ""
-	s3Path = "/pg-bkup"
 	dbPassword = ""
 	dbUserName = ""
 	dbName = ""

View File

@@ -59,7 +59,8 @@ func UploadFileToS3(filePath, key, bucket, prefix string) error {
 		return err
 	}
-	objectKey := fmt.Sprintf("%s/%s", prefix, key)
+	objectKey := filepath.Join(prefix, key)
+	//fmt.Sprintf("%s/%s", prefix, key)
 	buffer := make([]byte, fileInfo.Size())
 	file.Read(buffer)
@@ -93,7 +94,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 	}
 	defer file.Close()
-	objectKey := fmt.Sprintf("%s/%s", prefix, key)
+	objectKey := filepath.Join(prefix, key)
 	downloader := s3manager.NewDownloader(sess)
 	numBytes, err := downloader.Download(file,
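A likely motivation for replacing fmt.Sprintf with filepath.Join (inferred; the commit does not say): with the --path default now empty, the old format string produced object keys with a leading slash. filepath.Join drops empty components and collapses duplicate separators:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	fmt.Println(fmt.Sprintf("%s/%s", "", "backup.sql.gz"))     // "/backup.sql.gz", empty prefix leaks a slash
	fmt.Println(filepath.Join("", "backup.sql.gz"))            // "backup.sql.gz"
	fmt.Println(filepath.Join("custom-path", "backup.sql.gz")) // "custom-path/backup.sql.gz"
}

Note that filepath.Join uses the OS path separator; in a Linux container that yields the forward slashes S3 expects, though path.Join would be the separator-independent choice for object keys.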