Add restore from SSH
Makefile
@@ -30,9 +30,14 @@ docker-run-scheduled-s3: docker-build
 	docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"

 docker-run-s3: docker-build
-	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path
+	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 ##--path /custom-path

 docker-restore-s3: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path -f $FILE_NAME
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 -f ${FILE_NAME} #--path /custom-path

 docker-run-ssh: docker-build
 	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage ssh
+
+docker-restore-ssh: docker-build
+	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage ssh -f uzaraka_20240731_200104.sql.gz.gpg
@@ -19,7 +19,6 @@ var rootCmd = &cobra.Command{
 	Version: appVersion,
 }
 var operation = ""
-var s3Path = "/pg-bkup"

 // Execute adds all child commands to the root command and sets flags appropriately.
 // This is called by main.main(). It only needs to happen once to the rootCmd.
@@ -32,7 +31,7 @@ func Execute() {

 func init() {
 	rootCmd.PersistentFlags().StringP("storage", "s", "local", "Set storage. local or s3")
-	rootCmd.PersistentFlags().StringP("path", "P", s3Path, "Set s3 path, without file name. for S3 storage only")
+	rootCmd.PersistentFlags().StringP("path", "P", "", "Set s3 path, without file name. for S3 storage only")
 	rootCmd.PersistentFlags().StringP("dbname", "d", "", "Set database name")
 	rootCmd.PersistentFlags().IntP("timeout", "t", 30, "Set timeout")
 	rootCmd.PersistentFlags().IntP("port", "p", 5432, "Set database port")
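The --path flag now defaults to empty and is mapped onto two different environment variables further down (AWS_S3_PATH for S3, SSH_REMOTE_PATH for SSH). utils.GetEnv itself is not shown in this diff, so the helper in the sketch below is only a hypothetical stand-in for that flag-or-environment fallback, not the project's actual implementation:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// getFlagOrEnv is a hypothetical stand-in for utils.GetEnv: it prefers the
// CLI flag when set and otherwise falls back to the named environment variable.
func getFlagOrEnv(cmd *cobra.Command, flag, envKey string) string {
	value, _ := cmd.Flags().GetString(flag)
	if value != "" {
		return value
	}
	return os.Getenv(envKey)
}

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			// The same --path flag feeds both storage back ends.
			s3Path := getFlagOrEnv(cmd, "path", "AWS_S3_PATH")
			remotePath := getFlagOrEnv(cmd, "path", "SSH_REMOTE_PATH")
			fmt.Println(s3Path, remotePath)
		},
	}
	cmd.PersistentFlags().StringP("path", "P", "", "Set s3 path, without file name. for S3 storage only")
	_ = cmd.Execute()
}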
@@ -1,7 +1,6 @@
 package cmd

 import (
 	"github.com/jkaninda/pg-bkup/pkg"
 	"github.com/spf13/cobra"
 )

@@ -9,6 +8,6 @@ var S3MountCmd = &cobra.Command{
 	Use: "s3mount",
 	Short: "Mount AWS S3 storage",
 	Run: func(cmd *cobra.Command, args []string) {
-		pkg.S3Mount()
+		//pkg.S3Mount()
 	},
 }
@@ -25,7 +25,8 @@ func StartBackup(cmd *cobra.Command) {
 	utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD")

 	//Get flag value and set env
-	s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
+	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
 	backupRetention, _ := cmd.Flags().GetInt("keep-last")
@@ -52,7 +53,7 @@ func StartBackup(cmd *cobra.Command) {
 	case "local":
 		localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
 	case "ssh":
-		sshBackup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
+		sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
 	case "ftp":
 		utils.Fatalf("Not supported storage type: %s", storage)
 	default:
@@ -241,7 +242,7 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru
 	}
 	utils.Done("Database has been backed up and uploaded to s3 ")
 }
-func sshBackup(backupFileName string, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
+func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) {
 	utils.Info("Backup database to Remote server")
 	//Backup database
 	BackupDatabase(backupFileName, disableCompression)
@@ -250,9 +251,9 @@ func sshBackup(backupFileName string, remotePath string, disableCompression bool
 		encryptBackup(backupFileName)
 		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
 	}
-	utils.Info("Uploading backup file to S3 storage...")
+	utils.Info("Uploading backup file to remote server...")
 	utils.Info("Backup name is ", backupFileName)
-	err := CopyToRemote(filepath.Join(tmpPath, finalFileName), remotePath)
+	err := CopyToRemote(finalFileName, remotePath)
 	if err != nil {
 		utils.Fatalf("Error uploading file to S3: %s ", err)

@@ -17,33 +17,46 @@ func StartRestore(cmd *cobra.Command) {
 	utils.GetEnv(cmd, "port", "DB_PORT")

 	//Get flag value and set env
-	s3Path = utils.GetEnv(cmd, "path", "S3_PATH")
+	s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH")
+	remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH")
 	storage = utils.GetEnv(cmd, "storage", "STORAGE")
 	file = utils.GetEnv(cmd, "file", "FILE_NAME")
 	executionMode, _ = cmd.Flags().GetString("mode")
 	bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME")
 	switch storage {
 	case "s3":
-		utils.Info("Restore database from s3")
-		err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
-		if err != nil {
-			utils.Fatal("Error download file from s3 ", file, err)
-		}
-		RestoreDatabase(file)
+		restoreFromS3(file, bucket, s3Path)
 	case "local":
 		utils.Info("Restore database from local")
 		copyToTmp(storagePath, file)
 		RestoreDatabase(file)
 	case "ssh":
-		fmt.Println("x is 2")
+		restoreFromRemote(file, remotePath)
 	case "ftp":
-		fmt.Println("x is 3")
+		utils.Fatalf("Restore from FTP is not yet supported")
 	default:
 		utils.Info("Restore database from local")
 		RestoreDatabase(file)
 	}
 }

+func restoreFromS3(file, bucket, s3Path string) {
+	utils.Info("Restore database from s3")
+	err := utils.DownloadFile(tmpPath, file, bucket, s3Path)
+	if err != nil {
+		utils.Fatal("Error download file from s3 ", file, err)
+	}
+	RestoreDatabase(file)
+}
+func restoreFromRemote(file, remotePath string) {
+	utils.Info("Restore database from remote server")
+	err := CopyFromRemote(file, remotePath)
+	if err != nil {
+		utils.Fatal("Error download file from remote server: ", filepath.Join(remotePath, file), err)
+	}
+	RestoreDatabase(file)
+}

 // RestoreDatabase restore database
 func RestoreDatabase(file string) {
 	dbHost = os.Getenv("DB_HOST")
@@ -52,7 +65,6 @@ func RestoreDatabase(file string) {
 	dbName = os.Getenv("DB_NAME")
 	dbPort = os.Getenv("DB_PORT")
 	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
-	//storagePath = os.Getenv("STORAGE_PATH")
 	if file == "" {
 		utils.Fatal("Error, file required")
 	}

@@ -4,6 +4,7 @@ Copyright © 2024 Jonas Kaninda
 */
 package pkg

+/*
 import (
 	"fmt"
 	"github.com/jkaninda/pg-bkup/utils"
@@ -78,3 +79,5 @@ func MountS3Storage(s3Path string) {

 	}
 }
+
+*/
@@ -9,6 +9,7 @@ import (
 	"github.com/jkaninda/pg-bkup/utils"
 	"golang.org/x/crypto/ssh"
 	"os"
+	"path/filepath"
 )

 func CopyToRemote(fileName, remotePath string) error {
@@ -39,14 +40,14 @@ func CopyToRemote(fileName, remotePath string) error {
 	}

 	// Open a file
-	file, _ := os.Open(fileName)
+	file, _ := os.Open(filepath.Join(tmpPath, fileName))

 	// Close client connection after the file has been copied
 	defer client.Close()
 	// Close the file after it has been copied
 	defer file.Close()
 	// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
-	err = client.CopyFromFile(context.Background(), *file, remotePath, "0655")
+	err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655")
 	if err != nil {
 		fmt.Println("Error while copying file ")
 		return err
@@ -72,9 +73,9 @@ func CopyFromRemote(fileName, remotePath string) error {
 		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())

 	}

 	// Create a new SCP client
 	client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)

 	// Connect to the remote server
 	err := client.Connect()
 	if err != nil {
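The hunk above only shows the connection setup inside CopyFromRemote; the download call itself falls outside the rendered context. Below is a minimal sketch of what that download path could look like, assuming the bramvdbogaerde/go-scp client the CopyToRemote code appears to use and its CopyFromRemote(ctx, *os.File, remotePath) helper; the function and its parameter list are illustrative only, not the project's actual code:

package pkg

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	scp "github.com/bramvdbogaerde/go-scp"
	"github.com/bramvdbogaerde/go-scp/auth"
	"golang.org/x/crypto/ssh"
)

// copyFromRemoteSketch is an illustrative counterpart to CopyToRemote above.
// Connection details are passed in explicitly here; the real code reads them
// from the environment.
func copyFromRemoteSketch(fileName, remotePath, sshUser, sshPassword, sshHostName, sshPort, tmpPath string) error {
	// Password-based auth, mirroring the auth.PasswordKey call in the hunk above.
	clientConfig, err := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
	if err != nil {
		return err
	}

	client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig)
	if err := client.Connect(); err != nil {
		return err
	}
	defer client.Close()

	// Download into the tmp directory that RestoreDatabase later reads from.
	file, err := os.Create(filepath.Join(tmpPath, fileName))
	if err != nil {
		return err
	}
	defer file.Close()

	// Assumed go-scp helper: streams the remote file into the local *os.File.
	return client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
}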
@@ -11,7 +11,6 @@ const gpgExtension = "gpg"
 var (
 	storage = "local"
 	file = ""
-	s3Path = "/pg-bkup"
 	dbPassword = ""
 	dbUserName = ""
 	dbName = ""
@@ -59,7 +59,8 @@ func UploadFileToS3(filePath, key, bucket, prefix string) error {
 		return err
 	}

-	objectKey := fmt.Sprintf("%s/%s", prefix, key)
+	objectKey := filepath.Join(prefix, key)
+	//fmt.Sprintf("%s/%s", prefix, key)

 	buffer := make([]byte, fileInfo.Size())
 	file.Read(buffer)
@@ -93,7 +94,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 	}
 	defer file.Close()

-	objectKey := fmt.Sprintf("%s/%s", prefix, key)
+	objectKey := filepath.Join(prefix, key)

 	downloader := s3manager.NewDownloader(sess)
 	numBytes, err := downloader.Download(file,
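A side note on the new objectKey construction: filepath.Join uses the operating-system path separator, so on Windows it would build keys with backslashes, while S3 object keys always use forward slashes. path.Join is the forward-slash-only alternative; the snippet below only illustrates the difference and is not part of the commit:

package main

import (
	"fmt"
	"path"          // always uses "/" — suitable for S3 object keys
	"path/filepath" // uses the OS separator — "\" on Windows
)

func main() {
	prefix, key := "custom-path", "backup.sql.gz"

	// On Linux both print "custom-path/backup.sql.gz";
	// on Windows filepath.Join would yield "custom-path\backup.sql.gz".
	fmt.Println(path.Join(prefix, key))
	fmt.Println(filepath.Join(prefix, key))
}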