Fix restore from ssh, refactoring of code
Makefile
@@ -30,14 +30,14 @@ docker-run-scheduled-s3: docker-build
 	docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *"
 
 docker-run-s3: docker-build
-	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 ##--path /custom-path
+	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 #--path /custom-path
 
 docker-restore-s3: docker-build
-	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 -f ${FILE_NAME} #--path /custom-path
+	docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 -f ${FILE_NAME} #--path /custom-path
 
 docker-run-ssh: docker-build
 	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage ssh
 	docker run --rm --network internal --name pg-bkup -v "/Users/jonas/.ssh/id_ed25519:/tmp/id_ed25519" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage ssh
 
 docker-restore-ssh: docker-build
 	docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage ssh -f uzaraka_20240731_200104.sql.gz.gpg
 	docker run --rm --network internal --name pg-bkup -v "/Users/jonas/.ssh/id_ed25519:/tmp/id_ed25519" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/pg-bkup bkup restore --storage ssh -f uzaraka_20240731_200104.sql.gz.gpg
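Note: every Make target drives the container purely through environment variables (DB_HOST, SSH_PORT, GPG_PASSPHRASE, ...). A minimal sketch of how the tool presumably reads them on the Go side; the variable names come from the targets above, the getenv helper and the default values are hypothetical:

package main

import (
	"log"
	"os"
)

// getenv returns the value of key, or fallback when unset (helper is hypothetical).
func getenv(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func main() {
	dbHost := getenv("DB_HOST", "127.0.0.1")
	sshPort := getenv("SSH_PORT", "22")
	if os.Getenv("DB_NAME") == "" {
		log.Fatal("DB_NAME environment variable is required")
	}
	log.Println("connecting to", dbHost, "ssh port", sshPort)
}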
@@ -52,7 +52,7 @@ func StartBackup(cmd *cobra.Command) {
 		s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption)
 	case "local":
 		localBackup(backupFileName, disableCompression, prune, backupRetention, encryption)
-	case "ssh":
+	case "ssh", "remote":
 		sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption)
 	case "ftp":
 		utils.Fatalf("Not supported storage type: %s", storage)
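This hunk makes "remote" an alias for "ssh" by listing both values in one case arm, which is the idiomatic Go way to dispatch several spellings to the same branch. A self-contained sketch of the pattern; the storage names are from the diff, the function bodies are stand-ins:

package main

import "fmt"

// dispatch mirrors the storage switch above.
func dispatch(storage string) {
	switch storage {
	case "s3":
		fmt.Println("s3 backup")
	case "local":
		fmt.Println("local backup")
	case "ssh", "remote": // one case arm matches both spellings
		fmt.Println("ssh backup")
	default:
		fmt.Printf("Not supported storage type: %s\n", storage)
	}
}

func main() {
	dispatch("remote") // hits the same arm as "ssh"
}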
@@ -220,7 +220,7 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru
 		finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg")
 	}
 	utils.Info("Uploading backup file to S3 storage...")
-	utils.Info("Backup name is ", backupFileName)
+	utils.Info("Backup name is ", finalFileName)
 	err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path)
 	if err != nil {
 		utils.Fatalf("Error uploading file to S3: %s ", err)
@@ -255,9 +255,10 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune
 	utils.Info("Backup name is ", backupFileName)
 	err := CopyToRemote(finalFileName, remotePath)
 	if err != nil {
-		utils.Fatalf("Error uploading file to S3: %s ", err)
+		utils.Fatalf("Error uploading file to the remote server: %s ", err)
+
 	}
 
 	//Delete backup file from tmp folder
 	err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
 	if err != nil {
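This hunk fixes a copy-pasted error message: the SSH upload path used to report "Error uploading file to S3". The surrounding code also shows the intended order of operations, upload first, then delete the temp file. A sketch of that flow; CopyToRemote, tmpPath and the error text are from the diff, the stub and file name are stand-ins:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func copyToRemoteStub(path string) error { return nil } // stand-in for CopyToRemote

func main() {
	tmpPath := "/tmp"                // matches the tmpPath used in the diff
	finalFileName := "db.sql.gz.gpg" // hypothetical file name
	local := filepath.Join(tmpPath, finalFileName)

	if err := copyToRemoteStub(local); err != nil {
		// the fix: name the operation that actually failed, not S3
		fmt.Printf("Error uploading file to the remote server: %s\n", err)
		os.Exit(1)
	}
	// only delete the temp file once the upload has succeeded
	if err := os.Remove(local); err != nil {
		fmt.Println("could not delete temp file:", err)
	}
}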
@@ -275,7 +276,6 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune
 
 func encryptBackup(backupFileName string) {
 	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")
-
 	err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase)
 	if err != nil {
 		utils.Fatalf("Error during encrypting backup %s", err)
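Encrypt takes a file path and the GPG_PASSPHRASE, so it presumably performs symmetric GPG encryption, producing the ".gpg" suffix that the S3 and SSH paths above append. A guess at such an implementation that shells out to the gpg CLI; the flags are standard gpg, but whether the project actually uses os/exec like this is an assumption:

package main

import (
	"fmt"
	"os/exec"
)

// encryptFile symmetrically encrypts path with a passphrase, writing path+".gpg".
// This is a sketch of what Encrypt might do, not the project's implementation.
func encryptFile(path, passphrase string) error {
	cmd := exec.Command("gpg", "--batch", "--yes",
		"--passphrase", passphrase,
		"--symmetric", "--cipher-algo", "aes256",
		"--output", path+".gpg", path)
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("gpg failed: %v: %s", err, out)
	}
	return nil
}

func main() {
	if err := encryptFile("/tmp/db.sql.gz", "secret"); err != nil {
		fmt.Println(err)
	}
}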
@@ -71,7 +71,8 @@ func RestoreDatabase(file string) {
 	extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file))
 	if extension == ".gpg" {
 		if gpgPassphrase == "" {
-			utils.Fatal("Error, GPG_PASSPHRASE environment variable required, you need to set the GPG_PASSPHRASE")
+			utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.")
+
 		} else {
 			//Decrypt file
 			err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase)
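The restore path gates decryption on the file extension, and filepath.Ext returns only the last extension, so a ".sql.gz.gpg" backup correctly yields ".gpg". A runnable sketch of that gate; tmpPath and the GPG_PASSPHRASE check are from the diff, the file name is hypothetical:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	tmpPath := "/tmp"                // matches the diff
	file := "db_20240731.sql.gz.gpg" // hypothetical backup name
	gpgPassphrase := os.Getenv("GPG_PASSPHRASE")

	// filepath.Ext("db_20240731.sql.gz.gpg") == ".gpg"
	if filepath.Ext(filepath.Join(tmpPath, file)) == ".gpg" {
		if gpgPassphrase == "" {
			fmt.Println("GPG_PASSPHRASE environment variable is required")
			os.Exit(1)
		}
		// decrypt here, then continue the restore with the .sql.gz file
	}
}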
@@ -27,6 +27,7 @@ func CopyToRemote(fileName, remotePath string) error {
 		if sshPassword == "" {
 			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
 		}
+		utils.Info("Accessing the remote server using password, private key is recommended\n")
 		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
 
 	}
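The auth.PasswordKey call and CopyFromRemote/CopyToRemote signatures match the API of github.com/bramvdbogaerde/go-scp, which appears to be the SCP library in use (an inference, not stated in the diff). Assuming that library, a sketch of the two auth branches the new Info line distinguishes:

package main

import (
	"fmt"
	"os"

	"github.com/bramvdbogaerde/go-scp/auth"
	"golang.org/x/crypto/ssh"
)

func main() {
	sshUser := os.Getenv("SSH_USER")
	sshPassword := os.Getenv("SSH_PASSWORD")
	identityFile := os.Getenv("SSH_IDENTIFY_FILE")

	var clientConfig ssh.ClientConfig
	var err error
	if identityFile != "" {
		// key-based auth, the recommended path
		clientConfig, err = auth.PrivateKey(sshUser, identityFile, ssh.InsecureIgnoreHostKey())
	} else {
		// password fallback, as in the diff; note InsecureIgnoreHostKey skips
		// host verification, convenient but unsafe outside trusted networks
		clientConfig, err = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
	}
	if err != nil {
		fmt.Println("building ssh config failed:", err)
	}
	_ = clientConfig
}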
@@ -70,6 +71,7 @@ func CopyFromRemote(fileName, remotePath string) error {
 		if sshPassword == "" {
 			return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n")
 		}
+		utils.Info("Accessing the remote server using password, private key is recommended\n")
 		clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey())
 
 	}
@@ -83,14 +85,14 @@ func CopyFromRemote(fileName, remotePath string) error {
 	}
 	// Close client connection after the file has been copied
 	defer client.Close()
-	file, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0777)
+	file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
 	if err != nil {
 		fmt.Println("Couldn't open the output file")
 	}
 	defer file.Close()
 
 	// the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application.
-	err = client.CopyFromRemote(context.Background(), file, remotePath)
+	err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName))
 
 	if err != nil {
 		fmt.Println("Error while copying file ", err)
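This hunk is the actual "fix restore from ssh": the local file used to be created at the bare fileName instead of inside tmpPath, and the remote path was passed without the file name appended, so the download targeted a directory rather than the backup file. Both sides now join path and name. A sketch of the fixed download, again assuming github.com/bramvdbogaerde/go-scp; the env variable names are from the Makefile above, the backup file name is hypothetical:

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	scp "github.com/bramvdbogaerde/go-scp"
	"github.com/bramvdbogaerde/go-scp/auth"
	"golang.org/x/crypto/ssh"
)

func main() {
	tmpPath := "/tmp"           // matches the diff
	fileName := "db.sql.gz.gpg" // hypothetical
	remotePath := os.Getenv("SSH_REMOTE_PATH")

	cfg, err := auth.PasswordKey(os.Getenv("SSH_USER"), os.Getenv("SSH_PASSWORD"), ssh.InsecureIgnoreHostKey())
	if err != nil {
		fmt.Println(err)
		return
	}
	client := scp.NewClient(os.Getenv("SSH_HOST_NAME")+":"+os.Getenv("SSH_PORT"), &cfg)
	if err := client.Connect(); err != nil {
		fmt.Println("couldn't connect:", err)
		return
	}
	defer client.Close()

	// the fix: write into tmpPath locally and address the full remote file
	file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
	if err != nil {
		fmt.Println("Couldn't open the output file")
		return
	}
	defer file.Close()
	if err := client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName)); err != nil {
		fmt.Println("Error while copying file ", err)
	}
}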
@@ -60,7 +60,6 @@ func UploadFileToS3(filePath, key, bucket, prefix string) error {
 	}
 
 	objectKey := filepath.Join(prefix, key)
-	//fmt.Sprintf("%s/%s", prefix, key)
 
 	buffer := make([]byte, fileInfo.Size())
 	file.Read(buffer)
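The deleted line was a leftover comment showing the old way of building the object key. filepath.Join is the better choice here because it cleans the result, whereas fmt.Sprintf leaves artifacts such as a leading slash when the prefix is empty. A small demonstration:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// with an empty prefix, Sprintf leaves a leading slash behind,
	// while filepath.Join cleans the result
	fmt.Println(fmt.Sprintf("%s/%s", "", "db.sql.gz"))      // "/db.sql.gz"
	fmt.Println(filepath.Join("", "db.sql.gz"))             // "db.sql.gz"
	fmt.Println(filepath.Join("custom-path/", "db.sql.gz")) // "custom-path/db.sql.gz"
}

Strictly speaking, path.Join would be the safer pick for S3 object keys, since filepath.Join uses the OS separator; inside a Linux container the two agree.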
@@ -106,8 +105,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error {
 		fmt.Println("Failed to download file", err)
 		return err
 	}
-	Info("Backup downloaded: ", file.Name())
-	Info("Bytes size: ", numBytes)
+	Info("Backup downloaded: ", file.Name(), " bytes size ", numBytes)
 
 	return nil
 }