Mirror of https://github.com/jkaninda/mysql-bkup.git
feat: add Azure Blob storage
@@ -16,6 +16,7 @@ It supports a variety of storage options and ensures data security through GPG e
- AWS S3 or any S3-compatible object storage
- FTP
- SSH-compatible storage
- Azure Blob storage

- **Data Security:**
  - Backups can be encrypted using **GPG** to ensure confidentiality.

docs/how-tos/azure-blob.md (new file, 48 lines)
@@ -0,0 +1,48 @@
---
title: Azure Blob storage
layout: default
parent: How Tos
nav_order: 5
---

# Azure Blob storage

{: .note }
As described in the local backup section, to change the storage of your backup and use Azure Blob storage, you need to add `--storage azure` (`-s azure`).
You can also specify the folder where you want to save your data by adding the `--path my-custom-path` flag.

## Backup to Azure Blob storage

```yml
services:
  mysql-bkup:
    # In production, it is advised to lock your image tag to a proper
    # release version instead of using `latest`.
    # Check https://github.com/jkaninda/mysql-bkup/releases
    # for a list of available releases.
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    command: backup --storage azure -d database --path /my-custom-path
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      ## Azure Blob configurations
      - AZURE_STORAGE_CONTAINER_NAME=backup-container
      - AZURE_STORAGE_ACCOUNT_NAME=account-name
      - AZURE_STORAGE_ACCOUNT_KEY=Ppby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
    # mysql-bkup container must be connected to the same network as your database
    networks:
      - web
networks:
  web:
```
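Restoring from Azure Blob storage reuses the same configuration. The sketch below is illustrative rather than copied from the project docs: it assumes the `restore` command accepts the backup file name through a `--file` flag, so check the restore how-to for the exact flag names.

```yml
services:
  mysql-bkup:
    image: jkaninda/mysql-bkup
    container_name: mysql-bkup
    # `--file` is assumed here for illustration; see the restore documentation.
    command: restore --storage azure -d database --path /my-custom-path --file backup.sql.gz
    environment:
      - DB_PORT=3306
      - DB_HOST=mysql
      - DB_NAME=database
      - DB_USERNAME=username
      - DB_PASSWORD=password
      - AZURE_STORAGE_CONTAINER_NAME=backup-container
      - AZURE_STORAGE_ACCOUNT_NAME=account-name
      - AZURE_STORAGE_ACCOUNT_KEY=account-key
    networks:
      - web
networks:
  web:
```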
@@ -17,6 +17,7 @@ It supports a variety of storage options and ensures data security through GPG e
- AWS S3 or any S3-compatible object storage
- FTP
- SSH-compatible storage
- Azure Blob storage

- **Data Security:**
  - Backups can be encrypted using **GPG** to ensure confidentiality.

go.mod (6 lines changed)
@@ -7,13 +7,16 @@ require github.com/spf13/pflag v1.0.5 // indirect
require (
    github.com/go-mail/mail v2.3.1+incompatible
    github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221
    github.com/jkaninda/go-storage v0.1.1
    github.com/jkaninda/go-storage v0.1.2
    github.com/robfig/cron/v3 v3.0.1
    github.com/spf13/cobra v1.8.1
    gopkg.in/yaml.v3 v3.0.1
)

require (
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 // indirect
    github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
    github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
    github.com/ProtonMail/gopenpgp/v2 v2.7.5 // indirect
@@ -27,6 +30,7 @@ require (
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    golang.org/x/crypto v0.28.0 // indirect
    golang.org/x/net v0.29.0 // indirect
    golang.org/x/sys v0.26.0 // indirect
    golang.org/x/text v0.19.0 // indirect
    gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect

go.sum (10 lines changed)
@@ -1,3 +1,9 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
@@ -30,6 +36,8 @@ github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221 h1:AwkCf7el1kze
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221/go.mod h1:9F8ZJ+ZXE8DZBo77+aneGj8LMjrYXX6eFUCC/uqZOUo=
github.com/jkaninda/go-storage v0.1.1 h1:vjpdD/fh39S5HGyfHvLE5HGYOEPIukINlOX3OnM3GW4=
github.com/jkaninda/go-storage v0.1.1/go.mod h1:7VK5gQISQaLxtLfBtc+een8spcgLVSBAKTRuyF1N81I=
github.com/jkaninda/go-storage v0.1.2 h1:d7+TRPjmHXdSqO0wne3KAB8zt9ih8lf5D8aL4n7/Dds=
github.com/jkaninda/go-storage v0.1.2/go.mod h1:zVRnLprBk/9AUz2+za6Y03MgoNYrqKLy3edVtjqMaps=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@@ -66,6 +74,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

internal/azure.go (new file, 122 lines)
@@ -0,0 +1,122 @@
/*
MIT License

Copyright (c) 2023 Jonas Kaninda

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

package internal

import (
    "fmt"
    "github.com/jkaninda/go-storage/pkg/azure"
    "github.com/jkaninda/mysql-bkup/pkg/logger"
    "github.com/jkaninda/mysql-bkup/utils"

    "os"
    "path/filepath"
    "time"
)

func azureBackup(db *dbConfig, config *BackupConfig) {
    logger.Info("Backup database to Azure Blob storage")
    startTime = time.Now().Format(utils.TimeFormat())

    // Backup database
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    logger.Info("Uploading backup archive to Azure Blob storage ...")
    logger.Info("Backup name is %s", finalFileName)
    azureConfig := loadAzureConfig()
    azureStorage, err := azure.NewStorage(azure.Config{
        ContainerName: azureConfig.containerName,
        AccountName:   azureConfig.accountName,
        AccountKey:    azureConfig.accountKey,
        RemotePath:    config.remotePath,
        LocalPath:     tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating Azure Blob storage: %s", err)
    }
    err = azureStorage.Copy(finalFileName)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
    // Get backup info
    fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error: %s", err)
    }
    backupSize = fileInfo.Size()
    // Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error deleting file: %v", err)
    }
    if config.prune {
        err := azureStorage.Prune(config.backupRetention)
        if err != nil {
            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
        }
    }

    logger.Info("Uploading backup archive to Azure Blob storage ... done ")

    // Send notification
    utils.NotifySuccess(&utils.NotificationData{
        File:           finalFileName,
        BackupSize:     backupSize,
        Database:       db.dbName,
        Storage:        config.storage,
        BackupLocation: filepath.Join(config.remotePath, finalFileName),
        StartTime:      startTime,
        EndTime:        time.Now().Format(utils.TimeFormat()),
    })
    // Delete temp
    deleteTemp()
    logger.Info("Backup completed successfully")
}
func azureRestore(db *dbConfig, conf *RestoreConfig) {
    logger.Info("Restore database from Azure Blob storage")
    azureConfig := loadAzureConfig()
    azureStorage, err := azure.NewStorage(azure.Config{
        ContainerName: azureConfig.containerName,
        AccountName:   azureConfig.accountName,
        AccountKey:    azureConfig.accountKey,
        RemotePath:    conf.remotePath,
        LocalPath:     tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating Azure Blob storage: %s", err)
    }

    err = azureStorage.CopyFrom(conf.file)
    if err != nil {
        logger.Fatal("Error downloading backup file: %s", err)
    }
    RestoreDatabase(db, conf)
}
@@ -27,10 +27,7 @@ package internal
import (
    "fmt"
    "github.com/jkaninda/encryptor"
    "github.com/jkaninda/go-storage/pkg/ftp"
    "github.com/jkaninda/go-storage/pkg/local"
    "github.com/jkaninda/go-storage/pkg/s3"
    "github.com/jkaninda/go-storage/pkg/ssh"
    "github.com/jkaninda/mysql-bkup/pkg/logger"
    "github.com/jkaninda/mysql-bkup/utils"
    "github.com/robfig/cron/v3"
@@ -125,6 +122,8 @@ func BackupTask(db *dbConfig, config *BackupConfig) {
        sshBackup(db, config)
    case "ftp", "FTP":
        ftpBackup(db, config)
    case "azure":
        azureBackup(db, config)
    default:
        localBackup(db, config)
    }
@@ -302,214 +301,6 @@ func localBackup(db *dbConfig, config *BackupConfig) {
    logger.Info("Backup completed successfully")
}

func s3Backup(db *dbConfig, config *BackupConfig) {

    logger.Info("Backup database to s3 storage")
    startTime = time.Now().Format(utils.TimeFormat())
    // Backup database
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    logger.Info("Uploading backup archive to remote storage S3 ... ")
    awsConfig := initAWSConfig()
    if config.remotePath == "" {
        config.remotePath = awsConfig.remotePath
    }
    logger.Info("Backup name is %s", finalFileName)
    s3Storage, err := s3.NewStorage(s3.Config{
        Endpoint:       awsConfig.endpoint,
        Bucket:         awsConfig.bucket,
        AccessKey:      awsConfig.accessKey,
        SecretKey:      awsConfig.secretKey,
        Region:         awsConfig.region,
        DisableSsl:     awsConfig.disableSsl,
        ForcePathStyle: awsConfig.forcePathStyle,
        RemotePath:     awsConfig.remotePath,
        LocalPath:      tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating s3 storage: %s", err)
    }
    err = s3Storage.Copy(finalFileName)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    // Get backup info
    fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error: %s", err)
    }
    backupSize = fileInfo.Size()

    // Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
    if err != nil {
        fmt.Println("Error deleting file: ", err)
    }
    // Delete old backup
    if config.prune {
        err := s3Storage.Prune(config.backupRetention)
        if err != nil {
            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
        }
    }
    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
    logger.Info("Uploading backup archive to remote storage S3 ... done ")
    // Send notification
    utils.NotifySuccess(&utils.NotificationData{
        File:           finalFileName,
        BackupSize:     backupSize,
        Database:       db.dbName,
        Storage:        config.storage,
        BackupLocation: filepath.Join(config.remotePath, finalFileName),
        StartTime:      startTime,
        EndTime:        time.Now().Format(utils.TimeFormat()),
    })
    // Delete temp
    deleteTemp()
    logger.Info("Backup completed successfully")

}
func sshBackup(db *dbConfig, config *BackupConfig) {
    logger.Info("Backup database to Remote server")
    startTime = time.Now().Format(utils.TimeFormat())
    // Backup database
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    logger.Info("Uploading backup archive to remote storage ... ")
    logger.Info("Backup name is %s", finalFileName)
    sshConfig, err := loadSSHConfig()
    if err != nil {
        logger.Fatal("Error loading ssh config: %s", err)
    }

    sshStorage, err := ssh.NewStorage(ssh.Config{
        Host:       sshConfig.hostName,
        Port:       sshConfig.port,
        User:       sshConfig.user,
        Password:   sshConfig.password,
        RemotePath: config.remotePath,
        LocalPath:  tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating SSH storage: %s", err)
    }
    err = sshStorage.Copy(finalFileName)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    // Get backup info
    fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error: %s", err)
    }
    backupSize = fileInfo.Size()
    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))

    // Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error deleting file: %v", err)
    }
    if config.prune {
        err := sshStorage.Prune(config.backupRetention)
        if err != nil {
            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
        }
    }
    logger.Info("Uploading backup archive to remote storage ... done ")
    // Send notification
    utils.NotifySuccess(&utils.NotificationData{
        File:           finalFileName,
        BackupSize:     backupSize,
        Database:       db.dbName,
        Storage:        config.storage,
        BackupLocation: filepath.Join(config.remotePath, finalFileName),
        StartTime:      startTime,
        EndTime:        time.Now().Format(utils.TimeFormat()),
    })
    // Delete temp
    deleteTemp()
    logger.Info("Backup completed successfully")

}
func ftpBackup(db *dbConfig, config *BackupConfig) {
    logger.Info("Backup database to the remote FTP server")
    startTime = time.Now().Format(utils.TimeFormat())

    // Backup database
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    logger.Info("Uploading backup archive to the remote FTP server ... ")
    logger.Info("Backup name is %s", finalFileName)
    ftpConfig := loadFtpConfig()
    ftpStorage, err := ftp.NewStorage(ftp.Config{
        Host:       ftpConfig.host,
        Port:       ftpConfig.port,
        User:       ftpConfig.user,
        Password:   ftpConfig.password,
        RemotePath: config.remotePath,
        LocalPath:  tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating SSH storage: %s", err)
    }
    err = ftpStorage.Copy(finalFileName)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
    // Get backup info
    fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error: %s", err)
    }
    backupSize = fileInfo.Size()
    // Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error deleting file: %v", err)
    }
    if config.prune {
        err := ftpStorage.Prune(config.backupRetention)
        if err != nil {
            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
        }
    }

    logger.Info("Uploading backup archive to the remote FTP server ... done ")

    // Send notification
    utils.NotifySuccess(&utils.NotificationData{
        File:           finalFileName,
        BackupSize:     backupSize,
        Database:       db.dbName,
        Storage:        config.storage,
        BackupLocation: filepath.Join(config.remotePath, finalFileName),
        StartTime:      startTime,
        EndTime:        time.Now().Format(utils.TimeFormat()),
    })
    // Delete temp
    deleteTemp()
    logger.Info("Backup completed successfully")
}

func encryptBackup(config *BackupConfig) {
    backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
    outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
@@ -84,6 +84,11 @@ type FTPConfig struct {
    port       string
    remotePath string
}
type AzureConfig struct {
    accountName   string
    accountKey    string
    containerName string
}

// SSHConfig holds the SSH connection details
type SSHConfig struct {
@@ -164,6 +169,21 @@ func loadFtpConfig() *FTPConfig {
    }
    return &fConfig
}
func loadAzureConfig() *AzureConfig {
    // Initialize data configs
    aConfig := AzureConfig{}
    aConfig.containerName = os.Getenv("AZURE_STORAGE_CONTAINER_NAME")
    aConfig.accountName = os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
    aConfig.accountKey = os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

    err := utils.CheckEnvVars(azureVars)
    if err != nil {
        logger.Error("Please make sure all required environment variables for Azure Blob storage are set")
        logger.Fatal("Error missing environment variables: %s", err)
    }
    return &aConfig
}
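loadAzureConfig reads the three `AZURE_STORAGE_*` variables and aborts the run if any of them is missing. A minimal Compose environment block that satisfies that check might look like the sketch below; the values are placeholders, not taken from the commit.

```yml
environment:
  # All three variables are required by loadAzureConfig; placeholder values.
  - AZURE_STORAGE_CONTAINER_NAME=backup-container
  - AZURE_STORAGE_ACCOUNT_NAME=account-name
  - AZURE_STORAGE_ACCOUNT_KEY=base64-account-key
```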
func initAWSConfig() *AWSConfig {
    // Initialize AWS configs
    aConfig := AWSConfig{}

internal/remote.go (new file, 218 lines)
@@ -0,0 +1,218 @@
/*
MIT License

Copyright (c) 2023 Jonas Kaninda

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

package internal

import (
    "fmt"
    "github.com/jkaninda/go-storage/pkg/ftp"
    "github.com/jkaninda/go-storage/pkg/ssh"
    "github.com/jkaninda/mysql-bkup/pkg/logger"
    "github.com/jkaninda/mysql-bkup/utils"

    "os"
    "path/filepath"
    "time"
)

func sshBackup(db *dbConfig, config *BackupConfig) {
    logger.Info("Backup database to Remote server")
    startTime = time.Now().Format(utils.TimeFormat())
    // Backup database
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    logger.Info("Uploading backup archive to remote storage ... ")
    logger.Info("Backup name is %s", finalFileName)
    sshConfig, err := loadSSHConfig()
    if err != nil {
        logger.Fatal("Error loading ssh config: %s", err)
    }

    sshStorage, err := ssh.NewStorage(ssh.Config{
        Host:       sshConfig.hostName,
        Port:       sshConfig.port,
        User:       sshConfig.user,
        Password:   sshConfig.password,
        RemotePath: config.remotePath,
        LocalPath:  tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating SSH storage: %s", err)
    }
    err = sshStorage.Copy(finalFileName)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    // Get backup info
    fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error: %s", err)
    }
    backupSize = fileInfo.Size()
    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))

    // Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error deleting file: %v", err)
    }
    if config.prune {
        err := sshStorage.Prune(config.backupRetention)
        if err != nil {
            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
        }
    }
    logger.Info("Uploading backup archive to remote storage ... done ")
    // Send notification
    utils.NotifySuccess(&utils.NotificationData{
        File:           finalFileName,
        BackupSize:     backupSize,
        Database:       db.dbName,
        Storage:        config.storage,
        BackupLocation: filepath.Join(config.remotePath, finalFileName),
        StartTime:      startTime,
        EndTime:        time.Now().Format(utils.TimeFormat()),
    })
    // Delete temp
    deleteTemp()
    logger.Info("Backup completed successfully")

}
func ftpBackup(db *dbConfig, config *BackupConfig) {
    logger.Info("Backup database to the remote FTP server")
    startTime = time.Now().Format(utils.TimeFormat())

    // Backup database
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    logger.Info("Uploading backup archive to the remote FTP server ... ")
    logger.Info("Backup name is %s", finalFileName)
    ftpConfig := loadFtpConfig()
    ftpStorage, err := ftp.NewStorage(ftp.Config{
        Host:       ftpConfig.host,
        Port:       ftpConfig.port,
        User:       ftpConfig.user,
        Password:   ftpConfig.password,
        RemotePath: config.remotePath,
        LocalPath:  tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating FTP storage: %s", err)
    }
    err = ftpStorage.Copy(finalFileName)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
    // Get backup info
    fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error: %s", err)
    }
    backupSize = fileInfo.Size()
    // Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error deleting file: %v", err)
    }
    if config.prune {
        err := ftpStorage.Prune(config.backupRetention)
        if err != nil {
            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
        }
    }

    logger.Info("Uploading backup archive to the remote FTP server ... done ")

    // Send notification
    utils.NotifySuccess(&utils.NotificationData{
        File:           finalFileName,
        BackupSize:     backupSize,
        Database:       db.dbName,
        Storage:        config.storage,
        BackupLocation: filepath.Join(config.remotePath, finalFileName),
        StartTime:      startTime,
        EndTime:        time.Now().Format(utils.TimeFormat()),
    })
    // Delete temp
    deleteTemp()
    logger.Info("Backup completed successfully")
}
func remoteRestore(db *dbConfig, conf *RestoreConfig) {
    logger.Info("Restore database from remote server")
    sshConfig, err := loadSSHConfig()
    if err != nil {
        logger.Fatal("Error loading ssh config: %s", err)
    }

    sshStorage, err := ssh.NewStorage(ssh.Config{
        Host:         sshConfig.hostName,
        Port:         sshConfig.port,
        User:         sshConfig.user,
        Password:     sshConfig.password,
        IdentifyFile: sshConfig.identifyFile,
        RemotePath:   conf.remotePath,
        LocalPath:    tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating SSH storage: %s", err)
    }
    err = sshStorage.CopyFrom(conf.file)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    RestoreDatabase(db, conf)
}
func ftpRestore(db *dbConfig, conf *RestoreConfig) {
    logger.Info("Restore database from FTP server")
    ftpConfig := loadFtpConfig()
    ftpStorage, err := ftp.NewStorage(ftp.Config{
        Host:       ftpConfig.host,
        Port:       ftpConfig.port,
        User:       ftpConfig.user,
        Password:   ftpConfig.password,
        RemotePath: conf.remotePath,
        LocalPath:  tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating FTP storage: %s", err)
    }
    err = ftpStorage.CopyFrom(conf.file)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    RestoreDatabase(db, conf)
}
@@ -26,10 +26,7 @@ SOFTWARE.
*/
import (
    "github.com/jkaninda/encryptor"
    "github.com/jkaninda/go-storage/pkg/ftp"
    "github.com/jkaninda/go-storage/pkg/local"
    "github.com/jkaninda/go-storage/pkg/s3"
    "github.com/jkaninda/go-storage/pkg/ssh"
    "github.com/jkaninda/mysql-bkup/pkg/logger"
    "github.com/jkaninda/mysql-bkup/utils"
    "github.com/spf13/cobra"
@@ -47,11 +44,13 @@ func StartRestore(cmd *cobra.Command) {
    case "local":
        localRestore(dbConf, restoreConf)
    case "s3", "S3":
        restoreFromS3(dbConf, restoreConf)
        s3Restore(dbConf, restoreConf)
    case "ssh", "SSH", "remote":
        restoreFromRemote(dbConf, restoreConf)
        remoteRestore(dbConf, restoreConf)
    case "ftp", "FTP":
        restoreFromFTP(dbConf, restoreConf)
        ftpRestore(dbConf, restoreConf)
    case "azure":
        azureRestore(dbConf, restoreConf)
    default:
        localRestore(dbConf, restoreConf)
    }
@@ -69,77 +68,6 @@ func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
    RestoreDatabase(dbConf, restoreConf)

}
func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
    logger.Info("Restore database from s3")
    awsConfig := initAWSConfig()
    if conf.remotePath == "" {
        conf.remotePath = awsConfig.remotePath
    }
    s3Storage, err := s3.NewStorage(s3.Config{
        Endpoint:       awsConfig.endpoint,
        Bucket:         awsConfig.bucket,
        AccessKey:      awsConfig.accessKey,
        SecretKey:      awsConfig.secretKey,
        Region:         awsConfig.region,
        DisableSsl:     awsConfig.disableSsl,
        ForcePathStyle: awsConfig.forcePathStyle,
        RemotePath:     awsConfig.remotePath,
        LocalPath:      tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating s3 storage: %s", err)
    }
    err = s3Storage.CopyFrom(conf.file)
    if err != nil {
        logger.Fatal("Error download file from S3 storage: %s", err)
    }
    RestoreDatabase(db, conf)
}
func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
    logger.Info("Restore database from remote server")
    sshConfig, err := loadSSHConfig()
    if err != nil {
        logger.Fatal("Error loading ssh config: %s", err)
    }

    sshStorage, err := ssh.NewStorage(ssh.Config{
        Host:         sshConfig.hostName,
        Port:         sshConfig.port,
        User:         sshConfig.user,
        Password:     sshConfig.password,
        IdentifyFile: sshConfig.identifyFile,
        RemotePath:   conf.remotePath,
        LocalPath:    tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating SSH storage: %s", err)
    }
    err = sshStorage.CopyFrom(conf.file)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    RestoreDatabase(db, conf)
}
func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
    logger.Info("Restore database from FTP server")
    ftpConfig := loadFtpConfig()
    ftpStorage, err := ftp.NewStorage(ftp.Config{
        Host:       ftpConfig.host,
        Port:       ftpConfig.port,
        User:       ftpConfig.user,
        Password:   ftpConfig.password,
        RemotePath: conf.remotePath,
        LocalPath:  tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating SSH storage: %s", err)
    }
    err = ftpStorage.CopyFrom(conf.file)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    RestoreDatabase(db, conf)
}

// RestoreDatabase restore database
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {

internal/s3.go (new file, 135 lines)
@@ -0,0 +1,135 @@
/*
MIT License

Copyright (c) 2023 Jonas Kaninda

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

package internal

import (
    "fmt"
    "github.com/jkaninda/go-storage/pkg/s3"
    "github.com/jkaninda/mysql-bkup/pkg/logger"
    "github.com/jkaninda/mysql-bkup/utils"

    "os"
    "path/filepath"
    "time"
)

func s3Backup(db *dbConfig, config *BackupConfig) {

    logger.Info("Backup database to s3 storage")
    startTime = time.Now().Format(utils.TimeFormat())
    // Backup database
    BackupDatabase(db, config.backupFileName, disableCompression)
    finalFileName := config.backupFileName
    if config.encryption {
        encryptBackup(config)
        finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
    }
    logger.Info("Uploading backup archive to remote storage S3 ... ")
    awsConfig := initAWSConfig()
    if config.remotePath == "" {
        config.remotePath = awsConfig.remotePath
    }
    logger.Info("Backup name is %s", finalFileName)
    s3Storage, err := s3.NewStorage(s3.Config{
        Endpoint:       awsConfig.endpoint,
        Bucket:         awsConfig.bucket,
        AccessKey:      awsConfig.accessKey,
        SecretKey:      awsConfig.secretKey,
        Region:         awsConfig.region,
        DisableSsl:     awsConfig.disableSsl,
        ForcePathStyle: awsConfig.forcePathStyle,
        RemotePath:     awsConfig.remotePath,
        LocalPath:      tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating s3 storage: %s", err)
    }
    err = s3Storage.Copy(finalFileName)
    if err != nil {
        logger.Fatal("Error copying backup file: %s", err)
    }
    // Get backup info
    fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
    if err != nil {
        logger.Error("Error: %s", err)
    }
    backupSize = fileInfo.Size()

    // Delete backup file from tmp folder
    err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
    if err != nil {
        fmt.Println("Error deleting file: ", err)
    }
    // Delete old backup
    if config.prune {
        err := s3Storage.Prune(config.backupRetention)
        if err != nil {
            logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
        }
    }
    logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
    logger.Info("Uploading backup archive to remote storage S3 ... done ")
    // Send notification
    utils.NotifySuccess(&utils.NotificationData{
        File:           finalFileName,
        BackupSize:     backupSize,
        Database:       db.dbName,
        Storage:        config.storage,
        BackupLocation: filepath.Join(config.remotePath, finalFileName),
        StartTime:      startTime,
        EndTime:        time.Now().Format(utils.TimeFormat()),
    })
    // Delete temp
    deleteTemp()
    logger.Info("Backup completed successfully")

}
func s3Restore(db *dbConfig, conf *RestoreConfig) {
    logger.Info("Restore database from s3")
    awsConfig := initAWSConfig()
    if conf.remotePath == "" {
        conf.remotePath = awsConfig.remotePath
    }
    s3Storage, err := s3.NewStorage(s3.Config{
        Endpoint:       awsConfig.endpoint,
        Bucket:         awsConfig.bucket,
        AccessKey:      awsConfig.accessKey,
        SecretKey:      awsConfig.secretKey,
        Region:         awsConfig.region,
        DisableSsl:     awsConfig.disableSsl,
        ForcePathStyle: awsConfig.forcePathStyle,
        RemotePath:     awsConfig.remotePath,
        LocalPath:      tmpPath,
    })
    if err != nil {
        logger.Fatal("Error creating s3 storage: %s", err)
    }
    err = s3Storage.CopyFrom(conf.file)
    if err != nil {
        logger.Fatal("Error downloading file from S3 storage: %s", err)
    }
    RestoreDatabase(db, conf)
}
@@ -74,6 +74,11 @@ var ftpVars = []string{
    "FTP_PASSWORD",
    "FTP_PORT",
}
var azureVars = []string{
    "AZURE_STORAGE_CONTAINER_NAME",
    "AZURE_STORAGE_ACCOUNT_NAME",
    "AZURE_STORAGE_ACCOUNT_KEY",
}

// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{