From a8fa58dacbabe43b53f1bb76161d0bb79b07a380 Mon Sep 17 00:00:00 2001 From: Jonas Kaninda Date: Mon, 29 Jul 2024 07:33:26 +0200 Subject: [PATCH 1/9] Migrate from s3fs to go aws s3 client --- .github/workflows/deploy-docs.yml | 55 +++++++++++++++ Makefile | 10 ++- docker/Dockerfile | 12 +++- go.mod | 3 +- go.sum | 35 ++++++++++ pkg/backup.go | 89 +++++++++++++++++++------ pkg/encrypt_archive.go | 1 + pkg/restore.go | 50 ++++++++++---- pkg/var.go | 1 + utils/s3.go | 107 ++++++++++++++++++++++++++++++ utils/utils.go | 37 +++++++++++ 11 files changed, 359 insertions(+), 41 deletions(-) create mode 100644 .github/workflows/deploy-docs.yml create mode 100644 pkg/encrypt_archive.go create mode 100644 utils/s3.go diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml new file mode 100644 index 0000000..17ae6ed --- /dev/null +++ b/.github/workflows/deploy-docs.yml @@ -0,0 +1,55 @@ +name: Deploy Documenation site to GitHub Pages + +on: + push: + branches: ['main'] + paths: + - 'docs/**' + - '.github/workflows/deploy-docs.yml' + workflow_dispatch: + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: 'pages' + cancel-in-progress: true + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Setup Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.2' + bundler-cache: true + cache-version: 0 + working-directory: docs + - name: Setup Pages + id: pages + uses: actions/configure-pages@v2 + - name: Build with Jekyll + working-directory: docs + run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" + env: + JEKYLL_ENV: production + - name: Upload artifact + uses: actions/upload-pages-artifact@v1 + with: + path: 'docs/_site/' + + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v1 \ No newline at end of file diff --git a/Makefile b/Makefile index e720a31..2fa83c6 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ docker-build: docker build -f docker/Dockerfile -t jkaninda/pg-bkup:latest . 
docker-run: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/pg-bkup bkup backup --prune --keep-last 2 + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/pg-bkup bkup backup --prune --keep-last 2 docker-run-scheduled: docker-build @@ -27,6 +27,10 @@ docker-run-scheduled: docker-build docker-run-scheduled-s3: docker-build docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" -docker-restore-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "FILE_NAME=${FILE_NAME}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path +docker-run-s3: docker-build + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path + + +docker-restore-s3: docker-build + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "FILE_NAME=${FILE_NAME}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path diff --git a/docker/Dockerfile b/docker/Dockerfile index 38e82b8..bb1e3a5 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -19,6 +19,13 @@ ENV STORAGE=local ENV BUCKET_NAME="" ENV ACCESS_KEY="" ENV SECRET_KEY="" +ENV REGION="" +ENV SSH_USER="" +ENV SSH_PASSWORD="" +ENV SSH_HOST_NAME="" +ENV SSH_IDENTIFY_FILE="/root/.ssh/id_rsa" +ENV GPG_PASS_PHRASE="" +ENV SSH_PORT="22" ENV S3_ENDPOINT=https://s3.amazonaws.com ARG DEBIAN_FRONTEND=noninteractive ENV VERSION="v0.6" @@ -26,7 +33,7 @@ LABEL authors="Jonas Kaninda" RUN apt-get update -qq -RUN apt install s3fs postgresql-client postgresql-client-common libpq-dev supervisor cron -y +RUN apt install postgresql-client postgresql-client-common supervisor cron openssh-client -y # Clear cache RUN apt-get clean && rm -rf /var/lib/apt/lists/* @@ -45,4 +52,5 @@ ADD docker/supervisord.conf /etc/supervisor/supervisord.conf RUN mkdir /backup -WORKDIR /backup \ No newline at end of file +RUN mkdir /tmp/pg-bkup +WORKDIR /root diff --git a/go.mod b/go.mod index 789cfc7..83d89a2 100644 --- a/go.mod +++ b/go.mod @@ -8,8 +8,9 @@ require ( ) require ( + github.com/aws/aws-sdk-go 
v1.55.3 // indirect github.com/hpcloud/tail v1.0.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect golang.org/x/sys v0.22.0 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index 426c610..2d2cfef 100644 --- a/go.sum +++ b/go.sum @@ -1,18 +1,53 @@ +github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E= +github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0= +github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/backup.go b/pkg/backup.go index f21a78b..ba4676a 100644 --- a/pkg/backup.go +++ b/pkg/backup.go @@ -33,16 +33,35 @@ func StartBackup(cmd *cobra.Command) { prune, _ := cmd.Flags().GetBool("prune") disableCompression, _ = cmd.Flags().GetBool("disable-compression") executionMode, _ = cmd.Flags().GetString("mode") + dbName = os.Getenv("DB_NAME") + storagePath = os.Getenv("STORAGE_PATH") + + //Generate file name + backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405")) + if disableCompression { + backupFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405")) + } if executionMode == "default" { - if storage == "s3" { + switch storage { + case "s3": utils.Info("Backup database to s3 storage") - s3Backup(disableCompression, s3Path, prune, keepLast) - } else { + BackupDatabase(backupFileName, disableCompression, prune, keepLast) + s3Upload(backupFileName, s3Path) + case "local": utils.Info("Backup database to local storage") - BackupDatabase(disableCompression, prune, keepLast) - + BackupDatabase(backupFileName, disableCompression, prune, keepLast) + moveToBackup(backupFileName, storagePath) + case "ssh": + fmt.Println("x is 2") + case "ftp": + fmt.Println("x is 3") + default: + utils.Info("Backup database to local storage") + BackupDatabase(backupFileName, disableCompression, prune, keepLast) + moveToBackup(backupFileName, storagePath) } + } else if executionMode == "scheduled" { scheduledMode() } else { @@ -98,7 +117,7 @@ func scheduledMode() { } // BackupDatabase backup database -func BackupDatabase(disableCompression bool, prune bool, keepLast int) { +func BackupDatabase(backupFileName string, disableCompression bool, prune bool, keepLast int) { dbHost = os.Getenv("DB_HOST") dbPassword = os.Getenv("DB_PASSWORD") dbUserName = os.Getenv("DB_USERNAME") @@ -117,12 +136,9 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) { utils.TestDatabaseConnection() // Backup Database database utils.Info("Backing up database...") - //Generate file name - bkFileName 
:= fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405")) // Verify is compression is disabled if disableCompression { - bkFileName = fmt.Sprintf("%s_%s.sql", dbName, time.Now().Format("20060102_150405")) // Execute pg_dump cmd := exec.Command("pg_dump", "-h", dbHost, @@ -135,7 +151,7 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) { log.Fatal(err) } // save output - file, err := os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName)) + file, err := os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName)) if err != nil { log.Fatal(err) } @@ -145,7 +161,6 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) { if err != nil { log.Fatal(err) } - utils.Done("Database has been backed up") } else { // Execute pg_dump @@ -162,7 +177,7 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) { gzipCmd := exec.Command("gzip") gzipCmd.Stdin = stdout // save output - gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", storagePath, bkFileName)) + gzipCmd.Stdout, err = os.Create(fmt.Sprintf("%s/%s", tmpPath, backupFileName)) gzipCmd.Start() if err != nil { log.Fatal(err) @@ -173,30 +188,62 @@ func BackupDatabase(disableCompression bool, prune bool, keepLast int) { if err := gzipCmd.Wait(); err != nil { log.Fatal(err) } - utils.Done("Database has been backed up") } + utils.Done("Database has been backed up") + //Delete old backup - if prune { - deleteOldBackup(keepLast) - } + //if prune { + // deleteOldBackup(keepLast) + //} - historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", storagePath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", tmpPath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { log.Fatal(err) } defer historyFile.Close() - if _, err := historyFile.WriteString(bkFileName + "\n"); err != nil { + if _, err := historyFile.WriteString(backupFileName + "\n"); err != nil { log.Fatal(err) } } } +func moveToBackup(backupFileName string, destinationPath string) { + //Copy backup from tmp folder to storage destination + err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName)) + if err != nil { + utils.Fatal("Error copying file ", backupFileName, err) -func s3Backup(disableCompression bool, s3Path string, prune bool, keepLast int) { + } + //Delete backup file from tmp folder + err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName)) + if err != nil { + fmt.Println("Error deleting file:", err) + + } + utils.Done("Database has been backed up and copied to destination ") +} +func s3Upload(backupFileName string, s3Path string) { + bucket := os.Getenv("BUCKET_NAME") + utils.Info("Uploading file to S3 storage") + err := utils.UploadFileToS3(tmpPath, backupFileName, bucket, s3Path) + if err != nil { + utils.Fatalf("Error uploading file to S3: %s ", err) + + } + + //Delete backup file from tmp folder + err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName)) + if err != nil { + fmt.Println("Error deleting file:", err) + + } + utils.Done("Database has been backed up and uploaded to s3 ") +} +func s3Backup(backupFileName string, disableCompression bool, s3Path string, prune bool, keepLast int) { // Backup Database to S3 storage - MountS3Storage(s3Path) - BackupDatabase(disableCompression, prune, keepLast) + //MountS3Storage(s3Path) + //BackupDatabase(backupFileName, disableCompression, prune, keepLast) } func deleteOldBackup(keepLast int) { utils.Info("Deleting old backups...") diff 
--git a/pkg/encrypt_archive.go b/pkg/encrypt_archive.go new file mode 100644 index 0000000..c1caffe --- /dev/null +++ b/pkg/encrypt_archive.go @@ -0,0 +1 @@ +package pkg diff --git a/pkg/restore.go b/pkg/restore.go index 05541f2..619c40a 100644 --- a/pkg/restore.go +++ b/pkg/restore.go @@ -21,13 +21,34 @@ func StartRestore(cmd *cobra.Command) { storage = utils.GetEnv(cmd, "storage", "STORAGE") file = utils.GetEnv(cmd, "file", "FILE_NAME") executionMode, _ = cmd.Flags().GetString("mode") + bucket := os.Getenv("BUCKET_NAME") - if storage == "s3" { + switch storage { + case "s3": utils.Info("Restore database from s3") - s3Restore(file, s3Path) - } else { + err := utils.DownloadFile(tmpPath, file, bucket, s3Path) + if err != nil { + utils.Fatal("Error download file from s3 ", file, err) + } + RestoreDatabase(file) + case "local": + utils.Info("Restore database from local") + copyTmp(storagePath, file) + RestoreDatabase(file) + case "ssh": + fmt.Println("x is 2") + case "ftp": + fmt.Println("x is 3") + default: utils.Info("Restore database from local") RestoreDatabase(file) + } +} +func copyTmp(sourcePath string, backupFileName string) { + //Copy backup from tmp folder to storage destination + err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName)) + if err != nil { + utils.Fatal("Error copying file ", backupFileName, err) } } @@ -39,7 +60,7 @@ func RestoreDatabase(file string) { dbUserName = os.Getenv("DB_USERNAME") dbName = os.Getenv("DB_NAME") dbPort = os.Getenv("DB_PORT") - storagePath = os.Getenv("STORAGE_PATH") + //storagePath = os.Getenv("STORAGE_PATH") if file == "" { utils.Fatal("Error, file required") } @@ -48,7 +69,7 @@ func RestoreDatabase(file string) { utils.Fatal("Please make sure all required environment variables are set") } else { - if utils.FileExists(fmt.Sprintf("%s/%s", storagePath, file)) { + if utils.FileExists(fmt.Sprintf("%s/%s", tmpPath, file)) { err := os.Setenv("PGPASSWORD", dbPassword) if err != nil { @@ -56,10 +77,10 @@ func RestoreDatabase(file string) { } utils.TestDatabaseConnection() - extension := filepath.Ext(fmt.Sprintf("%s/%s", storagePath, file)) + extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file)) // Restore from compressed file / .sql.gz if extension == ".gz" { - str := "zcat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME") + str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME") _, err := exec.Command("bash", "-c", str).Output() if err != nil { utils.Fatal("Error, in restoring the database") @@ -68,7 +89,7 @@ func RestoreDatabase(file string) { } else if extension == ".sql" { //Restore from sql file - str := "cat " + fmt.Sprintf("%s/%s", storagePath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME") + str := "cat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME") _, err := exec.Command("bash", "-c", str).Output() if err != nil { utils.Fatal("Error in restoring the database", err) @@ -79,12 +100,13 @@ func RestoreDatabase(file string) { } } else { - utils.Fatal("File not found in ", 
fmt.Sprintf("%s/%s", storagePath, file)) + utils.Fatal("File not found in ", fmt.Sprintf("%s/%s", tmpPath, file)) } } } -func s3Restore(file, s3Path string) { - // Restore database from S3 - MountS3Storage(s3Path) - RestoreDatabase(file) -} + +//func s3Restore(file, s3Path string) { +// // Restore database from S3 +// MountS3Storage(s3Path) +// RestoreDatabase(file) +//} diff --git a/pkg/var.go b/pkg/var.go index ca9af1e..d849cab 100644 --- a/pkg/var.go +++ b/pkg/var.go @@ -3,6 +3,7 @@ package pkg const s3MountPath string = "/s3mnt" const s3fsPasswdFile string = "/etc/passwd-s3fs" const cronLogFile = "/var/log/pg-bkup.log" +const tmpPath = "/tmp/pg-bkup" const backupCronFile = "/usr/local/bin/backup_cron.sh" var ( diff --git a/utils/s3.go b/utils/s3.go new file mode 100644 index 0000000..1566dca --- /dev/null +++ b/utils/s3.go @@ -0,0 +1,107 @@ +package utils + +import ( + "bytes" + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "net/http" + "os" + "path/filepath" +) + +// CreateSession creates a new AWS session +func CreateSession() (*session.Session, error) { + + //key := aws.String("testobject") + endPoint := os.Getenv("S3_ENDPOINT") + //bucket := os.Getenv("BUCKET_NAME") + region := os.Getenv("REGION") + accessKey := os.Getenv("ACCESS_KEY") + secretKey := os.Getenv("SECRET_KEY") + + // Configure to use MinIO Server + s3Config := &aws.Config{ + Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""), + Endpoint: aws.String(endPoint), + Region: aws.String(region), + DisableSSL: aws.Bool(false), + S3ForcePathStyle: aws.Bool(true), + } + return session.NewSession(s3Config) + +} + +// UploadFileToS3 uploads a file to S3 with a given prefix +func UploadFileToS3(filePath, key, bucket, prefix string) error { + sess, err := CreateSession() + if err != nil { + return err + } + + svc := s3.New(sess) + + file, err := os.Open(filepath.Join(filePath, key)) + if err != nil { + return err + } + defer file.Close() + + fileInfo, err := file.Stat() + if err != nil { + return err + } + + objectKey := fmt.Sprintf("%s/%s", prefix, key) + + buffer := make([]byte, fileInfo.Size()) + file.Read(buffer) + fileBytes := bytes.NewReader(buffer) + fileType := http.DetectContentType(buffer) + + _, err = svc.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(objectKey), + Body: fileBytes, + ContentLength: aws.Int64(fileInfo.Size()), + ContentType: aws.String(fileType), + }) + if err != nil { + return err + } + + return nil +} +func DownloadFile(destinationPath, key, bucket, prefix string) error { + + sess, err := CreateSession() + if err != nil { + return err + } + + file, err := os.Create(filepath.Join(destinationPath, key)) + if err != nil { + fmt.Println("Failed to create file", err) + return err + } + defer file.Close() + + objectKey := fmt.Sprintf("%s/%s", prefix, key) + + downloader := s3manager.NewDownloader(sess) + numBytes, err := downloader.Download(file, + &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(objectKey), + }) + if err != nil { + fmt.Println("Failed to download file", err) + return err + } + fmt.Println("Bytes size", numBytes) + Info("Backup downloaded to ", file.Name()) + return nil +} diff --git a/utils/utils.go b/utils/utils.go index 194233b..c2aa642 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -9,6 +9,7 @@ package utils import ( "fmt" 
"github.com/spf13/cobra" + "io" "io/fs" "os" ) @@ -46,6 +47,42 @@ func WriteToFile(filePath, content string) error { _, err = file.WriteString(content) return err } +func DeleteFile(filePath string) error { + err := os.Remove(filePath) + if err != nil { + return fmt.Errorf("failed to delete file: %v", err) + } + return nil +} +func CopyFile(src, dst string) error { + // Open the source file for reading + sourceFile, err := os.Open(src) + if err != nil { + return fmt.Errorf("failed to open source file: %v", err) + } + defer sourceFile.Close() + + // Create the destination file + destinationFile, err := os.Create(dst) + if err != nil { + return fmt.Errorf("failed to create destination file: %v", err) + } + defer destinationFile.Close() + + // Copy the content from source to destination + _, err = io.Copy(destinationFile, sourceFile) + if err != nil { + return fmt.Errorf("failed to copy file: %v", err) + } + + // Flush the buffer to ensure all data is written + err = destinationFile.Sync() + if err != nil { + return fmt.Errorf("failed to sync destination file: %v", err) + } + + return nil +} func ChangePermission(filePath string, mod int) { if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil { Fatalf("Error changing permissions of %s: %v\n", filePath, err) From c277228ab3497c2c5aaf407fa08ff2430ca25093 Mon Sep 17 00:00:00 2001 From: Jonas Kaninda Date: Mon, 29 Jul 2024 23:03:28 +0200 Subject: [PATCH 2/9] Add backup encryption and decryption with GPG --- Makefile | 8 ++- docker/Dockerfile | 4 +- pkg/backup.go | 124 +++++++++++++++-------------------------- pkg/config.go | 4 ++ pkg/encrypt.go | 48 ++++++++++++++++ pkg/encrypt_archive.go | 1 - pkg/helper.go | 74 ++++++++++++++++++++++++ pkg/restore.go | 34 +++++------ pkg/var.go | 5 +- utils/s3.go | 50 ++++++++++++++++- 10 files changed, 247 insertions(+), 105 deletions(-) create mode 100644 pkg/config.go create mode 100644 pkg/encrypt.go delete mode 100644 pkg/encrypt_archive.go create mode 100644 pkg/helper.go diff --git a/Makefile b/Makefile index 2fa83c6..06ec18d 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,9 @@ docker-build: docker build -f docker/Dockerfile -t jkaninda/pg-bkup:latest . 
docker-run: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/pg-bkup bkup backup --prune --keep-last 2 + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --prune --keep-last 2 +docker-restore: docker-build + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore -f uzaraka_20240729_200543.sql.gz.gpg docker-run-scheduled: docker-build @@ -28,9 +30,9 @@ docker-run-scheduled-s3: docker-build docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" docker-run-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path docker-restore-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "FILE_NAME=${FILE_NAME}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path -f uzaraka_20240729_205710.sql.gz.gpg diff --git a/docker/Dockerfile b/docker/Dockerfile index bb1e3a5..356d7bf 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -33,7 +33,7 @@ LABEL authors="Jonas Kaninda" RUN apt-get update -qq -RUN apt install postgresql-client postgresql-client-common supervisor cron openssh-client -y 
+RUN apt install postgresql-client postgresql-client-common supervisor cron openssh-client gnupg -y # Clear cache RUN apt-get clean && rm -rf /var/lib/apt/lists/* @@ -52,5 +52,5 @@ ADD docker/supervisord.conf /etc/supervisor/supervisord.conf RUN mkdir /backup -RUN mkdir /tmp/pg-bkup +RUN mkdir /tmp/backup WORKDIR /root diff --git a/pkg/backup.go b/pkg/backup.go index ba4676a..c4b72fb 100644 --- a/pkg/backup.go +++ b/pkg/backup.go @@ -18,7 +18,6 @@ import ( func StartBackup(cmd *cobra.Command) { _, _ = cmd.Flags().GetString("operation") - //Set env utils.SetEnv("STORAGE_PATH", storagePath) utils.GetEnv(cmd, "dbname", "DB_NAME") @@ -29,12 +28,16 @@ func StartBackup(cmd *cobra.Command) { s3Path = utils.GetEnv(cmd, "path", "S3_PATH") storage = utils.GetEnv(cmd, "storage", "STORAGE") file = utils.GetEnv(cmd, "file", "FILE_NAME") - keepLast, _ := cmd.Flags().GetInt("keep-last") + backupRetention, _ := cmd.Flags().GetInt("keep-last") prune, _ := cmd.Flags().GetBool("prune") disableCompression, _ = cmd.Flags().GetBool("disable-compression") executionMode, _ = cmd.Flags().GetString("mode") dbName = os.Getenv("DB_NAME") - storagePath = os.Getenv("STORAGE_PATH") + gpgPassphrase := os.Getenv("GPG_PASSPHRASE") + // + if gpgPassphrase != "" { + encryption = true + } //Generate file name backupFileName := fmt.Sprintf("%s_%s.sql.gz", dbName, time.Now().Format("20060102_150405")) @@ -46,20 +49,17 @@ func StartBackup(cmd *cobra.Command) { switch storage { case "s3": utils.Info("Backup database to s3 storage") - BackupDatabase(backupFileName, disableCompression, prune, keepLast) - s3Upload(backupFileName, s3Path) + s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption) case "local": utils.Info("Backup database to local storage") - BackupDatabase(backupFileName, disableCompression, prune, keepLast) - moveToBackup(backupFileName, storagePath) + localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) case "ssh": fmt.Println("x is 2") case "ftp": fmt.Println("x is 3") default: utils.Info("Backup database to local storage") - BackupDatabase(backupFileName, disableCompression, prune, keepLast) - moveToBackup(backupFileName, storagePath) + localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) } } else if executionMode == "scheduled" { @@ -117,7 +117,7 @@ func scheduledMode() { } // BackupDatabase backup database -func BackupDatabase(backupFileName string, disableCompression bool, prune bool, keepLast int) { +func BackupDatabase(backupFileName string, disableCompression bool) { dbHost = os.Getenv("DB_HOST") dbPassword = os.Getenv("DB_PASSWORD") dbUserName = os.Getenv("DB_USERNAME") @@ -190,43 +190,38 @@ func BackupDatabase(backupFileName string, disableCompression bool, prune bool, } } - utils.Done("Database has been backed up") + utils.Info("Database has been backed up") - //Delete old backup - //if prune { - // deleteOldBackup(keepLast) - //} - - historyFile, err := os.OpenFile(fmt.Sprintf("%s/history.txt", tmpPath), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - log.Fatal(err) - } - defer historyFile.Close() - if _, err := historyFile.WriteString(backupFileName + "\n"); err != nil { - log.Fatal(err) - } } } -func moveToBackup(backupFileName string, destinationPath string) { - //Copy backup from tmp folder to storage destination - err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName)) - if err != nil { - utils.Fatal("Error copying file ", backupFileName, 
err) - +func localBackup(backupFileName string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { + utils.Info("Backup database to local storage") + BackupDatabase(backupFileName, disableCompression) + finalFileName := backupFileName + if encrypt { + encryptBackup(backupFileName) + finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension) } - //Delete backup file from tmp folder - err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName)) - if err != nil { - fmt.Println("Error deleting file:", err) - + moveToBackup(finalFileName, storagePath) + //Delete old backup + if prune { + deleteOldBackup(backupRetention) } - utils.Done("Database has been backed up and copied to destination ") } -func s3Upload(backupFileName string, s3Path string) { + +func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { bucket := os.Getenv("BUCKET_NAME") + storagePath = os.Getenv("STORAGE_PATH") + //Backup database + BackupDatabase(backupFileName, disableCompression) + finalFileName := backupFileName + if encrypt { + encryptBackup(backupFileName) + finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg") + } utils.Info("Uploading file to S3 storage") - err := utils.UploadFileToS3(tmpPath, backupFileName, bucket, s3Path) + err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path) if err != nil { utils.Fatalf("Error uploading file to S3: %s ", err) @@ -238,51 +233,22 @@ func s3Upload(backupFileName string, s3Path string) { fmt.Println("Error deleting file:", err) } + // Delete old backup + if prune { + err := utils.DeleteOldBackup(bucket, s3Path, backupRetention) + if err != nil { + utils.Fatalf("Error deleting old backup from S3: %s ", err) + } + } utils.Done("Database has been backed up and uploaded to s3 ") } -func s3Backup(backupFileName string, disableCompression bool, s3Path string, prune bool, keepLast int) { - // Backup Database to S3 storage - //MountS3Storage(s3Path) - //BackupDatabase(backupFileName, disableCompression, prune, keepLast) -} -func deleteOldBackup(keepLast int) { - utils.Info("Deleting old backups...") - storagePath = os.Getenv("STORAGE_PATH") - // Define the directory path - backupDir := storagePath + "/" - // Get current time - currentTime := time.Now() - // Delete file - deleteFile := func(filePath string) error { - err := os.Remove(filePath) - if err != nil { - utils.Fatal("Error:", err) - } else { - utils.Done("File ", filePath, " deleted successfully") - } - return err - } - // Walk through the directory and delete files modified more than specified days ago - err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error { - if err != nil { - return err - } - // Check if it's a regular file and if it was modified more than specified days ago - if fileInfo.Mode().IsRegular() { - timeDiff := currentTime.Sub(fileInfo.ModTime()) - if timeDiff.Hours() > 24*float64(keepLast) { - err := deleteFile(filePath) - if err != nil { - return err - } - } - } - return nil - }) +func encryptBackup(backupFileName string) { + gpgPassphrase := os.Getenv("GPG_PASSPHRASE") + err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase) if err != nil { - utils.Fatal("Error:", err) - return + utils.Fatalf("Error during encrypting backup %s", err) } + } diff --git a/pkg/config.go b/pkg/config.go new file mode 100644 index 0000000..d0b5e01 --- /dev/null +++ b/pkg/config.go @@ -0,0 +1,4 @@ +package pkg + +type Config struct { +} diff --git a/pkg/encrypt.go 
b/pkg/encrypt.go new file mode 100644 index 0000000..ecd43b5 --- /dev/null +++ b/pkg/encrypt.go @@ -0,0 +1,48 @@ +package pkg + +import ( + "fmt" + "github.com/jkaninda/pg-bkup/utils" + "os" + "os/exec" + "strings" +) + +func Decrypt(inputFile string, passphrase string) error { + utils.Info("Decrypting backup...") + cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + err := cmd.Run() + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return err + } + + utils.Info("Backup file decrypted successful!") + return nil +} + +func Encrypt(inputFile string, passphrase string) error { + utils.Info("Encrypting backup...") + cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--symmetric", "--cipher-algo", algorithm, inputFile) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + err := cmd.Run() + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return err + } + + utils.Info("Backup file encrypted successful!") + return nil +} + +func RemoveLastExtension(filename string) string { + if idx := strings.LastIndex(filename, "."); idx != -1 { + return filename[:idx] + } + return filename +} diff --git a/pkg/encrypt_archive.go b/pkg/encrypt_archive.go deleted file mode 100644 index c1caffe..0000000 --- a/pkg/encrypt_archive.go +++ /dev/null @@ -1 +0,0 @@ -package pkg diff --git a/pkg/helper.go b/pkg/helper.go new file mode 100644 index 0000000..95b648e --- /dev/null +++ b/pkg/helper.go @@ -0,0 +1,74 @@ +package pkg + +import ( + "fmt" + "github.com/jkaninda/pg-bkup/utils" + "os" + "path/filepath" + "time" +) + +func copyToTmp(sourcePath string, backupFileName string) { + //Copy backup from storage to /tmp + err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName)) + if err != nil { + utils.Fatal("Error copying file ", backupFileName, err) + + } +} +func moveToBackup(backupFileName string, destinationPath string) { + //Copy backup from tmp folder to storage destination + err := utils.CopyFile(filepath.Join(tmpPath, backupFileName), filepath.Join(destinationPath, backupFileName)) + if err != nil { + utils.Fatal("Error copying file ", backupFileName, err) + + } + //Delete backup file from tmp folder + err = utils.DeleteFile(filepath.Join(tmpPath, backupFileName)) + if err != nil { + fmt.Println("Error deleting file:", err) + + } + utils.Done("Database has been backed up and copied to destination ") +} +func deleteOldBackup(retentionDays int) { + utils.Info("Deleting old backups...") + storagePath = os.Getenv("STORAGE_PATH") + // Define the directory path + backupDir := storagePath + "/" + // Get current time + currentTime := time.Now() + // Delete file + deleteFile := func(filePath string) error { + err := os.Remove(filePath) + if err != nil { + utils.Fatal("Error:", err) + } else { + utils.Done("File ", filePath, " deleted successfully") + } + return err + } + + // Walk through the directory and delete files modified more than specified days ago + err := filepath.Walk(backupDir, func(filePath string, fileInfo os.FileInfo, err error) error { + if err != nil { + return err + } + // Check if it's a regular file and if it was modified more than specified days ago + if fileInfo.Mode().IsRegular() { + timeDiff := currentTime.Sub(fileInfo.ModTime()) + if timeDiff.Hours() > 24*float64(retentionDays) { + err := deleteFile(filePath) + if err != nil { + return err + } + } + } + return nil + }) + + 
if err != nil { + utils.Fatal("Error:", err) + return + } +} diff --git a/pkg/restore.go b/pkg/restore.go index 619c40a..1c6f581 100644 --- a/pkg/restore.go +++ b/pkg/restore.go @@ -33,7 +33,7 @@ func StartRestore(cmd *cobra.Command) { RestoreDatabase(file) case "local": utils.Info("Restore database from local") - copyTmp(storagePath, file) + copyToTmp(storagePath, file) RestoreDatabase(file) case "ssh": fmt.Println("x is 2") @@ -44,14 +44,6 @@ func StartRestore(cmd *cobra.Command) { RestoreDatabase(file) } } -func copyTmp(sourcePath string, backupFileName string) { - //Copy backup from tmp folder to storage destination - err := utils.CopyFile(filepath.Join(sourcePath, backupFileName), filepath.Join(tmpPath, backupFileName)) - if err != nil { - utils.Fatal("Error copying file ", backupFileName, err) - - } -} // RestoreDatabase restore database func RestoreDatabase(file string) { @@ -60,10 +52,26 @@ func RestoreDatabase(file string) { dbUserName = os.Getenv("DB_USERNAME") dbName = os.Getenv("DB_NAME") dbPort = os.Getenv("DB_PORT") + gpgPassphrase := os.Getenv("GPG_PASSPHRASE") //storagePath = os.Getenv("STORAGE_PATH") if file == "" { utils.Fatal("Error, file required") } + extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file)) + if extension == ".gpg" { + if gpgPassphrase == "" { + utils.Fatal("Error, GPG_PASSPHRASE environment variable required, you need to set the GPG_PASSPHRASE") + } else { + //Decrypt file + err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase) + if err != nil { + utils.Fatal("Error decrypting file ", file, err) + } + //Update file name + file = RemoveLastExtension(file) + } + + } if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" || file == "" { utils.Fatal("Please make sure all required environment variables are set") @@ -83,7 +91,7 @@ func RestoreDatabase(file string) { str := "zcat " + fmt.Sprintf("%s/%s", tmpPath, file) + " | psql -h " + os.Getenv("DB_HOST") + " -p " + os.Getenv("DB_PORT") + " -U " + os.Getenv("DB_USERNAME") + " -v -d " + os.Getenv("DB_NAME") _, err := exec.Command("bash", "-c", str).Output() if err != nil { - utils.Fatal("Error, in restoring the database") + utils.Fatal("Error, in restoring the database ", err) } utils.Done("Database has been restored") @@ -104,9 +112,3 @@ func RestoreDatabase(file string) { } } } - -//func s3Restore(file, s3Path string) { -// // Restore database from S3 -// MountS3Storage(s3Path) -// RestoreDatabase(file) -//} diff --git a/pkg/var.go b/pkg/var.go index d849cab..255e854 100644 --- a/pkg/var.go +++ b/pkg/var.go @@ -3,8 +3,10 @@ package pkg const s3MountPath string = "/s3mnt" const s3fsPasswdFile string = "/etc/passwd-s3fs" const cronLogFile = "/var/log/pg-bkup.log" -const tmpPath = "/tmp/pg-bkup" +const tmpPath = "/tmp/backup" const backupCronFile = "/usr/local/bin/backup_cron.sh" +const algorithm = "aes256" +const gpgExtension = "gpg" var ( storage = "local" @@ -18,4 +20,5 @@ var ( executionMode = "default" storagePath = "/backup" disableCompression = false + encryption = false ) diff --git a/utils/s3.go b/utils/s3.go index 1566dca..fbc4667 100644 --- a/utils/s3.go +++ b/utils/s3.go @@ -8,9 +8,11 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" + "log" "net/http" "os" "path/filepath" + "time" ) // CreateSession creates a new AWS session @@ -81,7 +83,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error { if err != 
nil { return err } - + Info("Download backup from S3 storage...") file, err := os.Create(filepath.Join(destinationPath, key)) if err != nil { fmt.Println("Failed to create file", err) @@ -101,7 +103,49 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error { fmt.Println("Failed to download file", err) return err } - fmt.Println("Bytes size", numBytes) - Info("Backup downloaded to ", file.Name()) + Info("Backup downloaded: ", file.Name()) + Info("Bytes size: ", numBytes) + + return nil +} +func DeleteOldBackup(bucket, prefix string, retention int) error { + sess, err := CreateSession() + if err != nil { + return err + } + + svc := s3.New(sess) + + // Get the current time and the time threshold for 7 days ago + now := time.Now() + backupRetentionDays := now.AddDate(0, 0, -retention) + + // List objects in the bucket + listObjectsInput := &s3.ListObjectsV2Input{ + Bucket: aws.String(bucket), + Prefix: aws.String(prefix), + } + err = svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool { + for _, object := range page.Contents { + if object.LastModified.Before(backupRetentionDays) { + // Object is older than retention days, delete it + _, err := svc.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(bucket), + Key: object.Key, + }) + if err != nil { + log.Printf("Failed to delete object %s: %v", *object.Key, err) + } else { + fmt.Printf("Deleted object %s\n", *object.Key) + } + } + } + return !lastPage + }) + if err != nil { + log.Fatalf("Failed to list objects: %v", err) + } + + fmt.Println("Finished deleting old files.") return nil } From 05a195e1ba59cf0a3ed45c3c597cbc63a4eabbb8 Mon Sep 17 00:00:00 2001 From: Jonas Kaninda Date: Tue, 30 Jul 2024 07:02:18 +0200 Subject: [PATCH 3/9] Refactoring of code --- Makefile | 10 +++++----- docker/Dockerfile | 27 +++++++++++++++------------ pkg/backup.go | 10 +++++----- pkg/encrypt.go | 2 +- pkg/helper.go | 2 +- pkg/scripts.go | 2 +- utils/s3.go | 12 +++++++----- utils/utils.go | 44 ++++++++++++++++++++++++++++++++++++++++++-- 8 files changed, 77 insertions(+), 32 deletions(-) diff --git a/Makefile b/Makefile index 06ec18d..9720f2c 100644 --- a/Makefile +++ b/Makefile @@ -19,20 +19,20 @@ docker-build: docker-run: docker-build docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --prune --keep-last 2 docker-restore: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore -f uzaraka_20240729_200543.sql.gz.gpg + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore -f ${FILE_NAME} docker-run-scheduled: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" jkaninda/pg-bkup bkup backup --mode scheduled --period "* * * * *" + docker run --rm --network 
internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --mode scheduled --period "* * * * *" docker-run-scheduled-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" docker-run-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path docker-restore-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path -f uzaraka_20240729_205710.sql.gz.gpg + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path -f $FILE_NAME diff --git a/docker/Dockerfile b/docker/Dockerfile index 356d7bf..16b4074 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -19,17 +19,21 @@ ENV STORAGE=local ENV BUCKET_NAME="" ENV ACCESS_KEY="" ENV SECRET_KEY="" -ENV REGION="" +ENV AWS_REGION="us-west-2" +ENV AWS_DISABLE_SSL="false" +ENV GPG_PASSPHRASE="" ENV SSH_USER="" ENV SSH_PASSWORD="" ENV SSH_HOST_NAME="" ENV SSH_IDENTIFY_FILE="/root/.ssh/id_rsa" -ENV GPG_PASS_PHRASE="" ENV 
SSH_PORT="22" ENV S3_ENDPOINT=https://s3.amazonaws.com ARG DEBIAN_FRONTEND=noninteractive -ENV VERSION="v0.6" -LABEL authors="Jonas Kaninda" +ENV VERSION="v0.8" +ARG WORKDIR="/app" +ARG BACKUPDIR="/backup" +ARG BACKUP_TMP_DIR="/tmp/backup" +LABEL author="Jonas Kaninda" RUN apt-get update -qq @@ -38,10 +42,12 @@ RUN apt install postgresql-client postgresql-client-common supervisor cron opens # Clear cache RUN apt-get clean && rm -rf /var/lib/apt/lists/* -RUN mkdir /s3mnt -RUN mkdir /tmp/s3cache -RUN chmod 777 /s3mnt -RUN chmod 777 /tmp/s3cache +RUN mkdir $WORKDIR +RUN mkdir $BACKUPDIR +RUN mkdir -p $BACKUP_TMP_DIR +RUN chmod 777 $WORKDIR +RUN chmod 777 $BACKUPDIR +RUN chmod 777 $BACKUP_TMP_DIR COPY --from=build /app/pg-bkup /usr/local/bin/pg-bkup RUN chmod +x /usr/local/bin/pg-bkup @@ -50,7 +56,4 @@ RUN ln -s /usr/local/bin/pg-bkup /usr/local/bin/bkup ADD docker/supervisord.conf /etc/supervisor/supervisord.conf - -RUN mkdir /backup -RUN mkdir /tmp/backup -WORKDIR /root +WORKDIR $WORKDIR diff --git a/pkg/backup.go b/pkg/backup.go index c4b72fb..33024d4 100644 --- a/pkg/backup.go +++ b/pkg/backup.go @@ -48,17 +48,14 @@ func StartBackup(cmd *cobra.Command) { if executionMode == "default" { switch storage { case "s3": - utils.Info("Backup database to s3 storage") s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption) case "local": - utils.Info("Backup database to local storage") localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) case "ssh": fmt.Println("x is 2") case "ftp": fmt.Println("x is 3") default: - utils.Info("Backup database to local storage") localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) } @@ -94,7 +91,7 @@ func scheduledMode() { if err != nil { utils.Fatal("Failed to start supervisord: %v", err) } - utils.Info("Starting backup job...") + utils.Info("Backup job started") defer func() { if err := cmd.Process.Kill(); err != nil { utils.Info("Failed to kill supervisord process: %v", err) @@ -203,6 +200,7 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac encryptBackup(backupFileName) finalFileName = fmt.Sprintf("%s.%s", backupFileName, gpgExtension) } + utils.Info("Backup name is ", finalFileName) moveToBackup(finalFileName, storagePath) //Delete old backup if prune { @@ -213,6 +211,7 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { bucket := os.Getenv("BUCKET_NAME") storagePath = os.Getenv("STORAGE_PATH") + utils.Info("Backup database to s3 storage") //Backup database BackupDatabase(backupFileName, disableCompression) finalFileName := backupFileName @@ -220,7 +219,8 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru encryptBackup(backupFileName) finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg") } - utils.Info("Uploading file to S3 storage") + utils.Info("Uploading backup file to S3 storage...") + utils.Info("Backup name is ", backupFileName) err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path) if err != nil { utils.Fatalf("Error uploading file to S3: %s ", err) diff --git a/pkg/encrypt.go b/pkg/encrypt.go index ecd43b5..ea74108 100644 --- a/pkg/encrypt.go +++ b/pkg/encrypt.go @@ -9,7 +9,7 @@ import ( ) func Decrypt(inputFile string, passphrase string) error { - utils.Info("Decrypting backup...") + utils.Info("Decrypting backup file: " + 
inputFile + " ...") cmd := exec.Command("gpg", "--batch", "--passphrase", passphrase, "--output", RemoveLastExtension(inputFile), "--decrypt", inputFile) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/pkg/helper.go b/pkg/helper.go index 95b648e..a6cabaa 100644 --- a/pkg/helper.go +++ b/pkg/helper.go @@ -29,7 +29,7 @@ func moveToBackup(backupFileName string, destinationPath string) { fmt.Println("Error deleting file:", err) } - utils.Done("Database has been backed up and copied to destination ") + utils.Done("Database has been backed up and copied to ", filepath.Join(destinationPath, backupFileName)) } func deleteOldBackup(retentionDays int) { utils.Info("Deleting old backups...") diff --git a/pkg/scripts.go b/pkg/scripts.go index 0cc2d38..b056804 100644 --- a/pkg/scripts.go +++ b/pkg/scripts.go @@ -74,5 +74,5 @@ bkup backup --dbname %s --port %s %v if err := crontabCmd.Run(); err != nil { utils.Fatal("Error updating crontab: ", err) } - utils.Info("Starting backup in scheduled mode") + utils.Info("Backup job created.") } diff --git a/utils/s3.go b/utils/s3.go index fbc4667..2a3cb1a 100644 --- a/utils/s3.go +++ b/utils/s3.go @@ -12,25 +12,27 @@ import ( "net/http" "os" "path/filepath" + "strconv" "time" ) // CreateSession creates a new AWS session func CreateSession() (*session.Session, error) { - //key := aws.String("testobject") endPoint := os.Getenv("S3_ENDPOINT") - //bucket := os.Getenv("BUCKET_NAME") - region := os.Getenv("REGION") accessKey := os.Getenv("ACCESS_KEY") secretKey := os.Getenv("SECRET_KEY") - + region := os.Getenv("AWS_REGION") + awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL")) + if err != nil { + Fatalf("Unable to parse AWS_DISABLE_SSL env var: %s", err) + } // Configure to use MinIO Server s3Config := &aws.Config{ Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""), Endpoint: aws.String(endPoint), Region: aws.String(region), - DisableSSL: aws.Bool(false), + DisableSSL: aws.Bool(awsDisableSsl), S3ForcePathStyle: aws.Bool(true), } return session.NewSession(s3Config) diff --git a/utils/utils.go b/utils/utils.go index c2aa642..6424efc 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -7,11 +7,13 @@ package utils * @link https://github.com/jkaninda/mysql-bkup **/ import ( + "bytes" "fmt" "github.com/spf13/cobra" "io" "io/fs" "os" + "os/exec" ) func Info(v ...any) { @@ -105,8 +107,46 @@ func IsDirEmpty(name string) (bool, error) { // TestDatabaseConnection tests the database connection func TestDatabaseConnection() { - Info("Testing database connection...") - // Test database connection + dbHost := os.Getenv("DB_HOST") + dbPassword := os.Getenv("DB_PASSWORD") + dbUserName := os.Getenv("DB_USERNAME") + dbName := os.Getenv("DB_NAME") + dbPort := os.Getenv("DB_PORT") + + if os.Getenv("DB_HOST") == "" || os.Getenv("DB_NAME") == "" || os.Getenv("DB_USERNAME") == "" || os.Getenv("DB_PASSWORD") == "" { + Fatal("Please make sure all required database environment variables are set") + } else { + Info("Connecting to database ...") + // Test database connection + query := "SELECT version();" + + // Set the environment variable for the database password + err := os.Setenv("PGPASSWORD", dbPassword) + if err != nil { + return + } + // Prepare the psql command + cmd := exec.Command("psql", + "-U", dbUserName, // database user + "-d", dbName, // database name + "-h", dbHost, // host + "-p", dbPort, // port + "-c", query, // SQL command to execute + ) + // Capture the output + var out bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &out + 
+ // Run the command and capture any errors + err = cmd.Run() + if err != nil { + fmt.Printf("Error running psql command: %v\nOutput: %s\n", err, out.String()) + return + } + Info("Successfully connected to database") + + } } func GetEnv(cmd *cobra.Command, flagName, envName string) string { value, _ := cmd.Flags().GetString(flagName) From 886773e38f7650e25f38930cfc65ff4f8246a2ef Mon Sep 17 00:00:00 2001 From: Jonas Kaninda Date: Tue, 30 Jul 2024 08:59:15 +0200 Subject: [PATCH 4/9] Refactoring of code, renaming env variables name --- Makefile | 10 +++++----- docker/Dockerfile | 16 +++++++++++----- pkg/backup.go | 8 +++++--- pkg/restore.go | 3 +-- utils/s3.go | 6 +++--- utils/utils.go | 11 ++++++++++- 6 files changed, 35 insertions(+), 19 deletions(-) diff --git a/Makefile b/Makefile index 9720f2c..54778df 100644 --- a/Makefile +++ b/Makefile @@ -17,20 +17,20 @@ docker-build: docker build -f docker/Dockerfile -t jkaninda/pg-bkup:latest . docker-run: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --prune --keep-last 2 + docker run --rm --network internal --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --prune --keep-last 2 docker-restore: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore -f ${FILE_NAME} + docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore -f ${FILE_NAME} docker-run-scheduled: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --mode scheduled --period "* * * * *" + docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --mode scheduled --period "* * * * *" docker-run-scheduled-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" + docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e 
"DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" docker-run-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path + docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path docker-restore-s3: docker-build diff --git a/docker/Dockerfile b/docker/Dockerfile index 16b4074..a1ccbe9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -16,9 +16,10 @@ ENV DB_USERNAME="" ENV DB_PASSWORD="" ENV DB_PORT="5432" ENV STORAGE=local -ENV BUCKET_NAME="" -ENV ACCESS_KEY="" -ENV SECRET_KEY="" +ENV AWS_S3_ENDPOINT="" +ENV AWS_S3_BUCKET_NAME="" +ENV AWS_ACCESS_KEY="" +ENV AWS_SECRET_KEY="" ENV AWS_REGION="us-west-2" ENV AWS_DISABLE_SSL="false" ENV GPG_PASSPHRASE="" @@ -27,12 +28,13 @@ ENV SSH_PASSWORD="" ENV SSH_HOST_NAME="" ENV SSH_IDENTIFY_FILE="/root/.ssh/id_rsa" ENV SSH_PORT="22" -ENV S3_ENDPOINT=https://s3.amazonaws.com ARG DEBIAN_FRONTEND=noninteractive ENV VERSION="v0.8" ARG WORKDIR="/app" ARG BACKUPDIR="/backup" ARG BACKUP_TMP_DIR="/tmp/backup" +ARG BACKUP_CRON="/etc/cron.d/backup_cron" +ARG BACKUP_CRON_SCRIPT="/usr/local/bin/backup_cron.sh" LABEL author="Jonas Kaninda" RUN apt-get update -qq @@ -48,6 +50,10 @@ RUN mkdir -p $BACKUP_TMP_DIR RUN chmod 777 $WORKDIR RUN chmod 777 $BACKUPDIR RUN chmod 777 $BACKUP_TMP_DIR +RUN touch $BACKUP_CRON && \ + touch $BACKUP_CRON_SCRIPT && \ + chmod 777 $BACKUP_CRON && \ + chmod 777 $BACKUP_CRON_SCRIPT COPY --from=build /app/pg-bkup /usr/local/bin/pg-bkup RUN chmod +x /usr/local/bin/pg-bkup @@ -56,4 +62,4 @@ RUN ln -s /usr/local/bin/pg-bkup /usr/local/bin/bkup ADD docker/supervisord.conf /etc/supervisor/supervisord.conf -WORKDIR $WORKDIR +WORKDIR $WORKDIR \ No newline at end of file diff --git a/pkg/backup.go b/pkg/backup.go index 33024d4..a2e6cc2 100644 --- a/pkg/backup.go +++ b/pkg/backup.go @@ -52,7 +52,7 @@ func StartBackup(cmd *cobra.Command) { case "local": localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) case "ssh": - fmt.Println("x is 2") + sshBackup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption) case "ftp": fmt.Println("x is 3") default: @@ -209,8 +209,7 @@ func localBackup(backupFileName string, disableCompression bool, prune bool, bac } func s3Backup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { - bucket := os.Getenv("BUCKET_NAME") - storagePath = os.Getenv("STORAGE_PATH") + bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") utils.Info("Backup database to s3 storage") //Backup database 
BackupDatabase(backupFileName, disableCompression) @@ -242,6 +241,9 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru } utils.Done("Database has been backed up and uploaded to s3 ") } +func sshBackup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { + +} func encryptBackup(backupFileName string) { gpgPassphrase := os.Getenv("GPG_PASSPHRASE") diff --git a/pkg/restore.go b/pkg/restore.go index 1c6f581..a3d8149 100644 --- a/pkg/restore.go +++ b/pkg/restore.go @@ -21,8 +21,7 @@ func StartRestore(cmd *cobra.Command) { storage = utils.GetEnv(cmd, "storage", "STORAGE") file = utils.GetEnv(cmd, "file", "FILE_NAME") executionMode, _ = cmd.Flags().GetString("mode") - bucket := os.Getenv("BUCKET_NAME") - + bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") switch storage { case "s3": utils.Info("Restore database from s3") diff --git a/utils/s3.go b/utils/s3.go index 2a3cb1a..3186344 100644 --- a/utils/s3.go +++ b/utils/s3.go @@ -19,9 +19,9 @@ import ( // CreateSession creates a new AWS session func CreateSession() (*session.Session, error) { - endPoint := os.Getenv("S3_ENDPOINT") - accessKey := os.Getenv("ACCESS_KEY") - secretKey := os.Getenv("SECRET_KEY") + endPoint := GetEnvVariable("AWS_S3_ENDPOINT", "S3_ENDPOINT") + accessKey := GetEnvVariable("AWS_ACCESS_KEY", "ACCESS_KEY") + secretKey := GetEnvVariable("AWS_SECRET_KEY", "SECRET_KEY") region := os.Getenv("AWS_REGION") awsDisableSsl, err := strconv.ParseBool(os.Getenv("AWS_DISABLE_SSL")) if err != nil { diff --git a/utils/utils.go b/utils/utils.go index 6424efc..da69d4e 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -178,6 +178,15 @@ func SetEnv(key, value string) { return } } - +func GetEnvVariable(envName, oldEnvName string) string { + value := os.Getenv(envName) + if value == "" { + value = os.Getenv(oldEnvName) + if value != "" { + fmt.Printf("%s is deprecated, please use %s instead!\n", oldEnvName, envName) + } + } + return value +} func ShowHistory() { } From 6976bf759745af22d6baf30d1f7659dff973bbc6 Mon Sep 17 00:00:00 2001 From: Jonas Kaninda Date: Tue, 30 Jul 2024 19:18:34 +0200 Subject: [PATCH 5/9] Add SSH remote backup --- docker/Dockerfile | 5 ++- go.mod | 2 + go.sum | 4 ++ pkg/backup.go | 31 +++++++++++++- pkg/scp.go | 100 ++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 138 insertions(+), 4 deletions(-) create mode 100644 pkg/scp.go diff --git a/docker/Dockerfile b/docker/Dockerfile index a1ccbe9..4e3f512 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -24,12 +24,13 @@ ENV AWS_REGION="us-west-2" ENV AWS_DISABLE_SSL="false" ENV GPG_PASSPHRASE="" ENV SSH_USER="" +ENV SSH_REMOTE_PATH="" ENV SSH_PASSWORD="" ENV SSH_HOST_NAME="" -ENV SSH_IDENTIFY_FILE="/root/.ssh/id_rsa" +ENV SSH_IDENTIFY_FILE="" ENV SSH_PORT="22" ARG DEBIAN_FRONTEND=noninteractive -ENV VERSION="v0.8" +ENV VERSION="v1.0" ARG WORKDIR="/app" ARG BACKUPDIR="/backup" ARG BACKUP_TMP_DIR="/tmp/backup" diff --git a/go.mod b/go.mod index 83d89a2..f59aa94 100644 --- a/go.mod +++ b/go.mod @@ -9,8 +9,10 @@ require ( require ( github.com/aws/aws-sdk-go v1.55.3 // indirect + github.com/bramvdbogaerde/go-scp v1.5.0 // indirect github.com/hpcloud/tail v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + golang.org/x/crypto v0.18.0 // indirect golang.org/x/sys v0.22.0 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index 
2d2cfef..7512d7f 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ github.com/aws/aws-sdk-go v1.55.3 h1:0B5hOX+mIx7I5XPOrjrHlKSDQV/+ypFZpIHOx5LOk3E= github.com/aws/aws-sdk-go v1.55.3/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/bramvdbogaerde/go-scp v1.5.0 h1:a9BinAjTfQh273eh7vd3qUgmBC+bx+3TRDtkZWmIpzM= +github.com/bramvdbogaerde/go-scp v1.5.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -35,6 +37,8 @@ github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyh github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= diff --git a/pkg/backup.go b/pkg/backup.go index a2e6cc2..7d78cb0 100644 --- a/pkg/backup.go +++ b/pkg/backup.go @@ -54,7 +54,7 @@ func StartBackup(cmd *cobra.Command) { case "ssh": sshBackup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption) case "ftp": - fmt.Println("x is 3") + utils.Fatalf("Not supported storage type: %s", storage) default: localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) } @@ -241,8 +241,35 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru } utils.Done("Database has been backed up and uploaded to s3 ") } -func sshBackup(backupFileName string, s3Path string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { +func sshBackup(backupFileName string, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { + utils.Info("Backup database to Remote server") + //Backup database + BackupDatabase(backupFileName, disableCompression) + finalFileName := backupFileName + if encrypt { + encryptBackup(backupFileName) + finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg") + } + utils.Info("Uploading backup file to S3 storage...") + utils.Info("Backup name is ", backupFileName) + err := CopyToRemote(filepath.Join(tmpPath, finalFileName), remotePath) + if err != nil { + utils.Fatalf("Error uploading file to S3: %s ", err) + } + //Delete backup file from tmp folder + err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName)) + if err != nil { + fmt.Println("Error deleting file:", err) + + } + if prune { + //TODO: Delete old backup from remote server + utils.Info("Deleting old backup from a remote server is not implemented yet") + + } + + utils.Done("Database has been backed up and uploaded to remote server ") } func encryptBackup(backupFileName string) { diff --git a/pkg/scp.go b/pkg/scp.go new file mode 100644 index 0000000..6012f09 --- /dev/null +++ b/pkg/scp.go @@ -0,0 +1,100 @@ +package pkg + +import ( + "context" + "errors" + "fmt" + "github.com/bramvdbogaerde/go-scp" + "github.com/bramvdbogaerde/go-scp/auth" + 
"github.com/jkaninda/pg-bkup/utils" + "golang.org/x/crypto/ssh" + "os" +) + +func CopyToRemote(fileName, remotePath string) error { + sshUser := os.Getenv("SSH_USER") + sshPassword := os.Getenv("SSH_PASSWORD") + sshHostName := os.Getenv("SSH_HOST_NAME") + sshPort := os.Getenv("SSH_PORT") + sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE") + + clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) + if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) { + clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey()) + + } else { + if sshPassword == "" { + return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n") + } + clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) + + } + // Create a new SCP client + client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig) + + // Connect to the remote server + err := client.Connect() + if err != nil { + return errors.New("Couldn't establish a connection to the remote server\n") + } + + // Open a file + file, _ := os.Open(fileName) + + // Close client connection after the file has been copied + defer client.Close() + // Close the file after it has been copied + defer file.Close() + // the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application. + err = client.CopyFromFile(context.Background(), *file, remotePath, "0655") + if err != nil { + fmt.Println("Error while copying file ") + return err + } + return nil +} + +func CopyFromRemote(fileName, remotePath string) error { + sshUser := os.Getenv("SSH_USER") + sshPassword := os.Getenv("SSH_PASSWORD") + sshHostName := os.Getenv("SSH_HOST_NAME") + sshPort := os.Getenv("SSH_PORT") + sshIdentifyFile := os.Getenv("SSH_IDENTIFY_FILE") + + clientConfig, _ := auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) + if sshIdentifyFile != "" && utils.FileExists(sshIdentifyFile) { + clientConfig, _ = auth.PrivateKey(sshUser, sshIdentifyFile, ssh.InsecureIgnoreHostKey()) + + } else { + if sshPassword == "" { + return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n") + } + clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) + + } + + // Create a new SCP client + client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig) + // Connect to the remote server + err := client.Connect() + if err != nil { + return errors.New("Couldn't establish a connection to the remote server\n") + } + // Close client connection after the file has been copied + defer client.Close() + file, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0777) + if err != nil { + fmt.Println("Couldn't open the output file") + } + defer file.Close() + + // the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application. 
+ err = client.CopyFromRemote(context.Background(), file, remotePath) + + if err != nil { + fmt.Println("Error while copying file ", err) + return err + } + return nil + +} From 5b0d4507407ecb6ec1e473ee8fcc818a4e56ee66 Mon Sep 17 00:00:00 2001 From: Jonas Kaninda Date: Wed, 31 Jul 2024 22:32:07 +0200 Subject: [PATCH 6/9] Add restore from SSH --- Makefile | 9 +++++++-- cmd/root.go | 3 +-- cmd/s3mount.go | 3 +-- pkg/backup.go | 11 ++++++----- pkg/restore.go | 32 ++++++++++++++++++++++---------- pkg/s3fs.go | 3 +++ pkg/scp.go | 7 ++++--- pkg/var.go | 1 - utils/s3.go | 5 +++-- 9 files changed, 47 insertions(+), 27 deletions(-) diff --git a/Makefile b/Makefile index 54778df..c4991f7 100644 --- a/Makefile +++ b/Makefile @@ -30,9 +30,14 @@ docker-run-scheduled-s3: docker-build docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" docker-run-s3: docker-build - docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --path /custom-path + docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 ##--path /custom-path docker-restore-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 --path /custom-path -f $FILE_NAME + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 -f ${FILE_NAME} #--path /custom-path +docker-run-ssh: docker-build + docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage ssh + +docker-restore-ssh: docker-build + docker run --rm 
--network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage ssh -f uzaraka_20240731_200104.sql.gz.gpg \ No newline at end of file diff --git a/cmd/root.go b/cmd/root.go index 7c4f191..edb9c10 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -19,7 +19,6 @@ var rootCmd = &cobra.Command{ Version: appVersion, } var operation = "" -var s3Path = "/pg-bkup" // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. @@ -32,7 +31,7 @@ func Execute() { func init() { rootCmd.PersistentFlags().StringP("storage", "s", "local", "Set storage. local or s3") - rootCmd.PersistentFlags().StringP("path", "P", s3Path, "Set s3 path, without file name. for S3 storage only") + rootCmd.PersistentFlags().StringP("path", "P", "", "Set s3 path, without file name. for S3 storage only") rootCmd.PersistentFlags().StringP("dbname", "d", "", "Set database name") rootCmd.PersistentFlags().IntP("timeout", "t", 30, "Set timeout") rootCmd.PersistentFlags().IntP("port", "p", 5432, "Set database port") diff --git a/cmd/s3mount.go b/cmd/s3mount.go index c7c6007..ab56e0d 100644 --- a/cmd/s3mount.go +++ b/cmd/s3mount.go @@ -1,7 +1,6 @@ package cmd import ( - "github.com/jkaninda/pg-bkup/pkg" "github.com/spf13/cobra" ) @@ -9,6 +8,6 @@ var S3MountCmd = &cobra.Command{ Use: "s3mount", Short: "Mount AWS S3 storage", Run: func(cmd *cobra.Command, args []string) { - pkg.S3Mount() + //pkg.S3Mount() }, } diff --git a/pkg/backup.go b/pkg/backup.go index 7d78cb0..e8cab0e 100644 --- a/pkg/backup.go +++ b/pkg/backup.go @@ -25,7 +25,8 @@ func StartBackup(cmd *cobra.Command) { utils.GetEnv(cmd, "period", "SCHEDULE_PERIOD") //Get flag value and set env - s3Path = utils.GetEnv(cmd, "path", "S3_PATH") + s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH") + remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH") storage = utils.GetEnv(cmd, "storage", "STORAGE") file = utils.GetEnv(cmd, "file", "FILE_NAME") backupRetention, _ := cmd.Flags().GetInt("keep-last") @@ -52,7 +53,7 @@ func StartBackup(cmd *cobra.Command) { case "local": localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) case "ssh": - sshBackup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption) + sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption) case "ftp": utils.Fatalf("Not supported storage type: %s", storage) default: @@ -241,7 +242,7 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru } utils.Done("Database has been backed up and uploaded to s3 ") } -func sshBackup(backupFileName string, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { +func sshBackup(backupFileName, remotePath string, disableCompression bool, prune bool, backupRetention int, encrypt bool) { utils.Info("Backup database to Remote server") //Backup database BackupDatabase(backupFileName, disableCompression) @@ -250,9 +251,9 @@ func sshBackup(backupFileName string, remotePath string, disableCompression bool encryptBackup(backupFileName) finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg") } - utils.Info("Uploading 
backup file to S3 storage...") + utils.Info("Uploading backup file to remote server...") utils.Info("Backup name is ", backupFileName) - err := CopyToRemote(filepath.Join(tmpPath, finalFileName), remotePath) + err := CopyToRemote(finalFileName, remotePath) if err != nil { utils.Fatalf("Error uploading file to S3: %s ", err) diff --git a/pkg/restore.go b/pkg/restore.go index a3d8149..886fc53 100644 --- a/pkg/restore.go +++ b/pkg/restore.go @@ -17,33 +17,46 @@ func StartRestore(cmd *cobra.Command) { utils.GetEnv(cmd, "port", "DB_PORT") //Get flag value and set env - s3Path = utils.GetEnv(cmd, "path", "S3_PATH") + s3Path := utils.GetEnv(cmd, "path", "AWS_S3_PATH") + remotePath := utils.GetEnv(cmd, "path", "SSH_REMOTE_PATH") storage = utils.GetEnv(cmd, "storage", "STORAGE") file = utils.GetEnv(cmd, "file", "FILE_NAME") executionMode, _ = cmd.Flags().GetString("mode") bucket := utils.GetEnvVariable("AWS_S3_BUCKET_NAME", "BUCKET_NAME") switch storage { case "s3": - utils.Info("Restore database from s3") - err := utils.DownloadFile(tmpPath, file, bucket, s3Path) - if err != nil { - utils.Fatal("Error download file from s3 ", file, err) - } - RestoreDatabase(file) + restoreFromS3(file, bucket, s3Path) case "local": utils.Info("Restore database from local") copyToTmp(storagePath, file) RestoreDatabase(file) case "ssh": - fmt.Println("x is 2") + restoreFromRemote(file, remotePath) case "ftp": - fmt.Println("x is 3") + utils.Fatalf("Restore from FTP is not yet supported") default: utils.Info("Restore database from local") RestoreDatabase(file) } } +func restoreFromS3(file, bucket, s3Path string) { + utils.Info("Restore database from s3") + err := utils.DownloadFile(tmpPath, file, bucket, s3Path) + if err != nil { + utils.Fatal("Error download file from s3 ", file, err) + } + RestoreDatabase(file) +} +func restoreFromRemote(file, remotePath string) { + utils.Info("Restore database from remote server") + err := CopyFromRemote(file, remotePath) + if err != nil { + utils.Fatal("Error download file from remote server: ", filepath.Join(remotePath, file), err) + } + RestoreDatabase(file) +} + // RestoreDatabase restore database func RestoreDatabase(file string) { dbHost = os.Getenv("DB_HOST") @@ -52,7 +65,6 @@ func RestoreDatabase(file string) { dbName = os.Getenv("DB_NAME") dbPort = os.Getenv("DB_PORT") gpgPassphrase := os.Getenv("GPG_PASSPHRASE") - //storagePath = os.Getenv("STORAGE_PATH") if file == "" { utils.Fatal("Error, file required") } diff --git a/pkg/s3fs.go b/pkg/s3fs.go index db59b26..95bd4cf 100644 --- a/pkg/s3fs.go +++ b/pkg/s3fs.go @@ -4,6 +4,7 @@ Copyright © 2024 Jonas Kaninda */ package pkg +/* import ( "fmt" "github.com/jkaninda/pg-bkup/utils" @@ -78,3 +79,5 @@ func MountS3Storage(s3Path string) { } } + +*/ diff --git a/pkg/scp.go b/pkg/scp.go index 6012f09..9a5d47f 100644 --- a/pkg/scp.go +++ b/pkg/scp.go @@ -9,6 +9,7 @@ import ( "github.com/jkaninda/pg-bkup/utils" "golang.org/x/crypto/ssh" "os" + "path/filepath" ) func CopyToRemote(fileName, remotePath string) error { @@ -39,14 +40,14 @@ func CopyToRemote(fileName, remotePath string) error { } // Open a file - file, _ := os.Open(fileName) + file, _ := os.Open(filepath.Join(tmpPath, fileName)) // Close client connection after the file has been copied defer client.Close() // Close the file after it has been copied defer file.Close() // the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application. 
- err = client.CopyFromFile(context.Background(), *file, remotePath, "0655") + err = client.CopyFromFile(context.Background(), *file, filepath.Join(remotePath, fileName), "0655") if err != nil { fmt.Println("Error while copying file ") return err @@ -72,9 +73,9 @@ func CopyFromRemote(fileName, remotePath string) error { clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) } - // Create a new SCP client client := scp.NewClient(fmt.Sprintf("%s:%s", sshHostName, sshPort), &clientConfig) + // Connect to the remote server err := client.Connect() if err != nil { diff --git a/pkg/var.go b/pkg/var.go index 255e854..542ed95 100644 --- a/pkg/var.go +++ b/pkg/var.go @@ -11,7 +11,6 @@ const gpgExtension = "gpg" var ( storage = "local" file = "" - s3Path = "/pg-bkup" dbPassword = "" dbUserName = "" dbName = "" diff --git a/utils/s3.go b/utils/s3.go index 3186344..58d929a 100644 --- a/utils/s3.go +++ b/utils/s3.go @@ -59,7 +59,8 @@ func UploadFileToS3(filePath, key, bucket, prefix string) error { return err } - objectKey := fmt.Sprintf("%s/%s", prefix, key) + objectKey := filepath.Join(prefix, key) + //fmt.Sprintf("%s/%s", prefix, key) buffer := make([]byte, fileInfo.Size()) file.Read(buffer) @@ -93,7 +94,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error { } defer file.Close() - objectKey := fmt.Sprintf("%s/%s", prefix, key) + objectKey := filepath.Join(prefix, key) downloader := s3manager.NewDownloader(sess) numBytes, err := downloader.Download(file, From 4208a1622323e917dcc83d0d41ac6d546832e5f0 Mon Sep 17 00:00:00 2001 From: Jonas Kaninda Date: Thu, 1 Aug 2024 07:14:40 +0200 Subject: [PATCH 7/9] Fix restore from ssh, refactoring of code --- Makefile | 8 ++++---- pkg/backup.go | 8 ++++---- pkg/restore.go | 3 ++- pkg/scp.go | 6 ++++-- utils/s3.go | 4 +--- 5 files changed, 15 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index c4991f7..65e4c19 100644 --- a/Makefile +++ b/Makefile @@ -30,14 +30,14 @@ docker-run-scheduled-s3: docker-build docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" docker-run-s3: docker-build - docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 ##--path /custom-path + docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 #--path /custom-path docker-restore-s3: docker-build - docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e 
"DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 -f ${FILE_NAME} #--path /custom-path + docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 -f ${FILE_NAME} #--path /custom-path docker-run-ssh: docker-build - docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage ssh + docker run --rm --network internal --name pg-bkup -v "/Users/jonas/.ssh/id_ed25519:/tmp/id_ed25519" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage ssh docker-restore-ssh: docker-build - docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage ssh -f uzaraka_20240731_200104.sql.gz.gpg \ No newline at end of file + docker run --rm --network internal --name pg-bkup -v "/Users/jonas/.ssh/id_ed25519:/tmp/id_ed25519" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/pg-bkup bkup restore --storage ssh -f uzaraka_20240731_200104.sql.gz.gpg \ No newline at end of file diff --git a/pkg/backup.go b/pkg/backup.go index e8cab0e..d994944 100644 --- a/pkg/backup.go +++ b/pkg/backup.go @@ -52,7 +52,7 @@ func StartBackup(cmd *cobra.Command) { s3Backup(backupFileName, s3Path, disableCompression, prune, backupRetention, encryption) case "local": localBackup(backupFileName, disableCompression, prune, backupRetention, encryption) - case "ssh": + case "ssh", "remote": sshBackup(backupFileName, remotePath, disableCompression, prune, backupRetention, encryption) case "ftp": utils.Fatalf("Not supported storage type: %s", storage) @@ -220,7 +220,7 @@ func s3Backup(backupFileName string, s3Path string, disableCompression bool, pru finalFileName = fmt.Sprintf("%s.%s", backupFileName, "gpg") } utils.Info("Uploading backup file 
to S3 storage...") - utils.Info("Backup name is ", backupFileName) + utils.Info("Backup name is ", finalFileName) err := utils.UploadFileToS3(tmpPath, finalFileName, bucket, s3Path) if err != nil { utils.Fatalf("Error uploading file to S3: %s ", err) @@ -255,9 +255,10 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune utils.Info("Backup name is ", backupFileName) err := CopyToRemote(finalFileName, remotePath) if err != nil { - utils.Fatalf("Error uploading file to S3: %s ", err) + utils.Fatalf("Error uploading file to the remote server: %s ", err) } + //Delete backup file from tmp folder err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName)) if err != nil { @@ -275,7 +276,6 @@ func sshBackup(backupFileName, remotePath string, disableCompression bool, prune func encryptBackup(backupFileName string) { gpgPassphrase := os.Getenv("GPG_PASSPHRASE") - err := Encrypt(filepath.Join(tmpPath, backupFileName), gpgPassphrase) if err != nil { utils.Fatalf("Error during encrypting backup %s", err) diff --git a/pkg/restore.go b/pkg/restore.go index 886fc53..042478d 100644 --- a/pkg/restore.go +++ b/pkg/restore.go @@ -71,7 +71,8 @@ func RestoreDatabase(file string) { extension := filepath.Ext(fmt.Sprintf("%s/%s", tmpPath, file)) if extension == ".gpg" { if gpgPassphrase == "" { - utils.Fatal("Error, GPG_PASSPHRASE environment variable required, you need to set the GPG_PASSPHRASE") + utils.Fatal("Error: GPG passphrase is required, your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE environment variable is required.") + } else { //Decrypt file err := Decrypt(filepath.Join(tmpPath, file), gpgPassphrase) diff --git a/pkg/scp.go b/pkg/scp.go index 9a5d47f..242cc9d 100644 --- a/pkg/scp.go +++ b/pkg/scp.go @@ -27,6 +27,7 @@ func CopyToRemote(fileName, remotePath string) error { if sshPassword == "" { return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n") } + utils.Info("Accessing the remote server using password, private key is recommended\n") clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) } @@ -70,6 +71,7 @@ func CopyFromRemote(fileName, remotePath string) error { if sshPassword == "" { return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n") } + utils.Info("Accessing the remote server using password, private key is recommended\n") clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) } @@ -83,14 +85,14 @@ func CopyFromRemote(fileName, remotePath string) error { } // Close client connection after the file has been copied defer client.Close() - file, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0777) + file, err := os.OpenFile(filepath.Join(tmpPath, fileName), os.O_RDWR|os.O_CREATE, 0777) if err != nil { fmt.Println("Couldn't open the output file") } defer file.Close() // the context can be adjusted to provide time-outs or inherit from other contexts if this is embedded in a larger application. 
- err = client.CopyFromRemote(context.Background(), file, remotePath) + err = client.CopyFromRemote(context.Background(), file, filepath.Join(remotePath, fileName)) if err != nil { fmt.Println("Error while copying file ", err) diff --git a/utils/s3.go b/utils/s3.go index 58d929a..984a505 100644 --- a/utils/s3.go +++ b/utils/s3.go @@ -60,7 +60,6 @@ func UploadFileToS3(filePath, key, bucket, prefix string) error { } objectKey := filepath.Join(prefix, key) - //fmt.Sprintf("%s/%s", prefix, key) buffer := make([]byte, fileInfo.Size()) file.Read(buffer) @@ -106,8 +105,7 @@ func DownloadFile(destinationPath, key, bucket, prefix string) error { fmt.Println("Failed to download file", err) return err } - Info("Backup downloaded: ", file.Name()) - Info("Bytes size: ", numBytes) + Info("Backup downloaded: ", file.Name(), " bytes size ", numBytes) return nil } From c3fdef18d2179cfc91eac9adb202dfee6e4ae805 Mon Sep 17 00:00:00 2001 From: Jonas Kaninda Date: Thu, 1 Aug 2024 07:16:45 +0200 Subject: [PATCH 8/9] Delete s3fs file --- pkg/s3fs.go | 83 ----------------------------------------------------- 1 file changed, 83 deletions(-) delete mode 100644 pkg/s3fs.go diff --git a/pkg/s3fs.go b/pkg/s3fs.go deleted file mode 100644 index 95bd4cf..0000000 --- a/pkg/s3fs.go +++ /dev/null @@ -1,83 +0,0 @@ -// Package pkg /* -/* -Copyright © 2024 Jonas Kaninda -*/ -package pkg - -/* -import ( - "fmt" - "github.com/jkaninda/pg-bkup/utils" - "os" - "os/exec" -) - -var ( - accessKey = "" - secretKey = "" - bucketName = "" - s3Endpoint = "" -) - -func S3Mount() { - MountS3Storage(s3Path) -} - -// MountS3Storage Mount s3 storage using s3fs -func MountS3Storage(s3Path string) { - accessKey = os.Getenv("ACCESS_KEY") - secretKey = os.Getenv("SECRET_KEY") - bucketName = os.Getenv("BUCKET_NAME") - if bucketName == "" { - bucketName = os.Getenv("BUCKETNAME") - } - s3Endpoint = os.Getenv("S3_ENDPOINT") - - if accessKey == "" || secretKey == "" || bucketName == "" { - utils.Fatal("Please make sure all environment variables are set for S3") - } else { - storagePath := fmt.Sprintf("%s%s", s3MountPath, s3Path) - err := os.Setenv("STORAGE_PATH", storagePath) - if err != nil { - return - } - - //Write file - err = utils.WriteToFile(s3fsPasswdFile, fmt.Sprintf("%s:%s", accessKey, secretKey)) - if err != nil { - utils.Fatal("Error creating file") - } - //Change file permission - utils.ChangePermission(s3fsPasswdFile, 0600) - - //Mount object storage - utils.Info("Mounting Object storage in ", s3MountPath) - if isEmpty, _ := utils.IsDirEmpty(s3MountPath); isEmpty { - cmd := exec.Command("s3fs", bucketName, s3MountPath, - "-o", "passwd_file="+s3fsPasswdFile, - "-o", "use_cache=/tmp/s3cache", - "-o", "allow_other", - "-o", "url="+s3Endpoint, - "-o", "use_path_request_style", - ) - - if err := cmd.Run(); err != nil { - utils.Fatal("Error mounting Object storage:", err) - } - - if err := os.MkdirAll(storagePath, os.ModePerm); err != nil { - utils.Fatalf("Error creating directory %v %v", storagePath, err) - } - - } else { - utils.Info("Object storage already mounted in " + s3MountPath) - if err := os.MkdirAll(storagePath, os.ModePerm); err != nil { - utils.Fatal("Error creating directory "+storagePath, err) - } - - } - - } -} - -*/ From bd8631070733e9ee2f3cbd5111226a752e85940a Mon Sep 17 00:00:00 2001 From: Jonas Kaninda Date: Sat, 3 Aug 2024 00:49:14 +0200 Subject: [PATCH 9/9] Add docs --- .github/workflows/build.yml | 20 +- Makefile | 11 +- cmd/backup.go | 4 +- cmd/history.go | 14 - cmd/root.go | 15 +- cmd/s3mount.go | 13 - 
docker/Dockerfile | 2 +- docs/.gitignore | 3 + docs/404.html | 24 ++ docs/Dockerfile | 12 + docs/Gemfile | 43 +++ docs/Gemfile.lock | 116 ++++++ docs/_config.yml | 69 ++++ .../2024-07-29-welcome-to-jekyll.markdown | 25 ++ docs/docker-compose.yml | 13 + docs/how-tos/backup-to-s3.md | 141 +++++++ docs/how-tos/backup-to-ssh.md | 146 +++++++ docs/how-tos/backup.md | 89 +++++ docs/how-tos/encrypt-backup.md | 44 +++ docs/how-tos/index.md | 8 + docs/how-tos/restore-from-s3.md | 51 +++ docs/how-tos/restore-from-ssh.md | 50 +++ docs/how-tos/restore.md | 43 +++ docs/index.md | 107 ++++++ docs/old-version/index.md | 358 ++++++++++++++++++ docs/reference/index.md | 105 +++++ examples/docker-compose.s3.yaml | 34 +- examples/docker-compose.scheduled.s3.yaml | 34 +- examples/k8s-job.yaml | 66 ++-- go.mod | 1 + go.sum | 2 + pkg/scp.go | 5 +- utils/utils.go | 7 +- 33 files changed, 1570 insertions(+), 105 deletions(-) delete mode 100644 cmd/history.go delete mode 100644 cmd/s3mount.go create mode 100644 docs/.gitignore create mode 100644 docs/404.html create mode 100644 docs/Dockerfile create mode 100644 docs/Gemfile create mode 100644 docs/Gemfile.lock create mode 100644 docs/_config.yml create mode 100644 docs/_posts/2024-07-29-welcome-to-jekyll.markdown create mode 100644 docs/docker-compose.yml create mode 100644 docs/how-tos/backup-to-s3.md create mode 100644 docs/how-tos/backup-to-ssh.md create mode 100644 docs/how-tos/backup.md create mode 100644 docs/how-tos/encrypt-backup.md create mode 100644 docs/how-tos/index.md create mode 100644 docs/how-tos/restore-from-s3.md create mode 100644 docs/how-tos/restore-from-ssh.md create mode 100644 docs/how-tos/restore.md create mode 100644 docs/index.md create mode 100644 docs/old-version/index.md create mode 100644 docs/reference/index.md diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ee8ad36..d975206 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,7 +1,7 @@ name: Build on: push: - branches: [ "main" ] + branches: [ "main","v1.0"] workflow_dispatch: inputs: docker_tag: @@ -11,9 +11,13 @@ on: type: string env: BUILDKIT_IMAGE: jkaninda/pg-bkup + TAG: v1.0 jobs: docker: runs-on: ubuntu-latest + permissions: + packages: write + contents: read steps: - name: Set up QEMU @@ -27,13 +31,21 @@ jobs: with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Log in to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Build and push uses: docker/build-push-action@v3 with: push: true file: "./docker/Dockerfile" - platforms: linux/amd64,linux/arm64 + platforms: linux/amd64,linux/arm64,linux/arm/v7 tags: | - "${{env.BUILDKIT_IMAGE}}:v0.7" - "${{env.BUILDKIT_IMAGE}}:latest" + "${{env.BUILDKIT_IMAGE}}:${{env.TAG}}" + # "${{env.BUILDKIT_IMAGE}}:latest" + "ghcr.io/${{env.BUILDKIT_IMAGE}}:${{TAG}}" + # "ghcr.io/${{env.BUILDKIT_IMAGE}}:latest" diff --git a/Makefile b/Makefile index 65e4c19..e2823c3 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ docker-build: docker build -f docker/Dockerfile -t jkaninda/pg-bkup:latest . 
docker-run: docker-build - docker run --rm --network internal --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --prune --keep-last 2 + docker run --rm --network internal --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup backup --prune --keep-last 2 docker-restore: docker-build docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore -f ${FILE_NAME} @@ -30,14 +30,17 @@ docker-run-scheduled-s3: docker-build docker run --rm --network internal --user 1000:1000 --name pg-bkup -v "./backup:/backup" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 --mode scheduled --path /custom-path --period "* * * * *" docker-run-s3: docker-build - docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 #--path /custom-path + docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "AWS_S3_BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "AWS_S3_ENDPOINT=${AWS_S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage s3 #--path /custom-path docker-restore-s3: docker-build docker run --rm --network internal --privileged --device /dev/fuse --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "ACCESS_KEY=${ACCESS_KEY}" -e "SECRET_KEY=${SECRET_KEY}" -e "BUCKET_NAME=${AWS_S3_BUCKET_NAME}" -e "S3_ENDPOINT=${S3_ENDPOINT}" -e "AWS_REGION=eu2" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup restore --storage s3 -f ${FILE_NAME} #--path /custom-path docker-run-ssh: docker-build - docker run --rm --network internal --name pg-bkup -v "/Users/jonas/.ssh/id_ed25519:/tmp/id_ed25519" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage ssh + docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e 
"SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" jkaninda/pg-bkup bkup backup --storage ssh docker-restore-ssh: docker-build - docker run --rm --network internal --name pg-bkup -v "/Users/jonas/.ssh/id_ed25519:/tmp/id_ed25519" -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/pg-bkup bkup restore --storage ssh -f uzaraka_20240731_200104.sql.gz.gpg \ No newline at end of file + docker run --rm --network internal --name pg-bkup -e "DB_HOST=${DB_HOST}" -e "DB_NAME=${DB_NAME}" -e "DB_USERNAME=${DB_USERNAME}" -e "DB_PASSWORD=${DB_PASSWORD}" -e "SSH_USER=${SSH_USER}" -e "SSH_HOST_NAME=${SSH_HOST_NAME}" -e "SSH_REMOTE_PATH=${SSH_REMOTE_PATH}" -e "SSH_PASSWORD=${SSH_PASSWORD}" -e "SSH_PORT=${SSH_PORT}" -e "GPG_PASSPHRASE=${GPG_PASSPHRASE}" -e "SSH_IDENTIFY_FILE=${SSH_IDENTIFY_FILE}" jkaninda/pg-bkup bkup restore --storage ssh -f data_20240731_200104.sql.gz.gpg + +run-docs: + cd docs && bundle exec jekyll serve -H 0.0.0.0 -t \ No newline at end of file diff --git a/cmd/backup.go b/cmd/backup.go index 3f5aa7c..5d48819 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -21,8 +21,8 @@ var BackupCmd = &cobra.Command{ func init() { //Backup - BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Set execution mode. default or scheduled") - BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Set schedule period time") + BackupCmd.PersistentFlags().StringP("mode", "m", "default", "Execution mode. default or scheduled") + BackupCmd.PersistentFlags().StringP("period", "", "0 1 * * *", "Schedule period time") BackupCmd.PersistentFlags().BoolP("prune", "", false, "Delete old backup, default disabled") BackupCmd.PersistentFlags().IntP("keep-last", "", 7, "Delete files created more than specified days ago, default 7 days") BackupCmd.PersistentFlags().BoolP("disable-compression", "", false, "Disable backup compression") diff --git a/cmd/history.go b/cmd/history.go deleted file mode 100644 index cf232e7..0000000 --- a/cmd/history.go +++ /dev/null @@ -1,14 +0,0 @@ -package cmd - -import ( - "github.com/jkaninda/pg-bkup/utils" - "github.com/spf13/cobra" -) - -var HistoryCmd = &cobra.Command{ - Use: "history", - Short: "Show the history of backup", - Run: func(cmd *cobra.Command, args []string) { - utils.ShowHistory() - }, -} diff --git a/cmd/root.go b/cmd/root.go index edb9c10..d3688ec 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -13,8 +13,8 @@ import ( // rootCmd represents the base command when called without any subcommands var rootCmd = &cobra.Command{ Use: "pg-bkup [Command]", - Short: "PostgreSQL Backup tool, backup database to S3 or Object Storage", - Long: `PostgreSQL Database backup and restoration tool. Backup database to AWS S3 storage or any S3 Alternatives for Object Storage.`, + Short: "PostgreSQL Backup tool, backup database to AWS S3 or SSH Remote Server", + Long: `PostgreSQL Database backup and restoration tool. 
Backup database to AWS S3 storage, any S3 Alternatives for Object Storage or SSH remote server.`, Example: utils.MainExample, Version: appVersion, } @@ -30,16 +30,13 @@ func Execute() { } func init() { - rootCmd.PersistentFlags().StringP("storage", "s", "local", "Set storage. local or s3") - rootCmd.PersistentFlags().StringP("path", "P", "", "Set s3 path, without file name. for S3 storage only") - rootCmd.PersistentFlags().StringP("dbname", "d", "", "Set database name") - rootCmd.PersistentFlags().IntP("timeout", "t", 30, "Set timeout") - rootCmd.PersistentFlags().IntP("port", "p", 5432, "Set database port") + rootCmd.PersistentFlags().StringP("storage", "s", "local", "Storage. local or s3") + rootCmd.PersistentFlags().StringP("path", "P", "", "AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup`") + rootCmd.PersistentFlags().StringP("dbname", "d", "", "Database name") + rootCmd.PersistentFlags().IntP("port", "p", 5432, "Database port") rootCmd.PersistentFlags().StringVarP(&operation, "operation", "o", "", "Set operation, for old version only") rootCmd.AddCommand(VersionCmd) rootCmd.AddCommand(BackupCmd) rootCmd.AddCommand(RestoreCmd) - rootCmd.AddCommand(S3MountCmd) - rootCmd.AddCommand(HistoryCmd) } diff --git a/cmd/s3mount.go b/cmd/s3mount.go deleted file mode 100644 index ab56e0d..0000000 --- a/cmd/s3mount.go +++ /dev/null @@ -1,13 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" -) - -var S3MountCmd = &cobra.Command{ - Use: "s3mount", - Short: "Mount AWS S3 storage", - Run: func(cmd *cobra.Command, args []string) { - //pkg.S3Mount() - }, -} diff --git a/docker/Dockerfile b/docker/Dockerfile index 4e3f512..2bd9779 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -40,7 +40,7 @@ LABEL author="Jonas Kaninda" RUN apt-get update -qq -RUN apt install postgresql-client postgresql-client-common supervisor cron openssh-client gnupg -y +RUN apt install postgresql-client postgresql-client-common supervisor cron gnupg -y # Clear cache RUN apt-get clean && rm -rf /var/lib/apt/lists/* diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 0000000..45c1505 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,3 @@ +_site +.sass-cache +.jekyll-metadata diff --git a/docs/404.html b/docs/404.html new file mode 100644 index 0000000..c472b4e --- /dev/null +++ b/docs/404.html @@ -0,0 +1,24 @@ +--- +layout: default +--- + + + +
+404
+
+Page not found :(
+
+The requested page could not be found.
diff --git a/docs/Dockerfile b/docs/Dockerfile new file mode 100644 index 0000000..5e1108f --- /dev/null +++ b/docs/Dockerfile @@ -0,0 +1,12 @@ +FROM ruby:3.3.4 + +ENV LC_ALL C.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US.UTF-8 + +WORKDIR /usr/src/app + +COPY . ./ +RUN gem install bundler && bundle install + +EXPOSE 4000 \ No newline at end of file diff --git a/docs/Gemfile b/docs/Gemfile new file mode 100644 index 0000000..3347de5 --- /dev/null +++ b/docs/Gemfile @@ -0,0 +1,43 @@ +source "https://rubygems.org" + +# Hello! This is where you manage which Jekyll version is used to run. +# When you want to use a different version, change it below, save the +# file and run `bundle install`. Run Jekyll with `bundle exec`, like so: +# +# bundle exec jekyll serve +# +# This will help ensure the proper Jekyll version is running. +# Happy Jekylling! +gem "jekyll", "~> 3.10.0" + +# This is the default theme for new Jekyll sites. You may change this to anything you like. +gem "minima", "~> 2.0" + +# If you want to use GitHub Pages, remove the "gem "jekyll"" above and +# uncomment the line below. To upgrade, run `bundle update github-pages`. +# gem "github-pages", group: :jekyll_plugins + +# If you have any plugins, put them here! +group :jekyll_plugins do + gem "jekyll-feed", "~> 0.6" +end + +# Windows and JRuby does not include zoneinfo files, so bundle the tzinfo-data gem +# and associated library. +platforms :mingw, :x64_mingw, :mswin, :jruby do + gem "tzinfo", ">= 1", "< 3" + gem "tzinfo-data" +end + +# Performance-booster for watching directories on Windows +gem "wdm", "~> 0.1.0", :install_if => Gem.win_platform? + +# kramdown v2 ships without the gfm parser by default. If you're using +# kramdown v1, comment out this line. +gem "kramdown-parser-gfm" + +# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem +# do not have a Java counterpart. 
+gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby] +gem "just-the-docs" + diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock new file mode 100644 index 0000000..1bf9a5d --- /dev/null +++ b/docs/Gemfile.lock @@ -0,0 +1,116 @@ +GEM + remote: https://rubygems.org/ + specs: + addressable (2.8.7) + public_suffix (>= 2.0.2, < 7.0) + colorator (1.1.0) + concurrent-ruby (1.3.3) + csv (3.3.0) + em-websocket (0.5.3) + eventmachine (>= 0.12.9) + http_parser.rb (~> 0) + eventmachine (1.2.7) + ffi (1.17.0) + ffi (1.17.0-aarch64-linux-gnu) + ffi (1.17.0-aarch64-linux-musl) + ffi (1.17.0-arm-linux-gnu) + ffi (1.17.0-arm-linux-musl) + ffi (1.17.0-arm64-darwin) + ffi (1.17.0-x86-linux-gnu) + ffi (1.17.0-x86-linux-musl) + ffi (1.17.0-x86_64-darwin) + ffi (1.17.0-x86_64-linux-gnu) + ffi (1.17.0-x86_64-linux-musl) + forwardable-extended (2.6.0) + http_parser.rb (0.8.0) + i18n (1.14.5) + concurrent-ruby (~> 1.0) + jekyll (3.10.0) + addressable (~> 2.4) + colorator (~> 1.0) + csv (~> 3.0) + em-websocket (~> 0.5) + i18n (>= 0.7, < 2) + jekyll-sass-converter (~> 1.0) + jekyll-watch (~> 2.0) + kramdown (>= 1.17, < 3) + liquid (~> 4.0) + mercenary (~> 0.3.3) + pathutil (~> 0.9) + rouge (>= 1.7, < 4) + safe_yaml (~> 1.0) + webrick (>= 1.0) + jekyll-feed (0.17.0) + jekyll (>= 3.7, < 5.0) + jekyll-include-cache (0.2.1) + jekyll (>= 3.7, < 5.0) + jekyll-sass-converter (1.5.2) + sass (~> 3.4) + jekyll-seo-tag (2.8.0) + jekyll (>= 3.8, < 5.0) + jekyll-watch (2.2.1) + listen (~> 3.0) + just-the-docs (0.8.2) + jekyll (>= 3.8.5) + jekyll-include-cache + jekyll-seo-tag (>= 2.0) + rake (>= 12.3.1) + kramdown (2.4.0) + rexml + kramdown-parser-gfm (1.1.0) + kramdown (~> 2.0) + liquid (4.0.4) + listen (3.9.0) + rb-fsevent (~> 0.10, >= 0.10.3) + rb-inotify (~> 0.9, >= 0.9.10) + mercenary (0.3.6) + minima (2.5.1) + jekyll (>= 3.5, < 5.0) + jekyll-feed (~> 0.9) + jekyll-seo-tag (~> 2.1) + pathutil (0.16.2) + forwardable-extended (~> 2.6) + public_suffix (6.0.1) + rake (13.2.1) + rb-fsevent (0.11.2) + rb-inotify (0.11.1) + ffi (~> 1.0) + rexml (3.3.2) + strscan + rouge (3.30.0) + safe_yaml (1.0.5) + sass (3.7.4) + sass-listen (~> 4.0.0) + sass-listen (4.0.0) + rb-fsevent (~> 0.9, >= 0.9.4) + rb-inotify (~> 0.9, >= 0.9.7) + strscan (3.1.0) + wdm (0.1.1) + webrick (1.8.1) + +PLATFORMS + aarch64-linux-gnu + aarch64-linux-musl + arm-linux-gnu + arm-linux-musl + arm64-darwin + ruby + x86-linux-gnu + x86-linux-musl + x86_64-darwin + x86_64-linux-gnu + x86_64-linux-musl + +DEPENDENCIES + http_parser.rb (~> 0.6.0) + jekyll (~> 3.10.0) + jekyll-feed (~> 0.6) + just-the-docs + kramdown-parser-gfm + minima (~> 2.0) + tzinfo (>= 1, < 3) + tzinfo-data + wdm (~> 0.1.0) + +BUNDLED WITH + 2.5.16 diff --git a/docs/_config.yml b/docs/_config.yml new file mode 100644 index 0000000..40ee934 --- /dev/null +++ b/docs/_config.yml @@ -0,0 +1,69 @@ +# Welcome to Jekyll! +# +# This config file is meant for settings that affect your whole blog, values +# which you are expected to set up once and rarely edit after that. If you find +# yourself editing this file very often, consider using Jekyll's data files +# feature for the data you need to update frequently. +# +# For technical reasons, this file is *NOT* reloaded automatically when you use +# 'bundle exec jekyll serve'. If you change this file, please restart the server process. + +# Site settings +# These are used to personalize your new site. If you look in the HTML files, +# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on. 
+# You can create any custom variable you would like, and they will be accessible +# in the templates via {{ site.myvariable }}. +title: Postgres Backup +email: hi@jonaskaninda.com +description: >- # this means to ignore newlines until "baseurl:" + PostgreSQL Backup and Restoration tool. Backup database to AWS S3 storage or any S3 Alternatives for Object Storage. +baseurl: "" # the subpath of your site, e.g. /blog +url: "" # the base hostname & protocol for your site, e.g. http://example.com +twitter_username: jonaskaninda +github_username: jkaninda + +callouts_level: quiet +callouts: + highlight: + color: yellow + important: + title: Important + color: blue + new: + title: New + color: green + note: + title: Note + color: purple + warning: + title: Warning + color: red +# Build settings +markdown: kramdown +theme: just-the-docs +plugins: + - jekyll-feed +aux_links: + 'GitHub Repository': + - https://github.com/jkaninda/pg-bkup + +nav_external_links: + - title: GitHub Repository + url: https://github.com/jkaninda/pg-bkup + +footer_content: >- + Copyright © 2024 Jonas Kaninda. + Distributed under the MIT License.
+ Something missing, unclear or not working? Open an issue. + +# Exclude from processing. +# The following items will not be processed, by default. Create a custom list +# to override the default setting. +# exclude: +# - Gemfile +# - Gemfile2.lock +# - node_modules +# - vendor/bundle/ +# - vendor/cache/ +# - vendor/gems/ +# - vendor/ruby/ diff --git a/docs/_posts/2024-07-29-welcome-to-jekyll.markdown b/docs/_posts/2024-07-29-welcome-to-jekyll.markdown new file mode 100644 index 0000000..6c2334f --- /dev/null +++ b/docs/_posts/2024-07-29-welcome-to-jekyll.markdown @@ -0,0 +1,25 @@ +--- +layout: post +title: "Welcome to Jekyll!" +date: 2024-07-29 03:36:13 +0200 +categories: jekyll update +--- +You’ll find this post in your `_posts` directory. Go ahead and edit it and re-build the site to see your changes. You can rebuild the site in many different ways, but the most common way is to run `jekyll serve`, which launches a web server and auto-regenerates your site when a file is updated. + +To add new posts, simply add a file in the `_posts` directory that follows the convention `YYYY-MM-DD-name-of-post.ext` and includes the necessary front matter. Take a look at the source for this post to get an idea about how it works. + +Jekyll also offers powerful support for code snippets: + +{% highlight ruby %} +def print_hi(name) + puts "Hi, #{name}" +end +print_hi('Tom') +#=> prints 'Hi, Tom' to STDOUT. +{% endhighlight %} + +Check out the [Jekyll docs][jekyll-docs] for more info on how to get the most out of Jekyll. File all bugs/feature requests at [Jekyll’s GitHub repo][jekyll-gh]. If you have questions, you can ask them on [Jekyll Talk][jekyll-talk]. + +[jekyll-docs]: https://jekyllrb.com/docs/home +[jekyll-gh]: https://github.com/jekyll/jekyll +[jekyll-talk]: https://talk.jekyllrb.com/ diff --git a/docs/docker-compose.yml b/docs/docker-compose.yml new file mode 100644 index 0000000..5ceb7d5 --- /dev/null +++ b/docs/docker-compose.yml @@ -0,0 +1,13 @@ +services: + jekyll: + build: + context: ./ + ports: + - 4000:4000 + environment: + - JEKYLL_ENV=development + volumes: + - .:/usr/src/app + stdin_open: true + tty: true + command: bundle exec jekyll serve -H 0.0.0.0 -t \ No newline at end of file diff --git a/docs/how-tos/backup-to-s3.md b/docs/how-tos/backup-to-s3.md new file mode 100644 index 0000000..c802290 --- /dev/null +++ b/docs/how-tos/backup-to-s3.md @@ -0,0 +1,141 @@ +--- +title: Backup to AWS S3 +layout: default +parent: How Tos +nav_order: 2 +--- +# Backup to AWS S3 + +{: .note } +As described on local backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage s3` (-s s3). +You can also specify a specify folder where you want to save you data by adding `--path /my-custom-path` flag. + + +## Backup to S3 + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. 
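+    # For example, a pinned tag would look like this (v1.0 is only an
+    # illustration; pick the release that matches your setup):
+    # image: jkaninda/pg-bkup:v1.0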
+ image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup --storage s3 -d database --path /my-custom-path + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## AWS configurations + - AWS_S3_ENDPOINT=https://s3.amazonaws.com + - AWS_S3_BUCKET_NAME=backup + - AWS_REGION="us-west-2" + - AWS_ACCESS_KEY=xxxx + - AWS_SECRET_KEY=xxxxx + ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true + - AWS_DISABLE_SSL="false" + + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` + +### Recurring backups to S3 + +As explained above, you need just to add AWS environment variables and specify the storage type `--storage s3`. +In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below. + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *" + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## AWS configurations + - AWS_S3_ENDPOINT=https://s3.amazonaws.com + - AWS_S3_BUCKET_NAME=backup + - AWS_REGION="us-west-2" + - AWS_ACCESS_KEY=xxxx + - AWS_SECRET_KEY=xxxxx + ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true + - AWS_DISABLE_SSL="false" + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` + +## Deploy on Kubernetes + +For Kubernetes, you don't need to run it in scheduled mode. You can deploy it as CronJob. + +### Simple Kubernetes CronJob usage: + +```yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: bkup-job +spec: + schedule: "0 1 * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: pg-bkup + image: jkaninda/pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup -s s3 --path /custom_path + env: + - name: DB_PORT + value: "5432" + - name: DB_HOST + value: "" + - name: DB_NAME + value: "" + - name: DB_USERNAME + value: "" + # Please use secret! + - name: DB_PASSWORD + value: "" + - name: ACCESS_KEY + value: "" + - name: AWS_S3_ENDPOINT + value: "https://s3.amazonaws.com" + - name: AWS_S3_BUCKET_NAME + value: "xxx" + - name: AWS_REGION + value: "us-west-2" + - name: AWS_ACCESS_KEY + value: "xxxx" + - name: AWS_SECRET_KEY + value: "xxxx" + - name: AWS_DISABLE_SSL + value: "false" + restartPolicy: OnFailure +``` \ No newline at end of file diff --git a/docs/how-tos/backup-to-ssh.md b/docs/how-tos/backup-to-ssh.md new file mode 100644 index 0000000..4d75030 --- /dev/null +++ b/docs/how-tos/backup-to-ssh.md @@ -0,0 +1,146 @@ +--- +title: Backup to SSH +layout: default +parent: How Tos +nav_order: 3 +--- +# Backup to SSH remote server + + +As described for s3 backup section, to change the storage of you backup and use S3 as storage. You need to add `--storage ssh` or `--storage remote`. 
+You need to add the full remote path by adding `--path /home/jkaninda/backups` flag or using `SSH_REMOTE_PATH` environment variable. + +{: .note } +These environment variables are required for SSH backup `SSH_HOST_NAME`, `SSH_USER`, `SSH_REMOTE_PATH`, `SSH_IDENTIFY_FILE`, `SSH_PORT` or `SSH_PASSWORD` if you dont use a private key to access to your server. +Accessing the remote server using password is not recommended, use private key instead. + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup --storage remote -d database + volumes: + - ./id_ed25519:/tmp/id_ed25519" + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## SSH config + - SSH_HOST_NAME="hostname" + - SSH_PORT=22 + - SSH_USER=user + - SSH_REMOTE_PATH=/home/jkaninda/backups + - SSH_IDENTIFY_FILE=/tmp/id_ed25519 + ## We advise you to use a private jey instead of password + #- SSH_PASSWORD=password + + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` + + +### Recurring backups to SSH remote server + +As explained above, you need just to add required environment variables and specify the storage type `--storage ssh`. +You can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below. + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup -d database --storage s3 --mode scheduled --period "0 1 * * *" + volumes: + - ./id_ed25519:/tmp/id_ed25519" + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## SSH config + - SSH_HOST_NAME="hostname" + - SSH_PORT=22 + - SSH_USER=user + - SSH_REMOTE_PATH=/home/jkaninda/backups + - SSH_IDENTIFY_FILE=/tmp/id_ed25519 + ## We advise you to use a private jey instead of password + #- SSH_PASSWORD=password + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` + +## Deploy on Kubernetes + +For Kubernetes, you don't need to run it in scheduled mode. +You can deploy it as CronJob. + +Simple Kubernetes CronJob usage: + +```yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: bkup-job +spec: + schedule: "0 1 * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: pg-bkup + image: jkaninda/pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup -s s3 --path /custom_path + env: + - name: DB_PORT + value: "5432" + - name: DB_HOST + value: "" + - name: DB_NAME + value: "" + - name: DB_USERNAME + value: "" + # Please use secret! 
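+            # A sketch of reading the password from a Kubernetes Secret instead of a
+            # plain value (assumes a Secret named "pg-credentials" with a "password" key):
+            #   - name: DB_PASSWORD
+            #     valueFrom:
+            #       secretKeyRef:
+            #         name: pg-credentials
+            #         key: password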
+ - name: DB_PASSWORD + value: "" + - name: SSH_HOST_NAME + value: "" + - name: SSH_PORT + value: "22" + - name: SSH_USER + value: "xxx" + - name: SSH_REMOTE_PATH + value: "/home/jkaninda/backups" + - name: AWS_ACCESS_KEY + value: "xxxx" + - name: SSH_IDENTIFY_FILE + value: "/home/jkaninda/backups" + restartPolicy: OnFailure +``` \ No newline at end of file diff --git a/docs/how-tos/backup.md b/docs/how-tos/backup.md new file mode 100644 index 0000000..bf43d42 --- /dev/null +++ b/docs/how-tos/backup.md @@ -0,0 +1,89 @@ +--- +title: Backup +layout: default +parent: How Tos +nav_order: 1 +--- + +# Backup database + +To backup the database, you need to add `backup` subcommand to `pg-bkup` or `bkup`. + +{: .note } +The default storage is local storage mounted to __/backup__. The backup is compressed by default using gzip. The flag __`disable-compression`__ can be used when you need to disable backup compression. + +{: .warning } +Creating a user for backup tasks who has read-only access is recommended! + +The backup process can be run in scheduled mode for the recurring backups. +It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage. + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup -d database + volumes: + - ./backup:/backup + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` + +### Backup using Docker CLI + +```shell + docker run --rm --network your_network_name \ + -v $PWD/backup:/backup/ \ + -e "DB_HOST=dbhost" \ + -e "DB_USERNAME=username" \ + -e "DB_PASSWORD=password" \ + jkaninda/pg-bkup pg-bkup backup -d database_name +``` + +In case you need to use recurring backups, you can use `--mode scheduled` and specify the periodical backup time by adding `--period "0 1 * * *"` flag as described below. + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup -d database --mode scheduled --period "0 1 * * *" + volumes: + - ./backup:/backup + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` + diff --git a/docs/how-tos/encrypt-backup.md b/docs/how-tos/encrypt-backup.md new file mode 100644 index 0000000..2065dca --- /dev/null +++ b/docs/how-tos/encrypt-backup.md @@ -0,0 +1,44 @@ +--- +title: Encrypt backups using GPG +layout: default +parent: How Tos +nav_order: 7 +--- +# Encrypt backup + +The image supports encrypting backups using GPG out of the box. In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a sql.gpg file instead or sql.gz.gpg. 
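+
+If you ever need to decrypt an archive manually outside the container, a command along these lines should work (a sketch assuming the backup was encrypted with a symmetric passphrase; depending on your GnuPG version you may also need `--pinentry-mode loopback`):
+
+```shell
+# File names and passphrase are examples only.
+gpg --batch --passphrase "my-secure-passphrase" \
+  --output backup_20231219_022941.sql.gz \
+  --decrypt backup_20231219_022941.sql.gz.gpg
+```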
+ +{: .warning } +To restore an encrypted backup, you need to provide the same GPG passphrase used during backup process. + +### Backup + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup -d database + volumes: + - ./backup:/backup + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## Required to encrypt backup + - GPG_PASSPHRASE=my-secure-passphrase + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` \ No newline at end of file diff --git a/docs/how-tos/index.md b/docs/how-tos/index.md new file mode 100644 index 0000000..e869ec7 --- /dev/null +++ b/docs/how-tos/index.md @@ -0,0 +1,8 @@ +--- +title: How Tos +layout: default +nav_order: 3 +has_children: true +--- + +## How Tos \ No newline at end of file diff --git a/docs/how-tos/restore-from-s3.md b/docs/how-tos/restore-from-s3.md new file mode 100644 index 0000000..9f671d5 --- /dev/null +++ b/docs/how-tos/restore-from-s3.md @@ -0,0 +1,51 @@ +--- +title: Restore database from AWS S3 +layout: default +parent: How Tos +nav_order: 5 +--- + +# Restore database from S3 storage + +To restore the database, you need to add `restore` subcommand to `pg-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`. + +{: .note } +It supports __.sql__ and __.sql.gz__ compressed file. + +### Restore + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup restore --storage s3 -d my-database -f store_20231219_022941.sql.gz --path /my-custom-path + volumes: + - ./backup:/backup + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## AWS configurations + - AWS_S3_ENDPOINT=https://s3.amazonaws.com + - AWS_S3_BUCKET_NAME=backup + - AWS_REGION="us-west-2" + - AWS_ACCESS_KEY=xxxx + - AWS_SECRET_KEY=xxxxx + ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true + - AWS_DISABLE_SSL="false" + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` \ No newline at end of file diff --git a/docs/how-tos/restore-from-ssh.md b/docs/how-tos/restore-from-ssh.md new file mode 100644 index 0000000..1ec6845 --- /dev/null +++ b/docs/how-tos/restore-from-ssh.md @@ -0,0 +1,50 @@ +--- +title: Restore database from SSH +layout: default +parent: How Tos +nav_order: 6 +--- +# Restore database from SSH remote server + +To restore the database from your remote server, you need to add `restore` subcommand to `pg-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`. + +{: .note } +It supports __.sql__ and __.sql.gz__ compressed file. + +### Restore + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. 
+ # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup restore --storage ssh -d my-database -f store_20231219_022941.sql.gz --path /home/jkaninda/backups + volumes: + - ./backup:/backup + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## SSH config + - SSH_HOST_NAME="hostname" + - SSH_PORT=22 + - SSH_USER=user + - SSH_REMOTE_PATH=/home/jkaninda/backups + - SSH_IDENTIFY_FILE=/tmp/id_ed25519 + ## We advise you to use a private jey instead of password + #- SSH_PASSWORD=password + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` \ No newline at end of file diff --git a/docs/how-tos/restore.md b/docs/how-tos/restore.md new file mode 100644 index 0000000..8acc16f --- /dev/null +++ b/docs/how-tos/restore.md @@ -0,0 +1,43 @@ +--- +title: Restore database +layout: default +parent: How Tos +nav_order: 4 +--- + +# Restore database + +To restore the database, you need to add `restore` subcommand to `pg-bkup` or `bkup` and specify the file to restore by adding `--file store_20231219_022941.sql.gz`. + +{: .note } +It supports __.sql__ and __.sql.gz__ compressed file. + +### Restore + +```yml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup restore -d database -f store_20231219_022941.sql.gz + volumes: + - ./backup:/backup + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` \ No newline at end of file diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..0e4f20e --- /dev/null +++ b/docs/index.md @@ -0,0 +1,107 @@ +--- +title: Overview +layout: home +nav_order: 1 +--- + +# About pg-bkup +{:.no_toc} +pg-bkup it's a Docker container image that can be used to backup and restore Postgres database. It supports local storage, AWS S3 or any S3 Alternatives for Object Storage, and SSH compatible storage. +It also supports __encrypting__ your backups using GPG. + +We are open to receiving stars, PRs, and issues! + + +{: .fs-6 .fw-300 } + +--- + +The [jkaninda/pg-bkup](https://hub.docker.com/r/jkaninda/pg-bkup) Docker image can be deployed on Docker, Docker Swarm and Kubernetes. +It handles __recurring__ backups of postgres database on Docker and can be deployed as __CronJob on Kubernetes__ using local, AWS S3 or SSH compatible storage. + +It also supports __encrypting__ your backups using GPG. + +{: .note } +Code and documentation for `v1` version on [this branch][v1-branch]. 
+ +[v1-branch]: https://github.com/jkaninda/pg-bkup + +--- + +## Quickstart + +### Simple backup using Docker CLI + +To run a one time backup, bind your local volume to `/backup` in the container and run the `pg-bkup backup` command: + +```shell + docker run --rm --network your_network_name \ + -v $PWD/backup:/backup/ \ + -e "DB_HOST=dbhost" \ + -e "DB_USERNAME=username" \ + -e "DB_PASSWORD=password" \ + jkaninda/pg-bkup pg-bkup backup -d database_name +``` + +Alternatively, pass a `--env-file` in order to use a full config as described below. + + + +Add a `backup` service to your compose setup and mount the volumes you would like to see backed up: + +### Simple backup in docker compose file + +```yaml +services: + pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. + image: jkaninda/pg-bkup + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup + volumes: + - ./backup:/backup + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=foo + - DB_USERNAME=bar + - DB_PASSWORD=password + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: +``` + +## Available image registries + +This Docker image is published to both Docker Hub and the GitHub container registry. +Depending on your preferences and needs, you can reference both `jkaninda/pg-bkup` as well as `ghcr.io/jkaninda/pg-bkup`: + +``` +docker pull jkaninda/pg-bkup:v1.0 +docker pull ghcr.io/jkaninda/pg-bkup:v1.0 +``` + +Documentation references Docker Hub, but all examples will work using ghcr.io just as well. + +## Supported Engines + +This image is developed and tested against the Docker CE engine and Kubernetes exclusively. +While it may work against different implementations, there are no guarantees about support for non-Docker engines. + +## References + +We decided to publish this image as a simpler and more lightweight alternative because of the following requirements: + +- The original image is based on `ubuntu` and requires additional tools, making it heavy. +- This image is written in Go. +- `arm64` and `arm/v7` architectures are supported. +- Docker in Swarm mode is supported. +- Kubernetes is supported. diff --git a/docs/old-version/index.md b/docs/old-version/index.md new file mode 100644 index 0000000..f9b0798 --- /dev/null +++ b/docs/old-version/index.md @@ -0,0 +1,358 @@ +--- +layout: page +title: Old version +permalink: /old-version/ +--- + +This is the documentation of pg-backup for all old versions bellow `v1.0`. +In the old version, S3 storage was mounted using s3fs, so we decided to migrate to the official AWS SDK. + +## Storage: +- local +- s3 +- Object storage + +## Volumes: + +- /s3mnt => S3 mounting path +- /backup => local storage mounting path + +### Usage + +| Options | Shorts | Usage | +|-----------------------|--------|------------------------------------------------------------------------| +| pg-bkup | bkup | CLI utility | +| backup | | Backup database operation | +| restore | | Restore database operation | +| history | | Show the history of backup | +| --storage | -s | Storage. local or s3 (default: local) | +| --file | -f | File name to restore | +| --path | | S3 path without file name. eg: /custom_path | +| --dbname | -d | Database name | +| --port | -p | Database port (default: 5432) | +| --mode | -m | Execution mode. 
default or scheduled (default: default) | +| --disable-compression | | Disable database backup compression | +| --prune | | Delete old backup, default disabled | +| --keep-last | | Delete old backup created more than specified days ago, default 7 days | +| --period | | Crontab period for scheduled mode only. (default: "0 1 * * *") | +| --help | -h | Print this help message and exit | +| --version | -V | Print version information and exit | + + +## Environment variables + +| Name | Requirement | Description | +|-------------|--------------------------------------------------|------------------------------------------------------| +| DB_PORT | Optional, default 5432 | Database port number | +| DB_HOST | Required | Database host | +| DB_NAME | Optional if it was provided from the -d flag | Database name | +| DB_USERNAME | Required | Database user name | +| DB_PASSWORD | Required | Database password | +| ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key | +| SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key | +| BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name | +| S3_ENDPOINT | Optional, required for S3 storage | AWS S3 Endpoint | +| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) | + + +## Note: + +Creating a user for backup tasks who has read-only access is recommended! + +> create read-only user + + +## Backup database : + +Simple backup usage + +```sh +bkup backup +``` + +### S3 + +```sh +pg-bkup backup --storage s3 +``` +## Docker run: + +```sh +docker run --rm --network your_network_name \ +--name pg-bkup -v $PWD/backup:/backup/ \ +-e "DB_HOST=database_host_name" \ +-e "DB_USERNAME=username" \ +-e "DB_PASSWORD=password" jkaninda/pg-bkup:v0.7 pg-bkup backup -d database_name +``` + +## Docker compose file: +```yaml +version: '3' +services: + postgres: + image: postgres:14.5 + container_name: postgres + restart: unless-stopped + volumes: + - ./postgres:/var/lib/postgresql/data + environment: + POSTGRES_DB: bkup + POSTGRES_PASSWORD: password + POSTGRES_USER: bkup + pg-bkup: + image: jkaninda/pg-bkup:v0.7 + container_name: pg-bkup + depends_on: + - postgres + command: + - /bin/sh + - -c + - pg-bkup backup -d bkup + volumes: + - ./backup:/backup + environment: + - DB_PORT=5432 + - DB_HOST=postgres + - DB_NAME=bkup + - DB_USERNAME=bkup + - DB_PASSWORD=password +``` +## Restore database : + +Simple database restore operation usage + +```sh +pg-bkup restore --file database_20231217_115621.sql --dbname database_name +``` + +```sh +pg-bkup restore -f database_20231217_115621.sql -d database_name +``` +### S3 + +```sh +pg-bkup restore --storage s3 --file database_20231217_115621.sql --dbname database_name +``` + +## Docker run: + +```sh +docker run --rm --network your_network_name \ +--name pg-bkup \ +-v $PWD/backup:/backup/ \ +-e "DB_HOST=database_host_name" \ +-e "DB_USERNAME=username" \ +-e "DB_PASSWORD=password" \ +jkaninda/pg-bkup:v0.7 pg-bkup restore -d database_name -f store_20231219_022941.sql.gz +``` + +## Docker compose file: + +```yaml +version: '3' +services: + pg-bkup: + image: jkaninda/pg-bkup:v0.7 + container_name: pg-bkup + command: + - /bin/sh + - -c + - pg-bkup restore --file database_20231217_115621.sql -d database_name + volumes: + - ./backup:/backup + environment: + #- FILE_NAME=database_20231217_040238.sql.gz # Optional if file name is set from command + - DB_PORT=5432 + - DB_HOST=postgres + - DB_USERNAME=user_name + - DB_PASSWORD=password +``` +## Run + 
+```sh +docker-compose up -d +``` +## Backup to S3 + +```sh +docker run --rm --privileged \ +--device /dev/fuse --name pg-bkup \ +-e "DB_HOST=db_hostname" \ +-e "DB_USERNAME=username" \ +-e "DB_PASSWORD=password" \ +-e "ACCESS_KEY=your_access_key" \ +-e "SECRET_KEY=your_secret_key" \ +-e "BUCKETNAME=your_bucket_name" \ +-e "S3_ENDPOINT=https://s3.us-west-2.amazonaws.com" \ +jkaninda/pg-bkup:v0.7 pg-bkup backup -s s3 -d database_name +``` +> To change s3 backup path add this flag : --path /my_customPath . default path is /pg-bkup + +Simple S3 backup usage + +```sh +pg-bkup backup --storage s3 --dbname mydatabase +``` +```yaml + pg-bkup: + image: jkaninda/pg-bkup:v0.7 + container_name: pg-bkup + privileged: true + devices: + - "/dev/fuse" + command: + - /bin/sh + - -c + - pg-bkup restore --storage s3 -f database_20231217_115621.sql.gz --dbname database_name + environment: + - DB_PORT=5432 + - DB_HOST=postgress + - DB_USERNAME=user_name + - DB_PASSWORD=password + - ACCESS_KEY=${ACCESS_KEY} + - SECRET_KEY=${SECRET_KEY} + - BUCKET_NAME=${BUCKET_NAME} + - S3_ENDPOINT=${S3_ENDPOINT} + +``` +## Run in Scheduled mode + +This tool can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources. +For Docker, you need to run it in scheduled mode by adding `--mode scheduled` flag and specify the periodical backup time by adding `--period "0 1 * * *"` flag. + +Make an automated backup on Docker + +## Syntax of crontab (field description) + +The syntax is: + +- 1: Minute (0-59) +- 2: Hours (0-23) +- 3: Day (0-31) +- 4: Month (0-12 [12 == December]) +- 5: Day of the week(0-7 [7 or 0 == sunday]) + +Easy to remember format: + +```conf +* * * * * command to be executed +``` + +```conf +- - - - - +| | | | | +| | | | ----- Day of week (0 - 7) (Sunday=0 or 7) +| | | ------- Month (1 - 12) +| | --------- Day of month (1 - 31) +| ----------- Hour (0 - 23) +------------- Minute (0 - 59) +``` + +> At every 30th minute + +```conf +*/30 * * * * +``` +> “At minute 0.” every hour +```conf +0 * * * * +``` + +> “At 01:00.” every day + +```conf +0 1 * * * +``` + +## Example of scheduled mode + +> Docker run : + +```sh +docker run --rm --name pg-bkup \ +-v $BACKUP_DIR:/backup/ \ +-e "DB_HOST=$DB_HOST" \ +-e "DB_USERNAME=$DB_USERNAME" \ +-e "DB_PASSWORD=$DB_PASSWORD" jkaninda/pg-bkup:v0.7 pg-bkup backup --dbname $DB_NAME --mode scheduled --period "0 1 * * *" +``` + +> With Docker compose + +```yaml +version: "3" +services: + pg-bkup: + image: jkaninda/pg-bkup:v0.7 + container_name: pg-bkup + privileged: true + devices: + - "/dev/fuse" + command: + - /bin/sh + - -c + - pg-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "*/30 * * * *" + environment: + - DB_PORT=5432 + - DB_HOST=postgreshost + - DB_USERNAME=userName + - DB_PASSWORD=${DB_PASSWORD} + - ACCESS_KEY=${ACCESS_KEY} + - SECRET_KEY=${SECRET_KEY} + - BUCKET_NAME=${BUCKET_NAME} + - S3_ENDPOINT=${S3_ENDPOINT} +``` + +## Kubernetes CronJob + +For Kubernetes, you don't need to run it in scheduled mode. 
+ +Simple Kubernetes CronJob usage: + +```yaml +apiVersion: batch/v1 +kind: CronJob +metadata: + name: bkup-job +spec: + schedule: "0 1 * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: pg-bkup + image: jkaninda/pg-bkup:v0.7 + securityContext: + privileged: true + command: + - /bin/sh + - -c + - pg-bkup backup -s s3 --path /custom_path + env: + - name: DB_PORT + value: "5432" + - name: DB_HOST + value: "" + - name: DB_NAME + value: "" + - name: DB_USERNAME + value: "" + # Please use secret! + - name: DB_PASSWORD + value: "" + - name: ACCESS_KEY + value: "" + - name: SECRET_KEY + value: "" + - name: BUCKET_NAME + value: "" + - name: S3_ENDPOINT + value: "https://s3.us-west-2.amazonaws.com" + restartPolicy: Never +``` + +## Authors + +**Jonas Kaninda** +- + diff --git a/docs/reference/index.md b/docs/reference/index.md new file mode 100644 index 0000000..38e931f --- /dev/null +++ b/docs/reference/index.md @@ -0,0 +1,105 @@ +--- +title: Configuration Reference +layout: default +nav_order: 2 +--- + +# Configuration reference + +Backup and restore targets, schedule and retention are configured using environment variables or flags. + + + + + +### CLI utility Usage + +| Options | Shorts | Usage | +|-----------------------|--------|----------------------------------------------------------------------------------------| +| pg-bkup | bkup | CLI utility | +| backup | | Backup database operation | +| restore | | Restore database operation | +| --storage | -s | Storage. local or s3 (default: local) | +| --file | -f | File name for restoration | +| --path | | AWS S3 path without file name. eg: /custom_path or ssh remote path `/home/foo/backup` | +| --dbname | -d | Database name | +| --port | -p | Database port (default: 5432) | +| --mode | -m | Execution mode. default or scheduled (default: default) | +| --disable-compression | | Disable database backup compression | +| --prune | | Delete old backup, default disabled | +| --keep-last | | Delete old backup created more than specified days ago, default 7 days | +| --period | | Crontab period for scheduled mode only. 
(default: "0 1 * * *") | +| --help | -h | Print this help message and exit | +| --version | -V | Print version information and exit | + +## Environment variables + +| Name | Requirement | Description | +|-------------------|--------------------------------------------------|------------------------------------------------------| +| DB_PORT | Optional, default 5432 | Database port number | +| DB_HOST | Required | Database host | +| DB_NAME | Optional if it was provided from the -d flag | Database name | +| DB_USERNAME | Required | Database user name | +| DB_PASSWORD | Required | Database password | +| AWS_ACCESS_KEY | Optional, required for S3 storage | AWS S3 Access Key | +| AWS_SECRET_KEY | Optional, required for S3 storage | AWS S3 Secret Key | +| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name | +| AWS_BUCKET_NAME | Optional, required for S3 storage | AWS S3 Bucket Name | +| AWS_REGION | Optional, required for S3 storage | AWS Region | +| AWS_DISABLE_SSL | Optional, required for S3 storage | Disable SSL | +| FILE_NAME | Optional if it was provided from the --file flag | Database file to restore (extensions: .sql, .sql.gz) | +| GPG_PASSPHRASE | Optional, required to encrypt and restore backup | GPG passphrase | +| SSH_HOST_NAME | Optional, required for SSH storage | ssh remote hostname or ip | +| SSH_USER | Optional, required for SSH storage | ssh remote user | +| SSH_PASSWORD | Optional, required for SSH storage | ssh remote user's password | +| SSH_IDENTIFY_FILE | Optional, required for SSH storage | ssh remote user's private key | +| SSH_PORT | Optional, required for SSH storage | ssh remote server port | +| SSH_REMOTE_PATH | Optional, required for SSH storage | ssh remote path (/home/toto/backup) | + +--- +## Run in Scheduled mode + +This image can be run as CronJob in Kubernetes for a regular backup which makes deployment on Kubernetes easy as Kubernetes has CronJob resources. +For Docker, you need to run it in scheduled mode by adding `--mode scheduled` flag and specify the periodical backup time by adding `--period "0 1 * * *"` flag. + +## Syntax of crontab (field description) + +The syntax is: + +- 1: Minute (0-59) +- 2: Hours (0-23) +- 3: Day (0-31) +- 4: Month (0-12 [12 == December]) +- 5: Day of the week(0-7 [7 or 0 == sunday]) + +Easy to remember format: + +```conf +* * * * * command to be executed +``` + +```conf +- - - - - +| | | | | +| | | | ----- Day of week (0 - 7) (Sunday=0 or 7) +| | | ------- Month (1 - 12) +| | --------- Day of month (1 - 31) +| ----------- Hour (0 - 23) +------------- Minute (0 - 59) +``` + +> At every 30th minute + +```conf +*/30 * * * * +``` +> “At minute 0.” every hour +```conf +0 * * * * +``` + +> “At 01:00.” every day + +```conf +0 1 * * * +``` \ No newline at end of file diff --git a/examples/docker-compose.s3.yaml b/examples/docker-compose.s3.yaml index 4fc2566..15ac3d1 100644 --- a/examples/docker-compose.s3.yaml +++ b/examples/docker-compose.s3.yaml @@ -1,21 +1,31 @@ -version: "3" services: pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. 
image: jkaninda/pg-bkup container_name: pg-bkup - privileged: true - devices: - - "/dev/fuse" command: - /bin/sh - -c - - pg-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name + - pg-bkup backup --storage s3 -d my-database" environment: - DB_PORT=5432 - - DB_HOST=postgress - - DB_USERNAME=userName - - DB_PASSWORD=${DB_PASSWORD} - - ACCESS_KEY=${ACCESS_KEY} - - SECRET_KEY=${SECRET_KEY} - - BUCKET_NAME=${BUCKET_NAME} - - S3_ENDPOINT=https://s3.us-west-2.amazonaws.com \ No newline at end of file + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## AWS configurations + - AWS_S3_ENDPOINT=https://s3.amazonaws.com + - AWS_S3_BUCKET_NAME=backup + - AWS_REGION="us-west-2" + - AWS_ACCESS_KEY=xxxx + - AWS_SECRET_KEY=xxxxx + ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true + - AWS_DISABLE_SSL="false" + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: \ No newline at end of file diff --git a/examples/docker-compose.scheduled.s3.yaml b/examples/docker-compose.scheduled.s3.yaml index 1389018..b69e689 100644 --- a/examples/docker-compose.scheduled.s3.yaml +++ b/examples/docker-compose.scheduled.s3.yaml @@ -1,21 +1,31 @@ -version: "3" services: pg-bkup: + # In production, it is advised to lock your image tag to a proper + # release version instead of using `latest`. + # Check https://github.com/jkaninda/pg-bkup/releases + # for a list of available releases. image: jkaninda/pg-bkup container_name: pg-bkup - privileged: true - devices: - - "/dev/fuse" command: - /bin/sh - -c - - pg-bkup backup --storage s3 --path /mys3_custom_path --dbname database_name --mode scheduled --period "0 1 * * *" + - pg-bkup backup --storage s3 -d my-database --mode scheduled --period "0 1 * * *" environment: - DB_PORT=5432 - - DB_HOST=postgress - - DB_USERNAME=userName - - DB_PASSWORD=${DB_PASSWORD} - - ACCESS_KEY=${ACCESS_KEY} - - SECRET_KEY=${SECRET_KEY} - - BUCKET_NAME=${BUCKET_NAME} - - S3_ENDPOINT=https://s3.us-west-2.amazonaws.com \ No newline at end of file + - DB_HOST=postgres + - DB_NAME=database + - DB_USERNAME=username + - DB_PASSWORD=password + ## AWS configurations + - AWS_S3_ENDPOINT=https://s3.amazonaws.com + - AWS_S3_BUCKET_NAME=backup + - AWS_REGION="us-west-2" + - AWS_ACCESS_KEY=xxxx + - AWS_SECRET_KEY=xxxxx + ## In case you are using S3 alternative such as Minio and your Minio instance is not secured, you change it to true + - AWS_DISABLE_SSL="false" + # pg-bkup container must be connected to the same network with your database + networks: + - web +networks: + web: \ No newline at end of file diff --git a/examples/k8s-job.yaml b/examples/k8s-job.yaml index 43cba37..5aa6704 100644 --- a/examples/k8s-job.yaml +++ b/examples/k8s-job.yaml @@ -1,7 +1,7 @@ -apiVersion: batch/v1 +piVersion: batch/v1 kind: CronJob metadata: - name: pg-bkup-job + name: bkup-job spec: schedule: "0 1 * * *" jobTemplate: @@ -9,32 +9,36 @@ spec: template: spec: containers: - - name: pg-bkup - image: jkaninda/pg-bkup - securityContext: - privileged: true - command: - - /bin/sh - - -c - - pg-bkup backup --storage s3 --path /custom_path - env: - - name: DB_PORT - value: "5432" - - name: DB_HOST - value: "" - - name: DB_NAME - value: "" - - name: DB_USERNAME - value: "" - # Please use secret! 
- - name: DB_PASSWORD - value: "password" - - name: ACCESS_KEY - value: "" - - name: SECRET_KEY - value: "" - - name: BUCKETNAME - value: "" - - name: S3_ENDPOINT - value: "https://s3.us-west-2.amazonaws.com" - restartPolicy: Never \ No newline at end of file + - name: pg-bkup + image: jkaninda/pg-bkup + command: + - /bin/sh + - -c + - pg-bkup backup -s s3 --path /custom_path + env: + - name: DB_PORT + value: "5432" + - name: DB_HOST + value: "" + - name: DB_NAME + value: "" + - name: DB_USERNAME + value: "" + # Please use secret! + - name: DB_PASSWORD + value: "" + - name: ACCESS_KEY + value: "" + - name: AWS_S3_ENDPOINT + value: "https://s3.amazonaws.com" + - name: AWS_S3_BUCKET_NAME + value: "xxx" + - name: AWS_REGION + value: "us-west-2" + - name: AWS_ACCESS_KEY + value: "xxxx" + - name: AWS_SECRET_KEY + value: "xxxx" + - name: AWS_DISABLE_SSL + value: "false" + restartPolicy: OnFailure \ No newline at end of file diff --git a/go.mod b/go.mod index f59aa94..5333b0e 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/hpcloud/tail v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect golang.org/x/crypto v0.18.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/sys v0.22.0 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index 7512d7f..8671785 100644 --- a/go.sum +++ b/go.sum @@ -41,6 +41,8 @@ golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/pkg/scp.go b/pkg/scp.go index 242cc9d..ce73e12 100644 --- a/pkg/scp.go +++ b/pkg/scp.go @@ -8,6 +8,7 @@ import ( "github.com/bramvdbogaerde/go-scp/auth" "github.com/jkaninda/pg-bkup/utils" "golang.org/x/crypto/ssh" + "golang.org/x/exp/slog" "os" "path/filepath" ) @@ -27,7 +28,7 @@ func CopyToRemote(fileName, remotePath string) error { if sshPassword == "" { return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n") } - utils.Info("Accessing the remote server using password, private key is recommended\n") + slog.Warn("Accessing the remote server using password, password is not recommended\n") clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) } @@ -71,7 +72,7 @@ func CopyFromRemote(fileName, remotePath string) error { if sshPassword == "" { return errors.New("SSH_PASSWORD environment variable is required if SSH_IDENTIFY_FILE is empty\n") } - utils.Info("Accessing the remote server using password, private key is recommended\n") + slog.Warn("Accessing the remote server using password, password is not recommended\n") clientConfig, _ = auth.PasswordKey(sshUser, sshPassword, ssh.InsecureIgnoreHostKey()) } diff --git a/utils/utils.go b/utils/utils.go index da69d4e..ecae7cd 100644 --- a/utils/utils.go +++ 
b/utils/utils.go
@@ -10,6 +10,7 @@ import (
 	"bytes"
 	"fmt"
 	"github.com/spf13/cobra"
+	"golang.org/x/exp/slog"
 	"io"
 	"io/fs"
 	"os"
@@ -19,6 +20,9 @@ import (
 func Info(v ...any) {
 	fmt.Println("⒤ ", fmt.Sprint(v...))
 }
+func Warn(msg string, v ...any) {
+	slog.Warn(fmt.Sprintf(msg, v...))
+}
 func Done(v ...any) {
 	fmt.Println("✔ ", fmt.Sprint(v...))
 }
@@ -183,7 +187,8 @@ func GetEnvVariable(envName, oldEnvName string) string {
 	if value == "" {
 		value = os.Getenv(oldEnvName)
 		if value != "" {
-			fmt.Printf("%s is deprecated, please use %s instead!\n", oldEnvName, envName)
+			slog.Warn(fmt.Sprintf("%s is deprecated, please use %s instead!", oldEnvName, envName))
+
 		}
 	}
 	return value