Refactoring of code to meet all Go lint requirements

Jonas Kaninda
2024-12-06 21:27:04 +01:00
parent adf6a478fe
commit 915ebbfb40
31 changed files with 1149 additions and 1678 deletions

View File

@@ -1,6 +1,3 @@
// Package cmd /
package cmd
/*
MIT License
@@ -24,9 +21,11 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (
"github.com/jkaninda/pg-bkup/internal"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/pkg"
"github.com/jkaninda/pg-bkup/utils"
"github.com/spf13/cobra"
)
@@ -37,9 +36,9 @@ var BackupCmd = &cobra.Command{
Example: utils.BackupExample,
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
internal.StartBackup(cmd)
pkg.StartBackup(cmd)
} else {
logger.Fatal(`"backup" accepts no argument %q`, args)
utils.Fatal(`"backup" accepts no argument %q`, args)
}
},

View File

@@ -1,4 +1,3 @@
// Package cmd /
/*
MIT License
@@ -22,11 +21,12 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (
"github.com/jkaninda/pg-bkup/internal"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/pkg"
"github.com/jkaninda/pg-bkup/utils"
"github.com/spf13/cobra"
)
@@ -35,9 +35,9 @@ var MigrateCmd = &cobra.Command{
Short: "Migrate database from a source database to a target database",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
internal.StartMigration(cmd)
pkg.StartMigration(cmd)
} else {
logger.Fatal(`"migrate" accepts no argument %q`, args)
utils.Fatal(`"migrate" accepts no argument %q`, args)
}

View File

@@ -1,8 +1,7 @@
// Package cmd /
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
# Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -22,11 +21,11 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (
"github.com/jkaninda/pg-bkup/internal"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/pkg"
"github.com/jkaninda/pg-bkup/utils"
"github.com/spf13/cobra"
)
@@ -37,9 +36,9 @@ var RestoreCmd = &cobra.Command{
Example: utils.RestoreExample,
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
internal.StartRestore(cmd)
pkg.StartRestore(cmd)
} else {
logger.Fatal(`"restore" accepts no argument %q`, args)
utils.Fatal(`"restore" accepts no argument %q`, args)
}

View File

@@ -1,8 +1,7 @@
// Package cmd /
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
# Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -22,6 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (

View File

@@ -1,4 +1,3 @@
// Package cmd /
/*
MIT License
@@ -22,6 +21,7 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package cmd
import (

View File

@@ -0,0 +1,44 @@
---
title: Azure Blob storage
layout: default
parent: How Tos
nav_order: 5
---
# Azure Blob storage
{: .note }
As described in the local backup section, to switch your backup storage to Azure Blob you need to add the `--storage azure` (`-s azure`) flag.
You can also specify a folder where you want to save your data by adding the `--path my-custom-path` flag.
## Backup to Azure Blob storage
```yml
services:
pg-bkup:
# In production, it is advised to lock your image tag to a proper
# release version instead of using `latest`.
# Check https://github.com/jkaninda/pg-bkup/releases
# for a list of available releases.
image: jkaninda/pg-bkup
container_name: pg-bkup
command: backup --storage azure -d database --path my-custom-path
environment:
- DB_PORT=5432
- DB_HOST=postgres
- DB_NAME=database
- DB_USERNAME=username
- DB_PASSWORD=password
## Azure Blob configurations
- AZURE_STORAGE_CONTAINER_NAME=backup-container
- AZURE_STORAGE_ACCOUNT_NAME=account-name
- AZURE_STORAGE_ACCOUNT_KEY=Ppby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
# pg-bkup container must be connected to the same network as your database
networks:
- web
networks:
web:
```
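
For a one-off backup outside Docker Compose, the same configuration can be passed to a plain `docker run` invocation. The sketch below mirrors the Compose example above (same image, command, and environment variables); the network name and the credential values are placeholders to adapt to your own setup.
```shell
docker run --rm --network web \
  -e DB_PORT=5432 \
  -e DB_HOST=postgres \
  -e DB_NAME=database \
  -e DB_USERNAME=username \
  -e DB_PASSWORD=password \
  -e AZURE_STORAGE_CONTAINER_NAME=backup-container \
  -e AZURE_STORAGE_ACCOUNT_NAME=account-name \
  -e AZURE_STORAGE_ACCOUNT_KEY=account-key \
  jkaninda/pg-bkup backup --storage azure -d database --path my-custom-path
```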

go.mod
View File

@@ -3,30 +3,35 @@ module github.com/jkaninda/pg-bkup
go 1.23.2
require (
github.com/ProtonMail/gopenpgp/v2 v2.7.5
github.com/aws/aws-sdk-go v1.55.5
github.com/bramvdbogaerde/go-scp v1.5.0
github.com/go-mail/mail v2.3.1+incompatible
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221
github.com/jlaffaye/ftp v0.2.0
github.com/jkaninda/go-storage v0.1.2
github.com/robfig/cron/v3 v3.0.1
github.com/spf13/cobra v1.8.1
golang.org/x/crypto v0.28.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 // indirect
github.com/ProtonMail/go-crypto v1.0.0 // indirect
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
github.com/ProtonMail/gopenpgp/v2 v2.7.5 // indirect
github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/bramvdbogaerde/go-scp v1.5.0 // indirect
github.com/cloudflare/circl v1.5.0 // indirect
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jlaffaye/ftp v0.2.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/net v0.29.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/mail.v2 v2.3.1 // indirect
)

go.sum
View File

@@ -1,3 +1,15 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
@@ -13,14 +25,16 @@ github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7N
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys=
github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ=
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-mail/mail v2.3.1+incompatible h1:UzNOn0k5lpfVtO31cK3hn6I4VEVGhe3lX8AJBAxXExM=
github.com/go-mail/mail v2.3.1+incompatible/go.mod h1:VPWjmmNyRsWXQZHVHT3g0YbIINUkSmuKOiLIDkWbL6M=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -30,20 +44,30 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221 h1:AwkCf7el1kzeCJ89A+gUAK0ero5JYnvLOKsYMzq+rs4=
github.com/jkaninda/encryptor v0.0.0-20241013064832-ed4bd6a1b221/go.mod h1:9F8ZJ+ZXE8DZBo77+aneGj8LMjrYXX6eFUCC/uqZOUo=
github.com/jkaninda/go-storage v0.1.1 h1:vjpdD/fh39S5HGyfHvLE5HGYOEPIukINlOX3OnM3GW4=
github.com/jkaninda/go-storage v0.1.1/go.mod h1:7VK5gQISQaLxtLfBtc+een8spcgLVSBAKTRuyF1N81I=
github.com/jkaninda/go-storage v0.1.2 h1:d7+TRPjmHXdSqO0wne3KAB8zt9ih8lf5D8aL4n7/Dds=
github.com/jkaninda/go-storage v0.1.2/go.mod h1:zVRnLprBk/9AUz2+za6Y03MgoNYrqKLy3edVtjqMaps=
github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
@@ -51,8 +75,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
@@ -60,8 +84,6 @@ golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2Uz
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -70,6 +92,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -84,15 +108,13 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -101,8 +123,6 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
@@ -110,8 +130,9 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk=
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=

View File

@@ -1,551 +0,0 @@
package internal
import (
"fmt"
"github.com/jkaninda/encryptor"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/pkg/storage/ftp"
"github.com/jkaninda/pg-bkup/pkg/storage/local"
"github.com/jkaninda/pg-bkup/pkg/storage/s3"
"github.com/jkaninda/pg-bkup/pkg/storage/ssh"
"github.com/jkaninda/pg-bkup/utils"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"log"
"os"
"os/exec"
"path/filepath"
"time"
)
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
func StartBackup(cmd *cobra.Command) {
intro()
// Initialize backup configs
config := initBackupConfig(cmd)
// Load backup configuration file
configFile, err := loadConfigFile()
if err != nil {
dbConf = initDbConfig(cmd)
if config.cronExpression == "" {
BackupTask(dbConf, config)
} else {
if utils.IsValidCronExpression(config.cronExpression) {
scheduledMode(dbConf, config)
} else {
logger.Fatal("Cron expression is not valid: %s", config.cronExpression)
}
}
} else {
startMultiBackup(config, configFile)
}
}
// scheduledMode Runs backup in scheduled mode
func scheduledMode(db *dbConfig, config *BackupConfig) {
logger.Info("Running in Scheduled mode")
logger.Info("Backup cron expression: %s", config.cronExpression)
logger.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
logger.Info("Storage type %s ", config.storage)
// Test backup
logger.Info("Testing backup configurations...")
testDatabaseConnection(db)
logger.Info("Testing backup configurations...done")
logger.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()
_, err := c.AddFunc(config.cronExpression, func() {
BackupTask(db, config)
logger.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
})
if err != nil {
return
}
// Start the cron scheduler
c.Start()
logger.Info("Creating backup job...done")
logger.Info("Backup job started")
defer c.Stop()
select {}
}
// multiBackupTask backup multi database
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
for _, db := range databases {
// Check if path is defined in config file
if db.Path != "" {
bkConfig.remotePath = db.Path
}
BackupTask(getDatabase(db), bkConfig)
}
}
// BackupTask backups database
func BackupTask(db *dbConfig, config *BackupConfig) {
logger.Info("Starting backup task...")
// Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
if config.disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
}
config.backupFileName = backupFileName
switch config.storage {
case "local":
localBackup(db, config)
case "s3", "S3":
s3Backup(db, config)
case "ssh", "SSH", "remote":
sshBackup(db, config)
case "ftp", "FTP":
ftpBackup(db, config)
default:
localBackup(db, config)
}
}
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
logger.Info("Starting backup task...")
conf, err := readConf(configFile)
if err != nil {
logger.Fatal("Error reading config file: %s", err)
}
// Check if cronExpression is defined in config file
if conf.CronExpression != "" {
bkConfig.cronExpression = conf.CronExpression
}
if len(conf.Databases) == 0 {
logger.Fatal("No databases found")
}
// Check if cronExpression is defined
if bkConfig.cronExpression == "" {
multiBackupTask(conf.Databases, bkConfig)
} else {
// Check if cronExpression is valid
if utils.IsValidCronExpression(bkConfig.cronExpression) {
logger.Info("Running backup in Scheduled mode")
logger.Info("Backup cron expression: %s", bkConfig.cronExpression)
logger.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
logger.Info("Storage type %s ", bkConfig.storage)
// Test backup
logger.Info("Testing backup configurations...")
for _, db := range conf.Databases {
testDatabaseConnection(getDatabase(db))
}
logger.Info("Testing backup configurations...done")
logger.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()
_, err := c.AddFunc(bkConfig.cronExpression, func() {
multiBackupTask(conf.Databases, bkConfig)
logger.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
})
if err != nil {
return
}
// Start the cron scheduler
c.Start()
logger.Info("Creating backup job...done")
logger.Info("Backup job started")
defer c.Stop()
select {}
} else {
logger.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
}
}
}
// BackupDatabase backup database
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
storagePath = os.Getenv("STORAGE_PATH")
logger.Info("Starting database backup...")
err := os.Setenv("PGPASSWORD", db.dbPassword)
if err != nil {
return
}
testDatabaseConnection(db)
// Backup Database database
logger.Info("Backing up database...")
// Verify is compression is disabled
if disableCompression {
// Execute pg_dump
cmd := exec.Command("pg_dump",
"-h", db.dbHost,
"-p", db.dbPort,
"-U", db.dbUserName,
"-d", db.dbName,
)
output, err := cmd.Output()
if err != nil {
log.Fatal(err)
}
// save output
file, err := os.Create(filepath.Join(tmpPath, backupFileName))
if err != nil {
log.Fatal(err)
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
return
}
}(file)
_, err = file.Write(output)
if err != nil {
log.Fatal(err)
}
} else {
// Execute pg_dump
cmd := exec.Command("pg_dump",
"-h", db.dbHost,
"-p", db.dbPort,
"-U", db.dbUserName,
"-d", db.dbName,
)
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
}
gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout
// save output
gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
err2 := gzipCmd.Start()
if err2 != nil {
return
}
if err != nil {
log.Fatal(err)
}
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
if err := gzipCmd.Wait(); err != nil {
log.Fatal(err)
}
}
logger.Info("Database has been backed up")
}
func localBackup(db *dbConfig, config *BackupConfig) {
logger.Info("Backup database to local storage")
startTime = time.Now().Format(utils.TimeFormat())
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
}
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
logger.Info("Backup name is %s", finalFileName)
localStorage := local.NewStorage(local.Config{
LocalPath: tmpPath,
RemotePath: storagePath,
})
err = localStorage.Copy(finalFileName)
if err != nil {
logger.Fatal("Error copying backup file: %s", err)
}
logger.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(storagePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete old backup
if config.prune {
err = localStorage.Prune(config.backupRetention)
if err != nil {
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
// Delete temp
deleteTemp()
logger.Info("Backup completed successfully")
}
func s3Backup(db *dbConfig, config *BackupConfig) {
logger.Info("Backup database to s3 storage")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
logger.Info("Uploading backup archive to remote storage S3 ... ")
awsConfig := initAWSConfig()
if config.remotePath == "" {
config.remotePath = awsConfig.remotePath
}
logger.Info("Backup name is %s", finalFileName)
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath,
LocalPath: tmpPath,
})
if err != nil {
logger.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.Copy(finalFileName)
if err != nil {
logger.Fatal("Error copying backup file: %s", err)
}
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, config.backupFileName))
if err != nil {
fmt.Println("Error deleting file: ", err)
}
// Delete old backup
if config.prune {
err := s3Storage.Prune(config.backupRetention)
if err != nil {
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
logger.Info("Uploading backup archive to remote storage S3 ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
logger.Info("Backup completed successfully")
}
func sshBackup(db *dbConfig, config *BackupConfig) {
logger.Info("Backup database to Remote server")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
logger.Info("Uploading backup archive to remote storage ... ")
logger.Info("Backup name is %s", finalFileName)
sshConfig, err := loadSSHConfig()
if err != nil {
logger.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
logger.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.Copy(finalFileName)
if err != nil {
logger.Fatal("Error copying backup file: %s", err)
}
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
logger.Error("Error deleting file: %v", err)
}
if config.prune {
err := sshStorage.Prune(config.backupRetention)
if err != nil {
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
logger.Info("Uploading backup archive to remote storage ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
logger.Info("Backup completed successfully")
}
func ftpBackup(db *dbConfig, config *BackupConfig) {
logger.Info("Backup database to the remote FTP server")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
logger.Info("Uploading backup archive to the remote FTP server ... ")
logger.Info("Backup name is %s", finalFileName)
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
logger.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.Copy(finalFileName)
if err != nil {
logger.Fatal("Error copying backup file: %s", err)
}
logger.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
logger.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
logger.Error("Error deleting file: %v", err)
}
if config.prune {
err := ftpStorage.Prune(config.backupRetention)
if err != nil {
logger.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
logger.Info("Uploading backup archive to the remote FTP server ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
logger.Info("Backup completed successfully")
}
func encryptBackup(config *BackupConfig) {
backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
if err != nil {
logger.Fatal("Error reading backup file: %s ", err)
}
if config.usingKey {
logger.Info("Encrypting backup using public key...")
pubKey, err := os.ReadFile(config.publicKey)
if err != nil {
logger.Fatal("Error reading public key: %s ", err)
}
err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
if err != nil {
logger.Fatal("Error encrypting backup file: %v ", err)
}
logger.Info("Encrypting backup using public key...done")
} else if config.passphrase != "" {
logger.Info("Encrypting backup using passphrase...")
err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
if err != nil {
logger.Fatal("error during encrypting backup %v", err)
}
logger.Info("Encrypting backup using passphrase...done")
}
}

View File

@@ -1,232 +0,0 @@
// Package internal /
package internal
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import (
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/pkg/storage/ftp"
"github.com/jkaninda/pg-bkup/pkg/storage/local"
"github.com/jkaninda/pg-bkup/pkg/storage/s3"
"github.com/jkaninda/pg-bkup/pkg/storage/ssh"
"os"
"os/exec"
"path/filepath"
"github.com/jkaninda/encryptor"
"github.com/jkaninda/pg-bkup/utils"
"github.com/spf13/cobra"
)
func StartRestore(cmd *cobra.Command) {
intro()
dbConf = initDbConfig(cmd)
restoreConf := initRestoreConfig(cmd)
switch restoreConf.storage {
case "local":
localRestore(dbConf, restoreConf)
case "s3", "S3":
restoreFromS3(dbConf, restoreConf)
case "ssh", "SSH", "remote":
restoreFromRemote(dbConf, restoreConf)
case "ftp", "FTP":
restoreFromFTP(dbConf, restoreConf)
default:
localRestore(dbConf, restoreConf)
}
}
func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
logger.Info("Restore database from local")
localStorage := local.NewStorage(local.Config{
RemotePath: storagePath,
LocalPath: tmpPath,
})
err := localStorage.CopyFrom(restoreConf.file)
if err != nil {
logger.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(dbConf, restoreConf)
}
func restoreFromS3(db *dbConfig, conf *RestoreConfig) {
logger.Info("Restore database from s3")
awsConfig := initAWSConfig()
if conf.remotePath == "" {
conf.remotePath = awsConfig.remotePath
}
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath,
LocalPath: tmpPath,
})
if err != nil {
logger.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.CopyFrom(conf.file)
if err != nil {
logger.Fatal("Error download file from S3 storage: %s", err)
}
RestoreDatabase(db, conf)
}
func restoreFromRemote(db *dbConfig, conf *RestoreConfig) {
logger.Info("Restore database from remote server")
sshConfig, err := loadSSHConfig()
if err != nil {
logger.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
IdentifyFile: sshConfig.identifyFile,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
logger.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.CopyFrom(conf.file)
if err != nil {
logger.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}
func restoreFromFTP(db *dbConfig, conf *RestoreConfig) {
logger.Info("Restore database from FTP server")
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
logger.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.CopyFrom(conf.file)
if err != nil {
logger.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}
// RestoreDatabase restore database
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
if conf.file == "" {
logger.Fatal("Error, file required")
}
extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file))
outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file))
if err != nil {
logger.Fatal("Error reading backup file: %s ", err)
}
if extension == ".gpg" {
if conf.usingKey {
logger.Info("Decrypting backup using private key...")
logger.Warn("Backup decryption using a private key is not fully supported")
prKey, err := os.ReadFile(conf.privateKey)
if err != nil {
logger.Fatal("Error reading public key: %s ", err)
}
err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
if err != nil {
logger.Fatal("error during decrypting backup %v", err)
}
logger.Info("Decrypting backup using private key...done")
} else {
if conf.passphrase == "" {
logger.Error("Error, passphrase or private key required")
logger.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
} else {
logger.Info("Decrypting backup using passphrase...")
// decryptWithGPG file
err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
if err != nil {
logger.Fatal("Error decrypting file %s %v", file, err)
}
logger.Info("Decrypting backup using passphrase...done")
// Update file name
conf.file = RemoveLastExtension(file)
}
}
}
if utils.FileExists(filepath.Join(tmpPath, conf.file)) {
err := os.Setenv("PGPASSWORD", db.dbPassword)
if err != nil {
return
}
testDatabaseConnection(db)
logger.Info("Restoring database...")
extension := filepath.Ext(conf.file)
// Restore from compressed file / .sql.gz
if extension == ".gz" {
str := "zcat " + filepath.Join(tmpPath, conf.file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
logger.Fatal("Error, in restoring the database %v", err)
}
logger.Info("Restoring database... done")
logger.Info("Database has been restored")
// Delete temp
deleteTemp()
} else if extension == ".sql" {
// Restore from sql file
str := "cat " + filepath.Join(tmpPath, conf.file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
logger.Fatal("Error in restoring the database %v", err)
}
logger.Info("Restoring database... done")
logger.Info("Database has been restored")
// Delete temp
deleteTemp()
} else {
logger.Fatal("Unknown file extension: %s", extension)
}
} else {
logger.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
}
}

pkg/azure.go (new file)
View File

@@ -0,0 +1,96 @@
package pkg
import (
"fmt"
"github.com/jkaninda/go-storage/pkg/azure"
"github.com/jkaninda/pg-bkup/utils"
"os"
"path/filepath"
"time"
)
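// azureBackup backs up the database and uploads the archive to Azure Blob storage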
func azureBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to Azure Blob storage ...")
utils.Info("Backup name is %s", finalFileName)
azureConfig := loadAzureConfig()
azureStorage, err := azure.NewStorage(azure.Config{
ContainerName: azureConfig.containerName,
AccountName: azureConfig.accountName,
AccountKey: azureConfig.accountKey,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating Azure Blob storage: %s", err)
}
err = azureStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
if config.prune {
err := azureStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Uploading backup archive to Azure Blob storage ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
}
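// azureRestore downloads the backup file from Azure Blob storage and restores the database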
func azureRestore(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from Azure Blob storage")
azureConfig := loadAzureConfig()
azureStorage, err := azure.NewStorage(azure.Config{
ContainerName: azureConfig.containerName,
AccountName: azureConfig.accountName,
AccountKey: azureConfig.accountKey,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = azureStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error downloading backup file: %s", err)
}
RestoreDatabase(db, conf)
}

pkg/backup.go (new file)
View File

@@ -0,0 +1,340 @@
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"fmt"
"github.com/jkaninda/encryptor"
"github.com/jkaninda/go-storage/pkg/local"
"github.com/jkaninda/pg-bkup/utils"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"log"
"os"
"os/exec"
"path/filepath"
"time"
)
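// StartBackup initializes the backup configuration and runs the backup once or in scheduled mode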
func StartBackup(cmd *cobra.Command) {
intro()
// Initialize backup configs
config := initBackupConfig(cmd)
// Load backup configuration file
configFile, err := loadConfigFile()
if err != nil {
dbConf = initDbConfig(cmd)
if config.cronExpression == "" {
BackupTask(dbConf, config)
} else {
if utils.IsValidCronExpression(config.cronExpression) {
scheduledMode(dbConf, config)
} else {
utils.Fatal("Cron expression is not valid: %s", config.cronExpression)
}
}
} else {
startMultiBackup(config, configFile)
}
}
// scheduledMode Runs backup in scheduled mode
func scheduledMode(db *dbConfig, config *BackupConfig) {
utils.Info("Running in Scheduled mode")
utils.Info("Backup cron expression: %s", config.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", config.storage)
// Test backup
utils.Info("Testing backup configurations...")
testDatabaseConnection(db)
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()
_, err := c.AddFunc(config.cronExpression, func() {
BackupTask(db, config)
utils.Info("Next backup time is: %v", utils.CronNextTime(config.cronExpression).Format(timeFormat))
})
if err != nil {
return
}
// Start the cron scheduler
c.Start()
utils.Info("Creating backup job...done")
utils.Info("Backup job started")
defer c.Stop()
select {}
}
// multiBackupTask backup multi database
func multiBackupTask(databases []Database, bkConfig *BackupConfig) {
for _, db := range databases {
// Check if path is defined in config file
if db.Path != "" {
bkConfig.remotePath = db.Path
}
BackupTask(getDatabase(db), bkConfig)
}
}
// BackupTask backups database
func BackupTask(db *dbConfig, config *BackupConfig) {
utils.Info("Starting backup task...")
// Generate file name
backupFileName := fmt.Sprintf("%s_%s.sql.gz", db.dbName, time.Now().Format("20060102_150405"))
if config.disableCompression {
backupFileName = fmt.Sprintf("%s_%s.sql", db.dbName, time.Now().Format("20060102_150405"))
}
config.backupFileName = backupFileName
switch config.storage {
case "local":
localBackup(db, config)
case "s3", "S3":
s3Backup(db, config)
case "ssh", "SSH", "remote":
sshBackup(db, config)
case "ftp", "FTP":
ftpBackup(db, config)
case "azure":
azureBackup(db, config)
default:
localBackup(db, config)
}
}
func startMultiBackup(bkConfig *BackupConfig, configFile string) {
utils.Info("Starting backup task...")
conf, err := readConf(configFile)
if err != nil {
utils.Fatal("Error reading config file: %s", err)
}
// Check if cronExpression is defined in config file
if conf.CronExpression != "" {
bkConfig.cronExpression = conf.CronExpression
}
if len(conf.Databases) == 0 {
utils.Fatal("No databases found")
}
// Check if cronExpression is defined
if bkConfig.cronExpression == "" {
multiBackupTask(conf.Databases, bkConfig)
} else {
// Check if cronExpression is valid
if utils.IsValidCronExpression(bkConfig.cronExpression) {
utils.Info("Running backup in Scheduled mode")
utils.Info("Backup cron expression: %s", bkConfig.cronExpression)
utils.Info("The next scheduled time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
utils.Info("Storage type %s ", bkConfig.storage)
// Test backup
utils.Info("Testing backup configurations...")
for _, db := range conf.Databases {
testDatabaseConnection(getDatabase(db))
}
utils.Info("Testing backup configurations...done")
utils.Info("Creating backup job...")
// Create a new cron instance
c := cron.New()
_, err := c.AddFunc(bkConfig.cronExpression, func() {
multiBackupTask(conf.Databases, bkConfig)
utils.Info("Next backup time is: %v", utils.CronNextTime(bkConfig.cronExpression).Format(timeFormat))
})
if err != nil {
return
}
// Start the cron scheduler
c.Start()
utils.Info("Creating backup job...done")
utils.Info("Backup job started")
defer c.Stop()
select {}
} else {
utils.Fatal("Cron expression is not valid: %s", bkConfig.cronExpression)
}
}
}
// BackupDatabase backup database
func BackupDatabase(db *dbConfig, backupFileName string, disableCompression bool) {
storagePath = os.Getenv("STORAGE_PATH")
utils.Info("Starting database backup...")
err := os.Setenv("PGPASSWORD", db.dbPassword)
if err != nil {
return
}
testDatabaseConnection(db)
// Back up the database
utils.Info("Backing up database...")
// Verify if compression is disabled
if disableCompression {
// Execute pg_dump
cmd := exec.Command("pg_dump",
"-h", db.dbHost,
"-p", db.dbPort,
"-U", db.dbUserName,
"-d", db.dbName,
)
output, err := cmd.Output()
if err != nil {
log.Fatal(err)
}
// save output
file, err := os.Create(filepath.Join(tmpPath, backupFileName))
if err != nil {
log.Fatal(err)
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
return
}
}(file)
_, err = file.Write(output)
if err != nil {
log.Fatal(err)
}
} else {
// Execute pg_dump
cmd := exec.Command("pg_dump",
"-h", db.dbHost,
"-p", db.dbPort,
"-U", db.dbUserName,
"-d", db.dbName,
)
stdout, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
}
gzipCmd := exec.Command("gzip")
gzipCmd.Stdin = stdout
// save output
gzipCmd.Stdout, err = os.Create(filepath.Join(tmpPath, backupFileName))
err2 := gzipCmd.Start()
if err2 != nil {
return
}
if err != nil {
log.Fatal(err)
}
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
if err := gzipCmd.Wait(); err != nil {
log.Fatal(err)
}
}
utils.Info("Database has been backed up")
}
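// localBackup backs up the database and stores the archive on local storage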
func localBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to local storage")
startTime = time.Now().Format(utils.TimeFormat())
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, gpgExtension)
}
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
utils.Info("Backup name is %s", finalFileName)
localStorage := local.NewStorage(local.Config{
LocalPath: tmpPath,
RemotePath: storagePath,
})
err = localStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(storagePath, finalFileName))
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(storagePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete old backup
if config.prune {
err = localStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
}
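// encryptBackup encrypts the backup file using a GPG public key or a passphrase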
func encryptBackup(config *BackupConfig) {
backupFile, err := os.ReadFile(filepath.Join(tmpPath, config.backupFileName))
outputFile := fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension)
if err != nil {
utils.Fatal("Error reading backup file: %s ", err)
}
if config.usingKey {
utils.Info("Encrypting backup using public key...")
pubKey, err := os.ReadFile(config.publicKey)
if err != nil {
utils.Fatal("Error reading public key: %s ", err)
}
err = encryptor.EncryptWithPublicKey(backupFile, fmt.Sprintf("%s.%s", filepath.Join(tmpPath, config.backupFileName), gpgExtension), pubKey)
if err != nil {
utils.Fatal("Error encrypting backup file: %v ", err)
}
utils.Info("Encrypting backup using public key...done")
} else if config.passphrase != "" {
utils.Info("Encrypting backup using passphrase...")
err := encryptor.Encrypt(backupFile, outputFile, config.passphrase)
if err != nil {
utils.Fatal("error during encrypting backup %v", err)
}
utils.Info("Encrypting backup using passphrase...done")
}
}

View File

@@ -1,4 +1,3 @@
// Package internal /
/*
MIT License
@@ -22,11 +21,11 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package internal
package pkg
import (
"fmt"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/utils"
"github.com/spf13/cobra"
"os"
@@ -84,6 +83,11 @@ type FTPConfig struct {
port string
remotePath string
}
type AzureConfig struct {
accountName string
accountKey string
containerName string
}
// SSHConfig holds the SSH connection details
type SSHConfig struct {
@@ -116,8 +120,8 @@ func initDbConfig(cmd *cobra.Command) *dbConfig {
err := utils.CheckEnvVars(dbHVars)
if err != nil {
logger.Error("Please make sure all required environment variables for database are set")
logger.Fatal("Error checking environment variables: %s", err)
utils.Error("Please make sure all required environment variables for database are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &dConf
}
@@ -159,11 +163,25 @@ func loadFtpConfig() *FTPConfig {
fConfig.remotePath = os.Getenv("REMOTE_PATH")
err := utils.CheckEnvVars(ftpVars)
if err != nil {
logger.Error("Please make sure all required environment variables for FTP are set")
logger.Fatal("Error missing environment variables: %s", err)
utils.Error("Please make sure all required environment variables for FTP are set")
utils.Fatal("Error missing environment variables: %s", err)
}
return &fConfig
}
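// loadAzureConfig loads the Azure Blob storage configuration from environment variables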
func loadAzureConfig() *AzureConfig {
// Initialize data configs
aConfig := AzureConfig{}
aConfig.containerName = os.Getenv("AZURE_STORAGE_CONTAINER_NAME")
aConfig.accountName = os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
aConfig.accountKey = os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")
err := utils.CheckEnvVars(azureVars)
if err != nil {
utils.Error("Please make sure all required environment variables for Azure Blob storage are set")
utils.Fatal("Error missing environment variables: %s", err)
}
return &aConfig
}
func initAWSConfig() *AWSConfig {
// Initialize AWS configs
aConfig := AWSConfig{}
@@ -186,8 +204,8 @@ func initAWSConfig() *AWSConfig {
aConfig.forcePathStyle = forcePathStyle
err = utils.CheckEnvVars(awsVars)
if err != nil {
logger.Error("Please make sure all required environment variables for AWS S3 are set")
logger.Fatal("Error checking environment variables: %s", err)
utils.Error("Please make sure all required environment variables for AWS S3 are set")
utils.Fatal("Error checking environment variables: %s", err)
}
return &aConfig
}
@@ -284,8 +302,8 @@ func initTargetDbConfig() *targetDbConfig {
err := utils.CheckEnvVars(tdbRVars)
if err != nil {
logger.Error("Please make sure all required environment variables for the target database are set")
logger.Fatal("Error checking target database environment variables: %s", err)
utils.Error("Please make sure all required environment variables for the target database are set")
utils.Fatal("Error checking target database environment variables: %s", err)
}
return &tdbConfig
}

View File

@@ -1,4 +1,3 @@
// Package internal /
/*
MIT License
@@ -22,12 +21,12 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package internal
package pkg
import (
"bytes"
"fmt"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/utils"
"gopkg.in/yaml.v3"
"os"
@@ -44,7 +43,7 @@ func intro() {
// copyToTmp copy file to temporary directory
func deleteTemp() {
logger.Info("Deleting %s ...", tmpPath)
utils.Info("Deleting %s ...", tmpPath)
err := filepath.Walk(tmpPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
@@ -60,16 +59,16 @@ func deleteTemp() {
return nil
})
if err != nil {
logger.Error("Error deleting files: %v", err)
utils.Error("Error deleting files: %v", err)
} else {
logger.Info("Deleting %s ... done", tmpPath)
utils.Info("Deleting %s ... done", tmpPath)
}
}
// testDatabaseConnection tests the database connection
func testDatabaseConnection(db *dbConfig) {
logger.Info("Connecting to %s database ...", db.dbName)
utils.Info("Connecting to %s database ...", db.dbName)
// Test database connection
query := "SELECT version();"
@@ -94,10 +93,10 @@ func testDatabaseConnection(db *dbConfig) {
// Run the command and capture any errors
err = cmd.Run()
if err != nil {
logger.Fatal("Error running psql command: %v\nOutput: %s\n", err, out.String())
utils.Fatal("Error running psql command: %v\nOutput: %s\n", err, out.String())
return
}
logger.Info("Successfully connected to %s database", db.dbName)
utils.Info("Successfully connected to %s database", db.dbName)
}
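
testDatabaseConnection shells out to psql with a trivial query and treats any non-zero exit as a failed connection. A self-contained sketch of that approach, assuming placeholder connection details and a psql binary on PATH:

package main

import (
    "bytes"
    "fmt"
    "os"
    "os/exec"
)

// checkConnection follows the same approach: export PGPASSWORD, run a
// trivial query through psql, and treat a non-zero exit as a failure.
// Connection details here are placeholders.
func checkConnection(host, port, user, dbName, password string) error {
    if err := os.Setenv("PGPASSWORD", password); err != nil {
        return err
    }
    var out bytes.Buffer
    cmd := exec.Command("psql", "-h", host, "-p", port, "-U", user,
        "-d", dbName, "-c", "SELECT version();")
    cmd.Stdout = &out
    cmd.Stderr = &out
    if err := cmd.Run(); err != nil {
        return fmt.Errorf("psql failed: %v\n%s", err, out.String())
    }
    return nil
}

func main() {
    if err := checkConnection("localhost", "5432", "postgres", "testdb", "secret"); err != nil {
        fmt.Println(err)
    }
}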

View File

@@ -1,26 +0,0 @@
package logger
/*
MIT License
# Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
const traceLog = "trace"

View File

@@ -1,4 +1,3 @@
// Package internal /
/*
MIT License
@@ -22,18 +21,19 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package internal
package pkg
import (
"fmt"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/jkaninda/pg-bkup/utils"
"github.com/spf13/cobra"
"time"
)
func StartMigration(cmd *cobra.Command) {
intro()
logger.Info("Starting database migration...")
utils.Info("Starting database migration...")
// Get DB config
dbConf = initDbConfig(cmd)
targetDbConf = initTargetDbConfig()
@@ -53,8 +53,8 @@ func StartMigration(cmd *cobra.Command) {
// Backup source Database
BackupDatabase(dbConf, backupFileName, true)
// Restore source database into target database
logger.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
utils.Info("Restoring [%s] database into [%s] database...", dbConf.dbName, targetDbConf.targetDbName)
RestoreDatabase(&newDbConfig, conf)
logger.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
logger.Info("Database migration completed.")
utils.Info("[%s] database has been restored into [%s] database", dbConf.dbName, targetDbConf.targetDbName)
utils.Info("Database migration completed.")
}

216
pkg/remote.go Normal file
View File

@@ -0,0 +1,216 @@
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"fmt"
"github.com/jkaninda/go-storage/pkg/ftp"
"github.com/jkaninda/go-storage/pkg/ssh"
"github.com/jkaninda/pg-bkup/utils"
"os"
"path/filepath"
"time"
)
func sshBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to Remote server")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage ... ")
utils.Info("Backup name is %s", finalFileName)
sshConfig, err := loadSSHConfig()
if err != nil {
utils.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
if config.prune {
err := sshStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Uploading backup archive to remote storage ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
}
func ftpBackup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to the remote FTP server")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to the remote FTP server ... ")
utils.Info("Backup name is %s", finalFileName)
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: config.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
if config.prune {
err := ftpStorage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Uploading backup archive to the remote FTP server ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
}
func remoteRestore(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from remote server")
sshConfig, err := loadSSHConfig()
if err != nil {
utils.Fatal("Error loading ssh config: %s", err)
}
sshStorage, err := ssh.NewStorage(ssh.Config{
Host: sshConfig.hostName,
Port: sshConfig.port,
User: sshConfig.user,
Password: sshConfig.password,
IdentifyFile: sshConfig.identifyFile,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = sshStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}
func ftpRestore(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from FTP server")
ftpConfig := loadFtpConfig()
ftpStorage, err := ftp.NewStorage(ftp.Config{
Host: ftpConfig.host,
Port: ftpConfig.port,
User: ftpConfig.user,
Password: ftpConfig.password,
RemotePath: conf.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating SSH storage: %s", err)
}
err = ftpStorage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(db, conf)
}

159
pkg/restore.go Normal file
View File

@@ -0,0 +1,159 @@
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"github.com/jkaninda/go-storage/pkg/local"
"os"
"os/exec"
"path/filepath"
"github.com/jkaninda/encryptor"
"github.com/jkaninda/pg-bkup/utils"
"github.com/spf13/cobra"
)
func StartRestore(cmd *cobra.Command) {
intro()
dbConf = initDbConfig(cmd)
restoreConf := initRestoreConfig(cmd)
switch restoreConf.storage {
case "local":
localRestore(dbConf, restoreConf)
case "s3", "S3":
s3Restore(dbConf, restoreConf)
case "ssh", "SSH", "remote":
remoteRestore(dbConf, restoreConf)
case "ftp", "FTP":
ftpRestore(dbConf, restoreConf)
case "azure":
azureRestore(dbConf, restoreConf)
default:
localRestore(dbConf, restoreConf)
}
}
func localRestore(dbConf *dbConfig, restoreConf *RestoreConfig) {
utils.Info("Restore database from local")
localStorage := local.NewStorage(local.Config{
RemotePath: storagePath,
LocalPath: tmpPath,
})
err := localStorage.CopyFrom(restoreConf.file)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
RestoreDatabase(dbConf, restoreConf)
}
// RestoreDatabase restores the database
func RestoreDatabase(db *dbConfig, conf *RestoreConfig) {
if conf.file == "" {
utils.Fatal("Error, file required")
}
extension := filepath.Ext(filepath.Join(tmpPath, conf.file))
rFile, err := os.ReadFile(filepath.Join(tmpPath, conf.file))
outputFile := RemoveLastExtension(filepath.Join(tmpPath, conf.file))
if err != nil {
utils.Fatal("Error reading backup file: %s ", err)
}
if extension == ".gpg" {
if conf.usingKey {
utils.Info("Decrypting backup using private key...")
utils.Warn("Backup decryption using a private key is not fully supported")
prKey, err := os.ReadFile(conf.privateKey)
if err != nil {
utils.Fatal("Error reading public key: %s ", err)
}
err = encryptor.DecryptWithPrivateKey(rFile, outputFile, prKey, conf.passphrase)
if err != nil {
utils.Fatal("error during decrypting backup %v", err)
}
utils.Info("Decrypting backup using private key...done")
} else {
if conf.passphrase == "" {
utils.Error("Error, passphrase or private key required")
utils.Fatal("Your file seems to be a GPG file.\nYou need to provide GPG keys. GPG_PASSPHRASE or GPG_PRIVATE_KEY environment variable is required.")
} else {
utils.Info("Decrypting backup using passphrase...")
// decryptWithGPG file
err := encryptor.Decrypt(rFile, outputFile, conf.passphrase)
if err != nil {
utils.Fatal("Error decrypting file %s %v", file, err)
}
utils.Info("Decrypting backup using passphrase...done")
// Update file name
conf.file = RemoveLastExtension(file)
}
}
}
if utils.FileExists(filepath.Join(tmpPath, conf.file)) {
err := os.Setenv("PGPASSWORD", db.dbPassword)
if err != nil {
return
}
testDatabaseConnection(db)
utils.Info("Restoring database...")
extension := filepath.Ext(conf.file)
// Restore from compressed file / .sql.gz
if extension == ".gz" {
str := "zcat " + filepath.Join(tmpPath, conf.file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
utils.Fatal("Error, in restoring the database %v", err)
}
utils.Info("Restoring database... done")
utils.Info("Database has been restored")
// Delete temp
deleteTemp()
} else if extension == ".sql" {
// Restore from sql file
str := "cat " + filepath.Join(tmpPath, conf.file) + " | psql -h " + db.dbHost + " -p " + db.dbPort + " -U " + db.dbUserName + " -v -d " + db.dbName
_, err := exec.Command("sh", "-c", str).Output()
if err != nil {
utils.Fatal("Error in restoring the database %v", err)
}
utils.Info("Restoring database... done")
utils.Info("Database has been restored")
// Delete temp
deleteTemp()
} else {
utils.Fatal("Unknown file extension: %s", extension)
}
} else {
utils.Fatal("File not found in %s", filepath.Join(tmpPath, conf.file))
}
}
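
RestoreDatabase streams the dump into psql through a shell, using zcat for .sql.gz files and cat for plain .sql files. A minimal standalone sketch of that pipeline, with placeholder host, port, user, database and dump path:

package main

import (
    "fmt"
    "os"
    "os/exec"
)

// restoreFromDump mirrors the shell pipeline built above: the dump is
// streamed into psql over "sh -c". All parameters are placeholders.
func restoreFromDump(host, port, user, dbName, dumpPath string, gzipped bool) error {
    reader := "cat "
    if gzipped {
        reader = "zcat "
    }
    cmd := reader + dumpPath + " | psql -h " + host + " -p " + port +
        " -U " + user + " -v -d " + dbName
    out, err := exec.Command("sh", "-c", cmd).CombinedOutput()
    if err != nil {
        return fmt.Errorf("restore failed: %v\n%s", err, out)
    }
    return nil
}

func main() {
    // PGPASSWORD must be exported before psql runs, as RestoreDatabase does.
    _ = os.Setenv("PGPASSWORD", "example-password")
    if err := restoreFromDump("localhost", "5432", "postgres", "testdb",
        "/tmp/backup/db_20231219.sql.gz", true); err != nil {
        fmt.Println(err)
    }
}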

133
pkg/s3.go Normal file
View File

@@ -0,0 +1,133 @@
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
import (
"fmt"
"github.com/jkaninda/go-storage/pkg/s3"
"github.com/jkaninda/pg-bkup/utils"
"os"
"path/filepath"
"time"
)
func s3Backup(db *dbConfig, config *BackupConfig) {
utils.Info("Backup database to s3 storage")
startTime = time.Now().Format(utils.TimeFormat())
// Backup database
BackupDatabase(db, config.backupFileName, disableCompression)
finalFileName := config.backupFileName
if config.encryption {
encryptBackup(config)
finalFileName = fmt.Sprintf("%s.%s", config.backupFileName, "gpg")
}
utils.Info("Uploading backup archive to remote storage S3 ... ")
awsConfig := initAWSConfig()
if config.remotePath == "" {
config.remotePath = awsConfig.remotePath
}
utils.Info("Backup name is %s", finalFileName)
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.Copy(finalFileName)
if err != nil {
utils.Fatal("Error copying backup file: %s", err)
}
// Get backup info
fileInfo, err := os.Stat(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error: %s", err)
}
backupSize = fileInfo.Size()
// Delete backup file from tmp folder
err = utils.DeleteFile(filepath.Join(tmpPath, finalFileName))
if err != nil {
utils.Error("Error deleting file: %v", err)
}
// Delete old backup
if config.prune {
err := s3Storage.Prune(config.backupRetention)
if err != nil {
utils.Fatal("Error deleting old backup from %s storage: %s ", config.storage, err)
}
}
utils.Info("Backup saved in %s", filepath.Join(config.remotePath, finalFileName))
utils.Info("Uploading backup archive to remote storage S3 ... done ")
// Send notification
utils.NotifySuccess(&utils.NotificationData{
File: finalFileName,
BackupSize: backupSize,
Database: db.dbName,
Storage: config.storage,
BackupLocation: filepath.Join(config.remotePath, finalFileName),
StartTime: startTime,
EndTime: time.Now().Format(utils.TimeFormat()),
})
// Delete temp
deleteTemp()
utils.Info("Backup completed successfully")
}
func s3Restore(db *dbConfig, conf *RestoreConfig) {
utils.Info("Restore database from s3")
awsConfig := initAWSConfig()
if conf.remotePath == "" {
conf.remotePath = awsConfig.remotePath
}
s3Storage, err := s3.NewStorage(s3.Config{
Endpoint: awsConfig.endpoint,
Bucket: awsConfig.bucket,
AccessKey: awsConfig.accessKey,
SecretKey: awsConfig.secretKey,
Region: awsConfig.region,
DisableSsl: awsConfig.disableSsl,
ForcePathStyle: awsConfig.forcePathStyle,
RemotePath: awsConfig.remotePath,
LocalPath: tmpPath,
})
if err != nil {
utils.Fatal("Error creating s3 storage: %s", err)
}
err = s3Storage.CopyFrom(conf.file)
if err != nil {
utils.Fatal("Error download file from S3 storage: %s", err)
}
RestoreDatabase(db, conf)
}
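
When config.prune is set, backups older than backupRetention days are removed by the storage backend's Prune method. The cutoff arithmetic is the same idea across backends; a small illustrative sketch (placeholder times, not the go-storage implementation):

package main

import (
    "fmt"
    "time"
)

// olderThanRetention reports whether a backup's modification time falls
// before the retention cutoff, the same test the Prune methods apply.
func olderThanRetention(modTime time.Time, retentionDays int) bool {
    cutoff := time.Now().AddDate(0, 0, -retentionDays)
    return modTime.Before(cutoff)
}

func main() {
    tenDaysOld := time.Now().AddDate(0, 0, -10)
    fmt.Println(olderThanRetention(tenDaysOld, 7))  // true: older than the 7-day retention
    fmt.Println(olderThanRetention(time.Now(), 7)) // false: created just now
}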

View File

@@ -1,166 +0,0 @@
package ftp
import (
"fmt"
pkg "github.com/jkaninda/pg-bkup/pkg/storage"
"github.com/jlaffaye/ftp"
"io"
"os"
"path/filepath"
"time"
)
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
type ftpStorage struct {
*pkg.Backend
client *ftp.ServerConn
}
// Config holds the SSH connection details
type Config struct {
Host string
User string
Password string
Port string
LocalPath string
RemotePath string
}
// createClient creates FTP Client
func createClient(conf Config) (*ftp.ServerConn, error) {
ftpClient, err := ftp.Dial(fmt.Sprintf("%s:%s", conf.Host, conf.Port), ftp.DialWithTimeout(5*time.Second))
if err != nil {
return nil, fmt.Errorf("failed to connect to FTP: %w", err)
}
err = ftpClient.Login(conf.User, conf.Password)
if err != nil {
return nil, fmt.Errorf("failed to log in to FTP: %w", err)
}
return ftpClient, nil
}
// NewStorage creates new Storage
func NewStorage(conf Config) (pkg.Storage, error) {
client, err := createClient(conf)
if err != nil {
return nil, err
}
return &ftpStorage{
client: client,
Backend: &pkg.Backend{
RemotePath: conf.RemotePath,
LocalPath: conf.LocalPath,
},
}, nil
}
// Copy copies file to the remote server
func (s ftpStorage) Copy(fileName string) error {
ftpClient := s.client
defer func(ftpClient *ftp.ServerConn) {
err := ftpClient.Quit()
if err != nil {
return
}
}(ftpClient)
filePath := filepath.Join(s.LocalPath, fileName)
file, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", fileName, err)
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
return
}
}(file)
remoteFilePath := filepath.Join(s.RemotePath, fileName)
err = ftpClient.Stor(remoteFilePath, file)
if err != nil {
return fmt.Errorf("failed to upload file %s: %w", filepath.Join(s.LocalPath, fileName), err)
}
return nil
}
// CopyFrom copies a file from the remote server to local storage
func (s ftpStorage) CopyFrom(fileName string) error {
ftpClient := s.client
defer func(ftpClient *ftp.ServerConn) {
err := ftpClient.Quit()
if err != nil {
return
}
}(ftpClient)
remoteFilePath := filepath.Join(s.RemotePath, fileName)
r, err := ftpClient.Retr(remoteFilePath)
if err != nil {
return fmt.Errorf("failed to retrieve file %s: %w", fileName, err)
}
defer func(r *ftp.Response) {
err := r.Close()
if err != nil {
return
}
}(r)
localFilePath := filepath.Join(s.LocalPath, fileName)
outFile, err := os.Create(localFilePath)
if err != nil {
return fmt.Errorf("failed to create local file %s: %w", fileName, err)
}
defer func(outFile *os.File) {
err := outFile.Close()
if err != nil {
return
}
}(outFile)
_, err = io.Copy(outFile, r)
if err != nil {
return fmt.Errorf("failed to copy data to local file %s: %w", fileName, err)
}
return nil
}
// Prune deletes old backup created more than specified days
func (s ftpStorage) Prune(retentionDays int) error {
fmt.Println("Deleting old backup from a remote server is not implemented yet")
return nil
}
// Name returns the storage name
func (s ftpStorage) Name() string {
return "ftp"
}

View File

@@ -1,140 +0,0 @@
package local
import (
pkg "github.com/jkaninda/pg-bkup/pkg/storage"
"io"
"os"
"path/filepath"
"time"
)
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
type localStorage struct {
*pkg.Backend
}
type Config struct {
LocalPath string
RemotePath string
}
// NewStorage creates new Storage
func NewStorage(conf Config) pkg.Storage {
return &localStorage{
Backend: &pkg.Backend{
LocalPath: conf.LocalPath,
RemotePath: conf.RemotePath,
},
}
}
// Copy copies file to the local destination path
func (l localStorage) Copy(file string) error {
if _, err := os.Stat(filepath.Join(l.LocalPath, file)); os.IsNotExist(err) {
return err
}
err := copyFile(filepath.Join(l.LocalPath, file), filepath.Join(l.RemotePath, file))
if err != nil {
return err
}
return nil
}
// CopyFrom copies file from a Path to local path
func (l localStorage) CopyFrom(file string) error {
if _, err := os.Stat(filepath.Join(l.RemotePath, file)); os.IsNotExist(err) {
return err
}
err := copyFile(filepath.Join(l.RemotePath, file), filepath.Join(l.LocalPath, file))
if err != nil {
return err
}
return nil
}
// Prune deletes old backup created more than specified days
func (l localStorage) Prune(retentionDays int) error {
currentTime := time.Now()
// Delete file
deleteFile := func(filePath string) error {
err := os.Remove(filePath)
return err
}
// Walk through the directory and delete files modified more than specified days ago
err := filepath.Walk(l.RemotePath, func(filePath string, fileInfo os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if it's a regular file and if it was modified more than specified days ago
if fileInfo.Mode().IsRegular() {
timeDiff := currentTime.Sub(fileInfo.ModTime())
if timeDiff.Hours() > 24*float64(retentionDays) {
err := deleteFile(filePath)
if err != nil {
return err
}
}
}
return nil
})
if err != nil {
return err
}
return nil
}
// Name returns the storage name
func (l localStorage) Name() string {
return "local"
}
// copyFile copies file
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func(in *os.File) {
err := in.Close()
if err != nil {
return
}
}(in)
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
err := out.Close()
if err != nil {
return err
}
return err
}
return out.Close()
}

View File

@@ -1,66 +0,0 @@
package local
import (
"fmt"
"os"
"path/filepath"
"testing"
)
const content = "Lorem ipsum dolor sit amet. Eum eius voluptas sit vitae vitae aut sequi molestias hic accusamus consequatur"
const inputFile = "file.txt"
const localPath = "./tests/local"
const RemotePath = "./tests/remote"
func TestCopy(t *testing.T) {
err := os.MkdirAll(localPath, 0777)
if err != nil {
t.Error(err)
}
err = os.MkdirAll(RemotePath, 0777)
if err != nil {
t.Error(err)
}
_, err = createFile(filepath.Join(localPath, inputFile), content)
if err != nil {
t.Error(err)
}
l := NewStorage(Config{
LocalPath: "./tests/local",
RemotePath: "./tests/remote",
})
err = l.Copy(inputFile)
if err != nil {
t.Error(err)
}
fmt.Printf("File copied to %s\n", filepath.Join(RemotePath, inputFile))
}
func createFile(fileName, content string) ([]byte, error) {
// Create a file named hello.txt
file, err := os.Create(fileName)
if err != nil {
fmt.Println("Error creating file:", err)
return nil, err
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
fmt.Println("Error closing file:", err)
return
}
}(file)
// Write the message to the file
_, err = file.WriteString(content)
if err != nil {
fmt.Println("Error writing to file:", err)
return nil, err
}
fmt.Printf("Successfully wrote to %s\n", fileName)
fileBytes, err := os.ReadFile(fileName)
return fileBytes, err
}

View File

@@ -1,200 +0,0 @@
package s3
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
pkg "github.com/jkaninda/pg-bkup/pkg/storage"
"net/http"
"os"
"path/filepath"
"time"
)
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
type s3Storage struct {
*pkg.Backend
client *session.Session
bucket string
}
// Config holds the AWS S3 config
type Config struct {
Endpoint string
Bucket string
AccessKey string
SecretKey string
Region string
DisableSsl bool
ForcePathStyle bool
LocalPath string
RemotePath string
}
// CreateSession creates a new AWS session
func createSession(conf Config) (*session.Session, error) {
s3Config := &aws.Config{
Credentials: credentials.NewStaticCredentials(conf.AccessKey, conf.SecretKey, ""),
Endpoint: aws.String(conf.Endpoint),
Region: aws.String(conf.Region),
DisableSSL: aws.Bool(conf.DisableSsl),
S3ForcePathStyle: aws.Bool(conf.ForcePathStyle),
}
return session.NewSession(s3Config)
}
// NewStorage creates new Storage
func NewStorage(conf Config) (pkg.Storage, error) {
sess, err := createSession(conf)
if err != nil {
return nil, err
}
return &s3Storage{
client: sess,
bucket: conf.Bucket,
Backend: &pkg.Backend{
RemotePath: conf.RemotePath,
LocalPath: conf.LocalPath,
},
}, nil
}
// Copy copies file to S3 storage
func (s s3Storage) Copy(fileName string) error {
svc := s3.New(s.client)
file, err := os.Open(filepath.Join(s.LocalPath, fileName))
if err != nil {
return err
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
return
}
}(file)
fileInfo, err := file.Stat()
if err != nil {
return err
}
objectKey := filepath.Join(s.RemotePath, fileName)
buffer := make([]byte, fileInfo.Size())
_, err = file.Read(buffer)
if err != nil {
return err
}
fileBytes := bytes.NewReader(buffer)
fileType := http.DetectContentType(buffer)
_, err = svc.PutObject(&s3.PutObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(objectKey),
Body: fileBytes,
ContentLength: aws.Int64(fileInfo.Size()),
ContentType: aws.String(fileType),
})
if err != nil {
return err
}
return nil
}
// CopyFrom copies a file from S3 to local storage
func (s s3Storage) CopyFrom(fileName string) error {
file, err := os.Create(filepath.Join(s.LocalPath, fileName))
if err != nil {
return err
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
fmt.Printf("Error closing file: %v\n", err)
return
}
}(file)
objectKey := filepath.Join(s.RemotePath, fileName)
downloader := s3manager.NewDownloader(s.client)
_, err = downloader.Download(file,
&s3.GetObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(objectKey),
})
if err != nil {
return err
}
return nil
}
// Prune deletes old backup created more than specified days
func (s s3Storage) Prune(retentionDays int) error {
svc := s3.New(s.client)
// Get the current time
now := time.Now()
backupRetentionDays := now.AddDate(0, 0, -retentionDays)
// List objects in the bucket
listObjectsInput := &s3.ListObjectsV2Input{
Bucket: aws.String(s.bucket),
Prefix: aws.String(s.RemotePath),
}
err := svc.ListObjectsV2Pages(listObjectsInput, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, object := range page.Contents {
if object.LastModified.Before(backupRetentionDays) {
// Object is older than retention days, delete it
_, err := svc.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: object.Key,
})
if err != nil {
fmt.Printf("failed to delete object %s: %v", *object.Key, err)
} else {
fmt.Printf("Deleted object %s", *object.Key)
}
}
}
return !lastPage
})
if err != nil {
return fmt.Errorf("failed to list objects: %v", err)
}
return nil
}
// Name returns the storage name
func (s s3Storage) Name() string {
return "s3"
}

View File

@@ -1,148 +0,0 @@
package ssh
import (
"context"
"errors"
"fmt"
"github.com/bramvdbogaerde/go-scp"
"github.com/bramvdbogaerde/go-scp/auth"
pkg "github.com/jkaninda/pg-bkup/pkg/storage"
"golang.org/x/crypto/ssh"
"os"
"path/filepath"
)
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
type sshStorage struct {
*pkg.Backend
client scp.Client
}
// Config holds the SSH connection details
type Config struct {
Host string
User string
Password string
Port string
IdentifyFile string
LocalPath string
RemotePath string
}
// createClient creates SSH Client
func createClient(conf Config) (scp.Client, error) {
if _, err := os.Stat(conf.IdentifyFile); os.IsNotExist(err) {
clientConfig, err := auth.PrivateKey(conf.User, conf.IdentifyFile, ssh.InsecureIgnoreHostKey())
return scp.NewClient(fmt.Sprintf("%s:%s", conf.Host, conf.Port), &clientConfig), err
} else {
if conf.Password == "" {
return scp.Client{}, errors.New("ssh password required")
}
clientConfig, err := auth.PasswordKey(conf.User, conf.Password, ssh.InsecureIgnoreHostKey())
return scp.NewClient(fmt.Sprintf("%s:%s", conf.Host, conf.Port), &clientConfig), err
}
}
// NewStorage creates new Storage
func NewStorage(conf Config) (pkg.Storage, error) {
client, err := createClient(conf)
if err != nil {
return nil, err
}
return &sshStorage{
client: client,
Backend: &pkg.Backend{
RemotePath: conf.RemotePath,
LocalPath: conf.LocalPath,
},
}, nil
}
// Copy copies file to the remote server
func (s sshStorage) Copy(fileName string) error {
client := s.client
// Connect to the remote server
err := client.Connect()
if err != nil {
return errors.New("couldn't establish a connection to the remote server")
}
// Open the local file
filePath := filepath.Join(s.LocalPath, fileName)
file, err := os.Open(filePath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", filePath, err)
}
defer client.Close()
// Copy file to the remote server
err = client.CopyFromFile(context.Background(), *file, filepath.Join(s.RemotePath, fileName), "0655")
if err != nil {
return fmt.Errorf("failed to copy file to remote server: %w", err)
}
return nil
}
// CopyFrom copies a file from the remote server to local storage
func (s sshStorage) CopyFrom(fileName string) error {
// Create a new SCP client
client := s.client
// Connect to the remote server
err := client.Connect()
if err != nil {
return errors.New("couldn't establish a connection to the remote server")
}
// Close client connection after the file has been copied
defer client.Close()
file, err := os.OpenFile(filepath.Join(s.LocalPath, fileName), os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
return errors.New("couldn't open the output file")
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
return
}
}(file)
err = client.CopyFromRemote(context.Background(), file, filepath.Join(s.RemotePath, fileName))
if err != nil {
return err
}
return nil
}
// Prune deletes old backup created more than specified days
func (s sshStorage) Prune(retentionDays int) error {
fmt.Println("Deleting old backup from a remote server is not implemented yet")
return nil
}
// Name returns the storage name
func (s sshStorage) Name() string {
return "ssh"
}

View File

@@ -1,38 +0,0 @@
package storage
/*
MIT License
Copyright (c) 2023 Jonas Kaninda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
type Storage interface {
Copy(fileName string) error
CopyFrom(fileName string) error
Prune(retentionDays int) error
Name() string
}
type Backend struct {
// Local Path
LocalPath string
// Remote path or Destination path
RemotePath string
}

View File

@@ -1,6 +1,3 @@
// Package internal /
package internal
/*
MIT License
@@ -25,6 +22,8 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package pkg
const tmpPath = "/tmp/backup"
const gpgHome = "/config/gnupg"
const gpgExtension = "gpg"
@@ -69,6 +68,12 @@ var ftpVars = []string{
"FTP_PORT",
}
// azureVars Required environment variables for Azure Blob storage
var azureVars = []string{
"AZURE_STORAGE_CONTAINER_NAME",
"AZURE_STORAGE_ACCOUNT_NAME",
"AZURE_STORAGE_ACCOUNT_KEY",
}
// AwsVars Required environment variables for AWS S3 storage
var awsVars = []string{
"AWS_S3_ENDPOINT",

View File

@@ -1,5 +1,3 @@
package utils
/*
MIT License
@@ -24,6 +22,8 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import "os"
type MailConfig struct {

View File

@@ -1,6 +1,3 @@
// Package utils /
package utils
/*
MIT License
@@ -25,6 +22,8 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import "os"
const RestoreExample = "restore --dbname database --file db_20231219_022941.sql.gz\n" +
@@ -35,6 +34,7 @@ const BackupExample = "backup --dbname database --disable-compression\n" +
const MainExample = "backup --dbname database --disable-compression\n" +
"backup --dbname database --storage s3 --path /custom-path\n" +
"restore --dbname database --file db_20231219_022941.sql.gz"
const traceLog = "trace"
func VERSION(def string) string {
build := os.Getenv("VERSION")

View File

@@ -1,13 +1,3 @@
package logger
import (
"fmt"
"log"
"os"
"runtime"
"strings"
)
/*
MIT License
@@ -32,6 +22,16 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import (
"fmt"
"log"
"os"
"runtime"
"strings"
)
// Info logs an info message
func Info(msg string, args ...interface{}) {
log.SetOutput(getStd("/dev/stdout"))
@@ -54,7 +54,13 @@ func Error(msg string, args ...interface{}) {
func Fatal(msg string, args ...interface{}) {
log.SetOutput(os.Stdout)
// Format message if there are additional arguments
formattedMessage := msg
if len(args) > 0 {
formattedMessage = fmt.Sprintf(msg, args...)
}
logWithCaller("ERROR", msg, args...)
NotifyError(formattedMessage)
os.Exit(1)
}
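
The reworked Fatal only formats the message when arguments are supplied, then notifies and exits. A standalone sketch of that behaviour, with notifyError standing in for utils.NotifyError:

package main

import (
    "fmt"
    "log"
    "os"
)

// fatalf mirrors the updated Fatal: format only when arguments are supplied,
// log the message, send a notification, then exit non-zero.
// notifyError is a stand-in for utils.NotifyError.
func fatalf(notifyError func(string), msg string, args ...interface{}) {
    formatted := msg
    if len(args) > 0 {
        formatted = fmt.Sprintf(msg, args...)
    }
    log.Println("ERROR:", formatted)
    notifyError(formatted)
    os.Exit(1)
}

func main() {
    fatalf(func(m string) { fmt.Println("notify:", m) },
        "Error checking environment variables: %s", "AWS_S3_ENDPOINT is missing")
}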

View File

@@ -1,5 +1,3 @@
package utils
/*
MIT License
@@ -24,13 +22,14 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/go-mail/mail"
"github.com/jkaninda/pg-bkup/pkg/logger"
"html/template"
"io"
"net/http"
@@ -56,7 +55,7 @@ func parseTemplate[T any](data T, fileName string) (string, error) {
}
func SendEmail(subject, body string) error {
logger.Info("Start sending email notification....")
Info("Start sending email notification....")
config := loadMailConfig()
emails := strings.Split(config.MailTo, ",")
m := mail.NewMessage()
@@ -68,16 +67,16 @@ func SendEmail(subject, body string) error {
d.TLSConfig = &tls.Config{InsecureSkipVerify: config.SkipTls}
if err := d.DialAndSend(m); err != nil {
logger.Error("Error could not send email : %v", err)
Error("Error could not send email : %v", err)
return err
}
logger.Info("Email notification has been sent")
Info("Email notification has been sent")
return nil
}
func sendMessage(msg string) error {
logger.Info("Sending Telegram notification... ")
Info("Sending Telegram notification... ")
chatId := os.Getenv("TG_CHAT_ID")
body, _ := json.Marshal(map[string]string{
"chat_id": chatId,
@@ -97,11 +96,11 @@ func sendMessage(msg string) error {
}
code := response.StatusCode
if code == 200 {
logger.Info("Telegram notification has been sent")
Info("Telegram notification has been sent")
return nil
} else {
body, _ := io.ReadAll(response.Body)
logger.Error("Error could not send message, error: %s", string(body))
Error("Error could not send message, error: %s", string(body))
return fmt.Errorf("error could not send message %s", string(body))
}
@@ -126,11 +125,11 @@ func NotifySuccess(notificationData *NotificationData) {
if err == nil {
body, err := parseTemplate(*notificationData, "email.tmpl")
if err != nil {
logger.Error("Could not parse email template: %v", err)
Error("Could not parse email template: %v", err)
}
err = SendEmail(fmt.Sprintf("✅ Database Backup Notification %s", notificationData.Database), body)
if err != nil {
logger.Error("Could not send email: %v", err)
Error("Could not send email: %v", err)
}
}
// Telegram notification
@@ -138,12 +137,12 @@ func NotifySuccess(notificationData *NotificationData) {
if err == nil {
message, err := parseTemplate(*notificationData, "telegram.tmpl")
if err != nil {
logger.Error("Could not parse telegram template: %v", err)
Error("Could not parse telegram template: %v", err)
}
err = sendMessage(message)
if err != nil {
logger.Error("Could not send Telegram message: %v", err)
Error("Could not send Telegram message: %v", err)
}
}
}
@@ -170,11 +169,11 @@ func NotifyError(error string) {
BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "email-error.tmpl")
if err != nil {
logger.Error("Could not parse error template: %v", err)
Error("Could not parse error template: %v", err)
}
err = SendEmail("🔴 Urgent: Database Backup Failure Notification", body)
if err != nil {
logger.Error("Could not send email: %v", err)
Error("Could not send email: %v", err)
}
}
// Telegram notification
@@ -186,13 +185,13 @@ func NotifyError(error string) {
BackupReference: os.Getenv("BACKUP_REFERENCE"),
}, "telegram-error.tmpl")
if err != nil {
logger.Error("Could not parse error template: %v", err)
Error("Could not parse error template: %v", err)
}
err = sendMessage(message)
if err != nil {
logger.Error("Could not send telegram message: %v", err)
Error("Could not send telegram message: %v", err)
}
}
}
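
sendMessage and the notification helpers share one shape: marshal a small JSON payload, POST it, and treat anything other than HTTP 200 as an error. A self-contained sketch of that pattern, using a placeholder URL rather than the real Telegram endpoint:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
)

// postJSON mirrors the shape of sendMessage: marshal a small payload,
// POST it, and treat anything other than HTTP 200 as an error.
// The URL passed in main is a placeholder.
func postJSON(url string, payload map[string]string) error {
    body, err := json.Marshal(payload)
    if err != nil {
        return err
    }
    resp, err := http.Post(url, "application/json", bytes.NewBuffer(body))
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        b, _ := io.ReadAll(resp.Body)
        return fmt.Errorf("could not send message: %s", string(b))
    }
    return nil
}

func main() {
    err := postJSON("https://example.invalid/sendMessage",
        map[string]string{"chat_id": "12345", "text": "backup completed"})
    fmt.Println(err)
}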

View File

@@ -1,4 +1,3 @@
// Package utils /
/*
MIT License
@@ -22,11 +21,11 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package utils
import (
"fmt"
"github.com/jkaninda/pg-bkup/pkg/logger"
"github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"io"
@@ -36,7 +35,7 @@ import (
"time"
)
var Version = "development"
var Version = ""
// FileExists checks if the file does exist
func FileExists(filename string) bool {
@@ -112,7 +111,7 @@ func CopyFile(src, dst string) error {
}
func ChangePermission(filePath string, mod int) {
if err := os.Chmod(filePath, fs.FileMode(mod)); err != nil {
logger.Fatal("Error changing permissions of %s: %v\n", filePath, err)
Fatal("Error changing permissions of %s: %v\n", filePath, err)
}
}
@@ -174,7 +173,7 @@ func GetEnvVariable(envName, oldEnvName string) string {
if err != nil {
return value
}
logger.Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
Warn("%s is deprecated, please use %s instead! ", oldEnvName, envName)
}
}
return value
@@ -221,7 +220,7 @@ func GetIntEnv(envName string) int {
}
ret, err := strconv.Atoi(val)
if err != nil {
logger.Error("Error: %v", err)
Error("Error: %v", err)
}
return ret
}
@@ -246,7 +245,7 @@ func CronNextTime(cronExpr string) time.Time {
// Parse the cron expression
schedule, err := cron.ParseStandard(cronExpr)
if err != nil {
logger.Error("Error parsing cron expression: %s", err)
Error("Error parsing cron expression: %s", err)
return time.Time{}
}
// Get the current time