mirror of
https://github.com/cooperspencer/gickup.git
synced 2025-05-11 09:45:36 +02:00
add zip support for s3 destination (#304)
This commit is contained in:
parent
79d5b30970
commit
6be0214ee3
5 changed files with 33 additions and 3 deletions
|
@ -276,6 +276,8 @@ destination:
|
||||||
bucket: your-bucket-name
|
bucket: your-bucket-name
|
||||||
accesskey: your-access-key # can be an environment variable, just don't add a $ in front of it
|
accesskey: your-access-key # can be an environment variable, just don't add a $ in front of it
|
||||||
secretkey: your-secret-key # can be an environment variable, just don't add a $ in front of it
|
secretkey: your-secret-key # can be an environment variable, just don't add a $ in front of it
|
||||||
|
token: your-token # can be an environment variable, just don't add a $ in front of it
|
||||||
|
zip: false # if true, will zip the entire git repo into a single zip file and upload that instead
|
||||||
usessl: true # whether to use ssl or not
|
usessl: true # whether to use ssl or not
|
||||||
cron: 0 22 * * * # optional - when cron is not provided, the program runs once and exits.
|
cron: 0 22 * * * # optional - when cron is not provided, the program runs once and exits.
|
||||||
# Otherwise, it runs according to the cron schedule.
|
# Otherwise, it runs according to the cron schedule.
|
||||||
|
|
|
@ -542,6 +542,10 @@
|
||||||
"token": {
|
"token": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "The token to authenticate against the S3 server"
|
"description": "The token to authenticate against the S3 server"
|
||||||
|
},
|
||||||
|
"zip": {
|
||||||
|
"type": "boolean",
|
||||||
|
"description": "If true, zip the entire Git repo into a single zip file and upload that instead"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"additionalProperties": false
|
"additionalProperties": false
|
||||||
|
@ -1107,4 +1111,4 @@
|
||||||
"additionalProperties": false
|
"additionalProperties": false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
25
main.go
25
main.go
|
@ -33,6 +33,7 @@ import (
|
||||||
"github.com/cooperspencer/gickup/metrics/prometheus"
|
"github.com/cooperspencer/gickup/metrics/prometheus"
|
||||||
"github.com/cooperspencer/gickup/types"
|
"github.com/cooperspencer/gickup/types"
|
||||||
"github.com/cooperspencer/gickup/whatever"
|
"github.com/cooperspencer/gickup/whatever"
|
||||||
|
"github.com/cooperspencer/gickup/zip"
|
||||||
"github.com/robfig/cron/v3"
|
"github.com/robfig/cron/v3"
|
||||||
"github.com/rs/zerolog"
|
"github.com/rs/zerolog"
|
||||||
"github.com/rs/zerolog/log"
|
"github.com/rs/zerolog/log"
|
||||||
|
@ -189,10 +190,14 @@ func backup(repos []types.Repo, conf *types.Conf) {
|
||||||
repotime := time.Now()
|
repotime := time.Now()
|
||||||
status := 0
|
status := 0
|
||||||
|
|
||||||
|
logOp := "pushing"
|
||||||
|
if d.Zip {
|
||||||
|
logOp = "zipping and pushing"
|
||||||
|
}
|
||||||
log.Info().
|
log.Info().
|
||||||
Str("stage", "s3").
|
Str("stage", "s3").
|
||||||
Str("url", d.Endpoint).
|
Str("url", d.Endpoint).
|
||||||
Msgf("pushing %s to %s", types.Blue(r.Name), d.Bucket)
|
Msgf("%s %s to %s", logOp, types.Blue(r.Name), d.Bucket)
|
||||||
|
|
||||||
if !cli.Dry {
|
if !cli.Dry {
|
||||||
tempname := fmt.Sprintf("s3-%x", repotime)
|
tempname := fmt.Sprintf("s3-%x", repotime)
|
||||||
|
@ -210,7 +215,8 @@ func backup(repos []types.Repo, conf *types.Conf) {
|
||||||
}
|
}
|
||||||
|
|
||||||
defer os.RemoveAll(tempdir)
|
defer os.RemoveAll(tempdir)
|
||||||
_, err = local.TempClone(r, path.Join(tempdir, r.Name))
|
tempClonePath := path.Join(tempdir, r.Name)
|
||||||
|
_, err = local.TempClone(r, tempClonePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == git.NoErrAlreadyUpToDate {
|
if err == git.NoErrAlreadyUpToDate {
|
||||||
log.Info().
|
log.Info().
|
||||||
|
@ -241,6 +247,21 @@ func backup(repos []types.Repo, conf *types.Conf) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error().Str("stage", "s3").Str("endpoint", d.Endpoint).Str("bucket", d.Bucket).Msg(err.Error())
|
log.Error().Str("stage", "s3").Str("endpoint", d.Endpoint).Str("bucket", d.Bucket).Msg(err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if d.Zip {
|
||||||
|
log.Info().
|
||||||
|
Msgf("zipping %s", types.Green(r.Name))
|
||||||
|
err := zip.Zip(tempClonePath, []string{tempClonePath})
|
||||||
|
if err != nil {
|
||||||
|
log.Error().
|
||||||
|
Str("stage", "zip").
|
||||||
|
Str("url", r.URL).
|
||||||
|
Str("repo", r.Name).
|
||||||
|
Msg(err.Error())
|
||||||
|
log.Error().Msgf("Skipping backup of %s due to error while zipping", r.Name)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
err = s3.UploadDirToS3(tempdir, d)
|
err = s3.UploadDirToS3(tempdir, d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error().Str("stage", "s3").Str("endpoint", d.Endpoint).Str("bucket", d.Bucket).Msg(err.Error())
|
log.Error().Str("stage", "s3").Str("endpoint", d.Endpoint).Str("bucket", d.Bucket).Msg(err.Error())
|
||||||
|
|
|
@ -543,6 +543,7 @@ type S3Repo struct {
|
||||||
Region string `yaml:"region"`
|
Region string `yaml:"region"`
|
||||||
UseSSL bool `yaml:"usessl"`
|
UseSSL bool `yaml:"usessl"`
|
||||||
Structured bool `yaml:"structured"`
|
Structured bool `yaml:"structured"`
|
||||||
|
Zip bool `yaml:"zip"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s3 S3Repo) GetKey(accessString string) (string, error) {
|
func (s3 S3Repo) GetKey(accessString string) (string, error) {
|
||||||
|
|
|
@ -9,6 +9,8 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Create a Zip file `{repository}.zip` and recursively add the contents of all paths in the `tozip` array to it.
|
||||||
|
// Deletes the original contents of `repository` such that only the newly created Zip file remains.
|
||||||
func Zip(repository string, tozip []string) error {
|
func Zip(repository string, tozip []string) error {
|
||||||
file, err := os.Create(fmt.Sprintf("%s.zip", repository))
|
file, err := os.Create(fmt.Sprintf("%s.zip", repository))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue