
Commit 7dc9f2c

feat: implement backup retention policies
1 parent eb7aeae commit 7dc9f2c

File tree: 10 files changed, +719 −21 lines

README.md

Lines changed: 24 additions & 2 deletions

@@ -54,8 +54,30 @@ storage {
     # S3 region (optional)
     region = "auto"

-    # S3 prefix (optional, backup file will be stored in `{prefix}/2006-01-02T15:04:05.{compress_algorithm}.sql`)
+    # S3 prefix (optional, backup file will be stored in `{prefix}/2006-01-02T15:04:05.{compress_algorithm}`)
     prefix = "backup"
+
+    # Retention settings (optional)
+    # Flexible time-based retention using various formats
+    retention_period = "30 days" # Examples: "7 days", "1h", "2 weeks", "1 month", "yearly"
+    # Also supports: "1hr", "24hrs", "daily", "weekly", "monthly"
+
+    # Keep only the latest 10 backups (optional, works with time-based retention)
+    retention_count = 10
+  }
+
+  # Local storage configuration (optional, can be used with or without S3)
+  local {
+    # Local directory path to store backups
+    directory = "/var/backups/postgres"
+
+    # Retention settings (optional)
+    # Flexible time-based retention using various formats
+    retention_period = "1 week" # Examples: "7 days", "24h", "2 weeks", "monthly"
+    # Also supports: "1hr", "daily", "weekly", "1 month", "yearly"
+
+    # Keep only the latest 5 backups (optional, works with time-based retention)
+    retention_count = 5
   }
 }

@@ -82,7 +104,7 @@ verbose = false
 - [ ] Add more compress algorithm
 - [ ] Support multiple database backup
 - [ ] Support notification
-- [ ] Support backup retention
+- [X] Support backup retention
 - [ ] Support backup restore
 - [ ] Support streaming compress/upload backup
 - [ ] Support backup encryption
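
The date portion of the prefix comment above is Go's reference time layout, so the object key can be built directly with time.Format. A minimal sketch of that naming scheme; the prefix and algorithm values here are illustrative, not taken from the commit:

package main

import (
	"fmt"
	"path"
	"time"
)

func main() {
	// The `{prefix}/2006-01-02T15:04:05.{compress_algorithm}` pattern from the
	// README uses Go's reference time layout as the timestamp format string.
	prefix := "backup"      // assumed example value
	algorithm := "zstd"     // assumed example value
	key := path.Join(prefix, time.Now().Format("2006-01-02T15:04:05")+"."+algorithm)
	fmt.Println(key) // e.g. backup/2024-05-01T03:00:00.zstd
}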

cmd/retention.go

Lines changed: 89 additions & 0 deletions

@@ -0,0 +1,89 @@
package cmd

import (
	"context"

	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"

	"github.com/DeltaLaboratory/postgres-backup/internal/config"
	"github.com/DeltaLaboratory/postgres-backup/internal/storage/local"
	"github.com/DeltaLaboratory/postgres-backup/internal/storage/s3"
)

// retentionCmd represents the retention command
var retentionCmd = &cobra.Command{
	Use:   "retention",
	Short: "Manage backup retention policies",
	Long:  `Manage backup retention policies for cleaning up old backups.`,
}

// cleanupCmd represents the cleanup command
var cleanupCmd = &cobra.Command{
	Use:   "cleanup",
	Short: "Clean up old backups based on retention policy",
	Long: `Clean up old backups based on the configured retention policy.
This command will remove backups that exceed the retention limits defined
in the configuration file (retention_period and/or retention_count).`,
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()
		logger := log.Logger.With().Str("caller", "retention_cleanup_cmd").Logger()

		// Count configured storage backends
		backends := make([]string, 0, 2)
		if config.Loaded.Storage.S3 != nil {
			backends = append(backends, "S3")
		}
		if config.Loaded.Storage.Local != nil {
			backends = append(backends, "local")
		}

		if len(backends) == 0 {
			logger.Fatal().Msg("no storage backends configured - cannot perform retention cleanup")
		}

		logger.Info().
			Strs("storage_backends", backends).
			Msg("starting manual retention cleanup")

		successCount := 0

		// Run S3 retention cleanup if configured
		if config.Loaded.Storage.S3 != nil {
			if err := s3.CleanupRetention(ctx); err != nil {
				logger.Error().Err(err).
					Str("bucket", config.Loaded.Storage.S3.Bucket).
					Msg("S3 retention cleanup failed")
			} else {
				logger.Info().
					Str("bucket", config.Loaded.Storage.S3.Bucket).
					Msg("S3 retention cleanup completed successfully")
				successCount++
			}
		}

		// Run local retention cleanup if configured
		if config.Loaded.Storage.Local != nil {
			if err := local.CleanupRetention(ctx); err != nil {
				logger.Error().Err(err).
					Str("directory", config.Loaded.Storage.Local.Directory).
					Msg("local retention cleanup failed")
			} else {
				logger.Info().
					Str("directory", config.Loaded.Storage.Local.Directory).
					Msg("local retention cleanup completed successfully")
				successCount++
			}
		}

		logger.Info().
			Int("successful_backends", successCount).
			Int("total_backends", len(backends)).
			Msg("retention cleanup operation finished")
	},
}

func init() {
	retentionCmd.AddCommand(cleanupCmd)
	RootCmd.AddCommand(retentionCmd)
}
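
The s3.CleanupRetention and local.CleanupRetention functions called here are defined in files that belong to this commit but are not shown in the visible hunks, and the subcommand would presumably be invoked as `postgres-backup retention cleanup` (binary name assumed from the module path). A rough sketch of what a local cleanup pass could look like, keyed off file modification times; this helper is hypothetical, not the commit's actual implementation:

package local

import (
	"os"
	"path/filepath"
	"sort"
	"time"
)

// cleanupLocal is a hypothetical sketch: keep the newest keepCount files in
// dir and remove anything older than maxAge. A zero value disables that limit.
func cleanupLocal(dir string, keepCount int, maxAge time.Duration) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}

	type backup struct {
		path    string
		modTime time.Time
	}

	var backups []backup
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		info, err := e.Info()
		if err != nil {
			return err
		}
		backups = append(backups, backup{filepath.Join(dir, e.Name()), info.ModTime()})
	}

	// Newest first, so the backups we keep sit at the front of the slice.
	sort.Slice(backups, func(i, j int) bool { return backups[i].modTime.After(backups[j].modTime) })

	cutoff := time.Now().Add(-maxAge)
	for i, b := range backups {
		// Remove a backup if it exceeds the count limit or is older than the cutoff.
		if (keepCount > 0 && i >= keepCount) || (maxAge > 0 && b.modTime.Before(cutoff)) {
			if err := os.Remove(b.path); err != nil {
				return err
			}
		}
	}
	return nil
}

The real implementation also has to honor the timestamped file naming and the parsed retention_period, but keep-newest-N plus drop-older-than-cutoff is presumably the core of it.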

cmd/schedule/run.go

Lines changed: 14 additions & 4 deletions

@@ -15,19 +15,29 @@ var runCmd = &cobra.Command{
 	Short: "Run the backup schedule",
 	Long:  `Run the backup schedule defined in the configuration file.`,
 	Run: func(cmd *cobra.Command, args []string) {
+		logger := log.Logger.With().Str("caller", "schedule_runner").Logger()
+
 		if len(config.Loaded.Schedule) == 0 {
-			log.Fatal().Msg("no schedule provided")
+			logger.Fatal().Msg("no backup schedules configured - cannot start scheduler")
 		}

+		logger.Info().Int("schedule_count", len(config.Loaded.Schedule)).Msg("initializing backup scheduler")
+
 		c := cron.New()
 		for _, schedule := range config.Loaded.Schedule {
			if id, err := c.AddFunc(schedule, internal.Backup); err != nil {
-				log.Fatal().Err(err).Str("schedule", schedule).Msg("failed to add schedule")
+				logger.Fatal().Err(err).
+					Str("cron_expression", schedule).
+					Msg("failed to register backup schedule - invalid cron expression")
 			} else {
-				log.Info().Str("schedule", schedule).Str("next", c.Entry(id).Next.String()).Msg("schedule added")
+				logger.Info().
+					Str("cron_expression", schedule).
+					Str("next_run", c.Entry(id).Next.String()).
+					Msg("backup schedule registered successfully")
 			}
 		}
-		log.Info().Msg("starting cron")
+
+		logger.Info().Msg("starting backup scheduler - waiting for scheduled backup jobs")
 		c.Run()
 	},
 }
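
The `id, err := c.AddFunc(...)` and `c.Entry(id).Next` calls match the robfig/cron v3 API, although the import line is not visible in this hunk, so that package is an assumption. A small standalone sketch of registering a schedule the same way; the cron expression is only an example:

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()

	// Standard 5-field cron expression: run every day at 03:00.
	id, err := c.AddFunc("0 3 * * *", func() { fmt.Println("backup triggered") })
	if err != nil {
		panic(err) // invalid cron expression
	}

	c.Start()
	fmt.Println("next run:", c.Entry(id).Next) // populated once the scheduler is running
	time.Sleep(100 * time.Millisecond)
	c.Stop()
}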

internal/backup.go

Lines changed: 61 additions & 8 deletions

@@ -1,47 +1,100 @@
 package internal

 import (
+	"bytes"
 	"context"
 	"io"

 	"github.com/rs/zerolog/log"

 	"github.com/DeltaLaboratory/postgres-backup/internal/config"
+	"github.com/DeltaLaboratory/postgres-backup/internal/storage/local"
 	"github.com/DeltaLaboratory/postgres-backup/internal/storage/s3"
 )

 func Backup() {
+	logger := log.Logger.With().Str("caller", "backup").Logger()
+
+	// Get database name for logging context
+	dbName := "unknown"
+	if config.Loaded.Postgres.Database != nil {
+		dbName = *config.Loaded.Postgres.Database
+	}
+
+	logger.Info().Str("database", dbName).Msg("starting database backup")
+
 	process, err := Dump()
 	if err != nil {
-		log.Error().Err(err).Msg("failed to dump database")
+		logger.Error().Err(err).Str("database", dbName).Msg("failed to create database dump")
 		return
 	}

 	if err := process.Start(); err != nil {
-		log.Error().Err(err).Msg("failed to start backup process")
+		logger.Error().Err(err).Str("database", dbName).Msg("failed to start pg_dump process")
 		return
 	}

 	var reader io.Reader = process

 	if config.Loaded.Compress != nil {
-		log.Info().Str("algorithm", config.Loaded.Compress.Algorithm).Int("compress_level", *config.Loaded.Compress.CompressLevel).Msg("start compress steam")
+		logger.Info().Str("algorithm", config.Loaded.Compress.Algorithm).Int("compress_level", *config.Loaded.Compress.CompressLevel).Str("database", dbName).Msg("starting compression stream")
 		reader, err = Compress(reader)
 		if err != nil {
-			log.Error().Err(err).Msg("failed to compress dump")
+			logger.Error().Err(err).Str("database", dbName).Str("algorithm", config.Loaded.Compress.Algorithm).Msg("failed to compress database dump")
 			return
 		}
 	}

+	// Buffer the data if we need to upload to multiple storage backends
+	var buffer *bytes.Buffer
+	bothConfigured := config.Loaded.Storage.S3 != nil && config.Loaded.Storage.Local != nil
+
+	if bothConfigured {
+		// Read all data into buffer for multiple uploads
+		logger.Info().Str("database", dbName).Msg("buffering backup data for multiple storage backends")
+		buffer = &bytes.Buffer{}
+		_, err = io.Copy(buffer, reader)
+		if err != nil {
+			logger.Error().Err(err).Str("database", dbName).Msg("failed to buffer backup data for storage")
+			return
+		}
+	}
+
+	// Upload to S3 if configured
 	if config.Loaded.Storage.S3 != nil {
-		if err := s3.Upload(context.Background(), reader); err != nil {
-			log.Error().Err(err).Msg("failed to upload dump")
+		var s3Reader io.Reader
+		if bothConfigured {
+			s3Reader = bytes.NewReader(buffer.Bytes())
+		} else {
+			s3Reader = reader
+		}
+
+		if err := s3.Upload(context.Background(), s3Reader); err != nil {
+			logger.Error().Err(err).Str("database", dbName).Str("bucket", config.Loaded.Storage.S3.Bucket).Msg("failed to upload backup to S3")
+		} else {
+			logger.Info().Str("database", dbName).Str("bucket", config.Loaded.Storage.S3.Bucket).Msg("successfully uploaded backup to S3")
+		}
+	}
+
+	// Upload to local storage if configured
+	if config.Loaded.Storage.Local != nil {
+		var localReader io.Reader
+		if bothConfigured {
+			localReader = bytes.NewReader(buffer.Bytes())
+		} else {
+			localReader = reader
+		}
+
+		if err := local.Upload(context.Background(), localReader); err != nil {
+			logger.Error().Err(err).Str("database", dbName).Str("directory", config.Loaded.Storage.Local.Directory).Msg("failed to upload backup to local storage")
+		} else {
+			logger.Info().Str("database", dbName).Str("directory", config.Loaded.Storage.Local.Directory).Msg("successfully uploaded backup to local storage")
 		}
 	}

 	if err := process.Wait(); err != nil {
-		log.Error().Err(err).Msg("failed to run backup process")
+		logger.Error().Err(err).Str("database", dbName).Msg("pg_dump process finished with error")
 	}

-	log.Info().Msg("backup completed")
+	logger.Info().Str("database", dbName).Msg("database backup completed successfully")
 }
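
The buffering branch above exists because the dump stream (the pg_dump pipe, optionally wrapped in a compressor) can only be read once; when both S3 and local storage are configured, the whole dump is copied into memory and each upload gets its own bytes.Reader over the same bytes. A standalone illustration of that pattern, not the commit's code:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Simulate a one-shot stream such as a pg_dump pipe.
	source := strings.NewReader("backup contents")

	// Buffer it once so it can be handed to more than one consumer.
	var buffer bytes.Buffer
	if _, err := io.Copy(&buffer, source); err != nil {
		panic(err)
	}

	// Each "upload" gets an independent reader over the same buffered bytes.
	for _, backend := range []string{"s3", "local"} {
		data, _ := io.ReadAll(bytes.NewReader(buffer.Bytes()))
		fmt.Printf("%s received %d bytes\n", backend, len(data))
	}
}

The trade-off is that the full (compressed) dump must fit in memory whenever both backends are enabled; a streaming alternative such as io.MultiWriter would avoid that, at the cost of coupling the two uploads into a single pass.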

internal/config/config.go

Lines changed: 31 additions & 0 deletions

@@ -1,6 +1,7 @@
 package config

 import (
+	"fmt"
 	"os"

 	"github.com/hashicorp/hcl/v2/hclsimple"

@@ -32,6 +33,36 @@ func (c Config) Validate() error {
 	if err := c.Compress.Validate(); err != nil {
 		return err
 	}
+
+	// Validate storage retention settings
+	if c.Storage.S3 != nil {
+		// Validate retention_period
+		if c.Storage.S3.RetentionPeriod != nil {
+			if _, err := c.Storage.S3.GetEffectiveRetentionDays(); err != nil {
+				return fmt.Errorf("s3 retention_period validation failed: %w", err)
+			}
+		}
+
+		// Validate retention_count
+		if c.Storage.S3.RetentionCount != nil && *c.Storage.S3.RetentionCount <= 0 {
+			return fmt.Errorf("s3 retention_count must be positive, got %d", *c.Storage.S3.RetentionCount)
+		}
+	}
+
+	if c.Storage.Local != nil {
+		// Validate retention_period
+		if c.Storage.Local.RetentionPeriod != nil {
+			if _, err := c.Storage.Local.GetEffectiveRetentionDays(); err != nil {
+				return fmt.Errorf("local retention_period validation failed: %w", err)
+			}
+		}
+
+		// Validate retention_count
+		if c.Storage.Local.RetentionCount != nil && *c.Storage.Local.RetentionCount <= 0 {
+			return fmt.Errorf("local retention_count must be positive, got %d", *c.Storage.Local.RetentionCount)
+		}
+	}
+
 	return nil
 }
internal/config/storage/local.go

Lines changed: 4 additions & 0 deletions

@@ -2,4 +2,8 @@ package storage

 type LocalStorage struct {
 	Directory string `hcl:"directory"`
+
+	// Retention settings
+	RetentionPeriod *string `hcl:"retention_period"`
+	RetentionCount  *int    `hcl:"retention_count"`
 }
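
For reference, this is how a local block from the README's configuration would decode into the struct above via hclsimple; the wrapper type and file name here are assumptions made only so the example is self-contained:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2/hclsimple"
)

// localBlock mirrors the LocalStorage fields added in this commit; pointer
// fields are optional attributes when decoded with gohcl/hclsimple.
type localBlock struct {
	Directory       string  `hcl:"directory"`
	RetentionPeriod *string `hcl:"retention_period"`
	RetentionCount  *int    `hcl:"retention_count"`
}

// exampleConfig is a hypothetical wrapper, not the project's real Config type.
type exampleConfig struct {
	Local localBlock `hcl:"local,block"`
}

func main() {
	src := []byte(`
local {
  directory        = "/var/backups/postgres"
  retention_period = "1 week"
  retention_count  = 5
}
`)

	var cfg exampleConfig
	if err := hclsimple.Decode("example.hcl", src, nil, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Local.Directory, *cfg.Local.RetentionPeriod, *cfg.Local.RetentionCount)
}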
