-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
[proxy] add access log cleanup #5376
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
ae38bfe
5783f8d
88cf3aa
40b10b0
c0f219d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,281 @@ | ||
| package manager | ||
|
|
||
| import ( | ||
| "context" | ||
| "testing" | ||
| "time" | ||
|
|
||
| "github.com/golang/mock/gomock" | ||
| "github.com/stretchr/testify/assert" | ||
| "github.com/stretchr/testify/require" | ||
|
|
||
| "github.com/netbirdio/netbird/management/server/store" | ||
| ) | ||
|
|
||
| func TestCleanupOldAccessLogs(t *testing.T) { | ||
| tests := []struct { | ||
| name string | ||
| retentionDays int | ||
| setupMock func(*store.MockStore) | ||
| expectedCount int64 | ||
| expectedError bool | ||
| }{ | ||
| { | ||
| name: "cleanup logs older than retention period", | ||
| retentionDays: 30, | ||
| setupMock: func(mockStore *store.MockStore) { | ||
| mockStore.EXPECT(). | ||
| DeleteOldAccessLogs(gomock.Any(), gomock.Any()). | ||
| DoAndReturn(func(ctx context.Context, olderThan time.Time) (int64, error) { | ||
| expectedCutoff := time.Now().AddDate(0, 0, -30) | ||
| timeDiff := olderThan.Sub(expectedCutoff) | ||
| if timeDiff.Abs() > time.Second { | ||
| t.Errorf("cutoff time not as expected: got %v, want ~%v", olderThan, expectedCutoff) | ||
| } | ||
| return 5, nil | ||
| }) | ||
| }, | ||
| expectedCount: 5, | ||
| expectedError: false, | ||
| }, | ||
| { | ||
| name: "no logs to cleanup", | ||
| retentionDays: 30, | ||
| setupMock: func(mockStore *store.MockStore) { | ||
| mockStore.EXPECT(). | ||
| DeleteOldAccessLogs(gomock.Any(), gomock.Any()). | ||
| Return(int64(0), nil) | ||
| }, | ||
| expectedCount: 0, | ||
| expectedError: false, | ||
| }, | ||
| { | ||
| name: "zero retention days skips cleanup", | ||
| retentionDays: 0, | ||
| setupMock: func(mockStore *store.MockStore) { | ||
| // No expectations - DeleteOldAccessLogs should not be called | ||
| }, | ||
| expectedCount: 0, | ||
| expectedError: false, | ||
| }, | ||
| { | ||
| name: "negative retention days skips cleanup", | ||
| retentionDays: -10, | ||
| setupMock: func(mockStore *store.MockStore) { | ||
| // No expectations - DeleteOldAccessLogs should not be called | ||
| }, | ||
| expectedCount: 0, | ||
| expectedError: false, | ||
| }, | ||
| } | ||
|
|
||
| for _, tt := range tests { | ||
| t.Run(tt.name, func(t *testing.T) { | ||
| ctrl := gomock.NewController(t) | ||
| defer ctrl.Finish() | ||
|
|
||
| mockStore := store.NewMockStore(ctrl) | ||
| tt.setupMock(mockStore) | ||
|
|
||
| manager := &managerImpl{ | ||
| store: mockStore, | ||
| } | ||
|
|
||
| ctx := context.Background() | ||
| deletedCount, err := manager.CleanupOldAccessLogs(ctx, tt.retentionDays) | ||
|
|
||
| if tt.expectedError { | ||
| require.Error(t, err) | ||
| } else { | ||
| require.NoError(t, err) | ||
| } | ||
| assert.Equal(t, tt.expectedCount, deletedCount, "unexpected number of deleted logs") | ||
| }) | ||
| } | ||
| } | ||
|
|
||
| func TestCleanupWithExactBoundary(t *testing.T) { | ||
| ctrl := gomock.NewController(t) | ||
| defer ctrl.Finish() | ||
|
|
||
| mockStore := store.NewMockStore(ctrl) | ||
|
|
||
| mockStore.EXPECT(). | ||
| DeleteOldAccessLogs(gomock.Any(), gomock.Any()). | ||
| DoAndReturn(func(ctx context.Context, olderThan time.Time) (int64, error) { | ||
| expectedCutoff := time.Now().AddDate(0, 0, -30) | ||
| timeDiff := olderThan.Sub(expectedCutoff) | ||
| assert.Less(t, timeDiff.Abs(), time.Second, "cutoff time should be close to expected value") | ||
| return 1, nil | ||
| }) | ||
|
|
||
| manager := &managerImpl{ | ||
| store: mockStore, | ||
| } | ||
|
|
||
| ctx := context.Background() | ||
| deletedCount, err := manager.CleanupOldAccessLogs(ctx, 30) | ||
|
|
||
| require.NoError(t, err) | ||
| assert.Equal(t, int64(1), deletedCount) | ||
| } | ||
|
|
||
| func TestStartPeriodicCleanup(t *testing.T) { | ||
| t.Run("periodic cleanup disabled with zero retention", func(t *testing.T) { | ||
| ctrl := gomock.NewController(t) | ||
| defer ctrl.Finish() | ||
|
|
||
| mockStore := store.NewMockStore(ctrl) | ||
| // No expectations - cleanup should not run | ||
|
|
||
| manager := &managerImpl{ | ||
| store: mockStore, | ||
| } | ||
|
|
||
| ctx, cancel := context.WithCancel(context.Background()) | ||
| defer cancel() | ||
|
|
||
| manager.StartPeriodicCleanup(ctx, 0, 1) | ||
|
|
||
| time.Sleep(100 * time.Millisecond) | ||
|
|
||
| // If DeleteOldAccessLogs was called, the test will fail due to unexpected call | ||
| }) | ||
|
|
||
| t.Run("periodic cleanup runs immediately on start", func(t *testing.T) { | ||
| ctrl := gomock.NewController(t) | ||
| defer ctrl.Finish() | ||
|
|
||
| mockStore := store.NewMockStore(ctrl) | ||
|
|
||
| mockStore.EXPECT(). | ||
| DeleteOldAccessLogs(gomock.Any(), gomock.Any()). | ||
| Return(int64(2), nil). | ||
| Times(1) | ||
|
|
||
| manager := &managerImpl{ | ||
| store: mockStore, | ||
| } | ||
|
|
||
| ctx, cancel := context.WithCancel(context.Background()) | ||
| defer cancel() | ||
|
|
||
| manager.StartPeriodicCleanup(ctx, 30, 24) | ||
|
|
||
| time.Sleep(200 * time.Millisecond) | ||
|
|
||
| // Expectations verified by gomock on defer ctrl.Finish() | ||
| }) | ||
|
|
||
| t.Run("periodic cleanup stops on context cancel", func(t *testing.T) { | ||
| ctrl := gomock.NewController(t) | ||
| defer ctrl.Finish() | ||
|
|
||
| mockStore := store.NewMockStore(ctrl) | ||
|
|
||
| mockStore.EXPECT(). | ||
| DeleteOldAccessLogs(gomock.Any(), gomock.Any()). | ||
| Return(int64(1), nil). | ||
| Times(1) | ||
|
|
||
| manager := &managerImpl{ | ||
| store: mockStore, | ||
| } | ||
|
|
||
| ctx, cancel := context.WithCancel(context.Background()) | ||
|
|
||
| manager.StartPeriodicCleanup(ctx, 30, 24) | ||
|
|
||
| time.Sleep(100 * time.Millisecond) | ||
|
|
||
| cancel() | ||
|
|
||
| time.Sleep(200 * time.Millisecond) | ||
|
|
||
| }) | ||
|
|
||
| t.Run("cleanup interval defaults to 24 hours when invalid", func(t *testing.T) { | ||
| ctrl := gomock.NewController(t) | ||
| defer ctrl.Finish() | ||
|
|
||
| mockStore := store.NewMockStore(ctrl) | ||
|
|
||
| mockStore.EXPECT(). | ||
| DeleteOldAccessLogs(gomock.Any(), gomock.Any()). | ||
| Return(int64(0), nil). | ||
| Times(1) | ||
|
|
||
| manager := &managerImpl{ | ||
| store: mockStore, | ||
| } | ||
|
|
||
| ctx, cancel := context.WithCancel(context.Background()) | ||
| defer cancel() | ||
|
|
||
| manager.StartPeriodicCleanup(ctx, 30, 0) | ||
|
|
||
| time.Sleep(100 * time.Millisecond) | ||
|
|
||
| manager.StopPeriodicCleanup() | ||
| }) | ||
|
|
||
| t.Run("cleanup interval uses configured hours", func(t *testing.T) { | ||
| ctrl := gomock.NewController(t) | ||
| defer ctrl.Finish() | ||
|
|
||
| mockStore := store.NewMockStore(ctrl) | ||
|
|
||
| mockStore.EXPECT(). | ||
| DeleteOldAccessLogs(gomock.Any(), gomock.Any()). | ||
| Return(int64(3), nil). | ||
| Times(1) | ||
|
|
||
| manager := &managerImpl{ | ||
| store: mockStore, | ||
| } | ||
|
|
||
| ctx, cancel := context.WithCancel(context.Background()) | ||
| defer cancel() | ||
|
|
||
| manager.StartPeriodicCleanup(ctx, 30, 12) | ||
|
|
||
| time.Sleep(100 * time.Millisecond) | ||
|
|
||
| manager.StopPeriodicCleanup() | ||
| }) | ||
| } | ||
|
|
||
| func TestStopPeriodicCleanup(t *testing.T) { | ||
| ctrl := gomock.NewController(t) | ||
| defer ctrl.Finish() | ||
|
|
||
| mockStore := store.NewMockStore(ctrl) | ||
|
|
||
| mockStore.EXPECT(). | ||
| DeleteOldAccessLogs(gomock.Any(), gomock.Any()). | ||
| Return(int64(1), nil). | ||
| Times(1) | ||
|
|
||
| manager := &managerImpl{ | ||
| store: mockStore, | ||
| } | ||
|
|
||
| ctx := context.Background() | ||
|
|
||
| manager.StartPeriodicCleanup(ctx, 30, 24) | ||
|
|
||
| time.Sleep(100 * time.Millisecond) | ||
|
|
||
| manager.StopPeriodicCleanup() | ||
|
|
||
| time.Sleep(200 * time.Millisecond) | ||
|
|
||
| // Expectations verified by gomock - would fail if more than 1 call happened | ||
| } | ||
|
|
||
| func TestStopPeriodicCleanup_NotStarted(t *testing.T) { | ||
| manager := &managerImpl{} | ||
|
|
||
| // Should not panic if cleanup was never started | ||
| manager.StopPeriodicCleanup() | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -197,6 +197,11 @@ func (s *BaseServer) ProxyTokenStore() *nbgrpc.OneTimeTokenStore { | |
| func (s *BaseServer) AccessLogsManager() accesslogs.Manager { | ||
| return Create(s, func() accesslogs.Manager { | ||
| accessLogManager := accesslogsmanager.NewManager(s.Store(), s.PermissionsManager(), s.GeoLocationManager()) | ||
| accessLogManager.StartPeriodicCleanup( | ||
| context.Background(), | ||
| s.Config.ReverseProxy.AccessLogRetentionDays, | ||
| s.Config.ReverseProxy.AccessLogCleanupIntervalHours, | ||
| ) | ||
| return accessLogManager | ||
| }) | ||
| } | ||
|
Comment on lines
197
to
207
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain🏁 Script executed: # First, let's check the directory structure and locate the file
find . -name "boot.go" -type f | head -20Repository: netbirdio/netbird Length of output: 98 🏁 Script executed: # Search for StopPeriodicCleanup usage in the codebase
rg -n "StopPeriodicCleanup" --type go -A 2 -B 2Repository: netbirdio/netbird Length of output: 3862 🏁 Script executed: # Check the boot.go file around the AccessLogsManager function and line 126 for proxyAuthClose
rg -n "AccessLogsManager|proxyAuthClose" management/internals/server/boot.go -A 5 -B 2Repository: netbirdio/netbird Length of output: 2221 🏁 Script executed: # Look at the BaseServer struct definition
rg -n "type BaseServer struct" --type go -A 20Repository: netbirdio/netbird Length of output: 1564 🏁 Script executed: # Search for BaseServer Close or Shutdown methods
rg -n "func.*BaseServer.*Close|func.*BaseServer.*Shutdown" --type go -A 10Repository: netbirdio/netbird Length of output: 43 🏁 Script executed: # Check the full boot.go file to understand the context better
wc -l management/internals/server/boot.goRepository: netbirdio/netbird Length of output: 100 🏁 Script executed: # Check the full BaseServer struct definition
cat -n management/internals/server/server.go | head -100Repository: netbirdio/netbird Length of output: 3834 🏁 Script executed: # Search for BaseServer Close method
rg -n "func.*Close|func.*Shutdown" management/internals/server/server.go -A 15Repository: netbirdio/netbird Length of output: 43 🏁 Script executed: # Search for where proxyAuthClose is actually used/called
rg -n "proxyAuthClose" management/internals/server/ -A 3 -B 3Repository: netbirdio/netbird Length of output: 1754 🏁 Script executed: # Look at the full AccessLogsManager function in context
sed -n '195,210p' management/internals/server/boot.goRepository: netbirdio/netbird Length of output: 610 🏁 Script executed: # Get full Stop method
sed -n '218,235p' management/internals/server/server.goRepository: netbirdio/netbird Length of output: 460 🏁 Script executed: # Check the full boot.go to see how Create pattern works
rg -n "func Create" management/internals/server/boot.go -A 10Repository: netbirdio/netbird Length of output: 43 🏁 Script executed: # Check if AccessLogsManager is cached/stored
rg -n "accessLogsManager|AccessLogsManager" management/internals/server/ --type goRepository: netbirdio/netbird Length of output: 950
The 🔧 Suggested fixAdd an type BaseServer struct {
// ... existing fields
proxyAuthClose func()
+ accessLogManager accesslogs.Manager
}Then in func (s *BaseServer) AccessLogsManager() accesslogs.Manager {
return Create(s, func() accesslogs.Manager {
+ if s.accessLogManager == nil {
accessLogManager := accesslogsmanager.NewManager(s.Store(), s.PermissionsManager(), s.GeoLocationManager())
accessLogManager.StartPeriodicCleanup(
context.Background(),
s.Config.ReverseProxy.AccessLogRetentionDays,
s.Config.ReverseProxy.AccessLogCleanupIntervalHours,
)
+ s.accessLogManager = accessLogManager
+ }
- return accessLogManager
+ return s.accessLogManager
})
}And in the if s.proxyAuthClose != nil {
s.proxyAuthClose()
s.proxyAuthClose = nil
}
+if s.accessLogManager != nil {
+ s.accessLogManager.StopPeriodicCleanup()
+}
_ = s.Store().Close(ctx)🤖 Prompt for AI Agents |
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Use `t.Fatalf` instead of `t.Errorf` inside `DoAndReturn` to stop execution on assertion failure. `t.Errorf` marks the test as failed but still allows the callback to return `5, nil`, which makes the subsequent assertions pass even when the cutoff check has already failed. Use `t.Fatalf` to abort immediately.

🐛 Proposed fix
Apply the same fix to the identical pattern at line 108 in `TestCleanupWithExactBoundary`.

📝 Committable suggestion
🤖 Prompt for AI Agents