diff --git a/api/grpc/mpi/v1/files.pb.go b/api/grpc/mpi/v1/files.pb.go index 57fdff253a..3afe12305a 100644 --- a/api/grpc/mpi/v1/files.pb.go +++ b/api/grpc/mpi/v1/files.pb.go @@ -1617,11 +1617,11 @@ const file_mpi_v1_files_proto_rawDesc = "" + "\x0fGetFileResponse\x120\n" + "\bcontents\x18\x01 \x01(\v2\x14.mpi.v1.FileContentsR\bcontents\"*\n" + "\fFileContents\x12\x1a\n" + - "\bcontents\x18\x01 \x01(\fR\bcontents\"\xa2\x02\n" + + "\bcontents\x18\x01 \x01(\fR\bcontents\"\x98\x02\n" + "\bFileMeta\x12\x1c\n" + "\x04name\x18\x01 \x01(\tB\b\xbaH\x05r\x03:\x01/R\x04name\x12\x12\n" + - "\x04hash\x18\x02 \x01(\tR\x04hash\x12I\n" + - "\rmodified_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampB\b\xbaH\x05\xb2\x01\x028\x01R\fmodifiedTime\x122\n" + + "\x04hash\x18\x02 \x01(\tR\x04hash\x12?\n" + + "\rmodified_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\fmodifiedTime\x122\n" + "\vpermissions\x18\x04 \x01(\tB\x10\xbaH\rr\v2\t0[0-7]{3}R\vpermissions\x12\x12\n" + "\x04size\x18\x05 \x01(\x03R\x04size\x12D\n" + "\x10certificate_meta\x18\x06 \x01(\v2\x17.mpi.v1.CertificateMetaH\x00R\x0fcertificateMetaB\v\n" + diff --git a/api/grpc/mpi/v1/files.proto b/api/grpc/mpi/v1/files.proto index c6e0ebe566..0e2e648752 100644 --- a/api/grpc/mpi/v1/files.proto +++ b/api/grpc/mpi/v1/files.proto @@ -166,7 +166,7 @@ message FileMeta { // The hash of the file contents sha256, hex encoded string hash = 2; // Last modified time of the file (created time if never modified) - google.protobuf.Timestamp modified_time = 3 [(buf.validate.field).timestamp.lt_now = true]; + google.protobuf.Timestamp modified_time = 3; // The permission set associated with a particular file string permissions = 4 [(buf.validate.field).string.pattern = "0[0-7]{3}"]; // The size of the file in bytes diff --git a/internal/bus/topics.go b/internal/bus/topics.go index 5ad6995277..33372e4653 100644 --- a/internal/bus/topics.go +++ b/internal/bus/topics.go @@ -6,9 +6,7 @@ package bus const ( - AddInstancesTopic = 
"add-instances" UpdatedInstancesTopic = "updated-instances" - DeletedInstancesTopic = "deleted-instances" ResourceUpdateTopic = "resource-update" NginxConfigUpdateTopic = "nginx-config-update" InstanceHealthTopic = "instance-health" @@ -19,11 +17,7 @@ const ( ConnectionResetTopic = "connection-reset" ConfigApplyRequestTopic = "config-apply-request" WriteConfigSuccessfulTopic = "write-config-successful" - ReloadSuccessfulTopic = "reload-successful" EnableWatchersTopic = "enable-watchers" - ConfigApplyFailedTopic = "config-apply-failed" - ConfigApplyCompleteTopic = "config-apply-complete" - RollbackWriteTopic = "rollback-write" DataPlaneHealthRequestTopic = "data-plane-health-request" DataPlaneHealthResponseTopic = "data-plane-health-response" APIActionRequestTopic = "api-action-request" diff --git a/internal/file/file_manager_service.go b/internal/file/file_manager_service.go index 5798b09766..2e58461da0 100644 --- a/internal/file/file_manager_service.go +++ b/internal/file/file_manager_service.go @@ -31,7 +31,7 @@ import ( //counterfeiter:generate . fileOperator //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6@v6.8.1 -generate -//counterfeiter:generate . fileManagerServiceInterface +//counterfeiter:generate . FileManagerServiceInterface //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6@v6.8.1 -generate //counterfeiter:generate . fileServiceOperatorInterface @@ -86,7 +86,7 @@ type ( UpdateClient(ctx context.Context, fileServiceClient mpi.FileServiceClient) } - fileManagerServiceInterface interface { + FileManagerServiceInterface interface { ConfigApply(ctx context.Context, configApplyRequest *mpi.ConfigApplyRequest) (writeStatus model.WriteStatus, err error) Rollback(ctx context.Context, instanceID string) error diff --git a/internal/file/file_plugin.go b/internal/file/file_plugin.go deleted file mode 100644 index 58d4ace1a9..0000000000 --- a/internal/file/file_plugin.go +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright (c) F5, Inc. 
-// -// This source code is licensed under the Apache License, Version 2.0 license found in the -// LICENSE file in the root directory of this source tree. - -package file - -import ( - "context" - "errors" - "fmt" - "log/slog" - "sync" - - "github.com/nginx/agent/v3/pkg/files" - "github.com/nginx/agent/v3/pkg/id" - - mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" - "github.com/nginx/agent/v3/internal/bus" - "github.com/nginx/agent/v3/internal/config" - "github.com/nginx/agent/v3/internal/grpc" - "github.com/nginx/agent/v3/internal/logger" - "github.com/nginx/agent/v3/internal/model" - "google.golang.org/protobuf/types/known/timestamppb" -) - -var _ bus.Plugin = (*FilePlugin)(nil) - -// The file plugin only writes, deletes and checks hashes of files -// the file plugin does not care about the instance type - -type FilePlugin struct { - manifestLock *sync.RWMutex - agentConfigMutex *sync.Mutex - messagePipe bus.MessagePipeInterface - config *config.Config - conn grpc.GrpcConnectionInterface - fileManagerService fileManagerServiceInterface - serverType model.ServerType -} - -func NewFilePlugin(agentConfig *config.Config, grpcConnection grpc.GrpcConnectionInterface, - serverType model.ServerType, manifestLock *sync.RWMutex, -) *FilePlugin { - return &FilePlugin{ - config: agentConfig, - conn: grpcConnection, - serverType: serverType, - manifestLock: manifestLock, - agentConfigMutex: &sync.Mutex{}, - } -} - -func (fp *FilePlugin) Init(ctx context.Context, messagePipe bus.MessagePipeInterface) error { - ctx = context.WithValue( - ctx, - logger.ServerTypeContextKey, slog.Any(logger.ServerTypeKey, fp.serverType.String()), - ) - slog.DebugContext(ctx, "Starting file plugin") - - fp.messagePipe = messagePipe - fp.fileManagerService = NewFileManagerService(fp.conn.FileServiceClient(), fp.config, fp.manifestLock) - - return nil -} - -func (fp *FilePlugin) Close(ctx context.Context) error { - ctx = context.WithValue( - ctx, - logger.ServerTypeContextKey, 
slog.Any(logger.ServerTypeKey, fp.serverType.String()), - ) - slog.InfoContext(ctx, "Closing file plugin") - - return fp.conn.Close(ctx) -} - -func (fp *FilePlugin) Info() *bus.Info { - name := "file" - if fp.serverType.String() == model.Auxiliary.String() { - name = "auxiliary-file" - } - - return &bus.Info{ - Name: name, - } -} - -//nolint:revive,cyclop // Cyclomatic complexity is acceptable for this function -func (fp *FilePlugin) Process(ctx context.Context, msg *bus.Message) { - ctxWithMetadata := fp.config.NewContextWithLabels(ctx) - - if logger.ServerType(ctx) == "" { - ctxWithMetadata = context.WithValue( - ctxWithMetadata, - logger.ServerTypeContextKey, slog.Any(logger.ServerTypeKey, fp.serverType.String()), - ) - } - - if logger.ServerType(ctxWithMetadata) == fp.serverType.String() { - switch msg.Topic { - case bus.ConnectionResetTopic: - fp.handleConnectionReset(ctxWithMetadata, msg) - case bus.ConnectionCreatedTopic: - slog.DebugContext(ctxWithMetadata, "File plugin received connection created message") - fp.fileManagerService.SetIsConnected(true) - case bus.NginxConfigUpdateTopic: - fp.handleNginxConfigUpdate(ctxWithMetadata, msg) - case bus.ConfigUploadRequestTopic: - fp.handleConfigUploadRequest(ctxWithMetadata, msg) - case bus.ConfigApplyRequestTopic: - fp.handleConfigApplyRequest(ctxWithMetadata, msg) - case bus.ConfigApplyCompleteTopic: - fp.handleConfigApplyComplete(ctxWithMetadata, msg) - case bus.ReloadSuccessfulTopic: - fp.handleReloadSuccess(ctxWithMetadata, msg) - case bus.ConfigApplyFailedTopic: - fp.handleConfigApplyFailedRequest(ctxWithMetadata, msg) - case bus.AgentConfigUpdateTopic: - fp.handleAgentConfigUpdate(ctxWithMetadata, msg) - default: - slog.DebugContext(ctxWithMetadata, "File plugin received unknown topic", "topic", msg.Topic) - } - } -} - -func (fp *FilePlugin) Subscriptions() []string { - if fp.serverType == model.Auxiliary { - return []string{ - bus.ConnectionResetTopic, - bus.ConnectionCreatedTopic, - 
bus.NginxConfigUpdateTopic, - bus.ConfigUploadRequestTopic, - } - } - - return []string{ - bus.ConnectionResetTopic, - bus.ConnectionCreatedTopic, - bus.NginxConfigUpdateTopic, - bus.ConfigUploadRequestTopic, - bus.ConfigApplyRequestTopic, - bus.ConfigApplyFailedTopic, - bus.ReloadSuccessfulTopic, - bus.ConfigApplyCompleteTopic, - bus.AgentConfigUpdateTopic, - } -} - -func (fp *FilePlugin) Reconfigure(ctx context.Context, agentConfig *config.Config) error { - slog.DebugContext(ctx, "File plugin is reconfiguring to update agent configuration") - - fp.agentConfigMutex.Lock() - defer fp.agentConfigMutex.Unlock() - - fp.config = agentConfig - - return nil -} - -func (fp *FilePlugin) enableWatchers(ctx context.Context, - configContext *model.NginxConfigContext, - instanceID string, -) { - enableWatcher := &model.EnableWatchers{ - ConfigContext: configContext, - InstanceID: instanceID, - } - - fp.messagePipe.Process(ctx, &bus.Message{ - Data: enableWatcher, - Topic: bus.EnableWatchersTopic, - }) -} - -func (fp *FilePlugin) handleConnectionReset(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "File plugin received connection reset message") - if newConnection, ok := msg.Data.(grpc.GrpcConnectionInterface); ok { - var reconnect bool - err := fp.conn.Close(ctx) - if err != nil { - slog.ErrorContext(ctx, "File plugin: unable to close connection", "error", err) - } - fp.conn = newConnection - - reconnect = fp.fileManagerService.IsConnected() - fp.fileManagerService.ResetClient(ctx, fp.conn.FileServiceClient()) - fp.fileManagerService.SetIsConnected(reconnect) - - slog.DebugContext(ctx, "File manager service client reset successfully") - } -} - -func (fp *FilePlugin) handleConfigApplyComplete(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "File plugin received config apply complete message") - response, ok := msg.Data.(*mpi.DataPlaneResponse) - - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.DataPlaneResponse", 
"payload", msg.Data) - return - } - - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) - fp.fileManagerService.ClearCache() - fp.enableWatchers(ctx, &model.NginxConfigContext{}, response.GetInstanceId()) -} - -func (fp *FilePlugin) handleReloadSuccess(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "File plugin received reload success message", "data", msg.Data) - - successMessage, ok := msg.Data.(*model.ReloadSuccess) - - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *model.ReloadSuccess", "payload", msg.Data) - return - } - - fp.fileManagerService.ClearCache() - fp.enableWatchers(ctx, successMessage.ConfigContext, successMessage.DataPlaneResponse.GetInstanceId()) - - if successMessage.ConfigContext.Files != nil { - slog.DebugContext(ctx, "Changes made during config apply, update files on disk") - updateError := fp.fileManagerService.UpdateCurrentFilesOnDisk( - ctx, - files.ConvertToMapOfFiles(successMessage.ConfigContext.Files), - true, - ) - if updateError != nil { - slog.ErrorContext(ctx, "Unable to update current files on disk", "error", updateError) - } - } - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: successMessage.DataPlaneResponse}) -} - -func (fp *FilePlugin) handleConfigApplyFailedRequest(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "File plugin received config failed message") - - data, ok := msg.Data.(*model.ConfigApplyMessage) - if data.InstanceID == "" || !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *model.ConfigApplyMessage", - "payload", msg.Data) - fp.fileManagerService.ClearCache() - - return - } - - err := fp.fileManagerService.Rollback(ctx, data.InstanceID) - if err != nil { - rollbackResponse := fp.createDataPlaneResponse( - data.CorrelationID, - mpi.CommandResponse_COMMAND_STATUS_ERROR, - "Rollback failed", - data.InstanceID, - err.Error(), - ) - - applyResponse := 
fp.createDataPlaneResponse( - data.CorrelationID, - mpi.CommandResponse_COMMAND_STATUS_FAILURE, - "Config apply failed, rollback failed", - data.InstanceID, - data.Error.Error(), - ) - - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: rollbackResponse}) - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.ConfigApplyCompleteTopic, Data: applyResponse}) - - return - } - - // Send RollbackWriteTopic with Correlation and Instance ID for use by resource plugin - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.RollbackWriteTopic, Data: data}) -} - -func (fp *FilePlugin) handleConfigApplyRequest(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "File plugin received config apply request message") - var response *mpi.DataPlaneResponse - correlationID := logger.CorrelationID(ctx) - - managementPlaneRequest, ok := msg.Data.(*mpi.ManagementPlaneRequest) - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.ManagementPlaneRequest", - "payload", msg.Data) - - return - } - - request, requestOk := managementPlaneRequest.GetRequest().(*mpi.ManagementPlaneRequest_ConfigApplyRequest) - if !requestOk { - slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.ManagementPlaneRequest_ConfigApplyRequest", - "payload", msg.Data) - - return - } - - configApplyRequest := request.ConfigApplyRequest - instanceID := configApplyRequest.GetOverview().GetConfigVersion().GetInstanceId() - - writeStatus, err := fp.fileManagerService.ConfigApply(ctx, configApplyRequest) - - switch writeStatus { - case model.NoChange: - slog.DebugContext(ctx, "No changes required for config apply request") - dpResponse := fp.createDataPlaneResponse( - correlationID, - mpi.CommandResponse_COMMAND_STATUS_OK, - "Config apply successful, no files to change", - instanceID, - "", - ) - - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.ConfigApplyCompleteTopic, Data: dpResponse}) - - return - case model.Error: - slog.ErrorContext( - 
ctx, - "Failed to apply config changes", - "instance_id", instanceID, - "error", err, - ) - response = fp.createDataPlaneResponse( - correlationID, - mpi.CommandResponse_COMMAND_STATUS_FAILURE, - "Config apply failed", - instanceID, - err.Error(), - ) - - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.ConfigApplyCompleteTopic, Data: response}) - - return - case model.RollbackRequired: - slog.ErrorContext( - ctx, - "Failed to apply config changes, rolling back", - "instance_id", instanceID, - "error", err, - ) - - response = fp.createDataPlaneResponse( - correlationID, - mpi.CommandResponse_COMMAND_STATUS_ERROR, - "Config apply failed, rolling back config", - instanceID, - err.Error(), - ) - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) - - rollbackErr := fp.fileManagerService.Rollback( - ctx, - instanceID, - ) - if rollbackErr != nil { - // include both the original apply error and the rollback error so the management plane - // receives actionable information about what failed during apply and what failed during rollback - applyErr := fmt.Errorf("config apply error: %w", err) - rbErr := fmt.Errorf("rollback error: %w", rollbackErr) - combinedErr := errors.Join(applyErr, rbErr) - - rollbackResponse := fp.createDataPlaneResponse( - correlationID, - mpi.CommandResponse_COMMAND_STATUS_FAILURE, - "Config apply failed, rollback failed", - instanceID, - combinedErr.Error(), - ) - - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.ConfigApplyCompleteTopic, Data: rollbackResponse}) - - return - } - - response = fp.createDataPlaneResponse( - correlationID, - mpi.CommandResponse_COMMAND_STATUS_FAILURE, - "Config apply failed, rollback successful", - instanceID, - err.Error(), - ) - - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.ConfigApplyCompleteTopic, Data: response}) - - return - case model.OK: - slog.DebugContext(ctx, "Changes required for config apply request") - // Send WriteConfigSuccessfulTopic with 
Correlation and Instance ID for use by resource plugin - data := &model.ConfigApplyMessage{ - CorrelationID: correlationID, - InstanceID: instanceID, - } - - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.WriteConfigSuccessfulTopic, Data: data}) - } -} - -func (fp *FilePlugin) handleNginxConfigUpdate(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "File plugin received nginx config update message") - nginxConfigContext, ok := msg.Data.(*model.NginxConfigContext) - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *model.NginxConfigContext", "payload", msg.Data) - - return - } - - fp.fileManagerService.ConfigUpdate(ctx, nginxConfigContext) -} - -func (fp *FilePlugin) handleConfigUploadRequest(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "File plugin received config upload request message") - managementPlaneRequest, ok := msg.Data.(*mpi.ManagementPlaneRequest) - if !ok { - slog.ErrorContext( - ctx, - "Unable to cast message payload to *mpi.ManagementPlaneRequest", - "payload", msg.Data, - ) - - return - } - - configUploadRequest := managementPlaneRequest.GetConfigUploadRequest() - - correlationID := logger.CorrelationID(ctx) - - updatingFilesError := fp.fileManagerService.ConfigUpload(ctx, configUploadRequest) - - response := &mpi.DataPlaneResponse{ - MessageMeta: &mpi.MessageMeta{ - MessageId: id.GenerateMessageID(), - CorrelationId: correlationID, - Timestamp: timestamppb.Now(), - }, - CommandResponse: &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_OK, - Message: "Successfully updated all files", - }, - InstanceId: configUploadRequest.GetOverview().GetConfigVersion().GetInstanceId(), - RequestType: mpi.DataPlaneResponse_CONFIG_UPLOAD_REQUEST, - } - - if updatingFilesError != nil { - response.CommandResponse.Status = mpi.CommandResponse_COMMAND_STATUS_FAILURE - response.CommandResponse.Message = "Failed to update all files" - response.CommandResponse.Error = updatingFilesError.Error() 
- } - - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) -} - -func (fp *FilePlugin) handleAgentConfigUpdate(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "File plugin received agent config update message") - - fp.agentConfigMutex.Lock() - defer fp.agentConfigMutex.Unlock() - - agentConfig, ok := msg.Data.(*config.Config) - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *config.Config", "payload", msg.Data) - return - } - - fp.config = agentConfig -} - -func (fp *FilePlugin) createDataPlaneResponse( - correlationID string, - status mpi.CommandResponse_CommandStatus, - message, instanceID, err string, -) *mpi.DataPlaneResponse { - return &mpi.DataPlaneResponse{ - MessageMeta: &mpi.MessageMeta{ - MessageId: id.GenerateMessageID(), - CorrelationId: correlationID, - Timestamp: timestamppb.Now(), - }, - CommandResponse: &mpi.CommandResponse{ - Status: status, - Message: message, - Error: err, - }, - InstanceId: instanceID, - RequestType: mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, - } -} diff --git a/internal/file/file_plugin_test.go b/internal/file/file_plugin_test.go deleted file mode 100644 index 74aa6024a6..0000000000 --- a/internal/file/file_plugin_test.go +++ /dev/null @@ -1,528 +0,0 @@ -// Copyright (c) F5, Inc. -// -// This source code is licensed under the Apache License, Version 2.0 license found in the -// LICENSE file in the root directory of this source tree. 
- -package file - -import ( - "context" - "errors" - "os" - "sync" - "testing" - "time" - - "github.com/nginx/agent/v3/internal/bus/busfakes" - "google.golang.org/protobuf/types/known/timestamppb" - - mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" - "github.com/nginx/agent/v3/api/grpc/mpi/v1/v1fakes" - "github.com/nginx/agent/v3/internal/bus" - "github.com/nginx/agent/v3/internal/file/filefakes" - "github.com/nginx/agent/v3/internal/grpc/grpcfakes" - "github.com/nginx/agent/v3/internal/model" - "github.com/nginx/agent/v3/pkg/files" - "github.com/nginx/agent/v3/pkg/id" - "github.com/nginx/agent/v3/test/helpers" - "github.com/nginx/agent/v3/test/protos" - "github.com/nginx/agent/v3/test/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFilePlugin_Info(t *testing.T) { - filePlugin := NewFilePlugin(types.AgentConfig(), &grpcfakes.FakeGrpcConnectionInterface{}, - model.Command, &sync.RWMutex{}) - assert.Equal(t, "file", filePlugin.Info().Name) -} - -func TestFilePlugin_Close(t *testing.T) { - ctx := context.Background() - fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - - filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) - filePlugin.Close(ctx) - - assert.Equal(t, 1, fakeGrpcConnection.CloseCallCount()) -} - -func TestFilePlugin_Subscriptions(t *testing.T) { - filePlugin := NewFilePlugin(types.AgentConfig(), &grpcfakes.FakeGrpcConnectionInterface{}, - model.Command, &sync.RWMutex{}) - assert.Equal( - t, - []string{ - bus.ConnectionResetTopic, - bus.ConnectionCreatedTopic, - bus.NginxConfigUpdateTopic, - bus.ConfigUploadRequestTopic, - bus.ConfigApplyRequestTopic, - bus.ConfigApplyFailedTopic, - bus.ReloadSuccessfulTopic, - bus.ConfigApplyCompleteTopic, - bus.AgentConfigUpdateTopic, - }, - filePlugin.Subscriptions(), - ) - - readOnlyFilePlugin := NewFilePlugin(types.AgentConfig(), &grpcfakes.FakeGrpcConnectionInterface{}, - model.Auxiliary, 
&sync.RWMutex{}) - assert.Equal(t, []string{ - bus.ConnectionResetTopic, - bus.ConnectionCreatedTopic, - bus.NginxConfigUpdateTopic, - bus.ConfigUploadRequestTopic, - }, readOnlyFilePlugin.Subscriptions()) -} - -func TestFilePlugin_Process_NginxConfigUpdateTopic(t *testing.T) { - ctx := context.Background() - - fileMeta := protos.FileMeta("/etc/nginx/nginx/conf", "") - - message := &model.NginxConfigContext{ - Files: []*mpi.File{ - { - FileMeta: fileMeta, - }, - }, - } - - fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fakeFileServiceClient.UpdateOverviewReturns(&mpi.UpdateOverviewResponse{ - Overview: nil, - }, nil) - - fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) - messagePipe := busfakes.NewFakeMessagePipe() - - filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) - err := filePlugin.Init(ctx, messagePipe) - require.NoError(t, err) - - filePlugin.Process(ctx, &bus.Message{Topic: bus.ConnectionCreatedTopic}) - filePlugin.Process(ctx, &bus.Message{Topic: bus.NginxConfigUpdateTopic, Data: message}) - - assert.Eventually( - t, - func() bool { return fakeFileServiceClient.UpdateOverviewCallCount() == 1 }, - 2*time.Second, - 10*time.Millisecond, - ) -} - -func TestFilePlugin_Process_ConfigApplyRequestTopic(t *testing.T) { - ctx := context.Background() - tempDir := t.TempDir() - - filePath := tempDir + "/nginx.conf" - fileContent := []byte("location /test {\n return 200 \"Test location\\n\";\n}") - fileHash := files.GenerateHash(fileContent) - - message := &mpi.ManagementPlaneRequest{ - Request: &mpi.ManagementPlaneRequest_ConfigApplyRequest{ - ConfigApplyRequest: protos.CreateConfigApplyRequest(protos.FileOverview(filePath, fileHash)), - }, - } - fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - agentConfig := types.AgentConfig() - agentConfig.AllowedDirectories = []string{tempDir} - - tests := []struct { - 
message *mpi.ManagementPlaneRequest - configApplyReturnsErr error - name string - configApplyStatus model.WriteStatus - }{ - { - name: "Test 1 - Success", - configApplyReturnsErr: nil, - configApplyStatus: model.OK, - message: message, - }, - { - name: "Test 2 - Fail, Rollback", - configApplyReturnsErr: errors.New("something went wrong"), - configApplyStatus: model.RollbackRequired, - message: message, - }, - { - name: "Test 3 - Fail, No Rollback", - configApplyReturnsErr: errors.New("something went wrong"), - configApplyStatus: model.Error, - message: message, - }, - { - name: "Test 4 - Fail to cast payload", - configApplyReturnsErr: errors.New("something went wrong"), - configApplyStatus: model.Error, - message: nil, - }, - { - name: "Test 5 - No changes needed", - configApplyReturnsErr: nil, - configApplyStatus: model.NoChange, - message: message, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - fakeFileManagerService := &filefakes.FakeFileManagerServiceInterface{} - fakeFileManagerService.ConfigApplyReturns(test.configApplyStatus, test.configApplyReturnsErr) - messagePipe := busfakes.NewFakeMessagePipe() - filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command, &sync.RWMutex{}) - err := filePlugin.Init(ctx, messagePipe) - filePlugin.fileManagerService = fakeFileManagerService - require.NoError(t, err) - - filePlugin.Process(ctx, &bus.Message{Topic: bus.ConfigApplyRequestTopic, Data: test.message}) - - messages := messagePipe.Messages() - - switch { - case test.configApplyStatus == model.OK: - assert.Equal(t, bus.WriteConfigSuccessfulTopic, messages[0].Topic) - assert.Len(t, messages, 1) - - _, ok := messages[0].Data.(*model.ConfigApplyMessage) - assert.True(t, ok) - case test.configApplyStatus == model.RollbackRequired: - assert.Equal(t, bus.DataPlaneResponseTopic, messages[0].Topic) - assert.Len(t, messages, 2) - dataPlaneResponse, ok := messages[0].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - 
assert.Equal( - t, - mpi.CommandResponse_COMMAND_STATUS_ERROR, - dataPlaneResponse.GetCommandResponse().GetStatus(), - ) - assert.Equal(t, "Config apply failed, rolling back config", - dataPlaneResponse.GetCommandResponse().GetMessage()) - assert.Equal(t, test.configApplyReturnsErr.Error(), dataPlaneResponse.GetCommandResponse().GetError()) - dataPlaneResponse, ok = messages[1].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - assert.Equal(t, "Config apply failed, rollback successful", - dataPlaneResponse.GetCommandResponse().GetMessage()) - assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_FAILURE, - dataPlaneResponse.GetCommandResponse().GetStatus()) - case test.configApplyStatus == model.NoChange: - assert.Len(t, messages, 1) - - response, ok := messages[0].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - assert.Equal(t, bus.ConfigApplyCompleteTopic, messages[0].Topic) - assert.Equal( - t, - mpi.CommandResponse_COMMAND_STATUS_OK, - response.GetCommandResponse().GetStatus(), - ) - case test.message == nil: - assert.Empty(t, messages) - default: - assert.Len(t, messages, 1) - dataPlaneResponse, ok := messages[0].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - assert.Equal( - t, - mpi.CommandResponse_COMMAND_STATUS_FAILURE, - dataPlaneResponse.GetCommandResponse().GetStatus(), - ) - assert.Equal(t, "Config apply failed", dataPlaneResponse.GetCommandResponse().GetMessage()) - assert.Equal(t, test.configApplyReturnsErr.Error(), dataPlaneResponse.GetCommandResponse().GetError()) - } - }) - } -} - -func TestFilePlugin_Process_ConfigUploadRequestTopic(t *testing.T) { - ctx := context.Background() - - tempDir := os.TempDir() - testFile := helpers.CreateFileWithErrorCheck(t, tempDir, "nginx.conf") - defer helpers.RemoveFileWithErrorCheck(t, testFile.Name()) - fileMeta := protos.FileMeta(testFile.Name(), "") - - message := &mpi.ManagementPlaneRequest{ - Request: &mpi.ManagementPlaneRequest_ConfigUploadRequest{ - ConfigUploadRequest: &mpi.ConfigUploadRequest{ - 
Overview: &mpi.FileOverview{ - Files: []*mpi.File{ - { - FileMeta: fileMeta, - }, - { - FileMeta: fileMeta, - }, - }, - ConfigVersion: &mpi.ConfigVersion{ - InstanceId: "123", - Version: "f33ref3d32d3c32d3a", - }, - }, - }, - }, - } - - fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) - messagePipe := busfakes.NewFakeMessagePipe() - - filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) - err := filePlugin.Init(ctx, messagePipe) - require.NoError(t, err) - - filePlugin.Process(ctx, &bus.Message{Topic: bus.ConnectionCreatedTopic}) - filePlugin.Process(ctx, &bus.Message{Topic: bus.ConfigUploadRequestTopic, Data: message}) - - assert.Eventually( - t, - func() bool { return fakeFileServiceClient.UpdateFileCallCount() == 2 }, - 2*time.Second, - 10*time.Millisecond, - ) - - messages := messagePipe.Messages() - assert.Len(t, messages, 1) - assert.Equal(t, bus.DataPlaneResponseTopic, messages[0].Topic) - - dataPlaneResponse, ok := messages[0].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - assert.Equal( - t, - mpi.CommandResponse_COMMAND_STATUS_OK, - dataPlaneResponse.GetCommandResponse().GetStatus(), - ) -} - -func TestFilePlugin_Process_ConfigUploadRequestTopic_Failure(t *testing.T) { - ctx := context.Background() - - fileMeta := protos.FileMeta("/unknown/file.conf", "") - - message := &mpi.ManagementPlaneRequest{ - Request: &mpi.ManagementPlaneRequest_ConfigUploadRequest{ - ConfigUploadRequest: &mpi.ConfigUploadRequest{ - Overview: &mpi.FileOverview{ - Files: []*mpi.File{ - { - FileMeta: fileMeta, - }, - { - FileMeta: fileMeta, - }, - }, - ConfigVersion: protos.CreateConfigVersion(), - }, - }, - }, - } - - fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - 
fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) - messagePipe := busfakes.NewFakeMessagePipe() - - filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) - err := filePlugin.Init(ctx, messagePipe) - require.NoError(t, err) - - filePlugin.Process(ctx, &bus.Message{Topic: bus.ConnectionCreatedTopic}) - filePlugin.Process(ctx, &bus.Message{Topic: bus.ConfigUploadRequestTopic, Data: message}) - - assert.Eventually( - t, - func() bool { return len(messagePipe.Messages()) == 1 }, - 2*time.Second, - 10*time.Millisecond, - ) - - assert.Equal(t, 0, fakeFileServiceClient.UpdateFileCallCount()) - - messages := messagePipe.Messages() - assert.Len(t, messages, 1) - - assert.Equal(t, bus.DataPlaneResponseTopic, messages[0].Topic) - - dataPlaneResponse, ok := messages[0].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - assert.Equal( - t, - mpi.CommandResponse_COMMAND_STATUS_FAILURE, - dataPlaneResponse.GetCommandResponse().GetStatus(), - ) -} - -func TestFilePlugin_Process_ConfigApplyFailedTopic(t *testing.T) { - ctx := context.Background() - instanceID := protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId() - - tests := []struct { - name string - rollbackReturns error - instanceID string - }{ - { - name: "Test 1 - Rollback Success", - rollbackReturns: nil, - instanceID: instanceID, - }, - { - name: "Test 2 - Rollback Fail", - rollbackReturns: errors.New("something went wrong"), - instanceID: instanceID, - }, - - { - name: "Test 3 - Fail to cast payload", - rollbackReturns: errors.New("something went wrong"), - instanceID: "", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - mockFileManager := &filefakes.FakeFileManagerServiceInterface{} - mockFileManager.RollbackReturns(test.rollbackReturns) - - fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - 
fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) - - messagePipe := busfakes.NewFakeMessagePipe() - agentConfig := types.AgentConfig() - filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command, &sync.RWMutex{}) - - err := filePlugin.Init(ctx, messagePipe) - require.NoError(t, err) - filePlugin.fileManagerService = mockFileManager - - data := &model.ConfigApplyMessage{ - CorrelationID: "dfsbhj6-bc92-30c1-a9c9-85591422068e", - InstanceID: test.instanceID, - Error: errors.New("something went wrong with config apply"), - } - - filePlugin.Process(ctx, &bus.Message{Topic: bus.ConfigApplyFailedTopic, Data: data}) - - messages := messagePipe.Messages() - - switch { - case test.rollbackReturns == nil: - assert.Equal(t, bus.RollbackWriteTopic, messages[0].Topic) - assert.Len(t, messages, 1) - - case test.instanceID == "": - assert.Empty(t, messages) - default: - rollbackMessage, ok := messages[0].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - assert.Equal(t, "Rollback failed", rollbackMessage.GetCommandResponse().GetMessage()) - assert.Equal(t, test.rollbackReturns.Error(), rollbackMessage.GetCommandResponse().GetError()) - applyMessage, ok := messages[1].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - assert.Equal(t, "Config apply failed, rollback failed", - applyMessage.GetCommandResponse().GetMessage()) - assert.Equal(t, data.Error.Error(), applyMessage.GetCommandResponse().GetError()) - assert.Len(t, messages, 2) - } - }) - } -} - -func TestFilePlugin_Process_ConfigApplyReloadSuccessTopic(t *testing.T) { - ctx := context.Background() - instance := protos.NginxOssInstance([]string{}) - mockFileManager := &filefakes.FakeFileManagerServiceInterface{} - - messagePipe := busfakes.NewFakeMessagePipe() - agentConfig := types.AgentConfig() - fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command, &sync.RWMutex{}) - - err := filePlugin.Init(ctx, 
messagePipe) - require.NoError(t, err) - filePlugin.fileManagerService = mockFileManager - - expectedResponse := &mpi.DataPlaneResponse{ - MessageMeta: &mpi.MessageMeta{ - MessageId: id.GenerateMessageID(), - CorrelationId: "dfsbhj6-bc92-30c1-a9c9-85591422068e", - Timestamp: timestamppb.Now(), - }, - CommandResponse: &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_OK, - Message: "Config apply successful", - Error: "", - }, - InstanceId: instance.GetInstanceMeta().GetInstanceId(), - } - - filePlugin.Process(ctx, &bus.Message{Topic: bus.ReloadSuccessfulTopic, Data: &model.ReloadSuccess{ - ConfigContext: &model.NginxConfigContext{}, - DataPlaneResponse: expectedResponse, - }}) - - messages := messagePipe.Messages() - - watchers, ok := messages[0].Data.(*model.EnableWatchers) - assert.True(t, ok) - assert.Equal(t, bus.EnableWatchersTopic, messages[0].Topic) - assert.Equal(t, &model.NginxConfigContext{}, watchers.ConfigContext) - assert.Equal(t, instance.GetInstanceMeta().GetInstanceId(), watchers.InstanceID) - - response, ok := messages[1].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - assert.Equal(t, bus.DataPlaneResponseTopic, messages[1].Topic) - - assert.Equal(t, expectedResponse.GetCommandResponse().GetStatus(), response.GetCommandResponse().GetStatus()) - assert.Equal(t, expectedResponse.GetCommandResponse().GetMessage(), response.GetCommandResponse().GetMessage()) - assert.Equal(t, expectedResponse.GetCommandResponse().GetError(), response.GetCommandResponse().GetError()) - assert.Equal(t, expectedResponse.GetMessageMeta().GetCorrelationId(), response.GetMessageMeta().GetCorrelationId()) - - assert.Equal(t, expectedResponse.GetInstanceId(), response.GetInstanceId()) -} - -func TestFilePlugin_Process_ConfigApplyCompleteTopic(t *testing.T) { - ctx := context.Background() - instance := protos.NginxOssInstance([]string{}) - mockFileManager := &filefakes.FakeFileManagerServiceInterface{} - - messagePipe := busfakes.NewFakeMessagePipe() - 
agentConfig := types.AgentConfig() - fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command, &sync.RWMutex{}) - - err := filePlugin.Init(ctx, messagePipe) - require.NoError(t, err) - filePlugin.fileManagerService = mockFileManager - expectedResponse := &mpi.DataPlaneResponse{ - MessageMeta: &mpi.MessageMeta{ - MessageId: id.GenerateMessageID(), - CorrelationId: "dfsbhj6-bc92-30c1-a9c9-85591422068e", - Timestamp: timestamppb.Now(), - }, - CommandResponse: &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_OK, - Message: "Config apply successful", - Error: "", - }, - InstanceId: instance.GetInstanceMeta().GetInstanceId(), - } - - filePlugin.Process(ctx, &bus.Message{Topic: bus.ConfigApplyCompleteTopic, Data: expectedResponse}) - - messages := messagePipe.Messages() - response, ok := messages[0].Data.(*mpi.DataPlaneResponse) - assert.True(t, ok) - - assert.Equal(t, expectedResponse.GetCommandResponse().GetStatus(), response.GetCommandResponse().GetStatus()) - assert.Equal(t, expectedResponse.GetCommandResponse().GetMessage(), response.GetCommandResponse().GetMessage()) - assert.Equal(t, expectedResponse.GetCommandResponse().GetError(), response.GetCommandResponse().GetError()) - assert.Equal(t, expectedResponse.GetMessageMeta().GetCorrelationId(), response.GetMessageMeta().GetCorrelationId()) - - assert.Equal(t, expectedResponse.GetInstanceId(), response.GetInstanceId()) -} diff --git a/internal/file/filefakes/fake_file_manager_service_interface.go b/internal/file/filefakes/fake_file_manager_service_interface.go index f2af670fe8..10016297e3 100644 --- a/internal/file/filefakes/fake_file_manager_service_interface.go +++ b/internal/file/filefakes/fake_file_manager_service_interface.go @@ -6,6 +6,7 @@ import ( "sync" v1 "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "github.com/nginx/agent/v3/internal/file" "github.com/nginx/agent/v3/internal/model" ) @@ -645,3 +646,5 @@ 
func (fake *FakeFileManagerServiceInterface) recordInvocation(key string, args [ } fake.invocations[key] = append(fake.invocations[key], args) } + +var _ file.FileManagerServiceInterface = new(FakeFileManagerServiceInterface) diff --git a/internal/resource/nginx_instance_operator.go b/internal/nginx/nginx_instance_operator.go similarity index 99% rename from internal/resource/nginx_instance_operator.go rename to internal/nginx/nginx_instance_operator.go index b1d4106cff..02f34877ef 100644 --- a/internal/resource/nginx_instance_operator.go +++ b/internal/nginx/nginx_instance_operator.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. -package resource +package nginx import ( "bytes" diff --git a/internal/resource/nginx_instance_operator_test.go b/internal/nginx/nginx_instance_operator_test.go similarity index 99% rename from internal/resource/nginx_instance_operator_test.go rename to internal/nginx/nginx_instance_operator_test.go index 3e757cfc9d..dac1e53853 100644 --- a/internal/resource/nginx_instance_operator_test.go +++ b/internal/nginx/nginx_instance_operator_test.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. 
-package resource +package nginx import ( "bytes" @@ -16,7 +16,7 @@ import ( "time" "github.com/nginx/agent/v3/internal/config" - "github.com/nginx/agent/v3/internal/resource/resourcefakes" + "github.com/nginx/agent/v3/internal/nginx/nginxfakes" "github.com/nginx/agent/v3/pkg/nginxprocess" "github.com/nginx/agent/v3/test/stub" @@ -393,7 +393,7 @@ func TestInstanceOperator_checkWorkers(t *testing.T) { mockExec.RunCmdReturnsOnCall(2, bytes.NewBufferString(nginxVersionCommandOutput), nil) mockExec.RunCmdReturnsOnCall(3, bytes.NewBufferString(nginxVersionCommandOutput), nil) - mockProcessOp := &resourcefakes.FakeProcessOperator{} + mockProcessOp := &nginxfakes.FakeProcessOperator{} allProcesses := slices.Concat(test.workers, test.masterProcess) mockProcessOp.FindNginxProcessesReturnsOnCall(0, allProcesses, nil) mockProcessOp.NginxWorkerProcessesReturnsOnCall(0, test.workers) diff --git a/internal/resource/nginx_instance_process_operator.go b/internal/nginx/nginx_instance_process_operator.go similarity index 99% rename from internal/resource/nginx_instance_process_operator.go rename to internal/nginx/nginx_instance_process_operator.go index c8b2bb6b01..3999639802 100644 --- a/internal/resource/nginx_instance_process_operator.go +++ b/internal/nginx/nginx_instance_process_operator.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. 
-package resource +package nginx import ( "context" diff --git a/internal/resource/nginx_instance_process_operator_test.go b/internal/nginx/nginx_instance_process_operator_test.go similarity index 99% rename from internal/resource/nginx_instance_process_operator_test.go rename to internal/nginx/nginx_instance_process_operator_test.go index 940a4e858c..2460c290e5 100644 --- a/internal/resource/nginx_instance_process_operator_test.go +++ b/internal/nginx/nginx_instance_process_operator_test.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. -package resource +package nginx import ( "bytes" diff --git a/internal/resource/nginx_log_tailer_operator.go b/internal/nginx/nginx_log_tailer_operator.go similarity index 99% rename from internal/resource/nginx_log_tailer_operator.go rename to internal/nginx/nginx_log_tailer_operator.go index ea6da44f5e..ec282b980e 100644 --- a/internal/resource/nginx_log_tailer_operator.go +++ b/internal/nginx/nginx_log_tailer_operator.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. -package resource +package nginx import ( "context" diff --git a/internal/resource/nginx_log_tailer_operator_test.go b/internal/nginx/nginx_log_tailer_operator_test.go similarity index 99% rename from internal/resource/nginx_log_tailer_operator_test.go rename to internal/nginx/nginx_log_tailer_operator_test.go index c7643d9d0a..0d3a9c6f08 100644 --- a/internal/resource/nginx_log_tailer_operator_test.go +++ b/internal/nginx/nginx_log_tailer_operator_test.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. 
-package resource +package nginx import ( "bytes" diff --git a/internal/nginx/nginx_plugin.go b/internal/nginx/nginx_plugin.go new file mode 100644 index 0000000000..e4db227912 --- /dev/null +++ b/internal/nginx/nginx_plugin.go @@ -0,0 +1,638 @@ +// Copyright (c) F5, Inc. +// +// This source code is licensed under the Apache License, Version 2.0 license found in the +// LICENSE file in the root directory of this source tree. + +package nginx + +import ( + "context" + "errors" + "fmt" + "log/slog" + "sync" + + mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "github.com/nginx/agent/v3/internal/bus" + "github.com/nginx/agent/v3/internal/config" + response "github.com/nginx/agent/v3/internal/datasource/proto" + "github.com/nginx/agent/v3/internal/file" + "github.com/nginx/agent/v3/internal/grpc" + "github.com/nginx/agent/v3/internal/logger" + "github.com/nginx/agent/v3/internal/model" + "github.com/nginx/agent/v3/pkg/files" + "github.com/nginx/agent/v3/pkg/id" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// The Nginx plugin listens for a writeConfigSuccessfulTopic from the file plugin after the config apply +// files have been written. The Nginx plugin then, validates the config, +// reloads the instance and monitors the logs. +// This is done in the Nginx plugin to make the file plugin usable for every type of instance. 
+ +type NginxPlugin struct { + messagePipe bus.MessagePipeInterface + nginxService nginxServiceInterface + agentConfig *config.Config + agentConfigMutex *sync.Mutex + manifestLock *sync.RWMutex + conn grpc.GrpcConnectionInterface + fileManagerService file.FileManagerServiceInterface + serverType model.ServerType +} + +type errResponse struct { + Status string `json:"status"` + Text string `json:"test"` + Code string `json:"code"` +} + +type plusAPIErr struct { + Error errResponse `json:"error"` + RequestID string `json:"request_id"` + Href string `json:"href"` +} + +var _ bus.Plugin = (*NginxPlugin)(nil) + +func NewNginx(agentConfig *config.Config, grpcConnection grpc.GrpcConnectionInterface, + serverType model.ServerType, manifestLock *sync.RWMutex, +) *NginxPlugin { + return &NginxPlugin{ + agentConfig: agentConfig, + conn: grpcConnection, + serverType: serverType, + manifestLock: manifestLock, + agentConfigMutex: &sync.Mutex{}, + } +} + +func (n *NginxPlugin) Init(ctx context.Context, messagePipe bus.MessagePipeInterface) error { + ctx = context.WithValue( + ctx, + logger.ServerTypeContextKey, slog.Any(logger.ServerTypeKey, n.serverType.String()), + ) + slog.DebugContext(ctx, "Starting nginx plugin") + + n.messagePipe = messagePipe + n.nginxService = NewNginxService(ctx, n.agentConfig) + n.fileManagerService = file.NewFileManagerService(n.conn.FileServiceClient(), n.agentConfig, n.manifestLock) + + return nil +} + +func (n *NginxPlugin) Close(ctx context.Context) error { + ctx = context.WithValue( + ctx, + logger.ServerTypeContextKey, slog.Any(logger.ServerTypeKey, n.serverType.String()), + ) + slog.InfoContext(ctx, "Closing nginx plugin") + + return n.conn.Close(ctx) +} + +func (n *NginxPlugin) Info() *bus.Info { + name := "nginx" + if n.serverType.String() == model.Auxiliary.String() { + name = "auxiliary-nginx" + } + + return &bus.Info{ + Name: name, + } +} + +//nolint:revive,cyclop // cyclomatic complexity 16 max is 12 +func (n *NginxPlugin) Process(ctx 
context.Context, msg *bus.Message) { + ctxWithMetadata := n.agentConfig.NewContextWithLabels(ctx) + if logger.ServerType(ctx) == "" { + ctxWithMetadata = context.WithValue( + ctxWithMetadata, + logger.ServerTypeContextKey, slog.Any(logger.ServerTypeKey, n.serverType.String()), + ) + } + + switch msg.Topic { + case bus.ResourceUpdateTopic: + resourceUpdate, ok := msg.Data.(*mpi.Resource) + + if !ok { + slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.Resource", "payload", + msg.Data) + + return + } + n.nginxService.UpdateResource(ctx, resourceUpdate) + slog.DebugContext(ctx, "Nginx plugin received update resource message") + + return + case bus.APIActionRequestTopic: + n.handleAPIActionRequest(ctx, msg) + case bus.ConnectionResetTopic: + if logger.ServerType(ctxWithMetadata) == n.serverType.String() { + n.handleConnectionReset(ctxWithMetadata, msg) + } + case bus.ConnectionCreatedTopic: + if logger.ServerType(ctxWithMetadata) == n.serverType.String() { + slog.DebugContext(ctxWithMetadata, "Nginx plugin received connection created message") + n.fileManagerService.SetIsConnected(true) + } + case bus.NginxConfigUpdateTopic: + if logger.ServerType(ctxWithMetadata) == n.serverType.String() { + n.handleNginxConfigUpdate(ctxWithMetadata, msg) + } + case bus.ConfigUploadRequestTopic: + if logger.ServerType(ctxWithMetadata) == n.serverType.String() { + n.handleConfigUploadRequest(ctxWithMetadata, msg) + } + case bus.ConfigApplyRequestTopic: + if logger.ServerType(ctxWithMetadata) == n.serverType.String() { + n.handleConfigApplyRequest(ctxWithMetadata, msg) + } + default: + slog.DebugContext(ctx, "NGINX plugin received message with unknown topic", "topic", msg.Topic) + } +} + +func (n *NginxPlugin) Subscriptions() []string { + subscriptions := []string{ + bus.APIActionRequestTopic, + bus.ConnectionResetTopic, + bus.ConnectionCreatedTopic, + bus.NginxConfigUpdateTopic, + bus.ConfigUploadRequestTopic, + bus.ResourceUpdateTopic, + } + + if n.serverType == 
model.Command { + subscriptions = append(subscriptions, bus.ConfigApplyRequestTopic) + } + + return subscriptions +} + +func (n *NginxPlugin) Reconfigure(ctx context.Context, agentConfig *config.Config) error { + slog.DebugContext(ctx, "Nginx plugin is reconfiguring to update agent configuration") + + n.agentConfigMutex.Lock() + defer n.agentConfigMutex.Unlock() + + n.agentConfig = agentConfig + + return nil +} + +func (n *NginxPlugin) handleConfigUploadRequest(ctx context.Context, msg *bus.Message) { + slog.DebugContext(ctx, "Nginx plugin received config upload request message") + managementPlaneRequest, ok := msg.Data.(*mpi.ManagementPlaneRequest) + if !ok { + slog.ErrorContext( + ctx, + "Unable to cast message payload to *mpi.ManagementPlaneRequest", + "payload", msg.Data, + ) + + return + } + + configUploadRequest := managementPlaneRequest.GetConfigUploadRequest() + + correlationID := logger.CorrelationID(ctx) + + updatingFilesError := n.fileManagerService.ConfigUpload(ctx, configUploadRequest) + + dataplaneResponse := &mpi.DataPlaneResponse{ + MessageMeta: &mpi.MessageMeta{ + MessageId: id.GenerateMessageID(), + CorrelationId: correlationID, + Timestamp: timestamppb.Now(), + }, + CommandResponse: &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_OK, + Message: "Successfully updated all files", + }, + InstanceId: configUploadRequest.GetOverview().GetConfigVersion().GetInstanceId(), + RequestType: mpi.DataPlaneResponse_CONFIG_UPLOAD_REQUEST, + } + + if updatingFilesError != nil { + dataplaneResponse.CommandResponse.Status = mpi.CommandResponse_COMMAND_STATUS_FAILURE + dataplaneResponse.CommandResponse.Message = "Failed to update all files" + dataplaneResponse.CommandResponse.Error = updatingFilesError.Error() + } + + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: dataplaneResponse}) +} + +func (n *NginxPlugin) handleConnectionReset(ctx context.Context, msg *bus.Message) { + slog.DebugContext(ctx, "Nginx plugin 
received connection reset message") + + if newConnection, ok := msg.Data.(grpc.GrpcConnectionInterface); ok { + err := n.conn.Close(ctx) + if err != nil { + slog.ErrorContext(ctx, "Nginx plugin: unable to close connection", "error", err) + } + + n.conn = newConnection + + reconnect := n.fileManagerService.IsConnected() + n.fileManagerService.ResetClient(ctx, n.conn.FileServiceClient()) + n.fileManagerService.SetIsConnected(reconnect) + + slog.DebugContext(ctx, "Nginx plugin connection reset successfully") + } +} + +func (n *NginxPlugin) handleNginxConfigUpdate(ctx context.Context, msg *bus.Message) { + slog.DebugContext(ctx, "Nginx plugin received config update message") + nginxConfigContext, ok := msg.Data.(*model.NginxConfigContext) + + if !ok { + slog.ErrorContext(ctx, "Unable to cast message payload to *model.NginxConfigContext", "payload", msg.Data) + + return + } + + n.fileManagerService.ConfigUpdate(ctx, nginxConfigContext) +} + +func (n *NginxPlugin) handleAPIActionRequest(ctx context.Context, msg *bus.Message) { + slog.DebugContext(ctx, "Nginx plugin received api action request message") + managementPlaneRequest, ok := msg.Data.(*mpi.ManagementPlaneRequest) + + if !ok { + slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.ManagementPlaneRequest", "payload", + msg.Data) + + return + } + + request, requestOk := managementPlaneRequest.GetRequest().(*mpi.ManagementPlaneRequest_ActionRequest) + if !requestOk { + slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.ManagementPlaneRequest_ActionRequest", + "payload", msg.Data) + } + + instanceID := request.ActionRequest.GetInstanceId() + + switch request.ActionRequest.GetAction().(type) { + case *mpi.APIActionRequest_NginxPlusAction: + n.handleNginxPlusActionRequest(ctx, request.ActionRequest.GetNginxPlusAction(), instanceID) + default: + slog.DebugContext(ctx, "API action request not implemented yet") + } +} + +func (n *NginxPlugin) handleNginxPlusActionRequest(ctx context.Context, + action 
*mpi.NGINXPlusAction, instanceID string, +) { + correlationID := logger.CorrelationID(ctx) + instance := n.nginxService.Instance(instanceID) + apiAction := APIAction{ + NginxService: n.nginxService, + } + if instance == nil { + slog.ErrorContext(ctx, "Unable to find instance with ID", "id", instanceID) + resp := response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, + Message: "", + Error: "failed to preform API action, could not find instance with ID: " + instanceID, + }, + mpi.DataPlaneResponse_API_ACTION_REQUEST, + instanceID, + ) + + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) + + return + } + + if instance.GetInstanceMeta().GetInstanceType() != mpi.InstanceMeta_INSTANCE_TYPE_NGINX_PLUS { + slog.ErrorContext(ctx, "Failed to preform API action", "error", errors.New("instance is not NGINX Plus")) + resp := response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, + Message: "", + Error: "failed to preform API action, instance is not NGINX Plus", + }, + mpi.DataPlaneResponse_API_ACTION_REQUEST, + instanceID, + ) + + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) + + return + } + + switch action.GetAction().(type) { + case *mpi.NGINXPlusAction_UpdateHttpUpstreamServers: + slog.DebugContext(ctx, "Updating http upstream servers", "request", action.GetUpdateHttpUpstreamServers()) + resp := apiAction.HandleUpdateHTTPUpstreamsRequest(ctx, action, instance) + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) + case *mpi.NGINXPlusAction_GetHttpUpstreamServers: + slog.DebugContext(ctx, "Getting http upstream servers", "request", action.GetGetHttpUpstreamServers()) + resp := apiAction.HandleGetHTTPUpstreamsServersRequest(ctx, action, instance) + n.messagePipe.Process(ctx, &bus.Message{Topic: 
bus.DataPlaneResponseTopic, Data: resp}) + case *mpi.NGINXPlusAction_UpdateStreamServers: + slog.DebugContext(ctx, "Updating stream servers", "request", action.GetUpdateStreamServers()) + resp := apiAction.HandleUpdateStreamServersRequest(ctx, action, instance) + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) + case *mpi.NGINXPlusAction_GetStreamUpstreams: + slog.DebugContext(ctx, "Getting stream upstreams", "request", action.GetGetStreamUpstreams()) + resp := apiAction.HandleGetStreamUpstreamsRequest(ctx, instance) + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) + case *mpi.NGINXPlusAction_GetUpstreams: + slog.DebugContext(ctx, "Getting upstreams", "request", action.GetGetUpstreams()) + resp := apiAction.HandleGetUpstreamsRequest(ctx, instance) + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) + default: + slog.DebugContext(ctx, "NGINX Plus action not implemented yet") + } +} + +func (n *NginxPlugin) handleConfigApplyRequest(ctx context.Context, msg *bus.Message) { + slog.DebugContext(ctx, "Nginx plugin received config apply request message") + + var dataplaneResponse *mpi.DataPlaneResponse + correlationID := logger.CorrelationID(ctx) + + managementPlaneRequest, ok := msg.Data.(*mpi.ManagementPlaneRequest) + + if !ok { + slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.ManagementPlaneRequest", "payload", msg.Data) + return + } + + request, requestOk := managementPlaneRequest.GetRequest().(*mpi.ManagementPlaneRequest_ConfigApplyRequest) + if !requestOk { + slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.ManagementPlaneRequest_ConfigApplyRequest", + "payload", msg.Data) + + return + } + + configApplyRequest := request.ConfigApplyRequest + instanceID := configApplyRequest.GetOverview().GetConfigVersion().GetInstanceId() + + writeStatus, err := n.fileManagerService.ConfigApply(ctx, configApplyRequest) + + switch 
writeStatus { + case model.NoChange: + slog.DebugContext(ctx, "No changes required for config apply request") + dataplaneResponse = response.CreateDataPlaneResponse(correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_OK, + Message: "Config apply successful, no files to change", + Error: "", + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + n.completeConfigApply(ctx, &model.NginxConfigContext{}, dataplaneResponse) + case model.Error: + slog.ErrorContext( + ctx, + "Failed to apply config changes", + "instance_id", instanceID, + "error", err, + ) + dataplaneResponse = response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, + Message: "Config apply failed", + Error: err.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + + n.completeConfigApply(ctx, &model.NginxConfigContext{}, dataplaneResponse) + case model.RollbackRequired: + slog.ErrorContext( + ctx, + "Failed to apply config changes, rolling back", + "instance_id", instanceID, + "error", err, + ) + + dataplaneResponse = response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_ERROR, + Message: "Config apply failed, rolling back config", + Error: err.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: dataplaneResponse}) + + rollbackErr := n.fileManagerService.Rollback(ctx, instanceID) + + if rollbackErr != nil { + applyErr := fmt.Errorf("config apply error: %w", err) + rbErr := fmt.Errorf("rollback error: %w", rollbackErr) + combinedErr := errors.Join(applyErr, rbErr) + + rollbackResponse := response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, + Message: "Config apply failed, rollback failed", + Error: 
combinedErr.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + n.completeConfigApply(ctx, &model.NginxConfigContext{}, rollbackResponse) + + return + } + + dataplaneResponse = response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, + Message: "Config apply failed, rollback successful", + Error: err.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID) + n.completeConfigApply(ctx, &model.NginxConfigContext{}, dataplaneResponse) + case model.OK: + slog.DebugContext(ctx, "Changes required for config apply request") + n.applyConfig(ctx, correlationID, instanceID) + } +} + +func (n *NginxPlugin) applyConfig(ctx context.Context, correlationID, instanceID string) { + configContext, err := n.nginxService.ApplyConfig(ctx, instanceID) + if err != nil { + slog.ErrorContext( + ctx, + "Errors found during config apply, sending error status and rolling back configuration updates", + "error", err, + ) + dpResponse := response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_ERROR, + Message: "Config apply failed, rolling back config", + Error: err.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: dpResponse}) + + n.writeRollbackConfig(ctx, correlationID, instanceID, err) + + return + } + + dpResponse := response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_OK, + Message: "Config apply successful", + Error: "", + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + + if configContext.Files != nil { + slog.DebugContext(ctx, "Changes made during config apply, update files on disk") + updateError := n.fileManagerService.UpdateCurrentFilesOnDisk( + ctx, + files.ConvertToMapOfFiles(configContext.Files), + true, + 
) + if updateError != nil { + slog.ErrorContext(ctx, "Unable to update current files on disk", "error", updateError) + } + } + + n.completeConfigApply(ctx, configContext, dpResponse) +} + +func (n *NginxPlugin) writeRollbackConfig(ctx context.Context, correlationID, instanceID string, applyErr error) { + slog.DebugContext(ctx, "Starting rollback of config", "instance_id", instanceID) + if instanceID == "" { + n.fileManagerService.ClearCache() + return + } + + err := n.fileManagerService.Rollback(ctx, instanceID) + if err != nil { + configErr := fmt.Errorf("config apply error: %w", applyErr) + rbErr := fmt.Errorf("rollback error: %w", err) + combinedErr := errors.Join(configErr, rbErr) + + rollbackResponse := response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_ERROR, + Message: "Rollback failed", + Error: err.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + + applyResponse := response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, + Message: "Config apply failed, rollback failed", + Error: combinedErr.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: rollbackResponse}) + + n.completeConfigApply(ctx, &model.NginxConfigContext{}, applyResponse) + + return + } + + n.rollbackConfigApply(ctx, correlationID, instanceID, applyErr) +} + +func (n *NginxPlugin) rollbackConfigApply(ctx context.Context, correlationID, instanceID string, applyErr error) { + slog.DebugContext(ctx, "Rolling back config apply, after config written", "instance_id", instanceID) + _, err := n.nginxService.ApplyConfig(ctx, instanceID) + if err != nil { + slog.ErrorContext(ctx, "Errors found during rollback, sending failure status", "error", err) + + rollbackResponse := response.CreateDataPlaneResponse( + correlationID, + 
&mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_ERROR, + Message: "Rollback failed", + Error: err.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + + configErr := fmt.Errorf("config apply error: %w", applyErr) + rbErr := fmt.Errorf("rollback error: %w", err) + combinedErr := errors.Join(configErr, rbErr) + + applyResponse := response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, + Message: "Config apply failed, rollback failed", + Error: combinedErr.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: rollbackResponse}) + + n.completeConfigApply(ctx, &model.NginxConfigContext{}, applyResponse) + + return + } + + applyResponse := response.CreateDataPlaneResponse( + correlationID, + &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, + Message: "Config apply failed, rollback successful", + Error: applyErr.Error(), + }, + mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, + instanceID, + ) + + n.completeConfigApply(ctx, &model.NginxConfigContext{}, applyResponse) +} + +func (n *NginxPlugin) completeConfigApply(ctx context.Context, configContext *model.NginxConfigContext, + dpResponse *mpi.DataPlaneResponse, +) { + n.fileManagerService.ClearCache() + n.enableWatchers(ctx, configContext, dpResponse.GetInstanceId()) + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: dpResponse}) +} + +func (n *NginxPlugin) enableWatchers(ctx context.Context, configContext *model.NginxConfigContext, instanceID string) { + enableWatcher := &model.EnableWatchers{ + InstanceID: instanceID, + ConfigContext: configContext, + } + + n.messagePipe.Process(ctx, &bus.Message{Topic: bus.EnableWatchersTopic, Data: enableWatcher}) +} diff --git a/internal/resource/resource_plugin_test.go b/internal/nginx/nginx_plugin_test.go 
similarity index 50% rename from internal/resource/resource_plugin_test.go rename to internal/nginx/nginx_plugin_test.go index df45879f48..633370c1b0 100644 --- a/internal/resource/resource_plugin_test.go +++ b/internal/nginx/nginx_plugin_test.go @@ -3,16 +3,22 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. -package resource +package nginx import ( "bytes" "context" "encoding/json" "errors" - "sort" + "os" + "sync" "testing" + "time" + "github.com/nginx/agent/v3/api/grpc/mpi/v1/v1fakes" + "github.com/nginx/agent/v3/internal/file/filefakes" + "github.com/nginx/agent/v3/internal/grpc/grpcfakes" + "github.com/nginx/agent/v3/pkg/files" "github.com/nginx/agent/v3/test/stub" "google.golang.org/protobuf/types/known/structpb" @@ -27,170 +33,13 @@ import ( mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" "github.com/nginx/agent/v3/internal/bus" - "github.com/nginx/agent/v3/internal/resource/resourcefakes" + "github.com/nginx/agent/v3/internal/nginx/nginxfakes" "github.com/nginx/agent/v3/test/protos" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestResource_Process(t *testing.T) { - ctx := context.Background() - - updatedInstance := &mpi.Instance{ - InstanceConfig: protos.NginxOssInstance([]string{}).GetInstanceConfig(), - InstanceMeta: protos.NginxOssInstance([]string{}).GetInstanceMeta(), - InstanceRuntime: &mpi.InstanceRuntime{ - ProcessId: 56789, - BinaryPath: protos.NginxOssInstance([]string{}).GetInstanceRuntime().GetBinaryPath(), - ConfigPath: protos.NginxOssInstance([]string{}).GetInstanceRuntime().GetConfigPath(), - Details: protos.NginxOssInstance([]string{}).GetInstanceRuntime().GetDetails(), - }, - } - - tests := []struct { - name string - message *bus.Message - resource *mpi.Resource - topic string - }{ - { - name: "Test 1: New Instance Topic", - message: &bus.Message{ - Topic: bus.AddInstancesTopic, - Data: 
[]*mpi.Instance{ - protos.NginxOssInstance([]string{}), - }, - }, - resource: protos.HostResource(), - topic: bus.ResourceUpdateTopic, - }, - { - name: "Test 2: Update Instance Topic", - message: &bus.Message{ - Topic: bus.UpdatedInstancesTopic, - Data: []*mpi.Instance{ - updatedInstance, - }, - }, - resource: &mpi.Resource{ - ResourceId: protos.HostResource().GetResourceId(), - Instances: []*mpi.Instance{ - updatedInstance, - }, - Info: protos.HostResource().GetInfo(), - }, - topic: bus.ResourceUpdateTopic, - }, - { - name: "Test 3: Delete Instance Topic", - message: &bus.Message{ - Topic: bus.DeletedInstancesTopic, - Data: []*mpi.Instance{ - updatedInstance, - }, - }, - resource: &mpi.Resource{ - ResourceId: protos.HostResource().GetResourceId(), - Instances: []*mpi.Instance{}, - Info: protos.HostResource().GetInfo(), - }, - topic: bus.ResourceUpdateTopic, - }, - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - fakeResourceService := &resourcefakes.FakeResourceServiceInterface{} - fakeResourceService.AddInstancesReturns(protos.HostResource()) - fakeResourceService.UpdateInstancesReturns(test.resource) - fakeResourceService.DeleteInstancesReturns(test.resource) - messagePipe := busfakes.NewFakeMessagePipe() - - resourcePlugin := NewResource(types.AgentConfig()) - resourcePlugin.resourceService = fakeResourceService - - err := messagePipe.Register(2, []bus.Plugin{resourcePlugin}) - require.NoError(t, err) - - resourcePlugin.messagePipe = messagePipe - - resourcePlugin.Process(ctx, test.message) - - assert.Equal(t, test.topic, messagePipe.Messages()[0].Topic) - assert.Equal(t, test.resource, messagePipe.Messages()[0].Data) - }) - } -} - -func TestResource_Process_Apply(t *testing.T) { - ctx := context.Background() - - tests := []struct { - name string - message *bus.Message - applyErr error - topic []string - }{ - { - name: "Test 1: Write Config Successful Topic - Success Status", - message: &bus.Message{ - Topic: 
bus.WriteConfigSuccessfulTopic, - Data: &model.ConfigApplyMessage{ - CorrelationID: "dfsbhj6-bc92-30c1-a9c9-85591422068e", - InstanceID: protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(), - Error: nil, - }, - }, - applyErr: nil, - topic: []string{bus.ReloadSuccessfulTopic}, - }, - { - name: "Test 2: Write Config Successful Topic - Fail Status", - message: &bus.Message{ - Topic: bus.WriteConfigSuccessfulTopic, - Data: &model.ConfigApplyMessage{ - CorrelationID: "dfsbhj6-bc92-30c1-a9c9-85591422068e", - InstanceID: protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(), - Error: nil, - }, - }, - applyErr: errors.New("error reloading"), - topic: []string{bus.DataPlaneResponseTopic, bus.ConfigApplyFailedTopic}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - fakeResourceService := &resourcefakes.FakeResourceServiceInterface{} - fakeResourceService.ApplyConfigReturns(&model.NginxConfigContext{}, test.applyErr) - messagePipe := busfakes.NewFakeMessagePipe() - - resourcePlugin := NewResource(types.AgentConfig()) - resourcePlugin.resourceService = fakeResourceService - - err := messagePipe.Register(2, []bus.Plugin{resourcePlugin}) - require.NoError(t, err) - - resourcePlugin.messagePipe = messagePipe - - resourcePlugin.Process(ctx, test.message) - - assert.Equal(t, test.topic[0], messagePipe.Messages()[0].Topic) - - if len(test.topic) > 1 { - assert.Equal(t, test.topic[1], messagePipe.Messages()[1].Topic) - } - - if test.applyErr != nil { - response, ok := messagePipe.Messages()[0].Data.(*mpi.DataPlaneResponse) - assert.True(tt, ok) - assert.Equal(tt, test.applyErr.Error(), response.GetCommandResponse().GetError()) - } - }) - } -} - -func TestResource_createPlusAPIError(t *testing.T) { +func TestNginx_createPlusAPIError(t *testing.T) { s := "failed to get the HTTP servers of upstream nginx1: expected 200 response, got 404. 
error.status=404;" + " error.text=upstream not found; error.code=UpstreamNotFound; request_id=b534bdab5cb5e321e8b41b431828b270; " + "href=https://nginx.org/en/docs/http/ngx_http_api_module.html" @@ -212,7 +61,7 @@ func TestResource_createPlusAPIError(t *testing.T) { assert.Equal(t, errors.New(string(expectedJSON)), result) } -func TestResource_Process_APIAction_GetHTTPServers(t *testing.T) { +func TestNginx_Process_APIAction_GetHTTPServers(t *testing.T) { ctx := context.Background() inValidInstance := protos.NginxPlusInstance([]string{}) @@ -287,14 +136,14 @@ func TestResource_Process_APIAction_GetHTTPServers(t *testing.T) { } for _, test := range tests { - runResourceTestHelper(t, ctx, test.name, func(fakeService *resourcefakes.FakeResourceServiceInterface) { + runNginxTestHelper(t, ctx, test.name, func(fakeService *nginxfakes.FakeNginxServiceInterface) { fakeService.GetHTTPUpstreamServersReturns(test.upstreams, test.err) }, test.instance, test.message, test.topic, test.err) } } //nolint:dupl // need to refactor so that redundant code can be removed -func TestResource_Process_APIAction_UpdateHTTPUpstreams(t *testing.T) { +func TestNginx_Process_APIAction_UpdateHTTPUpstreams(t *testing.T) { ctx := context.Background() tests := []struct { instance *mpi.Instance @@ -360,24 +209,25 @@ func TestResource_Process_APIAction_UpdateHTTPUpstreams(t *testing.T) { logBuf := &bytes.Buffer{} stub.StubLoggerWith(logBuf) - fakeResourceService := &resourcefakes.FakeResourceServiceInterface{} - fakeResourceService.InstanceReturns(test.instance) - fakeResourceService.UpdateHTTPUpstreamServersReturnsOnCall(0, test.upstreams, []client.UpstreamServer{}, + fakeNginxService := &nginxfakes.FakeNginxServiceInterface{} + fakeNginxService.InstanceReturns(test.instance) + fakeNginxService.UpdateHTTPUpstreamServersReturnsOnCall(0, test.upstreams, []client.UpstreamServer{}, []client.UpstreamServer{}, test.err) - fakeResourceService.UpdateHTTPUpstreamServersReturnsOnCall(1, 
[]client.UpstreamServer{}, + fakeNginxService.UpdateHTTPUpstreamServersReturnsOnCall(1, []client.UpstreamServer{}, []client.UpstreamServer{}, []client.UpstreamServer{}, test.err) messagePipe := busfakes.NewFakeMessagePipe() - resourcePlugin := NewResource(types.AgentConfig()) - resourcePlugin.resourceService = fakeResourceService + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + nginxPlugin.nginxService = fakeNginxService - err := messagePipe.Register(2, []bus.Plugin{resourcePlugin}) + err := messagePipe.Register(2, []bus.Plugin{nginxPlugin}) require.NoError(tt, err) - resourcePlugin.messagePipe = messagePipe + nginxPlugin.messagePipe = messagePipe - resourcePlugin.Process(ctx, test.message) + nginxPlugin.Process(ctx, test.message) assert.Equal(tt, test.topic[0], messagePipe.Messages()[0].Topic) @@ -397,7 +247,7 @@ func TestResource_Process_APIAction_UpdateHTTPUpstreams(t *testing.T) { } //nolint:dupl // need to refactor so that redundant code can be removed -func TestResource_Process_APIAction_UpdateStreamServers(t *testing.T) { +func TestNginx_Process_APIAction_UpdateStreamServers(t *testing.T) { ctx := context.Background() tests := []struct { instance *mpi.Instance @@ -463,24 +313,25 @@ func TestResource_Process_APIAction_UpdateStreamServers(t *testing.T) { logBuf := &bytes.Buffer{} stub.StubLoggerWith(logBuf) - fakeResourceService := &resourcefakes.FakeResourceServiceInterface{} - fakeResourceService.InstanceReturns(test.instance) - fakeResourceService.UpdateStreamServersReturnsOnCall(0, test.upstreams, []client.StreamUpstreamServer{}, + fakeNginxService := &nginxfakes.FakeNginxServiceInterface{} + fakeNginxService.InstanceReturns(test.instance) + fakeNginxService.UpdateStreamServersReturnsOnCall(0, test.upstreams, []client.StreamUpstreamServer{}, []client.StreamUpstreamServer{}, test.err) - fakeResourceService.UpdateStreamServersReturnsOnCall(0, 
test.upstreams, []client.StreamUpstreamServer{}, + fakeNginxService.UpdateStreamServersReturnsOnCall(0, test.upstreams, []client.StreamUpstreamServer{}, []client.StreamUpstreamServer{}, test.err) messagePipe := busfakes.NewFakeMessagePipe() - resourcePlugin := NewResource(types.AgentConfig()) - resourcePlugin.resourceService = fakeResourceService + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + nginxPlugin.nginxService = fakeNginxService - err := messagePipe.Register(2, []bus.Plugin{resourcePlugin}) + err := messagePipe.Register(2, []bus.Plugin{nginxPlugin}) require.NoError(tt, err) - resourcePlugin.messagePipe = messagePipe + nginxPlugin.messagePipe = messagePipe - resourcePlugin.Process(ctx, test.message) + nginxPlugin.Process(ctx, test.message) assert.Equal(tt, test.topic[0], messagePipe.Messages()[0].Topic) @@ -499,7 +350,7 @@ func TestResource_Process_APIAction_UpdateStreamServers(t *testing.T) { } } -func TestResource_Process_APIAction_GetStreamUpstreams(t *testing.T) { +func TestNginx_Process_APIAction_GetStreamUpstreams(t *testing.T) { ctx := context.Background() inValidInstance := protos.NginxPlusInstance([]string{}) @@ -606,23 +457,24 @@ func TestResource_Process_APIAction_GetStreamUpstreams(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - fakeResourceService := &resourcefakes.FakeResourceServiceInterface{} - fakeResourceService.GetStreamUpstreamsReturns(test.upstreams, test.err) + fakeNginxService := &nginxfakes.FakeNginxServiceInterface{} + fakeNginxService.GetStreamUpstreamsReturns(test.upstreams, test.err) if test.instance.GetInstanceMeta().GetInstanceId() != "e1374cb1-462d-3b6c-9f3b-f28332b5f10f" { - fakeResourceService.InstanceReturns(test.instance) + fakeNginxService.InstanceReturns(test.instance) } messagePipe := busfakes.NewFakeMessagePipe() - resourcePlugin := NewResource(types.AgentConfig()) - 
resourcePlugin.resourceService = fakeResourceService + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + nginxPlugin.nginxService = fakeNginxService - err := messagePipe.Register(2, []bus.Plugin{resourcePlugin}) + err := messagePipe.Register(2, []bus.Plugin{nginxPlugin}) require.NoError(t, err) - resourcePlugin.messagePipe = messagePipe + nginxPlugin.messagePipe = messagePipe - resourcePlugin.Process(ctx, test.message) + nginxPlugin.Process(ctx, test.message) assert.Equal(t, test.topic[0], messagePipe.Messages()[0].Topic) @@ -640,7 +492,7 @@ func TestResource_Process_APIAction_GetStreamUpstreams(t *testing.T) { } } -func TestResource_Process_APIAction_GetUpstreams(t *testing.T) { +func TestNginx_Process_APIAction_GetUpstreams(t *testing.T) { ctx := context.Background() inValidInstance := protos.NginxPlusInstance([]string{}) @@ -754,145 +606,503 @@ func TestResource_Process_APIAction_GetUpstreams(t *testing.T) { } for _, test := range tests { - runResourceTestHelper(t, ctx, test.name, func(fakeService *resourcefakes.FakeResourceServiceInterface) { + runNginxTestHelper(t, ctx, test.name, func(fakeService *nginxfakes.FakeNginxServiceInterface) { fakeService.GetUpstreamsReturns(test.upstreams, test.err) }, test.instance, test.message, test.topic, test.err) } } -func TestResource_Process_Rollback(t *testing.T) { +func TestNginx_Subscriptions(t *testing.T) { + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + assert.Equal(t, + []string{ + bus.APIActionRequestTopic, + bus.ConnectionResetTopic, + bus.ConnectionCreatedTopic, + bus.NginxConfigUpdateTopic, + bus.ConfigUploadRequestTopic, + bus.ResourceUpdateTopic, + bus.ConfigApplyRequestTopic, + }, + nginxPlugin.Subscriptions()) + + readNginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, 
model.Auxiliary, &sync.RWMutex{}) + assert.Equal(t, + []string{ + bus.APIActionRequestTopic, + bus.ConnectionResetTopic, + bus.ConnectionCreatedTopic, + bus.NginxConfigUpdateTopic, + bus.ConfigUploadRequestTopic, + bus.ResourceUpdateTopic, + }, + readNginxPlugin.Subscriptions()) +} + +func TestNginx_Info(t *testing.T) { + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + assert.Equal(t, &bus.Info{Name: "nginx"}, nginxPlugin.Info()) + + readNginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Auxiliary, &sync.RWMutex{}) + assert.Equal(t, &bus.Info{Name: "auxiliary-nginx"}, readNginxPlugin.Info()) +} + +func TestNginx_Init(t *testing.T) { ctx := context.Background() + fakeNginxService := nginxfakes.FakeNginxServiceInterface{} - tests := []struct { - name string - message *bus.Message - rollbackErr error - topic []string - }{ - { - name: "Test 1: Rollback Write Topic - Success Status", - message: &bus.Message{ - Topic: bus.RollbackWriteTopic, - Data: &model.ConfigApplyMessage{ - CorrelationID: "dfsbhj6-bc92-30c1-a9c9-85591422068e", - InstanceID: protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(), - Error: errors.New("something went wrong with config apply"), + messagePipe := busfakes.NewFakeMessagePipe() + messagePipe.RunWithoutInit(ctx) + + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + nginxPlugin.nginxService = &fakeNginxService + err := nginxPlugin.Init(ctx, messagePipe) + require.NoError(t, err) + + messages := messagePipe.Messages() + + assert.Empty(t, messages) +} + +func TestNginx_Process_handleConfigUploadRequest(t *testing.T) { + ctx := context.Background() + + tempDir := os.TempDir() + testFile := helpers.CreateFileWithErrorCheck(t, tempDir, "nginx.conf") + defer 
helpers.RemoveFileWithErrorCheck(t, testFile.Name()) + fileMeta := protos.FileMeta(testFile.Name(), "") + + message := &mpi.ManagementPlaneRequest{ + Request: &mpi.ManagementPlaneRequest_ConfigUploadRequest{ + ConfigUploadRequest: &mpi.ConfigUploadRequest{ + Overview: &mpi.FileOverview{ + Files: []*mpi.File{ + { + FileMeta: fileMeta, + }, + { + FileMeta: fileMeta, + }, + }, + ConfigVersion: &mpi.ConfigVersion{ + InstanceId: "123", + Version: "f33ref3d32d3c32d3a", + }, }, }, - rollbackErr: nil, - topic: []string{bus.ConfigApplyCompleteTopic}, }, - { - name: "Test 2: Rollback Write Topic - Fail Status", - message: &bus.Message{ - Topic: bus.RollbackWriteTopic, - Data: &model.ConfigApplyMessage{ - CorrelationID: "", - InstanceID: protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(), - Error: errors.New("something went wrong with config apply"), + } + + fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) + messagePipe := busfakes.NewFakeMessagePipe() + + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + err := nginxPlugin.Init(ctx, messagePipe) + require.NoError(t, err) + + nginxPlugin.Process(ctx, &bus.Message{Topic: bus.ConnectionCreatedTopic}) + nginxPlugin.Process(ctx, &bus.Message{Topic: bus.ConfigUploadRequestTopic, Data: message}) + + assert.Eventually( + t, + func() bool { return fakeFileServiceClient.UpdateFileCallCount() == 2 }, + 2*time.Second, + 10*time.Millisecond, + ) + + messages := messagePipe.Messages() + assert.Len(t, messages, 1) + assert.Equal(t, bus.DataPlaneResponseTopic, messages[0].Topic) + + dataPlaneResponse, ok := messages[0].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_OK, + dataPlaneResponse.GetCommandResponse().GetStatus(), + ) +} + +func 
TestNginx_Process_handleConfigUploadRequest_Failure(t *testing.T) { + ctx := context.Background() + + fileMeta := protos.FileMeta("/unknown/file.conf", "") + + message := &mpi.ManagementPlaneRequest{ + Request: &mpi.ManagementPlaneRequest_ConfigUploadRequest{ + ConfigUploadRequest: &mpi.ConfigUploadRequest{ + Overview: &mpi.FileOverview{ + Files: []*mpi.File{ + { + FileMeta: fileMeta, + }, + { + FileMeta: fileMeta, + }, + }, + ConfigVersion: protos.CreateConfigVersion(), }, }, - rollbackErr: errors.New("error reloading"), - topic: []string{bus.ConfigApplyCompleteTopic, bus.DataPlaneResponseTopic}, }, } - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - fakeResourceService := &resourcefakes.FakeResourceServiceInterface{} - fakeResourceService.ApplyConfigReturns(&model.NginxConfigContext{}, test.rollbackErr) - messagePipe := busfakes.NewFakeMessagePipe() + fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) + messagePipe := busfakes.NewFakeMessagePipe() - resourcePlugin := NewResource(types.AgentConfig()) - resourcePlugin.resourceService = fakeResourceService + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + err := nginxPlugin.Init(ctx, messagePipe) + require.NoError(t, err) - err := messagePipe.Register(2, []bus.Plugin{resourcePlugin}) - require.NoError(t, err) + nginxPlugin.Process(ctx, &bus.Message{Topic: bus.ConnectionCreatedTopic}) + nginxPlugin.Process(ctx, &bus.Message{Topic: bus.ConfigUploadRequestTopic, Data: message}) + + assert.Eventually( + t, + func() bool { return len(messagePipe.Messages()) == 1 }, + 2*time.Second, + 10*time.Millisecond, + ) - resourcePlugin.messagePipe = messagePipe + assert.Equal(t, 0, fakeFileServiceClient.UpdateFileCallCount()) - resourcePlugin.Process(ctx, test.message) + messages := messagePipe.Messages() + 
assert.Len(t, messages, 1) - sort.Slice(messagePipe.Messages(), func(i, j int) bool { - return messagePipe.Messages()[i].Topic < messagePipe.Messages()[j].Topic - }) + assert.Equal(t, bus.DataPlaneResponseTopic, messages[0].Topic) - assert.Len(tt, messagePipe.Messages(), len(test.topic)) + dataPlaneResponse, ok := messages[0].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_FAILURE, + dataPlaneResponse.GetCommandResponse().GetStatus(), + ) +} - assert.Equal(t, test.topic[0], messagePipe.Messages()[0].Topic) +func TestNginx_Process_handleConfigApplyRequest(t *testing.T) { + ctx := context.Background() + tempDir := t.TempDir() - if len(test.topic) > 1 { - assert.Equal(t, test.topic[1], messagePipe.Messages()[1].Topic) - } + filePath := tempDir + "/nginx.conf" + fileContent := []byte("location /test {\n return 200 \"Test location\\n\";\n}") + fileHash := files.GenerateHash(fileContent) - if test.rollbackErr != nil { - rollbackResponse, ok := messagePipe.Messages()[1].Data.(*mpi.DataPlaneResponse) - assert.True(tt, ok) - assert.Equal(t, test.topic[1], messagePipe.Messages()[1].Topic) - assert.Equal(tt, test.rollbackErr.Error(), rollbackResponse.GetCommandResponse().GetError()) + message := &mpi.ManagementPlaneRequest{ + Request: &mpi.ManagementPlaneRequest_ConfigApplyRequest{ + ConfigApplyRequest: protos.CreateConfigApplyRequest(protos.FileOverview(filePath, fileHash)), + }, + } + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + agentConfig := types.AgentConfig() + agentConfig.AllowedDirectories = []string{tempDir} + + tests := []struct { + message *mpi.ManagementPlaneRequest + configApplyReturnsErr error + name string + configApplyStatus model.WriteStatus + }{ + { + name: "Test 1 - Success", + configApplyReturnsErr: nil, + configApplyStatus: model.OK, + message: message, + }, + { + name: "Test 2 - Fail, Rollback", + configApplyReturnsErr: errors.New("something went wrong"), + 
configApplyStatus: model.RollbackRequired, + message: message, + }, + { + name: "Test 3 - Fail, No Rollback", + configApplyReturnsErr: errors.New("something went wrong"), + configApplyStatus: model.Error, + message: message, + }, + { + name: "Test 4 - Fail to cast payload", + configApplyReturnsErr: errors.New("something went wrong"), + configApplyStatus: model.Error, + message: nil, + }, + { + name: "Test 5 - No changes needed", + configApplyReturnsErr: nil, + configApplyStatus: model.NoChange, + message: message, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeNginxService := &nginxfakes.FakeNginxServiceInterface{} + fakeNginxService.ApplyConfigReturns(&model.NginxConfigContext{}, test.configApplyReturnsErr) + + fakeFileManagerService := &filefakes.FakeFileManagerServiceInterface{} + fakeFileManagerService.ConfigApplyReturns(test.configApplyStatus, test.configApplyReturnsErr) + messagePipe := busfakes.NewFakeMessagePipe() + + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + + err := nginxPlugin.Init(ctx, messagePipe) + nginxPlugin.fileManagerService = fakeFileManagerService + nginxPlugin.nginxService = fakeNginxService + require.NoError(t, err) + + nginxPlugin.Process(ctx, &bus.Message{Topic: bus.ConfigApplyRequestTopic, Data: test.message}) + + messages := messagePipe.Messages() + + switch { + case test.configApplyStatus == model.OK: + assert.Len(t, messages, 2) + assert.Equal(t, bus.EnableWatchersTopic, messages[0].Topic) + assert.Equal(t, bus.DataPlaneResponseTopic, messages[1].Topic) + + msg, ok := messages[1].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, msg.GetCommandResponse().GetStatus()) + assert.Equal(t, "Config apply successful", msg.GetCommandResponse().GetMessage()) + case test.configApplyStatus == model.RollbackRequired: + assert.Len(t, messages, 3) + + assert.Equal(t, bus.DataPlaneResponseTopic, 
messages[0].Topic) + dataPlaneResponse, ok := messages[0].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_ERROR, + dataPlaneResponse.GetCommandResponse().GetStatus(), + ) + assert.Equal(t, "Config apply failed, rolling back config", + dataPlaneResponse.GetCommandResponse().GetMessage()) + assert.Equal(t, test.configApplyReturnsErr.Error(), dataPlaneResponse.GetCommandResponse().GetError()) + + assert.Equal(t, bus.EnableWatchersTopic, messages[1].Topic) + + dataPlaneResponse, ok = messages[2].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal(t, "Config apply failed, rollback successful", + dataPlaneResponse.GetCommandResponse().GetMessage()) + assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_FAILURE, + dataPlaneResponse.GetCommandResponse().GetStatus()) + + case test.configApplyStatus == model.NoChange: + assert.Len(t, messages, 2) + assert.Equal(t, bus.EnableWatchersTopic, messages[0].Topic) + + response, ok := messages[1].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal(t, 1, fakeFileManagerService.ClearCacheCallCount()) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_OK, + response.GetCommandResponse().GetStatus(), + ) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_OK, + response.GetCommandResponse().GetStatus(), + ) + + case test.message == nil: + assert.Empty(t, messages) + default: + assert.Len(t, messages, 2) + assert.Equal(t, bus.EnableWatchersTopic, messages[0].Topic) + + dataPlaneResponse, ok := messages[1].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_FAILURE, + dataPlaneResponse.GetCommandResponse().GetStatus(), + ) + assert.Equal(t, "Config apply failed", dataPlaneResponse.GetCommandResponse().GetMessage()) + assert.Equal(t, test.configApplyReturnsErr.Error(), dataPlaneResponse.GetCommandResponse().GetError()) } }) } } -func TestResource_Subscriptions(t *testing.T) { - 
resourcePlugin := NewResource(types.AgentConfig()) - assert.Equal(t, - []string{ - bus.AddInstancesTopic, - bus.UpdatedInstancesTopic, - bus.DeletedInstancesTopic, - bus.WriteConfigSuccessfulTopic, - bus.RollbackWriteTopic, - bus.APIActionRequestTopic, - bus.AgentConfigUpdateTopic, +func TestNginxPlugin_Failed_ConfigApply(t *testing.T) { + ctx := context.Background() + + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + + tests := []struct { + rollbackError error + rollbackWriteError error + message string + name string + }{ + { + name: "Test 1 - Rollback Success", + message: "", + rollbackError: nil, + rollbackWriteError: nil, }, - resourcePlugin.Subscriptions()) -} + { + name: "Test 2 - Rollback Failed", + message: "config apply error: something went wrong\nrollback error: rollback failed", + rollbackError: errors.New("rollback failed"), + rollbackWriteError: nil, + }, + { + name: "Test 3 - Rollback Write Failed", + message: "config apply error: something went wrong\nrollback error: rollback write failed", + rollbackError: nil, + rollbackWriteError: errors.New("rollback write failed"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeNginxService := &nginxfakes.FakeNginxServiceInterface{} + fakeNginxService.ApplyConfigReturnsOnCall(0, &model.NginxConfigContext{}, + errors.New("something went wrong")) + fakeNginxService.ApplyConfigReturnsOnCall(1, &model.NginxConfigContext{}, tt.rollbackWriteError) + + fakeFileManagerService := &filefakes.FakeFileManagerServiceInterface{} + fakeFileManagerService.RollbackReturns(tt.rollbackError) -func TestResource_Info(t *testing.T) { - resourcePlugin := NewResource(types.AgentConfig()) - assert.Equal(t, &bus.Info{Name: "resource"}, resourcePlugin.Info()) + messagePipe := busfakes.NewFakeMessagePipe() + + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + + err := nginxPlugin.Init(ctx, messagePipe) + nginxPlugin.fileManagerService = 
fakeFileManagerService + nginxPlugin.nginxService = fakeNginxService + require.NoError(t, err) + + nginxPlugin.applyConfig(ctx, "dfsbhj6-bc92-30c1-a9c9-85591422068e", protos. + NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId()) + + messages := messagePipe.Messages() + + dataPlaneResponse, ok := messages[0].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_ERROR, + dataPlaneResponse.GetCommandResponse().GetStatus(), + ) + assert.Equal(t, "Config apply failed, rolling back config", + dataPlaneResponse.GetCommandResponse().GetMessage()) + + if tt.rollbackError == nil && tt.rollbackWriteError == nil { + assert.Len(t, messages, 3) + assert.Equal(t, bus.EnableWatchersTopic, messages[1].Topic) + + dataPlaneResponse, ok = messages[2].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_FAILURE, + dataPlaneResponse.GetCommandResponse().GetStatus(), + ) + + assert.Equal(t, "Config apply failed, rollback successful", + dataPlaneResponse.GetCommandResponse().GetMessage()) + } else { + assert.Len(t, messages, 4) + dataPlaneResponse, ok = messages[1].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_ERROR, + dataPlaneResponse.GetCommandResponse().GetStatus(), + ) + + assert.Equal(t, "Rollback failed", dataPlaneResponse.GetCommandResponse().GetMessage()) + assert.Equal(t, bus.EnableWatchersTopic, messages[2].Topic) + + dataPlaneResponse, ok = messages[3].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + assert.Equal( + t, + mpi.CommandResponse_COMMAND_STATUS_FAILURE, + dataPlaneResponse.GetCommandResponse().GetStatus(), + ) + + assert.Equal(t, "Config apply failed, rollback failed", + dataPlaneResponse.GetCommandResponse().GetMessage()) + assert.Equal(t, tt.message, dataPlaneResponse.GetCommandResponse().GetError()) + } + }) + } } -func TestResource_Init(t *testing.T) { +func 
TestNginxPlugin_Process_NginxConfigUpdateTopic(t *testing.T) { ctx := context.Background() - resourceService := resourcefakes.FakeResourceServiceInterface{} + fileMeta := protos.FileMeta("/etc/nginx/nginx/conf", "") + + message := &model.NginxConfigContext{ + Files: []*mpi.File{ + { + FileMeta: fileMeta, + }, + }, + } + + fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} + fakeFileServiceClient.UpdateOverviewReturns(&mpi.UpdateOverviewResponse{ + Overview: nil, + }, nil) + + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) messagePipe := busfakes.NewFakeMessagePipe() - messagePipe.RunWithoutInit(ctx) - resourcePlugin := NewResource(types.AgentConfig()) - resourcePlugin.resourceService = &resourceService - err := resourcePlugin.Init(ctx, messagePipe) + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + err := nginxPlugin.Init(ctx, messagePipe) require.NoError(t, err) - messages := messagePipe.Messages() + nginxPlugin.Process(ctx, &bus.Message{Topic: bus.ConnectionCreatedTopic}) + nginxPlugin.Process(ctx, &bus.Message{Topic: bus.NginxConfigUpdateTopic, Data: message}) - assert.Empty(t, messages) + assert.Eventually( + t, + func() bool { return fakeFileServiceClient.UpdateOverviewCallCount() == 1 }, + 2*time.Second, + 10*time.Millisecond, + ) } //nolint:revive,lll // maximum number of arguments exceed -func runResourceTestHelper(t *testing.T, ctx context.Context, testName string, getUpstreamsFunc func(*resourcefakes.FakeResourceServiceInterface), instance *mpi.Instance, message *bus.Message, topic []string, err error) { +func runNginxTestHelper(t *testing.T, ctx context.Context, testName string, + getUpstreamsFunc func(serviceInterface *nginxfakes.FakeNginxServiceInterface), instance *mpi.Instance, + message *bus.Message, topic []string, err error, +) { t.Helper() t.Run(testName, func(tt *testing.T) { - fakeResourceService := 
&resourcefakes.FakeResourceServiceInterface{} - getUpstreamsFunc(fakeResourceService) + fakeNginxService := &nginxfakes.FakeNginxServiceInterface{} + getUpstreamsFunc(fakeNginxService) if instance.GetInstanceMeta().GetInstanceId() != "e1374cb1-462d-3b6c-9f3b-f28332b5f10f" { - fakeResourceService.InstanceReturns(instance) + fakeNginxService.InstanceReturns(instance) } messagePipe := busfakes.NewFakeMessagePipe() - resourcePlugin := NewResource(types.AgentConfig()) - resourcePlugin.resourceService = fakeResourceService + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + nginxPlugin := NewNginx(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) + nginxPlugin.nginxService = fakeNginxService - registerErr := messagePipe.Register(2, []bus.Plugin{resourcePlugin}) + registerErr := messagePipe.Register(2, []bus.Plugin{nginxPlugin}) require.NoError(t, registerErr) - resourcePlugin.messagePipe = messagePipe - resourcePlugin.Process(ctx, message) + nginxPlugin.messagePipe = messagePipe + nginxPlugin.Process(ctx, message) assert.Equal(tt, topic[0], messagePipe.Messages()[0].Topic) diff --git a/internal/resource/nginx_plus_actions.go b/internal/nginx/nginx_plus_actions.go similarity index 94% rename from internal/resource/nginx_plus_actions.go rename to internal/nginx/nginx_plus_actions.go index 33db5f2edf..4c851e42cb 100644 --- a/internal/resource/nginx_plus_actions.go +++ b/internal/nginx/nginx_plus_actions.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. 
-package resource +package nginx import ( "context" @@ -18,7 +18,7 @@ import ( const emptyResponse = "{}" type APIAction struct { - ResourceService resourceServiceInterface + NginxService nginxServiceInterface } func (a *APIAction) HandleGetStreamUpstreamsRequest(ctx context.Context, @@ -28,7 +28,7 @@ func (a *APIAction) HandleGetStreamUpstreamsRequest(ctx context.Context, ctx, instance, func(ctx context.Context, instance *mpi.Instance) (interface{}, error) { - return a.ResourceService.GetStreamUpstreams(ctx, instance) + return a.NginxService.GetStreamUpstreams(ctx, instance) }, "Unable to get stream upstreams", ) @@ -39,7 +39,7 @@ func (a *APIAction) HandleGetUpstreamsRequest(ctx context.Context, instance *mpi ctx, instance, func(ctx context.Context, instance *mpi.Instance) (interface{}, error) { - return a.ResourceService.GetUpstreams(ctx, instance) + return a.NginxService.GetUpstreams(ctx, instance) }, "Unable to get upstreams", ) @@ -54,7 +54,7 @@ func (a *APIAction) HandleGetHTTPUpstreamsServersRequest( instanceID := instance.GetInstanceMeta().GetInstanceId() upstreamsResponse := emptyResponse - upstreams, err := a.ResourceService.GetHTTPUpstreamServers( + upstreams, err := a.NginxService.GetHTTPUpstreamServers( ctx, instance, action.GetGetHttpUpstreamServers().GetHttpUpstreamName(), @@ -102,7 +102,7 @@ func (a *APIAction) HandleUpdateStreamServersRequest( correlationID := logger.CorrelationID(ctx) instanceID := instance.GetInstanceMeta().GetInstanceId() - add, update, del, err := a.ResourceService.UpdateStreamServers( + add, update, del, err := a.NginxService.UpdateStreamServers( ctx, instance, action.GetUpdateStreamServers().GetUpstreamStreamName(), @@ -158,7 +158,7 @@ func (a *APIAction) HandleUpdateHTTPUpstreamsRequest( correlationID := logger.CorrelationID(ctx) instanceID := instance.GetInstanceMeta().GetInstanceId() - add, update, del, err := a.ResourceService.UpdateHTTPUpstreamServers(ctx, instance, + add, update, del, err := 
a.NginxService.UpdateHTTPUpstreamServers(ctx, instance, action.GetUpdateHttpUpstreamServers().GetHttpUpstreamName(), action.GetUpdateHttpUpstreamServers().GetServers()) if err != nil { diff --git a/internal/resource/resource_service.go b/internal/nginx/nginx_service.go similarity index 71% rename from internal/resource/resource_service.go rename to internal/nginx/nginx_service.go index 2ae6ede88e..8dcd40d123 100644 --- a/internal/resource/resource_service.go +++ b/internal/nginx/nginx_service.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. -package resource +package nginx import ( "context" @@ -44,7 +44,7 @@ const ( ) //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6@v6.8.1 -generate -//counterfeiter:generate . resourceServiceInterface +//counterfeiter:generate . nginxServiceInterface //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6@v6.8.1 -generate //counterfeiter:generate . logTailerOperator @@ -55,10 +55,8 @@ const ( //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6@v6.8.1 -generate //counterfeiter:generate . 
processOperator -type resourceServiceInterface interface { - AddInstances(instanceList []*mpi.Instance) *mpi.Resource - UpdateInstances(ctx context.Context, instanceList []*mpi.Instance) *mpi.Resource - DeleteInstances(ctx context.Context, instanceList []*mpi.Instance) *mpi.Resource +type nginxServiceInterface interface { + UpdateResource(ctx context.Context, resource *mpi.Resource) *mpi.Resource ApplyConfig(ctx context.Context, instanceID string) (*model.NginxConfigContext, error) Instance(instanceID string) *mpi.Instance GetHTTPUpstreamServers(ctx context.Context, instance *mpi.Instance, upstreams string) ([]client.UpstreamServer, @@ -89,24 +87,24 @@ type ( } ) -type ResourceService struct { +type NginxService struct { resource *mpi.Resource nginxConfigParser parser.ConfigParser agentConfig *config.Config - instanceOperators map[string]instanceOperator // key is instance ID + instanceOperator instanceOperator info host.InfoInterface manifestFilePath string resourceMutex sync.Mutex operatorsMutex sync.Mutex } -func NewResourceService(ctx context.Context, agentConfig *config.Config) *ResourceService { - resourceService := &ResourceService{ +func NewNginxService(ctx context.Context, agentConfig *config.Config) *NginxService { + resourceService := &NginxService{ resource: &mpi.Resource{}, resourceMutex: sync.Mutex{}, info: host.NewInfo(), operatorsMutex: sync.Mutex{}, - instanceOperators: make(map[string]instanceOperator), + instanceOperator: NewInstanceOperator(agentConfig), nginxConfigParser: parser.NewNginxConfigParser(agentConfig), agentConfig: agentConfig, manifestFilePath: agentConfig.LibDir + "/manifest.json", @@ -117,17 +115,8 @@ func NewResourceService(ctx context.Context, agentConfig *config.Config) *Resour return resourceService } -func (r *ResourceService) AddInstances(instanceList []*mpi.Instance) *mpi.Resource { - r.resourceMutex.Lock() - defer r.resourceMutex.Unlock() - r.resource.Instances = append(r.resource.GetInstances(), instanceList...) 
- r.AddOperator(instanceList) - - return r.resource -} - -func (r *ResourceService) Instance(instanceID string) *mpi.Instance { - for _, instance := range r.resource.GetInstances() { +func (n *NginxService) Instance(instanceID string) *mpi.Instance { + for _, instance := range n.resource.GetInstances() { if instance.GetInstanceMeta().GetInstanceId() == instanceID { return instance } @@ -136,120 +125,71 @@ func (r *ResourceService) Instance(instanceID string) *mpi.Instance { return nil } -func (r *ResourceService) AddOperator(instanceList []*mpi.Instance) { - r.operatorsMutex.Lock() - defer r.operatorsMutex.Unlock() - for _, instance := range instanceList { - r.instanceOperators[instance.GetInstanceMeta().GetInstanceId()] = NewInstanceOperator(r.agentConfig) - } -} - -func (r *ResourceService) RemoveOperator(instanceList []*mpi.Instance) { - r.operatorsMutex.Lock() - defer r.operatorsMutex.Unlock() - for _, instance := range instanceList { - delete(r.instanceOperators, instance.GetInstanceMeta().GetInstanceId()) - } -} - -func (r *ResourceService) UpdateInstances(ctx context.Context, instanceList []*mpi.Instance) *mpi.Resource { - r.resourceMutex.Lock() - defer r.resourceMutex.Unlock() - - for _, updatedInstance := range instanceList { - resourceCopy, ok := proto.Clone(r.resource).(*mpi.Resource) - if ok { - for _, instance := range resourceCopy.GetInstances() { - if updatedInstance.GetInstanceMeta().GetInstanceId() == instance.GetInstanceMeta().GetInstanceId() { - instance.InstanceMeta = updatedInstance.GetInstanceMeta() - instance.InstanceRuntime = updatedInstance.GetInstanceRuntime() - instance.InstanceConfig = updatedInstance.GetInstanceConfig() - } - } - r.resource = resourceCopy - } else { - slog.WarnContext(ctx, "Unable to clone resource while updating instances", "resource", - r.resource, "instances", instanceList) - } - } - - return r.resource -} - -func (r *ResourceService) DeleteInstances(ctx context.Context, instanceList []*mpi.Instance) *mpi.Resource { 
- r.resourceMutex.Lock() - defer r.resourceMutex.Unlock() +func (n *NginxService) UpdateResource(ctx context.Context, resource *mpi.Resource) *mpi.Resource { + slog.DebugContext(ctx, "Updating resource", "resource", resource) + n.resourceMutex.Lock() + defer n.resourceMutex.Unlock() - for _, deletedInstance := range instanceList { - resourceCopy, ok := proto.Clone(r.resource).(*mpi.Resource) - if ok { - for index, instance := range resourceCopy.GetInstances() { - if deletedInstance.GetInstanceMeta().GetInstanceId() == instance.GetInstanceMeta().GetInstanceId() { - r.resource.Instances = append(r.resource.Instances[:index], r.resource.GetInstances()[index+1:]...) - } - } - } else { - slog.WarnContext(ctx, "Unable to clone resource while deleting instances", "resource", - r.resource, "instances", instanceList) - } - } - r.RemoveOperator(instanceList) + n.resource = resource - return r.resource + return n.resource } -func (r *ResourceService) ApplyConfig(ctx context.Context, instanceID string) (*model.NginxConfigContext, error) { +func (n *NginxService) ApplyConfig(ctx context.Context, instanceID string) (*model.NginxConfigContext, error) { var instance *mpi.Instance - operator := r.instanceOperators[instanceID] - if operator == nil { - return nil, fmt.Errorf("instance %s not found", instanceID) + if n.instanceOperator == nil { + return nil, errors.New("instance operator is nil") } - for _, resourceInstance := range r.resource.GetInstances() { + for _, resourceInstance := range n.resource.GetInstances() { if resourceInstance.GetInstanceMeta().GetInstanceId() == instanceID { instance = resourceInstance } } + if instance == nil { + return nil, fmt.Errorf("instance %s not found", instanceID) + } + // Need to parse config to determine what error logs to watch if new ones are added as part of the NGINX reload - nginxConfigContext, parseErr := r.nginxConfigParser.Parse(ctx, instance) + nginxConfigContext, parseErr := n.nginxConfigParser.Parse(ctx, instance) if parseErr != 
nil || nginxConfigContext == nil { return nil, fmt.Errorf("failed to parse config %w", parseErr) } - nginxConfigContext = r.updateConfigContextFiles(ctx, nginxConfigContext) + nginxConfigContext = n.updateConfigContextFiles(ctx, nginxConfigContext) datasource.UpdateNginxInstanceRuntime(instance, nginxConfigContext) slog.DebugContext(ctx, "Updated Instance Runtime after parsing config", "instance", instance.GetInstanceRuntime()) - valErr := operator.Validate(ctx, instance) + valErr := n.instanceOperator.Validate(ctx, instance) if valErr != nil { return nil, fmt.Errorf("failed validating config %w", valErr) } - reloadErr := operator.Reload(ctx, instance) + reloadErr := n.instanceOperator.Reload(ctx, instance) if reloadErr != nil { return nil, fmt.Errorf("failed to reload NGINX %w", reloadErr) } // Check if APIs have been added/updated/removed - nginxConfigContext.StubStatus = r.nginxConfigParser.FindStubStatusAPI(ctx, nginxConfigContext) - nginxConfigContext.PlusAPI = r.nginxConfigParser.FindPlusAPI(ctx, nginxConfigContext) + nginxConfigContext.StubStatus = n.nginxConfigParser.FindStubStatusAPI(ctx, nginxConfigContext) + nginxConfigContext.PlusAPI = n.nginxConfigParser.FindPlusAPI(ctx, nginxConfigContext) datasource.UpdateNginxInstanceRuntime(instance, nginxConfigContext) - r.UpdateInstances(ctx, []*mpi.Instance{instance}) + n.updateInstances(ctx, []*mpi.Instance{instance}) slog.DebugContext(ctx, "Updated Instance Runtime after reloading NGINX", "instance", instance.GetInstanceRuntime()) return nginxConfigContext, nil } -func (r *ResourceService) GetHTTPUpstreamServers(ctx context.Context, instance *mpi.Instance, +func (n *NginxService) GetHTTPUpstreamServers(ctx context.Context, instance *mpi.Instance, upstream string, ) ([]client.UpstreamServer, error) { - plusClient, err := r.createPlusClient(ctx, instance) + plusClient, err := n.createPlusClient(ctx, instance) if err != nil { slog.ErrorContext(ctx, "Failed to create plus client ", "error", err) return nil, err @@ 
-263,9 +203,9 @@ func (r *ResourceService) GetHTTPUpstreamServers(ctx context.Context, instance * return servers, createPlusAPIError(getServersErr) } -func (r *ResourceService) GetUpstreams(ctx context.Context, instance *mpi.Instance, +func (n *NginxService) GetUpstreams(ctx context.Context, instance *mpi.Instance, ) (*client.Upstreams, error) { - plusClient, err := r.createPlusClient(ctx, instance) + plusClient, err := n.createPlusClient(ctx, instance) if err != nil { slog.ErrorContext(ctx, "Failed to create plus client ", "error", err) return nil, err @@ -280,9 +220,9 @@ func (r *ResourceService) GetUpstreams(ctx context.Context, instance *mpi.Instan return servers, createPlusAPIError(getUpstreamsErr) } -func (r *ResourceService) GetStreamUpstreams(ctx context.Context, instance *mpi.Instance, +func (n *NginxService) GetStreamUpstreams(ctx context.Context, instance *mpi.Instance, ) (*client.StreamUpstreams, error) { - plusClient, err := r.createPlusClient(ctx, instance) + plusClient, err := n.createPlusClient(ctx, instance) if err != nil { slog.ErrorContext(ctx, "Failed to create plus client ", "error", err) return nil, err @@ -300,10 +240,10 @@ func (r *ResourceService) GetStreamUpstreams(ctx context.Context, instance *mpi. 
// max number of returns from function is 3 // //nolint:revive // maximum return allowed is 3 -func (r *ResourceService) UpdateStreamServers(ctx context.Context, instance *mpi.Instance, upstream string, +func (n *NginxService) UpdateStreamServers(ctx context.Context, instance *mpi.Instance, upstream string, upstreams []*structpb.Struct, ) (added, updated, deleted []client.StreamUpstreamServer, err error) { - plusClient, err := r.createPlusClient(ctx, instance) + plusClient, err := n.createPlusClient(ctx, instance) if err != nil { slog.ErrorContext(ctx, "Failed to create plus client ", "error", err) return nil, nil, nil, err @@ -323,10 +263,10 @@ func (r *ResourceService) UpdateStreamServers(ctx context.Context, instance *mpi // max number of returns from function is 3 // //nolint:revive // maximum return allowed is 3 -func (r *ResourceService) UpdateHTTPUpstreamServers(ctx context.Context, instance *mpi.Instance, upstream string, +func (n *NginxService) UpdateHTTPUpstreamServers(ctx context.Context, instance *mpi.Instance, upstream string, upstreams []*structpb.Struct, ) (added, updated, deleted []client.UpstreamServer, err error) { - plusClient, err := r.createPlusClient(ctx, instance) + plusClient, err := n.createPlusClient(ctx, instance) if err != nil { slog.ErrorContext(ctx, "Failed to create plus client ", "error", err) return nil, nil, nil, err @@ -343,10 +283,32 @@ func (r *ResourceService) UpdateHTTPUpstreamServers(ctx context.Context, instanc return added, updated, deleted, createPlusAPIError(updateError) } -func (r *ResourceService) updateConfigContextFiles(ctx context.Context, +func (n *NginxService) updateInstances(ctx context.Context, instanceList []*mpi.Instance) { + n.resourceMutex.Lock() + defer n.resourceMutex.Unlock() + + for _, updatedInstance := range instanceList { + resourceCopy, ok := proto.Clone(n.resource).(*mpi.Resource) + if ok { + for _, instance := range resourceCopy.GetInstances() { + if 
updatedInstance.GetInstanceMeta().GetInstanceId() == instance.GetInstanceMeta().GetInstanceId() { + instance.InstanceMeta = updatedInstance.GetInstanceMeta() + instance.InstanceRuntime = updatedInstance.GetInstanceRuntime() + instance.InstanceConfig = updatedInstance.GetInstanceConfig() + } + } + n.resource = resourceCopy + } else { + slog.WarnContext(ctx, "Unable to clone resource while updating instances", "resource", + n.resource, "instances", instanceList) + } + } +} + +func (n *NginxService) updateConfigContextFiles(ctx context.Context, nginxConfigContext *model.NginxConfigContext, ) *model.NginxConfigContext { - manifestFiles, manifestErr := r.manifestFile() + manifestFiles, manifestErr := n.manifestFile() if manifestErr != nil { slog.ErrorContext(ctx, "Error getting manifest files", "error", manifestErr) } @@ -364,12 +326,12 @@ func (r *ResourceService) updateConfigContextFiles(ctx context.Context, return nginxConfigContext } -func (r *ResourceService) manifestFile() (map[string]*model.ManifestFile, error) { - if _, err := os.Stat(r.manifestFilePath); err != nil { +func (n *NginxService) manifestFile() (map[string]*model.ManifestFile, error) { + if _, err := os.Stat(n.manifestFilePath); err != nil { return nil, err } - file, err := os.ReadFile(r.manifestFilePath) + file, err := os.ReadFile(n.manifestFilePath) if err != nil { return nil, fmt.Errorf("failed to read manifest file: %w", err) } @@ -416,7 +378,7 @@ func convertToStreamUpstreamServer(streamUpstreams []*structpb.Struct) []client. 
return servers } -func (r *ResourceService) createPlusClient(ctx context.Context, instance *mpi.Instance) (*client.NginxClient, error) { +func (n *NginxService) createPlusClient(ctx context.Context, instance *mpi.Instance) (*client.NginxClient, error) { plusAPI := instance.GetInstanceRuntime().GetNginxPlusRuntimeInfo().GetPlusApi() var endpoint string @@ -459,31 +421,31 @@ func (r *ResourceService) createPlusClient(ctx context.Context, instance *mpi.In ) } -func (r *ResourceService) updateResourceInfo(ctx context.Context) { - r.resourceMutex.Lock() - defer r.resourceMutex.Unlock() +func (n *NginxService) updateResourceInfo(ctx context.Context) { + n.resourceMutex.Lock() + defer n.resourceMutex.Unlock() - isContainer, err := r.info.IsContainer() + isContainer, err := n.info.IsContainer() if err != nil { slog.WarnContext(ctx, "Failed to check if resource is container", "error", err) } if isContainer { - r.resource.Info, err = r.info.ContainerInfo(ctx) + n.resource.Info, err = n.info.ContainerInfo(ctx) if err != nil { slog.ErrorContext(ctx, "Failed to get container info", "error", err) return } - r.resource.ResourceId = r.resource.GetContainerInfo().GetContainerId() - r.resource.Instances = []*mpi.Instance{} + n.resource.ResourceId = n.resource.GetContainerInfo().GetContainerId() + n.resource.Instances = []*mpi.Instance{} } else { - r.resource.Info, err = r.info.HostInfo(ctx) + n.resource.Info, err = n.info.HostInfo(ctx) if err != nil { slog.ErrorContext(ctx, "Failed to get host info", "error", err) return } - r.resource.ResourceId = r.resource.GetHostInfo().GetHostId() - r.resource.Instances = []*mpi.Instance{} + n.resource.ResourceId = n.resource.GetHostInfo().GetHostId() + n.resource.Instances = []*mpi.Instance{} } } diff --git a/internal/resource/resource_service_test.go b/internal/nginx/nginx_service_test.go similarity index 80% rename from internal/resource/resource_service_test.go rename to internal/nginx/nginx_service_test.go index e629f39ac0..a138e43807 
100644 --- a/internal/resource/resource_service_test.go +++ b/internal/nginx/nginx_service_test.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. -package resource +package nginx import ( "context" @@ -21,7 +21,7 @@ import ( "github.com/nginx/nginx-plus-go-client/v3/client" "google.golang.org/protobuf/types/known/structpb" - "github.com/nginx/agent/v3/internal/resource/resourcefakes" + "github.com/nginx/agent/v3/internal/nginx/nginxfakes" "github.com/nginx/agent/v3/test/types" mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" @@ -30,47 +30,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestResourceService_AddInstance(t *testing.T) { - ctx := context.Background() - - tests := []struct { - name string - resource *mpi.Resource - instanceList []*mpi.Instance - }{ - { - name: "Test 1: Add One Instance", - instanceList: []*mpi.Instance{ - protos.NginxOssInstance([]string{}), - }, - resource: protos.HostResource(), - }, - { - name: "Test 2: Add Multiple Instance", - instanceList: []*mpi.Instance{ - protos.NginxOssInstance([]string{}), - protos.NginxPlusInstance([]string{}), - }, - resource: &mpi.Resource{ - ResourceId: protos.HostResource().GetResourceId(), - Instances: []*mpi.Instance{ - protos.NginxOssInstance([]string{}), - protos.NginxPlusInstance([]string{}), - }, - Info: protos.HostResource().GetInfo(), - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - resourceService := NewResourceService(ctx, types.AgentConfig()) - resource := resourceService.AddInstances(test.instanceList) - assert.Equal(tt, test.resource.GetInstances(), resource.GetInstances()) - }) - } -} - func TestResourceService_UpdateInstance(t *testing.T) { ctx := context.Background() @@ -107,47 +66,10 @@ func TestResourceService_UpdateInstance(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - 
resourceService := NewResourceService(ctx, types.AgentConfig()) + resourceService := NewNginxService(ctx, types.AgentConfig()) resourceService.resource.Instances = []*mpi.Instance{protos.NginxOssInstance([]string{})} - resource := resourceService.UpdateInstances(ctx, test.instanceList) - assert.Equal(tt, test.resource.GetInstances(), resource.GetInstances()) - }) - } -} - -func TestResourceService_DeleteInstance(t *testing.T) { - ctx := context.Background() - - tests := []struct { - name string - err error - resource *mpi.Resource - instanceList []*mpi.Instance - }{ - { - name: "Test 1: Update Instances", - instanceList: []*mpi.Instance{ - protos.NginxPlusInstance([]string{}), - }, - resource: &mpi.Resource{ - ResourceId: protos.HostResource().GetResourceId(), - Instances: []*mpi.Instance{ - protos.NginxOssInstance([]string{}), - }, - Info: protos.HostResource().GetInfo(), - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - resourceService := NewResourceService(ctx, types.AgentConfig()) - resourceService.resource.Instances = []*mpi.Instance{ - protos.NginxOssInstance([]string{}), - protos.NginxPlusInstance([]string{}), - } - resource := resourceService.DeleteInstances(ctx, test.instanceList) - assert.Equal(tt, test.resource.GetInstances(), resource.GetInstances()) + resourceService.updateInstances(ctx, test.instanceList) + assert.Equal(tt, test.resource.GetInstances(), resourceService.resource.GetInstances()) }) } } @@ -179,7 +101,7 @@ func TestResourceService_Instance(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - resourceService := NewResourceService(ctx, types.AgentConfig()) + resourceService := NewNginxService(ctx, types.AgentConfig()) resourceService.resource.Instances = test.instances instance := resourceService.Instance(protos.NginxPlusInstance([]string{}). 
GetInstanceMeta().GetInstanceId()) @@ -222,7 +144,7 @@ func TestResourceService_GetResource(t *testing.T) { mockInfo.IsContainerReturns(tc.isContainer, nil) - resourceService := NewResourceService(ctx, types.AgentConfig()) + resourceService := NewNginxService(ctx, types.AgentConfig()) resourceService.info = mockInfo resourceService.resource = tc.expectedResource @@ -295,7 +217,7 @@ func TestResourceService_createPlusClient(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - resourceService := NewResourceService(ctx, types.AgentConfig()) + resourceService := NewNginxService(ctx, types.AgentConfig()) resourceService.resource.Instances = []*mpi.Instance{ protos.NginxOssInstance([]string{}), protos.NginxPlusInstance([]string{}), @@ -356,7 +278,7 @@ func TestResourceService_ApplyConfig(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - instanceOp := &resourcefakes.FakeInstanceOperator{} + instanceOp := &nginxfakes.FakeInstanceOperator{} instanceOp.ReloadReturns(test.reloadErr) instanceOp.ValidateReturns(test.validateErr) @@ -387,10 +309,8 @@ func TestResourceService_ApplyConfig(t *testing.T) { Ca: "", }) - resourceService := NewResourceService(ctx, types.AgentConfig()) - resourceOpMap := make(map[string]instanceOperator) - resourceOpMap[protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId()] = instanceOp - resourceService.instanceOperators = resourceOpMap + resourceService := NewNginxService(ctx, types.AgentConfig()) + resourceService.instanceOperator = instanceOp resourceService.nginxConfigParser = &nginxParser instance := protos.NginxOssInstance([]string{}) @@ -408,7 +328,7 @@ func TestResourceService_ApplyConfig(t *testing.T) { func Test_updateConfigContextFiles(t *testing.T) { ctx := t.Context() - resourceService := NewResourceService(ctx, types.AgentConfig()) + resourceService := NewNginxService(ctx, types.AgentConfig()) manifestFileContents := map[string]*model.ManifestFile{ 
"/etc/nginx/nginx.conf": { diff --git a/internal/resource/resourcefakes/fake_instance_operator.go b/internal/nginx/nginxfakes/fake_instance_operator.go similarity index 99% rename from internal/resource/resourcefakes/fake_instance_operator.go rename to internal/nginx/nginxfakes/fake_instance_operator.go index 23abae4745..f71c4187b6 100644 --- a/internal/resource/resourcefakes/fake_instance_operator.go +++ b/internal/nginx/nginxfakes/fake_instance_operator.go @@ -1,5 +1,5 @@ // Code generated by counterfeiter. DO NOT EDIT. -package resourcefakes +package nginxfakes import ( "context" diff --git a/internal/resource/resourcefakes/fake_log_tailer_operator.go b/internal/nginx/nginxfakes/fake_log_tailer_operator.go similarity index 98% rename from internal/resource/resourcefakes/fake_log_tailer_operator.go rename to internal/nginx/nginxfakes/fake_log_tailer_operator.go index 74008254d0..e31e73fbc0 100644 --- a/internal/resource/resourcefakes/fake_log_tailer_operator.go +++ b/internal/nginx/nginxfakes/fake_log_tailer_operator.go @@ -1,5 +1,5 @@ // Code generated by counterfeiter. DO NOT EDIT. -package resourcefakes +package nginxfakes import ( "context" diff --git a/internal/resource/resourcefakes/fake_resource_service_interface.go b/internal/nginx/nginxfakes/fake_nginx_service_interface.go similarity index 58% rename from internal/resource/resourcefakes/fake_resource_service_interface.go rename to internal/nginx/nginxfakes/fake_nginx_service_interface.go index fcfcac7329..e3c9d916e3 100644 --- a/internal/resource/resourcefakes/fake_resource_service_interface.go +++ b/internal/nginx/nginxfakes/fake_nginx_service_interface.go @@ -1,5 +1,5 @@ // Code generated by counterfeiter. DO NOT EDIT. 
-package resourcefakes +package nginxfakes import ( "context" @@ -11,18 +11,7 @@ import ( "google.golang.org/protobuf/types/known/structpb" ) -type FakeResourceServiceInterface struct { - AddInstancesStub func([]*v1.Instance) *v1.Resource - addInstancesMutex sync.RWMutex - addInstancesArgsForCall []struct { - arg1 []*v1.Instance - } - addInstancesReturns struct { - result1 *v1.Resource - } - addInstancesReturnsOnCall map[int]struct { - result1 *v1.Resource - } +type FakeNginxServiceInterface struct { ApplyConfigStub func(context.Context, string) (*model.NginxConfigContext, error) applyConfigMutex sync.RWMutex applyConfigArgsForCall []struct { @@ -37,18 +26,6 @@ type FakeResourceServiceInterface struct { result1 *model.NginxConfigContext result2 error } - DeleteInstancesStub func(context.Context, []*v1.Instance) *v1.Resource - deleteInstancesMutex sync.RWMutex - deleteInstancesArgsForCall []struct { - arg1 context.Context - arg2 []*v1.Instance - } - deleteInstancesReturns struct { - result1 *v1.Resource - } - deleteInstancesReturnsOnCall map[int]struct { - result1 *v1.Resource - } GetHTTPUpstreamServersStub func(context.Context, *v1.Instance, string) ([]client.UpstreamServer, error) getHTTPUpstreamServersMutex sync.RWMutex getHTTPUpstreamServersArgsForCall []struct { @@ -123,16 +100,16 @@ type FakeResourceServiceInterface struct { result3 []client.UpstreamServer result4 error } - UpdateInstancesStub func(context.Context, []*v1.Instance) *v1.Resource - updateInstancesMutex sync.RWMutex - updateInstancesArgsForCall []struct { + UpdateResourceStub func(context.Context, *v1.Resource) *v1.Resource + updateResourceMutex sync.RWMutex + updateResourceArgsForCall []struct { arg1 context.Context - arg2 []*v1.Instance + arg2 *v1.Resource } - updateInstancesReturns struct { + updateResourceReturns struct { result1 *v1.Resource } - updateInstancesReturnsOnCall map[int]struct { + updateResourceReturnsOnCall map[int]struct { result1 *v1.Resource } UpdateStreamServersStub 
func(context.Context, *v1.Instance, string, []*structpb.Struct) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error) @@ -159,73 +136,7 @@ type FakeResourceServiceInterface struct { invocationsMutex sync.RWMutex } -func (fake *FakeResourceServiceInterface) AddInstances(arg1 []*v1.Instance) *v1.Resource { - var arg1Copy []*v1.Instance - if arg1 != nil { - arg1Copy = make([]*v1.Instance, len(arg1)) - copy(arg1Copy, arg1) - } - fake.addInstancesMutex.Lock() - ret, specificReturn := fake.addInstancesReturnsOnCall[len(fake.addInstancesArgsForCall)] - fake.addInstancesArgsForCall = append(fake.addInstancesArgsForCall, struct { - arg1 []*v1.Instance - }{arg1Copy}) - stub := fake.AddInstancesStub - fakeReturns := fake.addInstancesReturns - fake.recordInvocation("AddInstances", []interface{}{arg1Copy}) - fake.addInstancesMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeResourceServiceInterface) AddInstancesCallCount() int { - fake.addInstancesMutex.RLock() - defer fake.addInstancesMutex.RUnlock() - return len(fake.addInstancesArgsForCall) -} - -func (fake *FakeResourceServiceInterface) AddInstancesCalls(stub func([]*v1.Instance) *v1.Resource) { - fake.addInstancesMutex.Lock() - defer fake.addInstancesMutex.Unlock() - fake.AddInstancesStub = stub -} - -func (fake *FakeResourceServiceInterface) AddInstancesArgsForCall(i int) []*v1.Instance { - fake.addInstancesMutex.RLock() - defer fake.addInstancesMutex.RUnlock() - argsForCall := fake.addInstancesArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *FakeResourceServiceInterface) AddInstancesReturns(result1 *v1.Resource) { - fake.addInstancesMutex.Lock() - defer fake.addInstancesMutex.Unlock() - fake.AddInstancesStub = nil - fake.addInstancesReturns = struct { - result1 *v1.Resource - }{result1} -} - -func (fake *FakeResourceServiceInterface) AddInstancesReturnsOnCall(i 
int, result1 *v1.Resource) { - fake.addInstancesMutex.Lock() - defer fake.addInstancesMutex.Unlock() - fake.AddInstancesStub = nil - if fake.addInstancesReturnsOnCall == nil { - fake.addInstancesReturnsOnCall = make(map[int]struct { - result1 *v1.Resource - }) - } - fake.addInstancesReturnsOnCall[i] = struct { - result1 *v1.Resource - }{result1} -} - -func (fake *FakeResourceServiceInterface) ApplyConfig(arg1 context.Context, arg2 string) (*model.NginxConfigContext, error) { +func (fake *FakeNginxServiceInterface) ApplyConfig(arg1 context.Context, arg2 string) (*model.NginxConfigContext, error) { fake.applyConfigMutex.Lock() ret, specificReturn := fake.applyConfigReturnsOnCall[len(fake.applyConfigArgsForCall)] fake.applyConfigArgsForCall = append(fake.applyConfigArgsForCall, struct { @@ -245,26 +156,26 @@ func (fake *FakeResourceServiceInterface) ApplyConfig(arg1 context.Context, arg2 return fakeReturns.result1, fakeReturns.result2 } -func (fake *FakeResourceServiceInterface) ApplyConfigCallCount() int { +func (fake *FakeNginxServiceInterface) ApplyConfigCallCount() int { fake.applyConfigMutex.RLock() defer fake.applyConfigMutex.RUnlock() return len(fake.applyConfigArgsForCall) } -func (fake *FakeResourceServiceInterface) ApplyConfigCalls(stub func(context.Context, string) (*model.NginxConfigContext, error)) { +func (fake *FakeNginxServiceInterface) ApplyConfigCalls(stub func(context.Context, string) (*model.NginxConfigContext, error)) { fake.applyConfigMutex.Lock() defer fake.applyConfigMutex.Unlock() fake.ApplyConfigStub = stub } -func (fake *FakeResourceServiceInterface) ApplyConfigArgsForCall(i int) (context.Context, string) { +func (fake *FakeNginxServiceInterface) ApplyConfigArgsForCall(i int) (context.Context, string) { fake.applyConfigMutex.RLock() defer fake.applyConfigMutex.RUnlock() argsForCall := fake.applyConfigArgsForCall[i] return argsForCall.arg1, argsForCall.arg2 } -func (fake *FakeResourceServiceInterface) ApplyConfigReturns(result1 
*model.NginxConfigContext, result2 error) { +func (fake *FakeNginxServiceInterface) ApplyConfigReturns(result1 *model.NginxConfigContext, result2 error) { fake.applyConfigMutex.Lock() defer fake.applyConfigMutex.Unlock() fake.ApplyConfigStub = nil @@ -274,7 +185,7 @@ func (fake *FakeResourceServiceInterface) ApplyConfigReturns(result1 *model.Ngin }{result1, result2} } -func (fake *FakeResourceServiceInterface) ApplyConfigReturnsOnCall(i int, result1 *model.NginxConfigContext, result2 error) { +func (fake *FakeNginxServiceInterface) ApplyConfigReturnsOnCall(i int, result1 *model.NginxConfigContext, result2 error) { fake.applyConfigMutex.Lock() defer fake.applyConfigMutex.Unlock() fake.ApplyConfigStub = nil @@ -290,74 +201,7 @@ func (fake *FakeResourceServiceInterface) ApplyConfigReturnsOnCall(i int, result }{result1, result2} } -func (fake *FakeResourceServiceInterface) DeleteInstances(arg1 context.Context, arg2 []*v1.Instance) *v1.Resource { - var arg2Copy []*v1.Instance - if arg2 != nil { - arg2Copy = make([]*v1.Instance, len(arg2)) - copy(arg2Copy, arg2) - } - fake.deleteInstancesMutex.Lock() - ret, specificReturn := fake.deleteInstancesReturnsOnCall[len(fake.deleteInstancesArgsForCall)] - fake.deleteInstancesArgsForCall = append(fake.deleteInstancesArgsForCall, struct { - arg1 context.Context - arg2 []*v1.Instance - }{arg1, arg2Copy}) - stub := fake.DeleteInstancesStub - fakeReturns := fake.deleteInstancesReturns - fake.recordInvocation("DeleteInstances", []interface{}{arg1, arg2Copy}) - fake.deleteInstancesMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeResourceServiceInterface) DeleteInstancesCallCount() int { - fake.deleteInstancesMutex.RLock() - defer fake.deleteInstancesMutex.RUnlock() - return len(fake.deleteInstancesArgsForCall) -} - -func (fake *FakeResourceServiceInterface) DeleteInstancesCalls(stub func(context.Context, []*v1.Instance) 
*v1.Resource) { - fake.deleteInstancesMutex.Lock() - defer fake.deleteInstancesMutex.Unlock() - fake.DeleteInstancesStub = stub -} - -func (fake *FakeResourceServiceInterface) DeleteInstancesArgsForCall(i int) (context.Context, []*v1.Instance) { - fake.deleteInstancesMutex.RLock() - defer fake.deleteInstancesMutex.RUnlock() - argsForCall := fake.deleteInstancesArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeResourceServiceInterface) DeleteInstancesReturns(result1 *v1.Resource) { - fake.deleteInstancesMutex.Lock() - defer fake.deleteInstancesMutex.Unlock() - fake.DeleteInstancesStub = nil - fake.deleteInstancesReturns = struct { - result1 *v1.Resource - }{result1} -} - -func (fake *FakeResourceServiceInterface) DeleteInstancesReturnsOnCall(i int, result1 *v1.Resource) { - fake.deleteInstancesMutex.Lock() - defer fake.deleteInstancesMutex.Unlock() - fake.DeleteInstancesStub = nil - if fake.deleteInstancesReturnsOnCall == nil { - fake.deleteInstancesReturnsOnCall = make(map[int]struct { - result1 *v1.Resource - }) - } - fake.deleteInstancesReturnsOnCall[i] = struct { - result1 *v1.Resource - }{result1} -} - -func (fake *FakeResourceServiceInterface) GetHTTPUpstreamServers(arg1 context.Context, arg2 *v1.Instance, arg3 string) ([]client.UpstreamServer, error) { +func (fake *FakeNginxServiceInterface) GetHTTPUpstreamServers(arg1 context.Context, arg2 *v1.Instance, arg3 string) ([]client.UpstreamServer, error) { fake.getHTTPUpstreamServersMutex.Lock() ret, specificReturn := fake.getHTTPUpstreamServersReturnsOnCall[len(fake.getHTTPUpstreamServersArgsForCall)] fake.getHTTPUpstreamServersArgsForCall = append(fake.getHTTPUpstreamServersArgsForCall, struct { @@ -378,26 +222,26 @@ func (fake *FakeResourceServiceInterface) GetHTTPUpstreamServers(arg1 context.Co return fakeReturns.result1, fakeReturns.result2 } -func (fake *FakeResourceServiceInterface) GetHTTPUpstreamServersCallCount() int { +func (fake *FakeNginxServiceInterface) 
GetHTTPUpstreamServersCallCount() int { fake.getHTTPUpstreamServersMutex.RLock() defer fake.getHTTPUpstreamServersMutex.RUnlock() return len(fake.getHTTPUpstreamServersArgsForCall) } -func (fake *FakeResourceServiceInterface) GetHTTPUpstreamServersCalls(stub func(context.Context, *v1.Instance, string) ([]client.UpstreamServer, error)) { +func (fake *FakeNginxServiceInterface) GetHTTPUpstreamServersCalls(stub func(context.Context, *v1.Instance, string) ([]client.UpstreamServer, error)) { fake.getHTTPUpstreamServersMutex.Lock() defer fake.getHTTPUpstreamServersMutex.Unlock() fake.GetHTTPUpstreamServersStub = stub } -func (fake *FakeResourceServiceInterface) GetHTTPUpstreamServersArgsForCall(i int) (context.Context, *v1.Instance, string) { +func (fake *FakeNginxServiceInterface) GetHTTPUpstreamServersArgsForCall(i int) (context.Context, *v1.Instance, string) { fake.getHTTPUpstreamServersMutex.RLock() defer fake.getHTTPUpstreamServersMutex.RUnlock() argsForCall := fake.getHTTPUpstreamServersArgsForCall[i] return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 } -func (fake *FakeResourceServiceInterface) GetHTTPUpstreamServersReturns(result1 []client.UpstreamServer, result2 error) { +func (fake *FakeNginxServiceInterface) GetHTTPUpstreamServersReturns(result1 []client.UpstreamServer, result2 error) { fake.getHTTPUpstreamServersMutex.Lock() defer fake.getHTTPUpstreamServersMutex.Unlock() fake.GetHTTPUpstreamServersStub = nil @@ -407,7 +251,7 @@ func (fake *FakeResourceServiceInterface) GetHTTPUpstreamServersReturns(result1 }{result1, result2} } -func (fake *FakeResourceServiceInterface) GetHTTPUpstreamServersReturnsOnCall(i int, result1 []client.UpstreamServer, result2 error) { +func (fake *FakeNginxServiceInterface) GetHTTPUpstreamServersReturnsOnCall(i int, result1 []client.UpstreamServer, result2 error) { fake.getHTTPUpstreamServersMutex.Lock() defer fake.getHTTPUpstreamServersMutex.Unlock() fake.GetHTTPUpstreamServersStub = nil @@ -423,7 +267,7 @@ func (fake 
*FakeResourceServiceInterface) GetHTTPUpstreamServersReturnsOnCall(i }{result1, result2} } -func (fake *FakeResourceServiceInterface) GetStreamUpstreams(arg1 context.Context, arg2 *v1.Instance) (*client.StreamUpstreams, error) { +func (fake *FakeNginxServiceInterface) GetStreamUpstreams(arg1 context.Context, arg2 *v1.Instance) (*client.StreamUpstreams, error) { fake.getStreamUpstreamsMutex.Lock() ret, specificReturn := fake.getStreamUpstreamsReturnsOnCall[len(fake.getStreamUpstreamsArgsForCall)] fake.getStreamUpstreamsArgsForCall = append(fake.getStreamUpstreamsArgsForCall, struct { @@ -443,26 +287,26 @@ func (fake *FakeResourceServiceInterface) GetStreamUpstreams(arg1 context.Contex return fakeReturns.result1, fakeReturns.result2 } -func (fake *FakeResourceServiceInterface) GetStreamUpstreamsCallCount() int { +func (fake *FakeNginxServiceInterface) GetStreamUpstreamsCallCount() int { fake.getStreamUpstreamsMutex.RLock() defer fake.getStreamUpstreamsMutex.RUnlock() return len(fake.getStreamUpstreamsArgsForCall) } -func (fake *FakeResourceServiceInterface) GetStreamUpstreamsCalls(stub func(context.Context, *v1.Instance) (*client.StreamUpstreams, error)) { +func (fake *FakeNginxServiceInterface) GetStreamUpstreamsCalls(stub func(context.Context, *v1.Instance) (*client.StreamUpstreams, error)) { fake.getStreamUpstreamsMutex.Lock() defer fake.getStreamUpstreamsMutex.Unlock() fake.GetStreamUpstreamsStub = stub } -func (fake *FakeResourceServiceInterface) GetStreamUpstreamsArgsForCall(i int) (context.Context, *v1.Instance) { +func (fake *FakeNginxServiceInterface) GetStreamUpstreamsArgsForCall(i int) (context.Context, *v1.Instance) { fake.getStreamUpstreamsMutex.RLock() defer fake.getStreamUpstreamsMutex.RUnlock() argsForCall := fake.getStreamUpstreamsArgsForCall[i] return argsForCall.arg1, argsForCall.arg2 } -func (fake *FakeResourceServiceInterface) GetStreamUpstreamsReturns(result1 *client.StreamUpstreams, result2 error) { +func (fake *FakeNginxServiceInterface) 
GetStreamUpstreamsReturns(result1 *client.StreamUpstreams, result2 error) { fake.getStreamUpstreamsMutex.Lock() defer fake.getStreamUpstreamsMutex.Unlock() fake.GetStreamUpstreamsStub = nil @@ -472,7 +316,7 @@ func (fake *FakeResourceServiceInterface) GetStreamUpstreamsReturns(result1 *cli }{result1, result2} } -func (fake *FakeResourceServiceInterface) GetStreamUpstreamsReturnsOnCall(i int, result1 *client.StreamUpstreams, result2 error) { +func (fake *FakeNginxServiceInterface) GetStreamUpstreamsReturnsOnCall(i int, result1 *client.StreamUpstreams, result2 error) { fake.getStreamUpstreamsMutex.Lock() defer fake.getStreamUpstreamsMutex.Unlock() fake.GetStreamUpstreamsStub = nil @@ -488,7 +332,7 @@ func (fake *FakeResourceServiceInterface) GetStreamUpstreamsReturnsOnCall(i int, }{result1, result2} } -func (fake *FakeResourceServiceInterface) GetUpstreams(arg1 context.Context, arg2 *v1.Instance) (*client.Upstreams, error) { +func (fake *FakeNginxServiceInterface) GetUpstreams(arg1 context.Context, arg2 *v1.Instance) (*client.Upstreams, error) { fake.getUpstreamsMutex.Lock() ret, specificReturn := fake.getUpstreamsReturnsOnCall[len(fake.getUpstreamsArgsForCall)] fake.getUpstreamsArgsForCall = append(fake.getUpstreamsArgsForCall, struct { @@ -508,26 +352,26 @@ func (fake *FakeResourceServiceInterface) GetUpstreams(arg1 context.Context, arg return fakeReturns.result1, fakeReturns.result2 } -func (fake *FakeResourceServiceInterface) GetUpstreamsCallCount() int { +func (fake *FakeNginxServiceInterface) GetUpstreamsCallCount() int { fake.getUpstreamsMutex.RLock() defer fake.getUpstreamsMutex.RUnlock() return len(fake.getUpstreamsArgsForCall) } -func (fake *FakeResourceServiceInterface) GetUpstreamsCalls(stub func(context.Context, *v1.Instance) (*client.Upstreams, error)) { +func (fake *FakeNginxServiceInterface) GetUpstreamsCalls(stub func(context.Context, *v1.Instance) (*client.Upstreams, error)) { fake.getUpstreamsMutex.Lock() defer fake.getUpstreamsMutex.Unlock() 
fake.GetUpstreamsStub = stub } -func (fake *FakeResourceServiceInterface) GetUpstreamsArgsForCall(i int) (context.Context, *v1.Instance) { +func (fake *FakeNginxServiceInterface) GetUpstreamsArgsForCall(i int) (context.Context, *v1.Instance) { fake.getUpstreamsMutex.RLock() defer fake.getUpstreamsMutex.RUnlock() argsForCall := fake.getUpstreamsArgsForCall[i] return argsForCall.arg1, argsForCall.arg2 } -func (fake *FakeResourceServiceInterface) GetUpstreamsReturns(result1 *client.Upstreams, result2 error) { +func (fake *FakeNginxServiceInterface) GetUpstreamsReturns(result1 *client.Upstreams, result2 error) { fake.getUpstreamsMutex.Lock() defer fake.getUpstreamsMutex.Unlock() fake.GetUpstreamsStub = nil @@ -537,7 +381,7 @@ func (fake *FakeResourceServiceInterface) GetUpstreamsReturns(result1 *client.Up }{result1, result2} } -func (fake *FakeResourceServiceInterface) GetUpstreamsReturnsOnCall(i int, result1 *client.Upstreams, result2 error) { +func (fake *FakeNginxServiceInterface) GetUpstreamsReturnsOnCall(i int, result1 *client.Upstreams, result2 error) { fake.getUpstreamsMutex.Lock() defer fake.getUpstreamsMutex.Unlock() fake.GetUpstreamsStub = nil @@ -553,7 +397,7 @@ func (fake *FakeResourceServiceInterface) GetUpstreamsReturnsOnCall(i int, resul }{result1, result2} } -func (fake *FakeResourceServiceInterface) Instance(arg1 string) *v1.Instance { +func (fake *FakeNginxServiceInterface) Instance(arg1 string) *v1.Instance { fake.instanceMutex.Lock() ret, specificReturn := fake.instanceReturnsOnCall[len(fake.instanceArgsForCall)] fake.instanceArgsForCall = append(fake.instanceArgsForCall, struct { @@ -572,26 +416,26 @@ func (fake *FakeResourceServiceInterface) Instance(arg1 string) *v1.Instance { return fakeReturns.result1 } -func (fake *FakeResourceServiceInterface) InstanceCallCount() int { +func (fake *FakeNginxServiceInterface) InstanceCallCount() int { fake.instanceMutex.RLock() defer fake.instanceMutex.RUnlock() return len(fake.instanceArgsForCall) } -func 
(fake *FakeResourceServiceInterface) InstanceCalls(stub func(string) *v1.Instance) { +func (fake *FakeNginxServiceInterface) InstanceCalls(stub func(string) *v1.Instance) { fake.instanceMutex.Lock() defer fake.instanceMutex.Unlock() fake.InstanceStub = stub } -func (fake *FakeResourceServiceInterface) InstanceArgsForCall(i int) string { +func (fake *FakeNginxServiceInterface) InstanceArgsForCall(i int) string { fake.instanceMutex.RLock() defer fake.instanceMutex.RUnlock() argsForCall := fake.instanceArgsForCall[i] return argsForCall.arg1 } -func (fake *FakeResourceServiceInterface) InstanceReturns(result1 *v1.Instance) { +func (fake *FakeNginxServiceInterface) InstanceReturns(result1 *v1.Instance) { fake.instanceMutex.Lock() defer fake.instanceMutex.Unlock() fake.InstanceStub = nil @@ -600,7 +444,7 @@ func (fake *FakeResourceServiceInterface) InstanceReturns(result1 *v1.Instance) }{result1} } -func (fake *FakeResourceServiceInterface) InstanceReturnsOnCall(i int, result1 *v1.Instance) { +func (fake *FakeNginxServiceInterface) InstanceReturnsOnCall(i int, result1 *v1.Instance) { fake.instanceMutex.Lock() defer fake.instanceMutex.Unlock() fake.InstanceStub = nil @@ -614,7 +458,7 @@ func (fake *FakeResourceServiceInterface) InstanceReturnsOnCall(i int, result1 * }{result1} } -func (fake *FakeResourceServiceInterface) UpdateHTTPUpstreamServers(arg1 context.Context, arg2 *v1.Instance, arg3 string, arg4 []*structpb.Struct) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error) { +func (fake *FakeNginxServiceInterface) UpdateHTTPUpstreamServers(arg1 context.Context, arg2 *v1.Instance, arg3 string, arg4 []*structpb.Struct) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error) { var arg4Copy []*structpb.Struct if arg4 != nil { arg4Copy = make([]*structpb.Struct, len(arg4)) @@ -641,26 +485,26 @@ func (fake *FakeResourceServiceInterface) UpdateHTTPUpstreamServers(arg1 context return fakeReturns.result1, 
fakeReturns.result2, fakeReturns.result3, fakeReturns.result4 } -func (fake *FakeResourceServiceInterface) UpdateHTTPUpstreamServersCallCount() int { +func (fake *FakeNginxServiceInterface) UpdateHTTPUpstreamServersCallCount() int { fake.updateHTTPUpstreamServersMutex.RLock() defer fake.updateHTTPUpstreamServersMutex.RUnlock() return len(fake.updateHTTPUpstreamServersArgsForCall) } -func (fake *FakeResourceServiceInterface) UpdateHTTPUpstreamServersCalls(stub func(context.Context, *v1.Instance, string, []*structpb.Struct) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error)) { +func (fake *FakeNginxServiceInterface) UpdateHTTPUpstreamServersCalls(stub func(context.Context, *v1.Instance, string, []*structpb.Struct) ([]client.UpstreamServer, []client.UpstreamServer, []client.UpstreamServer, error)) { fake.updateHTTPUpstreamServersMutex.Lock() defer fake.updateHTTPUpstreamServersMutex.Unlock() fake.UpdateHTTPUpstreamServersStub = stub } -func (fake *FakeResourceServiceInterface) UpdateHTTPUpstreamServersArgsForCall(i int) (context.Context, *v1.Instance, string, []*structpb.Struct) { +func (fake *FakeNginxServiceInterface) UpdateHTTPUpstreamServersArgsForCall(i int) (context.Context, *v1.Instance, string, []*structpb.Struct) { fake.updateHTTPUpstreamServersMutex.RLock() defer fake.updateHTTPUpstreamServersMutex.RUnlock() argsForCall := fake.updateHTTPUpstreamServersArgsForCall[i] return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 } -func (fake *FakeResourceServiceInterface) UpdateHTTPUpstreamServersReturns(result1 []client.UpstreamServer, result2 []client.UpstreamServer, result3 []client.UpstreamServer, result4 error) { +func (fake *FakeNginxServiceInterface) UpdateHTTPUpstreamServersReturns(result1 []client.UpstreamServer, result2 []client.UpstreamServer, result3 []client.UpstreamServer, result4 error) { fake.updateHTTPUpstreamServersMutex.Lock() defer fake.updateHTTPUpstreamServersMutex.Unlock() 
fake.UpdateHTTPUpstreamServersStub = nil @@ -672,7 +516,7 @@ func (fake *FakeResourceServiceInterface) UpdateHTTPUpstreamServersReturns(resul }{result1, result2, result3, result4} } -func (fake *FakeResourceServiceInterface) UpdateHTTPUpstreamServersReturnsOnCall(i int, result1 []client.UpstreamServer, result2 []client.UpstreamServer, result3 []client.UpstreamServer, result4 error) { +func (fake *FakeNginxServiceInterface) UpdateHTTPUpstreamServersReturnsOnCall(i int, result1 []client.UpstreamServer, result2 []client.UpstreamServer, result3 []client.UpstreamServer, result4 error) { fake.updateHTTPUpstreamServersMutex.Lock() defer fake.updateHTTPUpstreamServersMutex.Unlock() fake.UpdateHTTPUpstreamServersStub = nil @@ -692,22 +536,17 @@ func (fake *FakeResourceServiceInterface) UpdateHTTPUpstreamServersReturnsOnCall }{result1, result2, result3, result4} } -func (fake *FakeResourceServiceInterface) UpdateInstances(arg1 context.Context, arg2 []*v1.Instance) *v1.Resource { - var arg2Copy []*v1.Instance - if arg2 != nil { - arg2Copy = make([]*v1.Instance, len(arg2)) - copy(arg2Copy, arg2) - } - fake.updateInstancesMutex.Lock() - ret, specificReturn := fake.updateInstancesReturnsOnCall[len(fake.updateInstancesArgsForCall)] - fake.updateInstancesArgsForCall = append(fake.updateInstancesArgsForCall, struct { +func (fake *FakeNginxServiceInterface) UpdateResource(arg1 context.Context, arg2 *v1.Resource) *v1.Resource { + fake.updateResourceMutex.Lock() + ret, specificReturn := fake.updateResourceReturnsOnCall[len(fake.updateResourceArgsForCall)] + fake.updateResourceArgsForCall = append(fake.updateResourceArgsForCall, struct { arg1 context.Context - arg2 []*v1.Instance - }{arg1, arg2Copy}) - stub := fake.UpdateInstancesStub - fakeReturns := fake.updateInstancesReturns - fake.recordInvocation("UpdateInstances", []interface{}{arg1, arg2Copy}) - fake.updateInstancesMutex.Unlock() + arg2 *v1.Resource + }{arg1, arg2}) + stub := fake.UpdateResourceStub + fakeReturns := 
fake.updateResourceReturns + fake.recordInvocation("UpdateResource", []interface{}{arg1, arg2}) + fake.updateResourceMutex.Unlock() if stub != nil { return stub(arg1, arg2) } @@ -717,49 +556,49 @@ func (fake *FakeResourceServiceInterface) UpdateInstances(arg1 context.Context, return fakeReturns.result1 } -func (fake *FakeResourceServiceInterface) UpdateInstancesCallCount() int { - fake.updateInstancesMutex.RLock() - defer fake.updateInstancesMutex.RUnlock() - return len(fake.updateInstancesArgsForCall) +func (fake *FakeNginxServiceInterface) UpdateResourceCallCount() int { + fake.updateResourceMutex.RLock() + defer fake.updateResourceMutex.RUnlock() + return len(fake.updateResourceArgsForCall) } -func (fake *FakeResourceServiceInterface) UpdateInstancesCalls(stub func(context.Context, []*v1.Instance) *v1.Resource) { - fake.updateInstancesMutex.Lock() - defer fake.updateInstancesMutex.Unlock() - fake.UpdateInstancesStub = stub +func (fake *FakeNginxServiceInterface) UpdateResourceCalls(stub func(context.Context, *v1.Resource) *v1.Resource) { + fake.updateResourceMutex.Lock() + defer fake.updateResourceMutex.Unlock() + fake.UpdateResourceStub = stub } -func (fake *FakeResourceServiceInterface) UpdateInstancesArgsForCall(i int) (context.Context, []*v1.Instance) { - fake.updateInstancesMutex.RLock() - defer fake.updateInstancesMutex.RUnlock() - argsForCall := fake.updateInstancesArgsForCall[i] +func (fake *FakeNginxServiceInterface) UpdateResourceArgsForCall(i int) (context.Context, *v1.Resource) { + fake.updateResourceMutex.RLock() + defer fake.updateResourceMutex.RUnlock() + argsForCall := fake.updateResourceArgsForCall[i] return argsForCall.arg1, argsForCall.arg2 } -func (fake *FakeResourceServiceInterface) UpdateInstancesReturns(result1 *v1.Resource) { - fake.updateInstancesMutex.Lock() - defer fake.updateInstancesMutex.Unlock() - fake.UpdateInstancesStub = nil - fake.updateInstancesReturns = struct { +func (fake *FakeNginxServiceInterface) 
UpdateResourceReturns(result1 *v1.Resource) { + fake.updateResourceMutex.Lock() + defer fake.updateResourceMutex.Unlock() + fake.UpdateResourceStub = nil + fake.updateResourceReturns = struct { result1 *v1.Resource }{result1} } -func (fake *FakeResourceServiceInterface) UpdateInstancesReturnsOnCall(i int, result1 *v1.Resource) { - fake.updateInstancesMutex.Lock() - defer fake.updateInstancesMutex.Unlock() - fake.UpdateInstancesStub = nil - if fake.updateInstancesReturnsOnCall == nil { - fake.updateInstancesReturnsOnCall = make(map[int]struct { +func (fake *FakeNginxServiceInterface) UpdateResourceReturnsOnCall(i int, result1 *v1.Resource) { + fake.updateResourceMutex.Lock() + defer fake.updateResourceMutex.Unlock() + fake.UpdateResourceStub = nil + if fake.updateResourceReturnsOnCall == nil { + fake.updateResourceReturnsOnCall = make(map[int]struct { result1 *v1.Resource }) } - fake.updateInstancesReturnsOnCall[i] = struct { + fake.updateResourceReturnsOnCall[i] = struct { result1 *v1.Resource }{result1} } -func (fake *FakeResourceServiceInterface) UpdateStreamServers(arg1 context.Context, arg2 *v1.Instance, arg3 string, arg4 []*structpb.Struct) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error) { +func (fake *FakeNginxServiceInterface) UpdateStreamServers(arg1 context.Context, arg2 *v1.Instance, arg3 string, arg4 []*structpb.Struct) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error) { var arg4Copy []*structpb.Struct if arg4 != nil { arg4Copy = make([]*structpb.Struct, len(arg4)) @@ -786,26 +625,26 @@ func (fake *FakeResourceServiceInterface) UpdateStreamServers(arg1 context.Conte return fakeReturns.result1, fakeReturns.result2, fakeReturns.result3, fakeReturns.result4 } -func (fake *FakeResourceServiceInterface) UpdateStreamServersCallCount() int { +func (fake *FakeNginxServiceInterface) UpdateStreamServersCallCount() int { fake.updateStreamServersMutex.RLock() 
defer fake.updateStreamServersMutex.RUnlock() return len(fake.updateStreamServersArgsForCall) } -func (fake *FakeResourceServiceInterface) UpdateStreamServersCalls(stub func(context.Context, *v1.Instance, string, []*structpb.Struct) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error)) { +func (fake *FakeNginxServiceInterface) UpdateStreamServersCalls(stub func(context.Context, *v1.Instance, string, []*structpb.Struct) ([]client.StreamUpstreamServer, []client.StreamUpstreamServer, []client.StreamUpstreamServer, error)) { fake.updateStreamServersMutex.Lock() defer fake.updateStreamServersMutex.Unlock() fake.UpdateStreamServersStub = stub } -func (fake *FakeResourceServiceInterface) UpdateStreamServersArgsForCall(i int) (context.Context, *v1.Instance, string, []*structpb.Struct) { +func (fake *FakeNginxServiceInterface) UpdateStreamServersArgsForCall(i int) (context.Context, *v1.Instance, string, []*structpb.Struct) { fake.updateStreamServersMutex.RLock() defer fake.updateStreamServersMutex.RUnlock() argsForCall := fake.updateStreamServersArgsForCall[i] return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 } -func (fake *FakeResourceServiceInterface) UpdateStreamServersReturns(result1 []client.StreamUpstreamServer, result2 []client.StreamUpstreamServer, result3 []client.StreamUpstreamServer, result4 error) { +func (fake *FakeNginxServiceInterface) UpdateStreamServersReturns(result1 []client.StreamUpstreamServer, result2 []client.StreamUpstreamServer, result3 []client.StreamUpstreamServer, result4 error) { fake.updateStreamServersMutex.Lock() defer fake.updateStreamServersMutex.Unlock() fake.UpdateStreamServersStub = nil @@ -817,7 +656,7 @@ func (fake *FakeResourceServiceInterface) UpdateStreamServersReturns(result1 []c }{result1, result2, result3, result4} } -func (fake *FakeResourceServiceInterface) UpdateStreamServersReturnsOnCall(i int, result1 []client.StreamUpstreamServer, result2 
[]client.StreamUpstreamServer, result3 []client.StreamUpstreamServer, result4 error) { +func (fake *FakeNginxServiceInterface) UpdateStreamServersReturnsOnCall(i int, result1 []client.StreamUpstreamServer, result2 []client.StreamUpstreamServer, result3 []client.StreamUpstreamServer, result4 error) { fake.updateStreamServersMutex.Lock() defer fake.updateStreamServersMutex.Unlock() fake.UpdateStreamServersStub = nil @@ -837,15 +676,11 @@ func (fake *FakeResourceServiceInterface) UpdateStreamServersReturnsOnCall(i int }{result1, result2, result3, result4} } -func (fake *FakeResourceServiceInterface) Invocations() map[string][][]interface{} { +func (fake *FakeNginxServiceInterface) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() - fake.addInstancesMutex.RLock() - defer fake.addInstancesMutex.RUnlock() fake.applyConfigMutex.RLock() defer fake.applyConfigMutex.RUnlock() - fake.deleteInstancesMutex.RLock() - defer fake.deleteInstancesMutex.RUnlock() fake.getHTTPUpstreamServersMutex.RLock() defer fake.getHTTPUpstreamServersMutex.RUnlock() fake.getStreamUpstreamsMutex.RLock() @@ -856,8 +691,8 @@ func (fake *FakeResourceServiceInterface) Invocations() map[string][][]interface defer fake.instanceMutex.RUnlock() fake.updateHTTPUpstreamServersMutex.RLock() defer fake.updateHTTPUpstreamServersMutex.RUnlock() - fake.updateInstancesMutex.RLock() - defer fake.updateInstancesMutex.RUnlock() + fake.updateResourceMutex.RLock() + defer fake.updateResourceMutex.RUnlock() fake.updateStreamServersMutex.RLock() defer fake.updateStreamServersMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} @@ -867,7 +702,7 @@ func (fake *FakeResourceServiceInterface) Invocations() map[string][][]interface return copiedInvocations } -func (fake *FakeResourceServiceInterface) recordInvocation(key string, args []interface{}) { +func (fake *FakeNginxServiceInterface) recordInvocation(key string, args []interface{}) { 
fake.invocationsMutex.Lock() defer fake.invocationsMutex.Unlock() if fake.invocations == nil { diff --git a/internal/resource/resourcefakes/fake_process_operator.go b/internal/nginx/nginxfakes/fake_process_operator.go similarity index 99% rename from internal/resource/resourcefakes/fake_process_operator.go rename to internal/nginx/nginxfakes/fake_process_operator.go index 0b8ac14f99..f10b8c9c1d 100644 --- a/internal/resource/resourcefakes/fake_process_operator.go +++ b/internal/nginx/nginxfakes/fake_process_operator.go @@ -1,5 +1,5 @@ // Code generated by counterfeiter. DO NOT EDIT. -package resourcefakes +package nginxfakes import ( "context" diff --git a/internal/plugin/plugin_manager.go b/internal/plugin/plugin_manager.go index cb27070c00..c4f8a9ea04 100644 --- a/internal/plugin/plugin_manager.go +++ b/internal/plugin/plugin_manager.go @@ -17,9 +17,8 @@ import ( "github.com/nginx/agent/v3/internal/collector" "github.com/nginx/agent/v3/internal/command" - "github.com/nginx/agent/v3/internal/file" "github.com/nginx/agent/v3/internal/grpc" - "github.com/nginx/agent/v3/internal/resource" + "github.com/nginx/agent/v3/internal/nginx" "github.com/nginx/agent/v3/internal/bus" "github.com/nginx/agent/v3/internal/config" @@ -31,23 +30,15 @@ func LoadPlugins(ctx context.Context, agentConfig *config.Config) []bus.Plugin { manifestLock := &sync.RWMutex{} - plugins = addResourcePlugin(plugins, agentConfig) - plugins = addCommandAndFilePlugins(ctx, plugins, agentConfig, manifestLock) - plugins = addAuxiliaryCommandAndFilePlugins(ctx, plugins, agentConfig, manifestLock) + plugins = addCommandAndNginxPlugins(ctx, plugins, agentConfig, manifestLock) + plugins = addAuxiliaryCommandAndNginxPlugins(ctx, plugins, agentConfig, manifestLock) plugins = addCollectorPlugin(ctx, agentConfig, plugins) plugins = addWatcherPlugin(plugins, agentConfig) return plugins } -func addResourcePlugin(plugins []bus.Plugin, agentConfig *config.Config) []bus.Plugin { - resourcePlugin := 
resource.NewResource(agentConfig) - plugins = append(plugins, resourcePlugin) - - return plugins -} - -func addCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, agentConfig *config.Config, +func addCommandAndNginxPlugins(ctx context.Context, plugins []bus.Plugin, agentConfig *config.Config, manifestLock *sync.RWMutex, ) []bus.Plugin { if agentConfig.IsCommandGrpcClientConfigured() { @@ -62,8 +53,8 @@ func addCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, agentCo } else { commandPlugin := command.NewCommandPlugin(agentConfig, grpcConnection, model.Command) plugins = append(plugins, commandPlugin) - filePlugin := file.NewFilePlugin(agentConfig, grpcConnection, model.Command, manifestLock) - plugins = append(plugins, filePlugin) + nginxPlugin := nginx.NewNginx(agentConfig, grpcConnection, model.Command, manifestLock) + plugins = append(plugins, nginxPlugin) } } else { slog.InfoContext(ctx, "Agent is not connected to a management plane. "+ @@ -73,7 +64,7 @@ func addCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, agentCo return plugins } -func addAuxiliaryCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, +func addAuxiliaryCommandAndNginxPlugins(ctx context.Context, plugins []bus.Plugin, agentConfig *config.Config, manifestLock *sync.RWMutex, ) []bus.Plugin { if agentConfig.IsAuxiliaryCommandGrpcClientConfigured() { @@ -88,8 +79,8 @@ func addAuxiliaryCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin } else { auxCommandPlugin := command.NewCommandPlugin(agentConfig, auxGRPCConnection, model.Auxiliary) plugins = append(plugins, auxCommandPlugin) - readFilePlugin := file.NewFilePlugin(agentConfig, auxGRPCConnection, model.Auxiliary, manifestLock) - plugins = append(plugins, readFilePlugin) + readNginxPlugin := nginx.NewNginx(agentConfig, auxGRPCConnection, model.Auxiliary, manifestLock) + plugins = append(plugins, readNginxPlugin) } } else { slog.DebugContext(ctx, "Agent is not connected to an 
auxiliary management plane. "+ diff --git a/internal/plugin/plugin_manager_test.go b/internal/plugin/plugin_manager_test.go index 172cb15424..5c19899852 100644 --- a/internal/plugin/plugin_manager_test.go +++ b/internal/plugin/plugin_manager_test.go @@ -13,8 +13,7 @@ import ( "github.com/nginx/agent/v3/internal/collector" "github.com/nginx/agent/v3/internal/command" - "github.com/nginx/agent/v3/internal/file" - "github.com/nginx/agent/v3/internal/resource" + "github.com/nginx/agent/v3/internal/nginx" "github.com/nginx/agent/v3/internal/bus" "github.com/nginx/agent/v3/internal/config" @@ -34,7 +33,6 @@ func TestLoadPlugins(t *testing.T) { name: "Test 1: Load plugins", input: &config.Config{}, expected: []bus.Plugin{ - &resource.Resource{}, &watcher.Watcher{}, }, }, @@ -58,11 +56,10 @@ func TestLoadPlugins(t *testing.T) { Features: config.DefaultFeatures(), }, expected: []bus.Plugin{ - &resource.Resource{}, &command.CommandPlugin{}, - &file.FilePlugin{}, + &nginx.NginxPlugin{}, &command.CommandPlugin{}, - &file.FilePlugin{}, + &nginx.NginxPlugin{}, &watcher.Watcher{}, }, }, @@ -77,7 +74,6 @@ func TestLoadPlugins(t *testing.T) { Features: config.DefaultFeatures(), }, expected: []bus.Plugin{ - &resource.Resource{}, &collector.Collector{}, &watcher.Watcher{}, }, @@ -103,9 +99,8 @@ func TestLoadPlugins(t *testing.T) { }, }, expected: []bus.Plugin{ - &resource.Resource{}, &command.CommandPlugin{}, - &file.FilePlugin{}, + &nginx.NginxPlugin{}, &watcher.Watcher{}, }, }, @@ -134,9 +129,8 @@ func TestLoadPlugins(t *testing.T) { }, }, expected: []bus.Plugin{ - &resource.Resource{}, &command.CommandPlugin{}, - &file.FilePlugin{}, + &nginx.NginxPlugin{}, &collector.Collector{}, &watcher.Watcher{}, }, diff --git a/internal/resource/resource_plugin.go b/internal/resource/resource_plugin.go deleted file mode 100644 index 27c33cda7e..0000000000 --- a/internal/resource/resource_plugin.go +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright (c) F5, Inc. 
-// -// This source code is licensed under the Apache License, Version 2.0 license found in the -// LICENSE file in the root directory of this source tree. - -package resource - -import ( - "context" - "errors" - "log/slog" - "sync" - - mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" - "github.com/nginx/agent/v3/internal/config" - response "github.com/nginx/agent/v3/internal/datasource/proto" - "github.com/nginx/agent/v3/internal/logger" - "github.com/nginx/agent/v3/internal/model" - - "github.com/nginx/agent/v3/internal/bus" -) - -// The resource plugin listens for a writeConfigSuccessfulTopic from the file plugin after the config apply -// files have been written. The resource plugin then, validates the config, reloads the instance and monitors the logs. -// This is done in the resource plugin to make the file plugin usable for every type of instance. - -type Resource struct { - messagePipe bus.MessagePipeInterface - resourceService resourceServiceInterface - agentConfig *config.Config - agentConfigMutex *sync.Mutex -} - -type errResponse struct { - Status string `json:"status"` - Text string `json:"test"` - Code string `json:"code"` -} - -type plusAPIErr struct { - Error errResponse `json:"error"` - RequestID string `json:"request_id"` - Href string `json:"href"` -} - -var _ bus.Plugin = (*Resource)(nil) - -func NewResource(agentConfig *config.Config) *Resource { - return &Resource{ - agentConfig: agentConfig, - agentConfigMutex: &sync.Mutex{}, - } -} - -func (r *Resource) Init(ctx context.Context, messagePipe bus.MessagePipeInterface) error { - slog.DebugContext(ctx, "Starting resource plugin") - - r.messagePipe = messagePipe - r.resourceService = NewResourceService(ctx, r.agentConfig) - - return nil -} - -func (*Resource) Close(ctx context.Context) error { - slog.InfoContext(ctx, "Closing resource plugin") - return nil -} - -func (*Resource) Info() *bus.Info { - return &bus.Info{ - Name: "resource", - } -} - -// cyclomatic complexity 11 max is 10 - -func (r 
*Resource) Process(ctx context.Context, msg *bus.Message) { - switch msg.Topic { - case bus.AddInstancesTopic: - slog.DebugContext(ctx, "Resource plugin received add instances message") - instanceList, ok := msg.Data.([]*mpi.Instance) - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to []*mpi.Instance", "payload", msg.Data) - - return - } - - resource := r.resourceService.AddInstances(instanceList) - - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.ResourceUpdateTopic, Data: resource}) - - return - case bus.UpdatedInstancesTopic: - slog.DebugContext(ctx, "Resource plugin received update instances message") - instanceList, ok := msg.Data.([]*mpi.Instance) - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to []*mpi.Instance", "payload", msg.Data) - - return - } - resource := r.resourceService.UpdateInstances(ctx, instanceList) - - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.ResourceUpdateTopic, Data: resource}) - - return - - case bus.DeletedInstancesTopic: - slog.DebugContext(ctx, "Resource plugin received delete instances message") - instanceList, ok := msg.Data.([]*mpi.Instance) - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to []*mpi.Instance", "payload", msg.Data) - - return - } - resource := r.resourceService.DeleteInstances(ctx, instanceList) - - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.ResourceUpdateTopic, Data: resource}) - - return - case bus.WriteConfigSuccessfulTopic: - r.handleWriteConfigSuccessful(ctx, msg) - case bus.RollbackWriteTopic: - r.handleRollbackWrite(ctx, msg) - case bus.APIActionRequestTopic: - r.handleAPIActionRequest(ctx, msg) - case bus.AgentConfigUpdateTopic: - r.handleAgentConfigUpdate(ctx, msg) - default: - slog.DebugContext(ctx, "Unknown topic", "topic", msg.Topic) - } -} - -func (*Resource) Subscriptions() []string { - return []string{ - bus.AddInstancesTopic, - bus.UpdatedInstancesTopic, - bus.DeletedInstancesTopic, - bus.WriteConfigSuccessfulTopic, - 
bus.RollbackWriteTopic, - bus.APIActionRequestTopic, - bus.AgentConfigUpdateTopic, - } -} - -func (r *Resource) Reconfigure(ctx context.Context, agentConfig *config.Config) error { - slog.DebugContext(ctx, "Resource plugin is reconfiguring to update agent configuration") - - r.agentConfigMutex.Lock() - defer r.agentConfigMutex.Unlock() - - r.agentConfig = agentConfig - - return nil -} - -func (r *Resource) handleAPIActionRequest(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "Resource plugin received api action request message") - managementPlaneRequest, ok := msg.Data.(*mpi.ManagementPlaneRequest) - - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.ManagementPlaneRequest", "payload", - msg.Data) - - return - } - - request, requestOk := managementPlaneRequest.GetRequest().(*mpi.ManagementPlaneRequest_ActionRequest) - if !requestOk { - slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.ManagementPlaneRequest_ActionRequest", - "payload", msg.Data) - } - - instanceID := request.ActionRequest.GetInstanceId() - - switch request.ActionRequest.GetAction().(type) { - case *mpi.APIActionRequest_NginxPlusAction: - r.handleNginxPlusActionRequest(ctx, request.ActionRequest.GetNginxPlusAction(), instanceID) - default: - slog.DebugContext(ctx, "API action request not implemented yet") - } -} - -func (r *Resource) handleNginxPlusActionRequest(ctx context.Context, action *mpi.NGINXPlusAction, instanceID string) { - correlationID := logger.CorrelationID(ctx) - instance := r.resourceService.Instance(instanceID) - apiAction := APIAction{ - ResourceService: r.resourceService, - } - if instance == nil { - slog.ErrorContext(ctx, "Unable to find instance with ID", "id", instanceID) - resp := response.CreateDataPlaneResponse( - correlationID, - &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, - Message: "", - Error: "failed to preform API action, could not find instance with ID: " + instanceID, - }, - 
mpi.DataPlaneResponse_API_ACTION_REQUEST, - instanceID, - ) - - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) - - return - } - - if instance.GetInstanceMeta().GetInstanceType() != mpi.InstanceMeta_INSTANCE_TYPE_NGINX_PLUS { - slog.ErrorContext(ctx, "Failed to preform API action", "error", errors.New("instance is not NGINX Plus")) - resp := response.CreateDataPlaneResponse( - correlationID, - &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, - Message: "", - Error: "failed to preform API action, instance is not NGINX Plus", - }, - mpi.DataPlaneResponse_API_ACTION_REQUEST, - instanceID, - ) - - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) - - return - } - - switch action.GetAction().(type) { - case *mpi.NGINXPlusAction_UpdateHttpUpstreamServers: - slog.DebugContext(ctx, "Updating http upstream servers", "request", action.GetUpdateHttpUpstreamServers()) - resp := apiAction.HandleUpdateHTTPUpstreamsRequest(ctx, action, instance) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) - case *mpi.NGINXPlusAction_GetHttpUpstreamServers: - slog.DebugContext(ctx, "Getting http upstream servers", "request", action.GetGetHttpUpstreamServers()) - resp := apiAction.HandleGetHTTPUpstreamsServersRequest(ctx, action, instance) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) - case *mpi.NGINXPlusAction_UpdateStreamServers: - slog.DebugContext(ctx, "Updating stream servers", "request", action.GetUpdateStreamServers()) - resp := apiAction.HandleUpdateStreamServersRequest(ctx, action, instance) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) - case *mpi.NGINXPlusAction_GetStreamUpstreams: - slog.DebugContext(ctx, "Getting stream upstreams", "request", action.GetGetStreamUpstreams()) - resp := apiAction.HandleGetStreamUpstreamsRequest(ctx, instance) - 
r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) - case *mpi.NGINXPlusAction_GetUpstreams: - slog.DebugContext(ctx, "Getting upstreams", "request", action.GetGetUpstreams()) - resp := apiAction.HandleGetUpstreamsRequest(ctx, instance) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: resp}) - default: - slog.DebugContext(ctx, "NGINX Plus action not implemented yet") - } -} - -func (r *Resource) handleWriteConfigSuccessful(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "Resource plugin received write config successful message") - data, ok := msg.Data.(*model.ConfigApplyMessage) - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *model.ConfigApplyMessage", "payload", msg.Data) - - return - } - configContext, err := r.resourceService.ApplyConfig(ctx, data.InstanceID) - if err != nil { - data.Error = err - - slog.ErrorContext( - ctx, - "Errors found during config apply, sending error status and rolling back configuration updates", - "error", err, - ) - - dpResponse := response.CreateDataPlaneResponse( - data.CorrelationID, - &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_ERROR, - Message: "Config apply failed, rolling back config", - Error: err.Error(), - }, - mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, - data.InstanceID, - ) - - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: dpResponse}) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.ConfigApplyFailedTopic, Data: data}) - - return - } - - dpResponse := response.CreateDataPlaneResponse( - data.CorrelationID, - &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_OK, - Message: "Config apply successful", - Error: "", - }, - mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, - data.InstanceID, - ) - - successMessage := &model.ReloadSuccess{ - ConfigContext: configContext, - DataPlaneResponse: dpResponse, - } - - r.messagePipe.Process(ctx, 
&bus.Message{Topic: bus.ReloadSuccessfulTopic, Data: successMessage}) -} - -func (r *Resource) handleRollbackWrite(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "Resource plugin received rollback write message") - data, ok := msg.Data.(*model.ConfigApplyMessage) - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *model.ConfigApplyMessage", "payload", msg.Data) - - return - } - _, err := r.resourceService.ApplyConfig(ctx, data.InstanceID) - if err != nil { - slog.ErrorContext(ctx, "Errors found during rollback, sending failure status", "error", err) - - rollbackResponse := response.CreateDataPlaneResponse( - data.CorrelationID, - &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_ERROR, - Message: "Rollback failed", - Error: err.Error(), - }, - mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, - data.InstanceID, - ) - - applyResponse := response.CreateDataPlaneResponse( - data.CorrelationID, - &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, - Message: "Config apply failed, rollback failed", - Error: data.Error.Error(), - }, - mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, - data.InstanceID, - ) - - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: rollbackResponse}) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.ConfigApplyCompleteTopic, Data: applyResponse}) - - return - } - - applyResponse := response.CreateDataPlaneResponse( - data.CorrelationID, - &mpi.CommandResponse{ - Status: mpi.CommandResponse_COMMAND_STATUS_FAILURE, - Message: "Config apply failed, rollback successful", - Error: data.Error.Error(), - }, - mpi.DataPlaneResponse_CONFIG_APPLY_REQUEST, - data.InstanceID, - ) - - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.ConfigApplyCompleteTopic, Data: applyResponse}) -} - -func (r *Resource) handleAgentConfigUpdate(ctx context.Context, msg *bus.Message) { - slog.DebugContext(ctx, "Resource plugin received agent config update message") - - 
r.agentConfigMutex.Lock() - defer r.agentConfigMutex.Unlock() - - agentConfig, ok := msg.Data.(*config.Config) - if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to *config.Config", "payload", msg.Data) - return - } - - r.agentConfig = agentConfig -} diff --git a/internal/watcher/health/health_watcher_service.go b/internal/watcher/health/health_watcher_service.go index 87768f4b1a..a675ede303 100644 --- a/internal/watcher/health/health_watcher_service.go +++ b/internal/watcher/health/health_watcher_service.go @@ -30,9 +30,9 @@ type ( HealthWatcherService struct { agentConfig *config.Config - cache map[string]*mpi.InstanceHealth // key is instanceID - watchers map[string]healthWatcherOperator // key is instanceID - instances map[string]*mpi.Instance // key is instanceID + cache map[string]*mpi.InstanceHealth // key is instanceID + watcher healthWatcherOperator // key is instanceID + instances map[string]*mpi.Instance // key is instanceID healthWatcherMutex sync.Mutex } @@ -44,22 +44,23 @@ type ( func NewHealthWatcherService(agentConfig *config.Config) *HealthWatcherService { return &HealthWatcherService{ - watchers: make(map[string]healthWatcherOperator), + watcher: NewNginxHealthWatcher(), cache: make(map[string]*mpi.InstanceHealth), instances: make(map[string]*mpi.Instance), agentConfig: agentConfig, } } -func (hw *HealthWatcherService) AddHealthWatcher(ctx context.Context, instances []*mpi.Instance) { +func (hw *HealthWatcherService) UpdateHealthWatcher(ctx context.Context, instances []*mpi.Instance) { hw.healthWatcherMutex.Lock() defer hw.healthWatcherMutex.Unlock() + clear(hw.instances) + for _, instance := range instances { switch instance.GetInstanceMeta().GetInstanceType() { case mpi.InstanceMeta_INSTANCE_TYPE_NGINX, mpi.InstanceMeta_INSTANCE_TYPE_NGINX_PLUS: - watcher := NewNginxHealthWatcher() - hw.watchers[instance.GetInstanceMeta().GetInstanceId()] = watcher + hw.instances[instance.GetInstanceMeta().GetInstanceId()] = instance case 
mpi.InstanceMeta_INSTANCE_TYPE_AGENT: case mpi.InstanceMeta_INSTANCE_TYPE_UNSPECIFIED, mpi.InstanceMeta_INSTANCE_TYPE_UNIT, @@ -72,26 +73,6 @@ func (hw *HealthWatcherService) AddHealthWatcher(ctx context.Context, instances "instance_type", instance.GetInstanceMeta().GetInstanceType(), ) } - hw.instances[instance.GetInstanceMeta().GetInstanceId()] = instance - } -} - -func (hw *HealthWatcherService) UpdateHealthWatcher(instances []*mpi.Instance) { - hw.healthWatcherMutex.Lock() - defer hw.healthWatcherMutex.Unlock() - - for _, instance := range instances { - hw.instances[instance.GetInstanceMeta().GetInstanceId()] = instance - } -} - -func (hw *HealthWatcherService) DeleteHealthWatcher(instances []*mpi.Instance) { - hw.healthWatcherMutex.Lock() - defer hw.healthWatcherMutex.Unlock() - - for _, instance := range instances { - delete(hw.watchers, instance.GetInstanceMeta().GetInstanceId()) - delete(hw.instances, instance.GetInstanceMeta().GetInstanceId()) } } @@ -138,11 +119,11 @@ func (hw *HealthWatcherService) Watch(ctx context.Context, ch chan<- InstanceHea func (hw *HealthWatcherService) health(ctx context.Context) (updatedStatuses []*mpi.InstanceHealth, isHealthDiff bool, ) { - currentHealth := make(map[string]*mpi.InstanceHealth, len(hw.watchers)) + currentHealth := make(map[string]*mpi.InstanceHealth, len(hw.instances)) allStatuses := make([]*mpi.InstanceHealth, 0) - for instanceID, watcher := range hw.watchers { - instanceHealth, err := watcher.Health(ctx, hw.instances[instanceID]) + for instanceID := range hw.instances { + instanceHealth, err := hw.watcher.Health(ctx, hw.instances[instanceID]) if instanceHealth == nil { instanceHealth = &mpi.InstanceHealth{ InstanceId: instanceID, diff --git a/internal/watcher/health/health_watcher_service_test.go b/internal/watcher/health/health_watcher_service_test.go index 9dc875f01d..65ceee351e 100644 --- a/internal/watcher/health/health_watcher_service_test.go +++ b/internal/watcher/health/health_watcher_service_test.go 
@@ -6,15 +6,13 @@ package health import ( - "context" "errors" "fmt" "reflect" "testing" - "github.com/nginx/agent/v3/internal/watcher/health/healthfakes" - mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "github.com/nginx/agent/v3/internal/watcher/health/healthfakes" "github.com/nginx/agent/v3/test/protos" "github.com/nginx/agent/v3/test/types" "github.com/stretchr/testify/assert" @@ -36,96 +34,53 @@ func TestHealthWatcherService_AddHealthWatcher(t *testing.T) { }, numWatchers: 1, }, - { - name: "Test 2: Not Supported Instance", - instances: []*mpi.Instance{ - protos.UnsupportedInstance(), - }, - }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { healthWatcher := NewHealthWatcherService(agentConfig) - healthWatcher.AddHealthWatcher(t.Context(), test.instances) - - if test.numWatchers == 1 { - assert.Len(t, healthWatcher.watchers, 1) - assert.NotNil(t, healthWatcher.watchers[instance.GetInstanceMeta().GetInstanceId()]) - assert.NotNil(t, healthWatcher.instances[instance.GetInstanceMeta().GetInstanceId()]) - } else { - assert.Empty(t, healthWatcher.watchers) - } + assert.NotNil(t, healthWatcher.watcher) }) } } -func TestHealthWatcherService_DeleteHealthWatcher(t *testing.T) { +func TestHealthWatcherService_UpdateHealthWatcher(t *testing.T) { agentConfig := types.AgentConfig() healthWatcher := NewHealthWatcherService(agentConfig) instance := protos.NginxOssInstance([]string{}) - instances := []*mpi.Instance{instance} - healthWatcher.AddHealthWatcher(t.Context(), instances) - assert.Len(t, healthWatcher.watchers, 1) - - healthWatcher.DeleteHealthWatcher(instances) - assert.Empty(t, healthWatcher.watchers) - assert.Nil(t, healthWatcher.instances[instance.GetInstanceMeta().GetInstanceId()]) -} + healthWatcher.instances = map[string]*mpi.Instance{ + instance.GetInstanceMeta().GetInstanceId(): instance, + } -func TestHealthWatcherService_UpdateHealthWatcher(t *testing.T) { - agentConfig := types.AgentConfig() - healthWatcher := 
NewHealthWatcherService(agentConfig) - instance := protos.NginxOssInstance([]string{}) updatedInstance := protos.NginxPlusInstance([]string{}) updatedInstance.GetInstanceMeta().InstanceId = instance.GetInstanceMeta().GetInstanceId() - instances := []*mpi.Instance{instance} - healthWatcher.AddHealthWatcher(t.Context(), instances) assert.Equal(t, instance, healthWatcher.instances[instance.GetInstanceMeta().GetInstanceId()]) - healthWatcher.UpdateHealthWatcher([]*mpi.Instance{updatedInstance}) - + healthWatcher.UpdateHealthWatcher(t.Context(), []*mpi.Instance{updatedInstance}) assert.Equal(t, updatedInstance, healthWatcher.instances[instance.GetInstanceMeta().GetInstanceId()]) } func TestHealthWatcherService_health(t *testing.T) { - ctx := context.Background() - agentConfig := types.AgentConfig() - healthWatcher := NewHealthWatcherService(agentConfig) ossInstance := protos.NginxOssInstance([]string{}) plusInstance := protos.NginxPlusInstance([]string{}) unspecifiedInstance := protos.UnsupportedInstance() - watchers := make(map[string]healthWatcherOperator) - - fakeOSSHealthOp := healthfakes.FakeHealthWatcherOperator{} - fakeOSSHealthOp.HealthReturns(protos.HealthyInstanceHealth(), nil) - - fakePlusHealthOp := healthfakes.FakeHealthWatcherOperator{} - fakePlusHealthOp.HealthReturns(protos.UnhealthyInstanceHealth(), nil) - - fakeUnspecifiedHealthOp := healthfakes.FakeHealthWatcherOperator{} - fakeUnspecifiedHealthOp.HealthReturns(nil, errors.New("unable to determine health")) - - watchers[plusInstance.GetInstanceMeta().GetInstanceId()] = &fakePlusHealthOp - watchers[ossInstance.GetInstanceMeta().GetInstanceId()] = &fakeOSSHealthOp - watchers[unspecifiedInstance.GetInstanceMeta().GetInstanceId()] = &fakeUnspecifiedHealthOp - healthWatcher.watchers = watchers - - expected := []*mpi.InstanceHealth{ - protos.HealthyInstanceHealth(), - protos.UnhealthyInstanceHealth(), - protos.UnspecifiedInstanceHealth(), - } tests := []struct { - cache map[string]*mpi.InstanceHealth - name 
string - isHealthDiff bool + instances map[string]*mpi.Instance + name string + cache map[string]*mpi.InstanceHealth + updatedInstances []*mpi.InstanceHealth + isHealthDiff bool }{ { - name: "Test 1: Status Changed", + name: "Test 1: NGINX Instance Status Changed", + instances: map[string]*mpi.Instance{ + ossInstance.GetInstanceMeta().GetInstanceId(): ossInstance, + plusInstance.GetInstanceMeta().GetInstanceId(): plusInstance, + unspecifiedInstance.GetInstanceMeta().GetInstanceId(): unspecifiedInstance, + }, cache: map[string]*mpi.InstanceHealth{ ossInstance.GetInstanceMeta().GetInstanceId(): protos.HealthyInstanceHealth(), plusInstance.GetInstanceMeta().GetInstanceId(): { @@ -135,36 +90,98 @@ func TestHealthWatcherService_health(t *testing.T) { unspecifiedInstance.GetInstanceMeta().GetInstanceId(): protos.UnspecifiedInstanceHealth(), }, isHealthDiff: true, + updatedInstances: []*mpi.InstanceHealth{ + protos.HealthyInstanceHealth(), + { + InstanceId: plusInstance.GetInstanceMeta().GetInstanceId(), + InstanceHealthStatus: mpi.InstanceHealth_INSTANCE_HEALTH_STATUS_UNHEALTHY, + }, + protos.UnspecifiedInstanceHealth(), + }, }, { - name: "Test 2: Status Not Changed", + name: "Test 2: NGINX Instance No Status Changed", + instances: map[string]*mpi.Instance{ + ossInstance.GetInstanceMeta().GetInstanceId(): ossInstance, + plusInstance.GetInstanceMeta().GetInstanceId(): plusInstance, + unspecifiedInstance.GetInstanceMeta().GetInstanceId(): unspecifiedInstance, + }, cache: map[string]*mpi.InstanceHealth{ ossInstance.GetInstanceMeta().GetInstanceId(): protos.HealthyInstanceHealth(), plusInstance.GetInstanceMeta().GetInstanceId(): protos.UnhealthyInstanceHealth(), unspecifiedInstance.GetInstanceMeta().GetInstanceId(): protos.UnspecifiedInstanceHealth(), }, isHealthDiff: false, + updatedInstances: []*mpi.InstanceHealth{ + protos.HealthyInstanceHealth(), + { + InstanceId: plusInstance.GetInstanceMeta().GetInstanceId(), + InstanceHealthStatus: 
mpi.InstanceHealth_INSTANCE_HEALTH_STATUS_UNHEALTHY, + }, + protos.UnspecifiedInstanceHealth(), + }, }, { - name: "Test 3: Less Instances", + name: "Test 3: Deleted NGINX Instances ", + instances: map[string]*mpi.Instance{ + ossInstance.GetInstanceMeta().GetInstanceId(): ossInstance, + unspecifiedInstance.GetInstanceMeta().GetInstanceId(): unspecifiedInstance, + }, cache: map[string]*mpi.InstanceHealth{ - ossInstance.GetInstanceMeta().GetInstanceId(): { - InstanceId: ossInstance.GetInstanceMeta().GetInstanceId(), + ossInstance.GetInstanceMeta().GetInstanceId(): protos.HealthyInstanceHealth(), + plusInstance.GetInstanceMeta().GetInstanceId(): protos.UnhealthyInstanceHealth(), + unspecifiedInstance.GetInstanceMeta().GetInstanceId(): protos.UnspecifiedInstanceHealth(), + }, + isHealthDiff: true, + updatedInstances: []*mpi.InstanceHealth{ + protos.HealthyInstanceHealth(), + { + InstanceId: plusInstance.GetInstanceMeta().GetInstanceId(), InstanceHealthStatus: mpi.InstanceHealth_INSTANCE_HEALTH_STATUS_UNHEALTHY, }, + protos.UnspecifiedInstanceHealth(), + }, + }, + { + name: "Test 4: Added NGINX Instances ", + instances: map[string]*mpi.Instance{ + ossInstance.GetInstanceMeta().GetInstanceId(): ossInstance, + plusInstance.GetInstanceMeta().GetInstanceId(): plusInstance, + unspecifiedInstance.GetInstanceMeta().GetInstanceId(): unspecifiedInstance, + }, + cache: map[string]*mpi.InstanceHealth{ + ossInstance.GetInstanceMeta().GetInstanceId(): protos.HealthyInstanceHealth(), unspecifiedInstance.GetInstanceMeta().GetInstanceId(): protos.UnspecifiedInstanceHealth(), }, isHealthDiff: true, + updatedInstances: []*mpi.InstanceHealth{ + protos.HealthyInstanceHealth(), + { + InstanceId: plusInstance.GetInstanceMeta().GetInstanceId(), + InstanceHealthStatus: mpi.InstanceHealth_INSTANCE_HEALTH_STATUS_UNHEALTHY, + }, + protos.UnspecifiedInstanceHealth(), + }, }, } for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { + t.Run(test.name, func(t *testing.T) { + agentConfig 
:= types.AgentConfig() + healthWatcher := NewHealthWatcherService(agentConfig) + fakeHealthWatcher := healthfakes.FakeHealthWatcherOperator{} + + fakeHealthWatcher.HealthReturnsOnCall(0, protos.HealthyInstanceHealth(), nil) + fakeHealthWatcher.HealthReturnsOnCall(1, protos.UnhealthyInstanceHealth(), nil) + fakeHealthWatcher.HealthReturnsOnCall(2, nil, errors.New("unable to determine health")) + + healthWatcher.instances = test.instances healthWatcher.updateCache(test.cache) - instanceHealth, healthDiff := healthWatcher.health(ctx) - assert.Equal(t, test.isHealthDiff, healthDiff) + healthWatcher.watcher = &fakeHealthWatcher + updatedStatus, isHealthDiff := healthWatcher.health(t.Context()) + assert.Equal(t, test.isHealthDiff, isHealthDiff) - reflect.DeepEqual(instanceHealth, expected) + reflect.DeepEqual(test.updatedInstances, updatedStatus) }) } } diff --git a/internal/watcher/instance/instance_watcher_service.go b/internal/watcher/instance/instance_watcher_service.go index 0279375992..e4d67f62ac 100644 --- a/internal/watcher/instance/instance_watcher_service.go +++ b/internal/watcher/instance/instance_watcher_service.go @@ -14,7 +14,9 @@ import ( "sync/atomic" "time" + "github.com/nginx/agent/v3/pkg/host" "github.com/nginx/agent/v3/pkg/host/exec" + proto2 "google.golang.org/protobuf/proto" "github.com/nginx/agent/v3/internal/datasource/proto" @@ -41,29 +43,31 @@ type ( } InstanceWatcherService struct { - processOperator process.ProcessOperatorInterface - nginxConfigParser parser.ConfigParser - executer exec.ExecInterface - nginxParser processParser - enabled *atomic.Bool - agentConfig *config.Config - instanceCache map[string]*mpi.Instance - nginxConfigCache map[string]*model.NginxConfigContext - instancesChannel chan<- InstanceUpdatesMessage - nginxConfigContextChannel chan<- NginxConfigContextMessage - processCache []*nginxprocess.Process - cacheMutex sync.Mutex + processOperator process.ProcessOperatorInterface + nginxAppProtectInstanceWatcher 
*NginxAppProtectInstanceWatcher + nginxConfigParser parser.ConfigParser + nginxParser processParser + executer exec.ExecInterface + enabled *atomic.Bool + agentConfig *config.Config + instanceCache map[string]*mpi.Instance + nginxConfigCache map[string]*model.NginxConfigContext + instancesChannel chan<- ResourceUpdatesMessage + nginxConfigContextChannel chan<- NginxConfigContextMessage + info host.InfoInterface + resource *mpi.Resource + processCache []*nginxprocess.Process + cacheMutex sync.Mutex + resourceMutex sync.Mutex } InstanceUpdates struct { - NewInstances []*mpi.Instance UpdatedInstances []*mpi.Instance - DeletedInstances []*mpi.Instance } - InstanceUpdatesMessage struct { - CorrelationID slog.Attr - InstanceUpdates InstanceUpdates + ResourceUpdatesMessage struct { + CorrelationID slog.Attr + Resource *mpi.Resource } NginxConfigContextMessage struct { @@ -76,18 +80,26 @@ func NewInstanceWatcherService(agentConfig *config.Config) *InstanceWatcherServi enabled := &atomic.Bool{} enabled.Store(true) - return &InstanceWatcherService{ - agentConfig: agentConfig, - processOperator: process.NewProcessOperator(), - nginxParser: NewNginxProcessParser(), - nginxConfigParser: parser.NewNginxConfigParser(agentConfig), - instanceCache: make(map[string]*mpi.Instance), - cacheMutex: sync.Mutex{}, - nginxConfigCache: make(map[string]*model.NginxConfigContext), - executer: &exec.Exec{}, - enabled: enabled, - processCache: []*nginxprocess.Process{}, + napWatcher := NewNginxAppProtectInstanceWatcher(agentConfig) + + instanceWatcherService := &InstanceWatcherService{ + agentConfig: agentConfig, + nginxAppProtectInstanceWatcher: napWatcher, + processOperator: process.NewProcessOperator(), + nginxParser: NewNginxProcessParser(), + nginxConfigParser: parser.NewNginxConfigParser(agentConfig), + instanceCache: make(map[string]*mpi.Instance), + cacheMutex: sync.Mutex{}, + resourceMutex: sync.Mutex{}, + nginxConfigCache: make(map[string]*model.NginxConfigContext), + executer: 
&exec.Exec{}, + info: host.NewInfo(), + resource: &mpi.Resource{}, + enabled: enabled, + processCache: []*nginxprocess.Process{}, } + + return instanceWatcherService } func (iw *InstanceWatcherService) SetEnabled(enabled bool) { @@ -96,9 +108,12 @@ func (iw *InstanceWatcherService) SetEnabled(enabled bool) { func (iw *InstanceWatcherService) Watch( ctx context.Context, - instancesChannel chan<- InstanceUpdatesMessage, + instancesChannel chan<- ResourceUpdatesMessage, nginxConfigContextChannel chan<- NginxConfigContextMessage, ) { + iw.updateResourceInfo(ctx) + go iw.nginxAppProtectInstanceWatcher.Watch(ctx) + monitoringFrequency := iw.agentConfig.Watchers.InstanceWatcher.MonitoringFrequency slog.DebugContext(ctx, "Starting instance watcher monitoring", "monitoring_frequency", monitoringFrequency) @@ -176,11 +191,12 @@ func (iw *InstanceWatcherService) HandleNginxConfigContextUpdate(ctx context.Con } if updatesRequired { + iw.updateInstanceInResource(ctx, instance) instanceUpdates := InstanceUpdates{} instanceUpdates.UpdatedInstances = append(instanceUpdates.UpdatedInstances, instance) - iw.instancesChannel <- InstanceUpdatesMessage{ - CorrelationID: correlationID, - InstanceUpdates: instanceUpdates, + iw.instancesChannel <- ResourceUpdatesMessage{ + CorrelationID: correlationID, + Resource: iw.resource, } } } @@ -198,7 +214,6 @@ func (iw *InstanceWatcherService) checkForUpdates( } instancesToParse = append(instancesToParse, instanceUpdates.UpdatedInstances...) - instancesToParse = append(instancesToParse, instanceUpdates.NewInstances...) 
for _, newInstance := range instancesToParse { instanceType := newInstance.GetInstanceMeta().GetInstanceType() @@ -233,13 +248,56 @@ func (iw *InstanceWatcherService) checkForUpdates( } } - if len(instanceUpdates.NewInstances) > 0 || len(instanceUpdates.DeletedInstances) > 0 || - len(instanceUpdates.UpdatedInstances) > 0 { - iw.instancesChannel <- InstanceUpdatesMessage{ - CorrelationID: correlationID, - InstanceUpdates: instanceUpdates, + if iw.nginxAppProtectInstanceWatcher.NginxAppProtectInstance() != nil { + slog.DebugContext(ctx, "Adding nginx app protect instance to instance list") + instanceUpdates.UpdatedInstances = append(instanceUpdates.UpdatedInstances, + iw.nginxAppProtectInstanceWatcher.NginxAppProtectInstance()) + } + + if len(instanceUpdates.UpdatedInstances) > 0 { + iw.updateResourceInstanceList(ctx, instanceUpdates.UpdatedInstances) + + iw.instancesChannel <- ResourceUpdatesMessage{ + CorrelationID: correlationID, + Resource: iw.resource, + } + } +} + +func (iw *InstanceWatcherService) updateResourceInstanceList(ctx context.Context, instances []*mpi.Instance) { + iw.resourceMutex.Lock() + defer iw.resourceMutex.Unlock() + + resourceCopy, ok := proto2.Clone(iw.resource).(*mpi.Resource) + if ok { + resourceCopy.Instances = instances + } else { + slog.WarnContext(ctx, "Unable to clone resource while updating instances", "resource", + iw.resource, "instances", instances) + } + + iw.resource = resourceCopy +} + +func (iw *InstanceWatcherService) updateInstanceInResource(ctx context.Context, updatedInstance *mpi.Instance) { + iw.resourceMutex.Lock() + defer iw.resourceMutex.Unlock() + + resourceCopy, ok := proto2.Clone(iw.resource).(*mpi.Resource) + if ok { + for _, instance := range resourceCopy.GetInstances() { + if instance.GetInstanceMeta().GetInstanceId() == updatedInstance.GetInstanceMeta().GetInstanceId() { + instance.InstanceMeta = updatedInstance.GetInstanceMeta() + instance.InstanceRuntime = updatedInstance.GetInstanceRuntime() + 
instance.InstanceConfig = updatedInstance.GetInstanceConfig() + } } + } else { + slog.WarnContext(ctx, "Unable to clone resource while updating instances", "resource", + iw.resource, "instances", updatedInstance) } + + iw.resource = resourceCopy } func (iw *InstanceWatcherService) sendNginxConfigContextUpdate( @@ -295,19 +353,16 @@ func (iw *InstanceWatcherService) instanceUpdates(ctx context.Context) ( instancesFound[instance.GetInstanceMeta().GetInstanceId()] = instance } - newInstances, updatedInstances, deletedInstances := compareInstances(iw.instanceCache, instancesFound) - - instanceUpdates.NewInstances = newInstances - instanceUpdates.UpdatedInstances = updatedInstances - instanceUpdates.DeletedInstances = deletedInstances + if areInstanceDifferent(iw.instanceCache, instancesFound) { + var updatedInstances []*mpi.Instance + for _, instance := range instancesFound { + updatedInstances = append(updatedInstances, instance) + } - for _, instance := range slices.Concat[[]*mpi.Instance](newInstances, updatedInstances) { - iw.instanceCache[instance.GetInstanceMeta().GetInstanceId()] = instance + instanceUpdates.UpdatedInstances = updatedInstances } - for _, instance := range deletedInstances { - delete(iw.instanceCache, instance.GetInstanceMeta().GetInstanceId()) - } + iw.instanceCache = instancesFound return instanceUpdates, nil } @@ -360,47 +415,42 @@ func (iw *InstanceWatcherService) agentInstance(ctx context.Context) *mpi.Instan return instance } -func compareInstances(oldInstancesMap, instancesMap map[string]*mpi.Instance) ( - newInstances, updatedInstances, deletedInstances []*mpi.Instance, -) { +func areInstanceDifferent(oldInstancesMap, instancesMap map[string]*mpi.Instance) bool { updatedInstancesMap := make(map[string]*mpi.Instance) updatedOldInstancesMap := make(map[string]*mpi.Instance) for instanceID, instance := range instancesMap { _, ok := oldInstancesMap[instanceID] if !ok { - newInstances = append(newInstances, instance) - } else { - 
updatedInstancesMap[instanceID] = instance + return true } + updatedInstancesMap[instanceID] = instance } for instanceID, oldInstance := range oldInstancesMap { _, ok := instancesMap[instanceID] if !ok { - deletedInstances = append(deletedInstances, oldInstance) - } else { - updatedOldInstancesMap[instanceID] = oldInstance + return true } + updatedOldInstancesMap[instanceID] = oldInstance } - updatedInstances = checkForProcessChanges(updatedInstancesMap, updatedOldInstancesMap) - - return newInstances, updatedInstances, deletedInstances + return checkForProcessChanges(updatedInstancesMap, updatedOldInstancesMap) } func checkForProcessChanges( updatedInstancesMap map[string]*mpi.Instance, updatedOldInstancesMap map[string]*mpi.Instance, -) (updatedInstances []*mpi.Instance) { +) (updated bool) { + updated = false for instanceID, instance := range updatedInstancesMap { oldInstance := updatedOldInstancesMap[instanceID] if !areInstancesEqual(oldInstance.GetInstanceRuntime(), instance.GetInstanceRuntime()) { - updatedInstances = append(updatedInstances, instance) + return true } } - return updatedInstances + return updated } func areInstancesEqual(oldRuntime, currentRuntime *mpi.InstanceRuntime) (equal bool) { @@ -431,3 +481,31 @@ func areInstancesEqual(oldRuntime, currentRuntime *mpi.InstanceRuntime) (equal b return true } + +func (iw *InstanceWatcherService) updateResourceInfo(ctx context.Context) { + iw.resourceMutex.Lock() + defer iw.resourceMutex.Unlock() + + isContainer, err := iw.info.IsContainer() + if err != nil { + slog.WarnContext(ctx, "Failed to check if resource is container", "error", err) + } + + if isContainer { + iw.resource.Info, err = iw.info.ContainerInfo(ctx) + if err != nil { + slog.ErrorContext(ctx, "Failed to get container info", "error", err) + return + } + iw.resource.ResourceId = iw.resource.GetContainerInfo().GetContainerId() + iw.resource.Instances = []*mpi.Instance{} + } else { + iw.resource.Info, err = iw.info.HostInfo(ctx) + if err != nil 
{ + slog.ErrorContext(ctx, "Failed to get host info", "error", err) + return + } + iw.resource.ResourceId = iw.resource.GetHostInfo().GetHostId() + iw.resource.Instances = []*mpi.Instance{} + } +} diff --git a/internal/watcher/instance/instance_watcher_service_test.go b/internal/watcher/instance/instance_watcher_service_test.go index f51daa6a78..8c303cc15e 100644 --- a/internal/watcher/instance/instance_watcher_service_test.go +++ b/internal/watcher/instance/instance_watcher_service_test.go @@ -7,6 +7,7 @@ package instance import ( "context" + "sort" "testing" "github.com/nginx/agent/v3/internal/datasource/config/configfakes" @@ -40,7 +41,7 @@ func TestInstanceWatcherService_checkForUpdates(t *testing.T) { fakeNginxConfigParser := &configfakes.FakeConfigParser{} fakeNginxConfigParser.ParseReturns(nginxConfigContext, nil) - instanceUpdatesChannel := make(chan InstanceUpdatesMessage, 1) + instanceUpdatesChannel := make(chan ResourceUpdatesMessage, 1) nginxConfigContextChannel := make(chan NginxConfigContextMessage, 1) instanceWatcherService := NewInstanceWatcherService(types.AgentConfig()) @@ -53,8 +54,8 @@ func TestInstanceWatcherService_checkForUpdates(t *testing.T) { instanceWatcherService.checkForUpdates(ctx) instanceUpdatesMessage := <-instanceUpdatesChannel - assert.Len(t, instanceUpdatesMessage.InstanceUpdates.NewInstances, 2) - assert.Empty(t, instanceUpdatesMessage.InstanceUpdates.DeletedInstances) + assert.Len(t, instanceUpdatesMessage.Resource.GetInstances(), 2) + // assert.Empty(t, instanceUpdatesMessage.InstanceUpdates.DeletedInstances) nginxConfigContextMessage := <-nginxConfigContextChannel assert.Equal(t, nginxConfigContext, nginxConfigContextMessage.NginxConfigContext) @@ -93,7 +94,8 @@ func TestInstanceWatcherService_instanceUpdates(t *testing.T) { nginxInstance.GetInstanceMeta().GetInstanceId(): nginxInstance, }, expectedInstanceUpdates: InstanceUpdates{ - NewInstances: []*mpi.Instance{ + UpdatedInstances: []*mpi.Instance{ + agentInstance, 
nginxInstance, }, }, @@ -110,6 +112,7 @@ func TestInstanceWatcherService_instanceUpdates(t *testing.T) { }, expectedInstanceUpdates: InstanceUpdates{ UpdatedInstances: []*mpi.Instance{ + agentInstance, nginxInstance, }, }, @@ -123,8 +126,8 @@ func TestInstanceWatcherService_instanceUpdates(t *testing.T) { }, parsedInstances: make(map[string]*mpi.Instance), expectedInstanceUpdates: InstanceUpdates{ - DeletedInstances: []*mpi.Instance{ - protos.NginxOssInstance([]string{}), + UpdatedInstances: []*mpi.Instance{ + agentInstance, }, }, }, @@ -151,7 +154,9 @@ func TestInstanceWatcherService_instanceUpdates(t *testing.T) { instanceUpdates, err := instanceWatcherService.instanceUpdates(ctx) require.NoError(tt, err) - assert.Equal(tt, test.expectedInstanceUpdates, instanceUpdates) + assert.Len(tt, instanceUpdates.UpdatedInstances, len(test.expectedInstanceUpdates.UpdatedInstances)) + + compareInstances(t, test.expectedInstanceUpdates.UpdatedInstances, instanceUpdates.UpdatedInstances) }) } } @@ -277,12 +282,18 @@ func TestInstanceWatcherService_ReparseConfig(t *testing.T) { for _, test := range tests { t.Run(test.name, func(tt *testing.T) { - instanceUpdatesChannel := make(chan InstanceUpdatesMessage, 1) + instanceUpdatesChannel := make(chan ResourceUpdatesMessage, 1) nginxConfigContextChannel := make(chan NginxConfigContextMessage, 1) instanceWatcherService := NewInstanceWatcherService(types.AgentConfig()) instanceWatcherService.instancesChannel = instanceUpdatesChannel instanceWatcherService.nginxConfigContextChannel = nginxConfigContextChannel + instanceWatcherService.resource = &mpi.Resource{ + ResourceId: protos.HostResource().GetResourceId(), + Instances: []*mpi.Instance{ + instance, + }, + } instanceWatcherService.nginxConfigCache = map[string]*model.NginxConfigContext{ instance.GetInstanceMeta().GetInstanceId(): nginxConfigContext, @@ -303,9 +314,42 @@ func TestInstanceWatcherService_ReparseConfig(t *testing.T) { 
nginxConfigCache[updatedInstance.GetInstanceMeta().GetInstanceId()]) instanceUpdatesMessage := <-instanceUpdatesChannel - assert.Len(t, instanceUpdatesMessage.InstanceUpdates.UpdatedInstances, 1) - assert.Equal(tt, updatedInstance, instanceUpdatesMessage.InstanceUpdates.UpdatedInstances[0]) - assert.Empty(t, instanceUpdatesMessage.InstanceUpdates.DeletedInstances) + assert.Len(t, instanceUpdatesMessage.Resource.GetInstances(), 1) + assert.Equal(tt, updatedInstance.GetInstanceRuntime().GetNginxRuntimeInfo().GetAccessLogs(), + instanceUpdatesMessage.Resource.GetInstances()[0].GetInstanceRuntime().GetNginxRuntimeInfo(). + GetAccessLogs()) }) } } + +func compareInstances(t *testing.T, expected, actual []*mpi.Instance) { + t.Helper() + + sort.Slice(actual, func(i, j int) bool { + return actual[i].GetInstanceMeta().GetInstanceId() < actual[j].GetInstanceMeta().GetInstanceId() + }) + + sort.Slice(expected, func(i, j int) bool { + return expected[i].GetInstanceMeta().GetInstanceId() < expected[j].GetInstanceMeta().GetInstanceId() + }) + + for id, instanceUpdate := range expected { + assert.Equal(t, instanceUpdate.GetInstanceMeta().GetInstanceId(), + actual[id].GetInstanceMeta().GetInstanceId()) + assert.Equal(t, instanceUpdate.GetInstanceMeta().GetInstanceType(), + actual[id].GetInstanceMeta().GetInstanceType()) + assert.Equal(t, instanceUpdate.GetInstanceMeta().GetVersion(), + actual[id].GetInstanceMeta().GetVersion()) + + assert.Equal(t, instanceUpdate.GetInstanceRuntime().GetInstanceChildren(), + actual[id].GetInstanceRuntime().GetInstanceChildren()) + assert.Equal(t, instanceUpdate.GetInstanceRuntime().GetConfigPath(), + actual[id].GetInstanceRuntime().GetConfigPath()) + assert.Equal(t, instanceUpdate.GetInstanceRuntime().GetBinaryPath(), + actual[id].GetInstanceRuntime().GetBinaryPath()) + assert.Equal(t, instanceUpdate.GetInstanceRuntime().GetDetails(), + actual[id].GetInstanceRuntime().GetDetails()) + assert.Equal(t, 
instanceUpdate.GetInstanceRuntime().GetProcessId(), + actual[id].GetInstanceRuntime().GetProcessId()) + } +} diff --git a/internal/watcher/instance/nginx-app-protect-instance-watcher.go b/internal/watcher/instance/nginx-app-protect-instance-watcher.go index b9ad448e67..dd62bc39fe 100644 --- a/internal/watcher/instance/nginx-app-protect-instance-watcher.go +++ b/internal/watcher/instance/nginx-app-protect-instance-watcher.go @@ -10,13 +10,14 @@ import ( "log/slog" "os" "strings" + "sync" "time" "github.com/fsnotify/fsnotify" mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" "github.com/nginx/agent/v3/internal/config" - "github.com/nginx/agent/v3/internal/logger" "github.com/nginx/agent/v3/pkg/id" + "google.golang.org/protobuf/proto" ) var ( @@ -38,7 +39,6 @@ var ( type NginxAppProtectInstanceWatcher struct { agentConfig *config.Config watcher *fsnotify.Watcher - instancesChannel chan<- InstanceUpdatesMessage nginxAppProtectInstance *mpi.Instance filesBeingWatched map[string]bool version string @@ -46,16 +46,18 @@ type NginxAppProtectInstanceWatcher struct { attackSignatureVersion string threatCampaignVersion string enforcerEngineVersion string + instanceMutex sync.Mutex } func NewNginxAppProtectInstanceWatcher(agentConfig *config.Config) *NginxAppProtectInstanceWatcher { return &NginxAppProtectInstanceWatcher{ agentConfig: agentConfig, filesBeingWatched: make(map[string]bool), + instanceMutex: sync.Mutex{}, } } -func (w *NginxAppProtectInstanceWatcher) Watch(ctx context.Context, instancesChannel chan<- InstanceUpdatesMessage) { +func (w *NginxAppProtectInstanceWatcher) Watch(ctx context.Context) { monitoringFrequency := w.agentConfig.Watchers.InstanceWatcher.MonitoringFrequency slog.DebugContext( ctx, @@ -70,7 +72,6 @@ func (w *NginxAppProtectInstanceWatcher) Watch(ctx context.Context, instancesCha } w.watcher = watcher - w.instancesChannel = instancesChannel w.watchVersionFiles(ctx) @@ -98,6 +99,13 @@ func (w *NginxAppProtectInstanceWatcher) Watch(ctx 
context.Context, instancesCha } } +func (w *NginxAppProtectInstanceWatcher) NginxAppProtectInstance() *mpi.Instance { + w.instanceMutex.Lock() + defer w.instanceMutex.Unlock() + + return w.nginxAppProtectInstance +} + func (w *NginxAppProtectInstanceWatcher) watchVersionFiles(ctx context.Context) { for _, versionFile := range versionFiles { if !w.filesBeingWatched[versionFile] { @@ -136,6 +144,9 @@ func (w *NginxAppProtectInstanceWatcher) addWatcher(ctx context.Context, version } func (w *NginxAppProtectInstanceWatcher) readVersionFile(ctx context.Context, versionFile string) { + w.instanceMutex.Lock() + defer w.instanceMutex.Unlock() + switch versionFile { case versionFilePath: w.version = w.readFile(ctx, versionFilePath) @@ -160,6 +171,9 @@ func (w *NginxAppProtectInstanceWatcher) handleEvent(ctx context.Context, event } func (w *NginxAppProtectInstanceWatcher) handleFileUpdateEvent(ctx context.Context, event fsnotify.Event) { + w.instanceMutex.Lock() + defer w.instanceMutex.Unlock() + switch event.Name { case versionFilePath: w.version = w.readFile(ctx, event.Name) @@ -175,6 +189,9 @@ func (w *NginxAppProtectInstanceWatcher) handleFileUpdateEvent(ctx context.Conte } func (w *NginxAppProtectInstanceWatcher) handleFileDeleteEvent(event fsnotify.Event) { + w.instanceMutex.Lock() + defer w.instanceMutex.Unlock() + switch event.Name { case versionFilePath: w.version = "" @@ -205,10 +222,15 @@ func (w *NginxAppProtectInstanceWatcher) checkForUpdates(ctx context.Context) { } func (w *NginxAppProtectInstanceWatcher) isNewInstance() bool { + w.instanceMutex.Lock() + defer w.instanceMutex.Unlock() + return w.nginxAppProtectInstance == nil && w.version != "" } func (w *NginxAppProtectInstanceWatcher) createInstance(ctx context.Context) { + w.instanceMutex.Lock() + defer w.instanceMutex.Unlock() w.nginxAppProtectInstance = &mpi.Instance{ InstanceMeta: &mpi.InstanceMeta{ InstanceId: id.Generate(versionFilePath), @@ -233,48 +255,41 @@ func (w *NginxAppProtectInstanceWatcher) 
createInstance(ctx context.Context) { } slog.InfoContext(ctx, "Discovered a new NGINX App Protect instance") - - w.instancesChannel <- InstanceUpdatesMessage{ - CorrelationID: logger.CorrelationIDAttr(ctx), - InstanceUpdates: InstanceUpdates{ - NewInstances: []*mpi.Instance{ - w.nginxAppProtectInstance, - }, - }, - } } func (w *NginxAppProtectInstanceWatcher) deleteInstance(ctx context.Context) { - slog.InfoContext(ctx, "NGINX App Protect instance not longer exists") + w.instanceMutex.Lock() + defer w.instanceMutex.Unlock() - w.instancesChannel <- InstanceUpdatesMessage{ - CorrelationID: logger.CorrelationIDAttr(ctx), - InstanceUpdates: InstanceUpdates{ - DeletedInstances: []*mpi.Instance{ - w.nginxAppProtectInstance, - }, - }, - } + slog.InfoContext(ctx, "NGINX App Protect instance no longer exists") w.nginxAppProtectInstance = nil } func (w *NginxAppProtectInstanceWatcher) updateInstance(ctx context.Context) { - w.nginxAppProtectInstance.GetInstanceMeta().Version = w.version - runtimeInfo := w.nginxAppProtectInstance.GetInstanceRuntime().GetNginxAppProtectRuntimeInfo() - runtimeInfo.Release = w.release - runtimeInfo.AttackSignatureVersion = w.attackSignatureVersion - runtimeInfo.ThreatCampaignVersion = w.threatCampaignVersion - runtimeInfo.EnforcerEngineVersion = w.enforcerEngineVersion - - slog.DebugContext(ctx, "NGINX App Protect instance updated") - - w.instancesChannel <- InstanceUpdatesMessage{ - CorrelationID: logger.CorrelationIDAttr(ctx), - InstanceUpdates: InstanceUpdates{ - UpdatedInstances: []*mpi.Instance{ - w.nginxAppProtectInstance, - }, - }, + w.instanceMutex.Lock() + defer w.instanceMutex.Unlock() + + instanceCopy, ok := proto.Clone(w.nginxAppProtectInstance).(*mpi.Instance) + + if ok { + instanceCopy.GetInstanceMeta().Version = w.version + runtimeInfo := instanceCopy.GetInstanceRuntime().GetNginxAppProtectRuntimeInfo() + if runtimeInfo == nil { + slog.ErrorContext(ctx, "Error updating NGINX App Protect instance runtimeInfo, instance no longer 
exists") + return + } + + runtimeInfo.Release = w.release + runtimeInfo.AttackSignatureVersion = w.attackSignatureVersion + runtimeInfo.ThreatCampaignVersion = w.threatCampaignVersion + runtimeInfo.EnforcerEngineVersion = w.enforcerEngineVersion + + w.nginxAppProtectInstance = instanceCopy + + slog.InfoContext(ctx, "NGINX App Protect instance updated") + } else { + slog.WarnContext(ctx, "Unable to clone instance while updating instance", "instance", + w.nginxAppProtectInstance) + } } diff --git a/internal/watcher/instance/nginx-app-protect-instance-watcher_test.go b/internal/watcher/instance/nginx-app-protect-instance-watcher_test.go index 2dae2d254e..18657d11e0 100644 --- a/internal/watcher/instance/nginx-app-protect-instance-watcher_test.go +++ b/internal/watcher/instance/nginx-app-protect-instance-watcher_test.go @@ -11,11 +11,10 @@ import ( "testing" "time" + "github.com/nginx/agent/v3/internal/config" "github.com/stretchr/testify/assert" "google.golang.org/protobuf/proto" - "github.com/nginx/agent/v3/internal/config" - "github.com/nginx/agent/v3/pkg/id" "github.com/nginx/agent/v3/test/protos" @@ -83,8 +82,6 @@ func TestNginxAppProtectInstanceWatcher_Watch(t *testing.T) { enforcerEngineVersionFilePath, } - instancesChannel := make(chan InstanceUpdatesMessage) - nginxAppProtectInstanceWatcher := NewNginxAppProtectInstanceWatcher( &config.Config{ Watchers: &config.Watchers{ @@ -95,22 +92,15 @@ }, ) - go nginxAppProtectInstanceWatcher.Watch(ctx, instancesChannel) + go nginxAppProtectInstanceWatcher.Watch(ctx) t.Run("Test 1: New instance", func(t *testing.T) { - select { - case instanceUpdates := <-instancesChannel: - assert.Len(t, instanceUpdates.InstanceUpdates.NewInstances, 1) - assert.Empty(t, instanceUpdates.InstanceUpdates.UpdatedInstances) - assert.Empty(t, instanceUpdates.InstanceUpdates.DeletedInstances) - assert.Truef( - t, - proto.Equal(instanceUpdates.InstanceUpdates.NewInstances[0], 
expectedInstance), - "expected %s, actual %s", expectedInstance, instanceUpdates.InstanceUpdates.NewInstances[0], - ) - case <-time.After(timeout): - t.Fatalf("Timed out waiting for instance updates") - } + assert.Eventually(t, func() bool { return nginxAppProtectInstanceWatcher.NginxAppProtectInstance() != nil }, + timeout, 10*time.Millisecond) + assert.Eventually(t, func() bool { + return nginxAppProtectInstanceWatcher.NginxAppProtectInstance().GetInstanceMeta().GetInstanceId() == + expectedInstance.GetInstanceMeta().GetInstanceId() + }, timeout, 10*time.Millisecond) }) t.Run("Test 2: Update instance", func(t *testing.T) { _, err = enforcerEngineVersionFile.WriteAt([]byte("6.113.0"), 0) @@ -118,37 +108,17 @@ func TestNginxAppProtectInstanceWatcher_Watch(t *testing.T) { expectedInstance.GetInstanceRuntime().GetNginxAppProtectRuntimeInfo().EnforcerEngineVersion = "6.113.0" - select { - case instanceUpdates := <-instancesChannel: - assert.Len(t, instanceUpdates.InstanceUpdates.UpdatedInstances, 1) - assert.Empty(t, instanceUpdates.InstanceUpdates.NewInstances) - assert.Empty(t, instanceUpdates.InstanceUpdates.DeletedInstances) - assert.Truef( - t, - proto.Equal(instanceUpdates.InstanceUpdates.UpdatedInstances[0], expectedInstance), - "expected %s, actual %s", expectedInstance, instanceUpdates.InstanceUpdates.UpdatedInstances[0], - ) - case <-time.After(timeout): - t.Fatalf("Timed out waiting for instance updates") - } + assert.Eventually(t, func() bool { + return proto.Equal(nginxAppProtectInstanceWatcher.NginxAppProtectInstance(), expectedInstance) + }, timeout, 30*time.Millisecond) }) t.Run("Test 3: Delete instance", func(t *testing.T) { helpers.RemoveFileWithErrorCheck(t, versionFile.Name()) closeErr := versionFile.Close() require.NoError(t, closeErr) - select { - case instanceUpdates := <-instancesChannel: - assert.Len(t, instanceUpdates.InstanceUpdates.DeletedInstances, 1) - assert.Empty(t, instanceUpdates.InstanceUpdates.NewInstances) - assert.Empty(t, 
instanceUpdates.InstanceUpdates.UpdatedInstances) - assert.Truef( - t, - proto.Equal(instanceUpdates.InstanceUpdates.DeletedInstances[0], expectedInstance), - "expected %s, actual %s", expectedInstance, instanceUpdates.InstanceUpdates.DeletedInstances[0], - ) - case <-time.After(timeout): - t.Fatalf("Timed out waiting for instance updates") - } + assert.Eventually(t, func() bool { + return nginxAppProtectInstanceWatcher.NginxAppProtectInstance() == nil + }, timeout, 10*time.Millisecond) }) } diff --git a/internal/watcher/watcher_plugin.go b/internal/watcher/watcher_plugin.go index 99a4b673f9..ca609b107c 100644 --- a/internal/watcher/watcher_plugin.go +++ b/internal/watcher/watcher_plugin.go @@ -35,12 +35,11 @@ type ( messagePipe bus.MessagePipeInterface agentConfig *config.Config instanceWatcherService instanceWatcherServiceInterface - nginxAppProtectInstanceWatcher *instance.NginxAppProtectInstanceWatcher healthWatcherService *health.HealthWatcherService fileWatcherService *file.FileWatcherService commandCredentialWatcherService credentialWatcherServiceInterface auxiliaryCredentialWatcherService credentialWatcherServiceInterface - instanceUpdatesChannel chan instance.InstanceUpdatesMessage + resourceUpdatesChannel chan instance.ResourceUpdatesMessage nginxConfigContextChannel chan instance.NginxConfigContextMessage instanceHealthChannel chan health.InstanceHealthMessage fileUpdatesChannel chan file.FileUpdateMessage @@ -55,7 +54,7 @@ type ( instanceWatcherServiceInterface interface { Watch( ctx context.Context, - instancesChannel chan<- instance.InstanceUpdatesMessage, + instancesChannel chan<- instance.ResourceUpdatesMessage, nginxConfigContextChannel chan<- instance.NginxConfigContextMessage, ) HandleNginxConfigContextUpdate(ctx context.Context, instanceID string, configContext *model.NginxConfigContext) @@ -77,12 +76,11 @@ func NewWatcher(agentConfig *config.Config) *Watcher { return &Watcher{ agentConfig: agentConfig, instanceWatcherService: 
instance.NewInstanceWatcherService(agentConfig), - nginxAppProtectInstanceWatcher: instance.NewNginxAppProtectInstanceWatcher(agentConfig), healthWatcherService: health.NewHealthWatcherService(agentConfig), fileWatcherService: file.NewFileWatcherService(agentConfig), commandCredentialWatcherService: credentials.NewCredentialWatcherService(agentConfig, model.Command), auxiliaryCredentialWatcherService: credentials.NewCredentialWatcherService(agentConfig, model.Auxiliary), - instanceUpdatesChannel: make(chan instance.InstanceUpdatesMessage), + resourceUpdatesChannel: make(chan instance.ResourceUpdatesMessage), nginxConfigContextChannel: make(chan instance.NginxConfigContextMessage), instanceHealthChannel: make(chan health.InstanceHealthMessage), fileUpdatesChannel: make(chan file.FileUpdateMessage), @@ -102,8 +100,7 @@ func (w *Watcher) Init(ctx context.Context, messagePipe bus.MessagePipeInterface watcherContext, cancel := context.WithCancel(ctx) w.cancel = cancel - go w.nginxAppProtectInstanceWatcher.Watch(watcherContext, w.instanceUpdatesChannel) - go w.instanceWatcherService.Watch(watcherContext, w.instanceUpdatesChannel, w.nginxConfigContextChannel) + go w.instanceWatcherService.Watch(watcherContext, w.resourceUpdatesChannel, w.nginxConfigContextChannel) go w.healthWatcherService.Watch(watcherContext, w.instanceHealthChannel) go w.commandCredentialWatcherService.Watch(watcherContext, w.commandCredentialUpdatesChannel) @@ -249,7 +246,7 @@ func (w *Watcher) monitorWatchers(ctx context.Context) { w.handleCredentialUpdate(ctx, message) case message := <-w.auxiliaryCredentialUpdatesChannel: w.handleCredentialUpdate(ctx, message) - case message := <-w.instanceUpdatesChannel: + case message := <-w.resourceUpdatesChannel: newCtx := context.WithValue(ctx, logger.CorrelationIDContextKey, message.CorrelationID) w.handleInstanceUpdates(newCtx, message) case message := <-w.nginxConfigContextChannel: @@ -303,31 +300,12 @@ func (w *Watcher) handleCredentialUpdate(ctx 
context.Context, message credential }) } -func (w *Watcher) handleInstanceUpdates(newCtx context.Context, message instance.InstanceUpdatesMessage) { - if len(message.InstanceUpdates.NewInstances) > 0 { - slog.DebugContext(newCtx, "New instances found", "instances", message.InstanceUpdates.NewInstances) - w.healthWatcherService.AddHealthWatcher(newCtx, message.InstanceUpdates.NewInstances) - w.messagePipe.Process( - newCtx, - &bus.Message{Topic: bus.AddInstancesTopic, Data: message.InstanceUpdates.NewInstances}, - ) - } - if len(message.InstanceUpdates.UpdatedInstances) > 0 { - slog.DebugContext(newCtx, "Instances updated", "instances", message.InstanceUpdates.UpdatedInstances) - w.healthWatcherService.UpdateHealthWatcher(message.InstanceUpdates.UpdatedInstances) - w.messagePipe.Process( - newCtx, - &bus.Message{Topic: bus.UpdatedInstancesTopic, Data: message.InstanceUpdates.UpdatedInstances}, - ) - } - if len(message.InstanceUpdates.DeletedInstances) > 0 { - slog.DebugContext(newCtx, "Instances deleted", "instances", message.InstanceUpdates.DeletedInstances) - w.healthWatcherService.DeleteHealthWatcher(message.InstanceUpdates. 
- DeletedInstances) - w.messagePipe.Process( - newCtx, - &bus.Message{Topic: bus.DeletedInstancesTopic, Data: message.InstanceUpdates.DeletedInstances}, - ) +func (w *Watcher) handleInstanceUpdates(newCtx context.Context, message instance.ResourceUpdatesMessage) { + if message.Resource != nil { + slog.DebugContext(newCtx, "Resource updated", "resource", message.Resource) + w.healthWatcherService.UpdateHealthWatcher(newCtx, message.Resource.GetInstances()) + + w.messagePipe.Process(newCtx, &bus.Message{Topic: bus.ResourceUpdateTopic, Data: message.Resource}) } } diff --git a/internal/watcher/watcher_plugin_test.go b/internal/watcher/watcher_plugin_test.go index 424009c60e..1b417ef5d3 100644 --- a/internal/watcher/watcher_plugin_test.go +++ b/internal/watcher/watcher_plugin_test.go @@ -49,18 +49,14 @@ func TestWatcher_Init(t *testing.T) { assert.Empty(t, messages) - instanceUpdatesMessage := instance.InstanceUpdatesMessage{ + resourceUpdatesMessage := instance.ResourceUpdatesMessage{ CorrelationID: logger.GenerateCorrelationID(), - InstanceUpdates: instance.InstanceUpdates{ - NewInstances: []*mpi.Instance{ + Resource: &mpi.Resource{ + ResourceId: protos.HostResource().GetResourceId(), + Instances: []*mpi.Instance{ protos.NginxOssInstance([]string{}), }, - UpdatedInstances: []*mpi.Instance{ - protos.NginxOssInstance([]string{}), - }, - DeletedInstances: []*mpi.Instance{ - protos.NginxPlusInstance([]string{}), - }, + Info: protos.HostResource().GetInfo(), }, } @@ -80,42 +76,32 @@ func TestWatcher_Init(t *testing.T) { GrpcConnection: &grpc.GrpcConnection{}, } - watcherPlugin.instanceUpdatesChannel <- instanceUpdatesMessage + watcherPlugin.resourceUpdatesChannel <- resourceUpdatesMessage watcherPlugin.nginxConfigContextChannel <- nginxConfigContextMessage watcherPlugin.instanceHealthChannel <- instanceHealthMessage watcherPlugin.commandCredentialUpdatesChannel <- credentialUpdateMessage - assert.Eventually(t, func() bool { return len(messagePipe.Messages()) == 6 }, 
2*time.Second, 10*time.Millisecond) + assert.Eventually(t, func() bool { return len(messagePipe.Messages()) == 4 }, 2*time.Second, 10*time.Millisecond) messages = messagePipe.Messages() assert.Equal( t, - &bus.Message{Topic: bus.AddInstancesTopic, Data: instanceUpdatesMessage.InstanceUpdates.NewInstances}, + &bus.Message{Topic: bus.ResourceUpdateTopic, Data: resourceUpdatesMessage.Resource}, messages[0], ) - assert.Equal( - t, - &bus.Message{Topic: bus.UpdatedInstancesTopic, Data: instanceUpdatesMessage.InstanceUpdates.UpdatedInstances}, - messages[1], - ) - assert.Equal( - t, - &bus.Message{Topic: bus.DeletedInstancesTopic, Data: instanceUpdatesMessage.InstanceUpdates.DeletedInstances}, - messages[2], - ) assert.Equal( t, &bus.Message{Topic: bus.NginxConfigUpdateTopic, Data: nginxConfigContextMessage.NginxConfigContext}, - messages[3], + messages[1], ) assert.Equal( t, &bus.Message{Topic: bus.InstanceHealthTopic, Data: instanceHealthMessage.InstanceHealth}, - messages[4], + messages[2], ) assert.Equal(t, &bus.Message{Topic: bus.ConnectionResetTopic, Data: &grpc.GrpcConnection{}}, - messages[5]) + messages[3]) } func TestWatcher_Info(t *testing.T) { diff --git a/internal/watcher/watcherfakes/fake_instance_watcher_service_interface.go b/internal/watcher/watcherfakes/fake_instance_watcher_service_interface.go index 27ddf3807a..8fe0436b05 100644 --- a/internal/watcher/watcherfakes/fake_instance_watcher_service_interface.go +++ b/internal/watcher/watcherfakes/fake_instance_watcher_service_interface.go @@ -27,11 +27,11 @@ type FakeInstanceWatcherServiceInterface struct { setEnabledArgsForCall []struct { arg1 bool } - WatchStub func(context.Context, chan<- instance.InstanceUpdatesMessage, chan<- instance.NginxConfigContextMessage) + WatchStub func(context.Context, chan<- instance.ResourceUpdatesMessage, chan<- instance.NginxConfigContextMessage) watchMutex sync.RWMutex watchArgsForCall []struct { arg1 context.Context - arg2 chan<- instance.InstanceUpdatesMessage + arg2 
chan<- instance.ResourceUpdatesMessage arg3 chan<- instance.NginxConfigContextMessage } invocations map[string][][]interface{} @@ -136,11 +136,11 @@ func (fake *FakeInstanceWatcherServiceInterface) SetEnabledArgsForCall(i int) bo return argsForCall.arg1 } -func (fake *FakeInstanceWatcherServiceInterface) Watch(arg1 context.Context, arg2 chan<- instance.InstanceUpdatesMessage, arg3 chan<- instance.NginxConfigContextMessage) { +func (fake *FakeInstanceWatcherServiceInterface) Watch(arg1 context.Context, arg2 chan<- instance.ResourceUpdatesMessage, arg3 chan<- instance.NginxConfigContextMessage) { fake.watchMutex.Lock() fake.watchArgsForCall = append(fake.watchArgsForCall, struct { arg1 context.Context - arg2 chan<- instance.InstanceUpdatesMessage + arg2 chan<- instance.ResourceUpdatesMessage arg3 chan<- instance.NginxConfigContextMessage }{arg1, arg2, arg3}) stub := fake.WatchStub @@ -157,13 +157,13 @@ func (fake *FakeInstanceWatcherServiceInterface) WatchCallCount() int { return len(fake.watchArgsForCall) } -func (fake *FakeInstanceWatcherServiceInterface) WatchCalls(stub func(context.Context, chan<- instance.InstanceUpdatesMessage, chan<- instance.NginxConfigContextMessage)) { +func (fake *FakeInstanceWatcherServiceInterface) WatchCalls(stub func(context.Context, chan<- instance.ResourceUpdatesMessage, chan<- instance.NginxConfigContextMessage)) { fake.watchMutex.Lock() defer fake.watchMutex.Unlock() fake.WatchStub = stub } -func (fake *FakeInstanceWatcherServiceInterface) WatchArgsForCall(i int) (context.Context, chan<- instance.InstanceUpdatesMessage, chan<- instance.NginxConfigContextMessage) { +func (fake *FakeInstanceWatcherServiceInterface) WatchArgsForCall(i int) (context.Context, chan<- instance.ResourceUpdatesMessage, chan<- instance.NginxConfigContextMessage) { fake.watchMutex.RLock() defer fake.watchMutex.RUnlock() argsForCall := fake.watchArgsForCall[i] diff --git a/test/integration/utils/grpc_management_plane_utils.go 
b/test/integration/utils/grpc_management_plane_utils.go index dad3f04a95..504474d424 100644 --- a/test/integration/utils/grpc_management_plane_utils.go +++ b/test/integration/utils/grpc_management_plane_utils.go @@ -42,7 +42,7 @@ const ( instanceLen = 2 statusRetryCount = 3 retryWait = 50 * time.Millisecond - retryMaxWait = 200 * time.Millisecond + retryMaxWait = 2 * time.Second plusPath = "/nginx-plus/agent" )