diff --git a/.licenses-gomod.sha256 b/.licenses-gomod.sha256 index 6f1c47c65a..ac41a74b97 100644 --- a/.licenses-gomod.sha256 +++ b/.licenses-gomod.sha256 @@ -1 +1 @@ -100644 94a84b9463e31c692f3c733c7060027a1eb1fc35 go.mod +100644 7c7c11b99ba2ae373c50507dde38265471106a8d go.mod diff --git a/Makefile b/Makefile index bbeb65bdfe..d07ff8ca81 100644 --- a/Makefile +++ b/Makefile @@ -13,6 +13,15 @@ DOCKER_SBOM_PLUGIN_VERSION=0.6.1 # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) VERSION ?= $(shell git describe --always --tags --dirty --broken | cut -c 2-) +# LD_FLAGS +LD_FLAG_SET_VERSION = -X github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/version.Version=$(VERSION) +LD_FLAGS_SET_EXPERIMENTAL = -X github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/version.Experimental=$(EXPERIMENTAL) +ifdef EXPERIMENTAL +LD_FLAGS = $(LD_FLAGS_SET_EXPERIMENTAL) $(LD_FLAG_SET_VERSION) +else +LD_FLAGS = $(LD_FLAG_SET_VERSION) +endif + # NEXT_VERSION represents a version that is higher than anything released # VERSION default value does not play well with the run target which might end up failing # with errors such as: @@ -218,7 +227,7 @@ bin/$(TARGET_OS)/$(TARGET_ARCH): bin/$(TARGET_OS)/$(TARGET_ARCH)/manager: $(GO_SOURCES) bin/$(TARGET_OS)/$(TARGET_ARCH) @echo "Building operator with version $(VERSION); $(TARGET_OS) - $(TARGET_ARCH)" - CGO_ENABLED=0 GOOS=$(TARGET_OS) GOARCH=$(TARGET_ARCH) go build -o $@ -ldflags="-X github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/version.Version=$(VERSION)" cmd/main.go + CGO_ENABLED=0 GOOS=$(TARGET_OS) GOARCH=$(TARGET_ARCH) go build -o $@ -ldflags="$(LD_FLAGS)" cmd/main.go @touch $@ bin/manager: bin/$(TARGET_OS)/$(TARGET_ARCH)/manager @@ -245,7 +254,9 @@ manifests: CRD_OPTIONS ?= "crd:crdVersions=v1,ignoreUnexportedFields=true" manifests: fmt ## Generate manifests e.g. CRD, RBAC etc. controller-gen $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./api/..." paths="./internal/controller/..." output:crd:artifacts:config=config/crd/bases @./scripts/split_roles_yaml.sh - +ifdef EXPERIMENTAL + controller-gen crd paths="./internal/nextapi/v1" output:crd:artifacts:config=internal/next-crds +endif .PHONY: lint lint: ## Run the lint against the code @@ -274,6 +285,9 @@ vet: $(TIMESTAMPS_DIR)/vet ## Run go vet against code .PHONY: generate generate: ${GO_SOURCES} ## Generate code controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./api/..." paths="./internal/controller/..." +ifdef EXPERIMENTAL + controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./internal/nextapi/v1/..." 
+endif $(MAKE) fmt .PHONY: check-missing-files @@ -529,6 +543,9 @@ clear-e2e-leftovers: ## Clear the e2e test leftovers quickly .PHONY: install-crds install-crds: ## Install CRDs in Kubernetes kubectl apply -k config/crd +ifdef EXPERIMENTAL + kubectl apply -f internal/next-crds/*.yaml +endif .PHONY: set-namespace set-namespace: @@ -552,14 +569,28 @@ prepare-run: generate vet manifests run-kind install-crds install-credentials .PHONY: run run: prepare-run ## Run a freshly compiled manager against kind ifdef RUN_YAML - kubectl apply -f $(RUN_YAML) + kubectl apply -n $(OPERATOR_NAMESPACE) -f $(RUN_YAML) endif +ifdef BACKGROUND + @bash -c '(VERSION=$(NEXT_VERSION) \ + OPERATOR_POD_NAME=$(OPERATOR_POD_NAME) \ + OPERATOR_NAMESPACE=$(OPERATOR_NAMESPACE) \ + nohup bin/manager --object-deletion-protection=false --log-level=$(RUN_LOG_LEVEL) \ + --atlas-domain=$(ATLAS_DOMAIN) \ + --global-api-secret-name=$(ATLAS_KEY_SECRET_NAME) > ako.log 2>&1 & echo $$! > ako.pid \ + && echo "OPERATOR_PID=$$!")' +else VERSION=$(NEXT_VERSION) \ OPERATOR_POD_NAME=$(OPERATOR_POD_NAME) \ OPERATOR_NAMESPACE=$(OPERATOR_NAMESPACE) \ bin/manager --object-deletion-protection=false --log-level=$(RUN_LOG_LEVEL) \ --atlas-domain=$(ATLAS_DOMAIN) \ --global-api-secret-name=$(ATLAS_KEY_SECRET_NAME) +endif + +.PHONY: stop-ako +stop-ako: + @kill `cat ako.pid` && rm ako.pid || echo "AKO process not found or already stopped!" .PHONY: local-docker-build local-docker-build: diff --git a/cmd/main.go b/cmd/main.go index ed292de6ef..08de9a2ecd 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -39,6 +39,7 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/collection" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/featureflags" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/kube" + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/operator" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/version" ) @@ -73,6 +74,10 @@ func main() { ctrl.SetLogger(logrLogger.WithName("ctrl")) klog.SetLogger(logrLogger.WithName("klog")) setupLog := logger.Named("setup").Sugar() + if version.IsExperimental() { + setupLog.Warn("Experimental features enabled!") + utilruntime.Must(akov2next.AddToScheme(akoScheme)) + } setupLog.Info("starting with configuration", zap.Any("config", config), zap.Any("version", version.Version)) runnable, err := operator.NewBuilder(operator.ManagerProviderFunc(ctrl.NewManager), akoScheme, time.Duration(minimumIndependentSyncPeriod)*time.Minute). @@ -87,6 +92,7 @@ func main() { WithDeletionProtection(config.ObjectDeletionProtection). WithIndependentSyncPeriod(time.Duration(config.IndependentSyncPeriod) * time.Minute). WithDryRun(config.DryRun). + WithExperimentalReconcilers(version.IsExperimental()). 
Build(ctx) if err != nil { setupLog.Error(err, "unable to start operator") diff --git a/go.mod b/go.mod index 94a84b9463..7c7c11b99b 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( go.mongodb.org/atlas-sdk/v20231115004 v20231115004.1.0 go.mongodb.org/atlas-sdk/v20231115008 v20231115008.5.0 go.mongodb.org/atlas-sdk/v20241113001 v20241113001.0.0 + go.mongodb.org/atlas-sdk/v20250312002 v20250312002.0.0 go.mongodb.org/mongo-driver v1.17.3 go.uber.org/zap v1.27.0 golang.org/x/sync v0.14.0 @@ -143,7 +144,7 @@ require ( golang.org/x/sys v0.33.0 // indirect golang.org/x/term v0.32.0 // indirect golang.org/x/text v0.25.0 // indirect - golang.org/x/time v0.11.0 // indirect + golang.org/x/time v0.11.0 gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect google.golang.org/grpc v1.72.1 // indirect diff --git a/go.sum b/go.sum index 35156735dc..5c8aaf9992 100644 --- a/go.sum +++ b/go.sum @@ -266,6 +266,8 @@ go.mongodb.org/atlas-sdk/v20231115008 v20231115008.5.0 h1:OuV1HfIpZUZa4+BKvtrvDl go.mongodb.org/atlas-sdk/v20231115008 v20231115008.5.0/go.mod h1:0707RpWIrNFZ6Msy/dwRDCzC5JVDon61JoOqcbfCujg= go.mongodb.org/atlas-sdk/v20241113001 v20241113001.0.0 h1:G3UZcWwWziGUuaILWp/Gc+jLm1tfu7OUhUOpMWVZSWc= go.mongodb.org/atlas-sdk/v20241113001 v20241113001.0.0/go.mod h1:fMiUyCacIAm+XwFkJ4j+rJtYLRsGU7hButtgGv+SBU4= +go.mongodb.org/atlas-sdk/v20250312002 v20250312002.0.0 h1:KX8PrYp3/PCSxG4NbGLcc3+EsNcfyhcvylGbe/oRlx8= +go.mongodb.org/atlas-sdk/v20250312002 v20250312002.0.0/go.mod h1:HHCmHxHPdJRr1bUXlvRIZbm7M4gRujjur1GnjE44YgA= go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ= go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= diff --git a/internal/controller/atlas/provider.go b/internal/controller/atlas/provider.go index 929a8aa0ca..7049a9d545 100644 --- a/internal/controller/atlas/provider.go +++ b/internal/controller/atlas/provider.go @@ -25,6 +25,7 @@ import ( "github.com/mongodb-forks/digest" adminv20231115008 "go.mongodb.org/atlas-sdk/v20231115008/admin" adminv20241113001 "go.mongodb.org/atlas-sdk/v20241113001/admin" + adminv20250312002 "go.mongodb.org/atlas-sdk/v20250312002/admin" "go.mongodb.org/atlas/mongodbatlas" "go.uber.org/zap" @@ -49,6 +50,7 @@ type Provider interface { type ClientSet struct { SdkClient20231115008 *adminv20231115008.APIClient SdkClient20241113001 *adminv20241113001.APIClient + SdkClient20250312002 *adminv20250312002.APIClient } type ProductionProvider struct { @@ -160,9 +162,18 @@ func (p *ProductionProvider) SdkClientSet(ctx context.Context, creds *Credential return nil, err } + clientv20241113002, err := adminv20250312002.NewClient( + adminv20250312002.UseBaseURL(p.domain), + adminv20250312002.UseHTTPClient(httpClient), + adminv20250312002.UseUserAgent(operatorUserAgent())) + if err != nil { + return nil, err + } + return &ClientSet{ SdkClient20231115008: clientv20231115008, SdkClient20241113001: clientv20241113001, + SdkClient20250312002: clientv20241113002, }, nil } diff --git a/internal/controller/atlascustomrole/atlascustomrole_controller.go b/internal/controller/atlascustomrole/atlascustomrole_controller.go index 8463eee0b6..135f1e2698 100644 --- a/internal/controller/atlascustomrole/atlascustomrole_controller.go +++ b/internal/controller/atlascustomrole/atlascustomrole_controller.go @@ -50,7 +50,6 @@ type AtlasCustomRoleReconciler struct { 
reconciler.AtlasReconciler Scheme *runtime.Scheme EventRecorder record.EventRecorder - AtlasProvider atlas.Provider GlobalPredicates []predicate.Predicate ObjectDeletionProtection bool SubObjectDeletionProtection bool @@ -71,10 +70,10 @@ func NewAtlasCustomRoleReconciler( Client: c.GetClient(), Log: logger.Named("controllers").Named("AtlasCustomRoles").Sugar(), GlobalSecretRef: globalSecretRef, + AtlasProvider: atlasProvider, }, Scheme: c.GetScheme(), EventRecorder: c.GetEventRecorderFor("AtlasCustomRoles"), - AtlasProvider: atlasProvider, GlobalPredicates: predicates, ObjectDeletionProtection: deletionProtection, independentSyncPeriod: independentSyncPeriod, diff --git a/internal/controller/atlascustomrole/atlascustomrole_controller_test.go b/internal/controller/atlascustomrole/atlascustomrole_controller_test.go index 51da1469fe..782707ba89 100644 --- a/internal/controller/atlascustomrole/atlascustomrole_controller_test.go +++ b/internal/controller/atlascustomrole/atlascustomrole_controller_test.go @@ -368,48 +368,48 @@ func TestAtlasCustomRoleReconciler_Reconcile(t *testing.T) { AtlasReconciler: reconciler.AtlasReconciler{ Client: k8sClient, Log: zap.S(), - }, - Scheme: testScheme, - EventRecorder: record.NewFakeRecorder(10), - AtlasProvider: &atlasmocks.TestProvider{ - SdkClientSetFunc: func(ctx context.Context, creds *atlas.Credentials, log *zap.SugaredLogger) (*atlas.ClientSet, error) { - if tt.sdkShouldError { - return nil, fmt.Errorf("failed to create sdk") - } - cdrAPI := mockadmin.NewCustomDatabaseRolesApi(t) - cdrAPI.EXPECT().GetCustomDatabaseRole(mock.Anything, "testProjectID", "TestRoleName"). - Return(admin.GetCustomDatabaseRoleApiRequest{ApiService: cdrAPI}) - cdrAPI.EXPECT().GetCustomDatabaseRoleExecute(admin.GetCustomDatabaseRoleApiRequest{ApiService: cdrAPI}). - Return(&admin.UserCustomDBRole{}, &http.Response{StatusCode: http.StatusNotFound}, nil) - cdrAPI.EXPECT().CreateCustomDatabaseRole(mock.Anything, "testProjectID", - mock.AnythingOfType("*admin.UserCustomDBRole")). - Return(admin.CreateCustomDatabaseRoleApiRequest{ApiService: cdrAPI}) - cdrAPI.EXPECT().CreateCustomDatabaseRoleExecute(admin.CreateCustomDatabaseRoleApiRequest{ApiService: cdrAPI}). - Return(nil, nil, nil) + AtlasProvider: &atlasmocks.TestProvider{ + SdkClientSetFunc: func(ctx context.Context, creds *atlas.Credentials, log *zap.SugaredLogger) (*atlas.ClientSet, error) { + if tt.sdkShouldError { + return nil, fmt.Errorf("failed to create sdk") + } + cdrAPI := mockadmin.NewCustomDatabaseRolesApi(t) + cdrAPI.EXPECT().GetCustomDatabaseRole(mock.Anything, "testProjectID", "TestRoleName"). + Return(admin.GetCustomDatabaseRoleApiRequest{ApiService: cdrAPI}) + cdrAPI.EXPECT().GetCustomDatabaseRoleExecute(admin.GetCustomDatabaseRoleApiRequest{ApiService: cdrAPI}). + Return(&admin.UserCustomDBRole{}, &http.Response{StatusCode: http.StatusNotFound}, nil) + cdrAPI.EXPECT().CreateCustomDatabaseRole(mock.Anything, "testProjectID", + mock.AnythingOfType("*admin.UserCustomDBRole")). + Return(admin.CreateCustomDatabaseRoleApiRequest{ApiService: cdrAPI}) + cdrAPI.EXPECT().CreateCustomDatabaseRoleExecute(admin.CreateCustomDatabaseRoleApiRequest{ApiService: cdrAPI}). 
+ Return(nil, nil, nil) - pAPI := mockadmin.NewProjectsApi(t) - if tt.akoCustomRole.Spec.ExternalProjectRef != nil { - grp := &admin.Group{ - Id: &tt.akoCustomRole.Spec.ExternalProjectRef.ID, - Name: tt.akoCustomRole.Spec.ExternalProjectRef.ID, + pAPI := mockadmin.NewProjectsApi(t) + if tt.akoCustomRole.Spec.ExternalProjectRef != nil { + grp := &admin.Group{ + Id: &tt.akoCustomRole.Spec.ExternalProjectRef.ID, + Name: tt.akoCustomRole.Spec.ExternalProjectRef.ID, + } + pAPI.EXPECT().GetProject(context.Background(), tt.akoCustomRole.Spec.ExternalProjectRef.ID). + Return(admin.GetProjectApiRequest{ApiService: pAPI}) + pAPI.EXPECT().GetProjectExecute(admin.GetProjectApiRequest{ApiService: pAPI}). + Return(grp, nil, nil) } - pAPI.EXPECT().GetProject(context.Background(), tt.akoCustomRole.Spec.ExternalProjectRef.ID). - Return(admin.GetProjectApiRequest{ApiService: pAPI}) - pAPI.EXPECT().GetProjectExecute(admin.GetProjectApiRequest{ApiService: pAPI}). - Return(grp, nil, nil) - } - return &atlas.ClientSet{SdkClient20231115008: &admin.APIClient{ - CustomDatabaseRolesApi: cdrAPI, - ProjectsApi: pAPI, - }}, nil - }, - IsCloudGovFunc: func() bool { - return false - }, - IsSupportedFunc: func() bool { - return tt.isSupported + return &atlas.ClientSet{SdkClient20231115008: &admin.APIClient{ + CustomDatabaseRolesApi: cdrAPI, + ProjectsApi: pAPI, + }}, nil + }, + IsCloudGovFunc: func() bool { + return false + }, + IsSupportedFunc: func() bool { + return tt.isSupported + }, }, }, + Scheme: testScheme, + EventRecorder: record.NewFakeRecorder(10), } result, err := r.Reconcile(context.Background(), ctrl.Request{ diff --git a/internal/controller/atlasdatabaseuser/atlasdatabaseuser_controller.go b/internal/controller/atlasdatabaseuser/atlasdatabaseuser_controller.go index a29c6b3634..1d04b2e990 100644 --- a/internal/controller/atlasdatabaseuser/atlasdatabaseuser_controller.go +++ b/internal/controller/atlasdatabaseuser/atlasdatabaseuser_controller.go @@ -54,7 +54,6 @@ var ErrOIDCNotEnabled = fmt.Errorf("'OIDCAuthType' field is set but OIDC authent // AtlasDatabaseUserReconciler reconciles an AtlasDatabaseUser object type AtlasDatabaseUserReconciler struct { reconciler.AtlasReconciler - AtlasProvider atlas.Provider Scheme *runtime.Scheme EventRecorder record.EventRecorder GlobalPredicates []predicate.Predicate @@ -287,8 +286,8 @@ func NewAtlasDatabaseUserReconciler( Client: c.GetClient(), Log: logger.Named("controllers").Named("AtlasDatabaseUser").Sugar(), GlobalSecretRef: globalSecretRef, + AtlasProvider: atlasProvider, }, - AtlasProvider: atlasProvider, Scheme: c.GetScheme(), EventRecorder: c.GetEventRecorderFor("AtlasDatabaseUser"), GlobalPredicates: predicates, diff --git a/internal/controller/atlasdatabaseuser/atlasdatabaseuser_controller_test.go b/internal/controller/atlasdatabaseuser/atlasdatabaseuser_controller_test.go index 7f30215eca..fb0b8f4de7 100644 --- a/internal/controller/atlasdatabaseuser/atlasdatabaseuser_controller_test.go +++ b/internal/controller/atlasdatabaseuser/atlasdatabaseuser_controller_test.go @@ -162,10 +162,10 @@ func TestReconcile(t *testing.T) { Build() r := &AtlasDatabaseUserReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: k8sClient, - Log: zaptest.NewLogger(t).Sugar(), + Client: k8sClient, + Log: zaptest.NewLogger(t).Sugar(), + AtlasProvider: DefaultTestProvider(t), }, - AtlasProvider: DefaultTestProvider(t), EventRecorder: record.NewFakeRecorder(10), } diff --git a/internal/controller/atlasdatabaseuser/databaseuser_test.go 
b/internal/controller/atlasdatabaseuser/databaseuser_test.go index f3ac450846..fad66c0a5a 100644 --- a/internal/controller/atlasdatabaseuser/databaseuser_test.go +++ b/internal/controller/atlasdatabaseuser/databaseuser_test.go @@ -310,8 +310,8 @@ func TestHandleDatabaseUser(t *testing.T) { Namespace: "default", Name: "secret", }, + AtlasProvider: tt.atlasProvider, }, - AtlasProvider: tt.atlasProvider, } ctx := &workflow.Context{ Context: context.Background(), diff --git a/internal/controller/atlasdeployment/atlasdeployment_controller.go b/internal/controller/atlasdeployment/atlasdeployment_controller.go index e1ebbdf269..c85fdc8299 100644 --- a/internal/controller/atlasdeployment/atlasdeployment_controller.go +++ b/internal/controller/atlasdeployment/atlasdeployment_controller.go @@ -60,7 +60,6 @@ type AtlasDeploymentReconciler struct { Scheme *runtime.Scheme GlobalPredicates []predicate.Predicate EventRecorder record.EventRecorder - AtlasProvider atlas.Provider ObjectDeletionProtection bool SubObjectDeletionProtection bool independentSyncPeriod time.Duration @@ -422,11 +421,11 @@ func NewAtlasDeploymentReconciler( Client: c.GetClient(), Log: suggaredLogger, GlobalSecretRef: globalSecretref, + AtlasProvider: atlasProvider, }, Scheme: c.GetScheme(), EventRecorder: c.GetEventRecorderFor("AtlasDeployment"), GlobalPredicates: predicates, - AtlasProvider: atlasProvider, ObjectDeletionProtection: deletionProtection, independentSyncPeriod: independentSyncPeriod, } diff --git a/internal/controller/atlasdeployment/atlasdeployment_controller_test.go b/internal/controller/atlasdeployment/atlasdeployment_controller_test.go index a015a4bbc5..9a4ea408df 100644 --- a/internal/controller/atlasdeployment/atlasdeployment_controller_test.go +++ b/internal/controller/atlasdeployment/atlasdeployment_controller_test.go @@ -106,10 +106,10 @@ func TestCleanupBindings(t *testing.T) { } r := &AtlasDeploymentReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Log: testLog(t), - Client: testK8sClient(), + Log: testLog(t), + Client: testK8sClient(), + AtlasProvider: atlasProvider, }, - AtlasProvider: atlasProvider, } policy := testBackupPolicy() // deployment -> schedule -> policy require.NoError(t, r.Client.Create(context.Background(), policy)) @@ -140,10 +140,10 @@ func TestCleanupBindings(t *testing.T) { } r := &AtlasDeploymentReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Log: testLog(t), - Client: testK8sClient(), + Log: testLog(t), + Client: testK8sClient(), + AtlasProvider: atlasProvider, }, - AtlasProvider: atlasProvider, } policy := testBackupPolicy() // deployment + deployment2 -> schedule -> policy require.NoError(t, r.Client.Create(context.Background(), policy)) @@ -179,10 +179,10 @@ func TestCleanupBindings(t *testing.T) { } r := &AtlasDeploymentReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Log: testLog(t), - Client: testK8sClient(), + Log: testLog(t), + Client: testK8sClient(), + AtlasProvider: atlasProvider, }, - AtlasProvider: atlasProvider, } policy := testBackupPolicy() // deployment -> schedule + schedule2 -> policy require.NoError(t, r.Client.Create(context.Background(), policy)) @@ -556,10 +556,10 @@ func TestRegularClusterReconciliation(t *testing.T) { reconciler := &AtlasDeploymentReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: k8sClient, - Log: logger.Sugar(), + Client: k8sClient, + Log: logger.Sugar(), + AtlasProvider: atlasProvider, }, - AtlasProvider: atlasProvider, EventRecorder: record.NewFakeRecorder(10), ObjectDeletionProtection: false, 
SubObjectDeletionProtection: false, @@ -693,10 +693,10 @@ func TestServerlessInstanceReconciliation(t *testing.T) { reconciler := &AtlasDeploymentReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: k8sClient, - Log: logger.Sugar(), + Client: k8sClient, + Log: logger.Sugar(), + AtlasProvider: atlasProvider, }, - AtlasProvider: atlasProvider, EventRecorder: record.NewFakeRecorder(10), ObjectDeletionProtection: false, SubObjectDeletionProtection: false, @@ -818,10 +818,10 @@ func TestFlexClusterReconciliation(t *testing.T) { reconciler := &AtlasDeploymentReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: k8sClient, - Log: logger.Sugar(), + Client: k8sClient, + Log: logger.Sugar(), + AtlasProvider: atlasProvider, }, - AtlasProvider: atlasProvider, EventRecorder: record.NewFakeRecorder(10), ObjectDeletionProtection: false, SubObjectDeletionProtection: false, @@ -1002,10 +1002,10 @@ func TestDeletionReconciliation(t *testing.T) { reconciler := &AtlasDeploymentReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: k8sClient, - Log: logger, + Client: k8sClient, + Log: logger, + AtlasProvider: atlasProvider, }, - AtlasProvider: atlasProvider, EventRecorder: record.NewFakeRecorder(10), ObjectDeletionProtection: false, SubObjectDeletionProtection: false, @@ -1455,10 +1455,10 @@ func TestChangeDeploymentType(t *testing.T) { r := &AtlasDeploymentReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: k8sClient, - Log: logger.Sugar(), + Client: k8sClient, + Log: logger.Sugar(), + AtlasProvider: tt.atlasProvider, }, - AtlasProvider: tt.atlasProvider, EventRecorder: record.NewFakeRecorder(10), } result, err := r.Reconcile( diff --git a/internal/controller/atlasipaccesslist/atlasipaccesslist_controller.go b/internal/controller/atlasipaccesslist/atlasipaccesslist_controller.go index 808e14ad04..ee5e3e85ca 100644 --- a/internal/controller/atlasipaccesslist/atlasipaccesslist_controller.go +++ b/internal/controller/atlasipaccesslist/atlasipaccesslist_controller.go @@ -45,10 +45,8 @@ import ( // AtlasIPAccessListReconciler reconciles a AtlasIPAccessList object type AtlasIPAccessListReconciler struct { reconciler.AtlasReconciler - Scheme *runtime.Scheme EventRecorder record.EventRecorder - AtlasProvider atlas.Provider GlobalPredicates []predicate.Predicate ObjectDeletionProtection bool independentSyncPeriod time.Duration @@ -151,10 +149,10 @@ func NewAtlasIPAccessListReconciler( Client: c.GetClient(), Log: logger.Named("controllers").Named("AtlasIPAccessList").Sugar(), GlobalSecretRef: globalSecretRef, + AtlasProvider: atlasProvider, }, Scheme: c.GetScheme(), EventRecorder: c.GetEventRecorderFor("AtlasIPAccessList"), - AtlasProvider: atlasProvider, GlobalPredicates: predicates, ObjectDeletionProtection: deletionProtection, independentSyncPeriod: independentSyncPeriod, diff --git a/internal/controller/atlasipaccesslist/atlasipaccesslist_controller_test.go b/internal/controller/atlasipaccesslist/atlasipaccesslist_controller_test.go index d83e0d5910..c41357a4f3 100644 --- a/internal/controller/atlasipaccesslist/atlasipaccesslist_controller_test.go +++ b/internal/controller/atlasipaccesslist/atlasipaccesslist_controller_test.go @@ -157,14 +157,13 @@ func TestReconcile(t *testing.T) { logger := zaptest.NewLogger(t).Sugar() ctx := &workflow.Context{ Context: context.Background(), - Log: logger, } r := &AtlasIPAccessListReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: k8sClient, - Log: logger, + Client: k8sClient, + Log: logger, + AtlasProvider: 
tt.provider, }, - AtlasProvider: tt.provider, EventRecorder: record.NewFakeRecorder(10), } result, err := r.Reconcile(ctx.Context, tt.request) diff --git a/internal/controller/atlasipaccesslist/state_test.go b/internal/controller/atlasipaccesslist/state_test.go index 4be0feeecc..ed1b314ea6 100644 --- a/internal/controller/atlasipaccesslist/state_test.go +++ b/internal/controller/atlasipaccesslist/state_test.go @@ -347,14 +347,13 @@ func TestHandleCustomResource(t *testing.T) { logger := zaptest.NewLogger(t).Sugar() ctx := &workflow.Context{ Context: context.Background(), - Log: logger, } r := &AtlasIPAccessListReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: k8sClient, - Log: logger, + Client: k8sClient, + Log: logger, + AtlasProvider: tt.provider, }, - AtlasProvider: tt.provider, EventRecorder: record.NewFakeRecorder(10), } result := r.handleCustomResource(ctx.Context, &tt.ipAccessList) diff --git a/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller.go b/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller.go index 539de2d67f..742b6c474f 100644 --- a/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller.go +++ b/internal/controller/atlasnetworkcontainer/atlasnetworkcontainer_controller.go @@ -42,7 +42,6 @@ import ( // AtlasNetworkContainerReconciler reconciles a AtlasNetworkContainer object type AtlasNetworkContainerReconciler struct { reconciler.AtlasReconciler - AtlasProvider atlas.Provider Scheme *runtime.Scheme EventRecorder record.EventRecorder GlobalPredicates []predicate.Predicate @@ -123,10 +122,10 @@ func NewAtlasNetworkContainerReconciler( Client: c.GetClient(), Log: logger.Named("controllers").Named("AtlasNetworkContainer").Sugar(), GlobalSecretRef: globalSecretRef, + AtlasProvider: atlasProvider, }, Scheme: c.GetScheme(), EventRecorder: c.GetEventRecorderFor("AtlasNetworkContainer"), - AtlasProvider: atlasProvider, GlobalPredicates: predicates, ObjectDeletionProtection: deletionProtection, independentSyncPeriod: independentSyncPeriod, diff --git a/internal/controller/atlasnetworkcontainer/state_test.go b/internal/controller/atlasnetworkcontainer/state_test.go index 6451eac783..b67c756254 100644 --- a/internal/controller/atlasnetworkcontainer/state_test.go +++ b/internal/controller/atlasnetworkcontainer/state_test.go @@ -884,10 +884,10 @@ func getConditions(networkContainer *akov2.AtlasNetworkContainer) []api.Conditio func testReconciler(k8sClient client.Client, provider atlas.Provider, logger *zap.Logger) *AtlasNetworkContainerReconciler { return &AtlasNetworkContainerReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: k8sClient, - Log: logger.Sugar(), + Client: k8sClient, + Log: logger.Sugar(), + AtlasProvider: provider, }, - AtlasProvider: provider, EventRecorder: record.NewFakeRecorder(10), } } diff --git a/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller.go b/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller.go index c26a41f8c9..f91a1ce1f2 100644 --- a/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller.go +++ b/internal/controller/atlasnetworkpeering/atlasnetworkpeering_controller.go @@ -47,7 +47,6 @@ import ( // AtlasNetworkPeeringReconciler reconciles a AtlasNetworkPeering object type AtlasNetworkPeeringReconciler struct { reconciler.AtlasReconciler - AtlasProvider atlas.Provider Scheme *runtime.Scheme EventRecorder record.EventRecorder GlobalPredicates []predicate.Predicate @@ -69,10 +68,10 @@ func 
NewAtlasNetworkPeeringsReconciler( Client: c.GetClient(), Log: logger.Named("controllers").Named("AtlasNetworkPeering").Sugar(), GlobalSecretRef: globalSecretRef, + AtlasProvider: atlasProvider, }, Scheme: c.GetScheme(), EventRecorder: c.GetEventRecorderFor("AtlasPrivateEndpoint"), - AtlasProvider: atlasProvider, GlobalPredicates: predicates, ObjectDeletionProtection: deletionProtection, independentSyncPeriod: independentSyncPeriod, diff --git a/internal/controller/atlasnetworkpeering/state_test.go b/internal/controller/atlasnetworkpeering/state_test.go index 21c6c66362..5b1820491e 100644 --- a/internal/controller/atlasnetworkpeering/state_test.go +++ b/internal/controller/atlasnetworkpeering/state_test.go @@ -941,8 +941,8 @@ func testReconciler(k8sClient client.Client, provider atlas.Provider, logger *za Namespace: "default", Name: "secret", }, + AtlasProvider: provider, }, - AtlasProvider: provider, EventRecorder: record.NewFakeRecorder(10), } } diff --git a/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller.go b/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller.go index 2558a0f04e..6109930be9 100644 --- a/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller.go +++ b/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller.go @@ -52,7 +52,6 @@ type AtlasPrivateEndpointReconciler struct { reconciler.AtlasReconciler Scheme *runtime.Scheme EventRecorder record.EventRecorder - AtlasProvider atlas.Provider GlobalPredicates []predicate.Predicate ObjectDeletionProtection bool @@ -299,10 +298,10 @@ func NewAtlasPrivateEndpointReconciler( Client: c.GetClient(), Log: logger.Named("controllers").Named("AtlasPrivateEndpoint").Sugar(), GlobalSecretRef: globalSecretRef, + AtlasProvider: atlasProvider, }, Scheme: c.GetScheme(), EventRecorder: c.GetEventRecorderFor("AtlasPrivateEndpoint"), - AtlasProvider: atlasProvider, GlobalPredicates: predicates, ObjectDeletionProtection: deletionProtection, independentSyncPeriod: independentSyncPeriod, diff --git a/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller_test.go b/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller_test.go index cb464ad6f9..9c2d43063b 100644 --- a/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller_test.go +++ b/internal/controller/atlasprivateendpoint/atlasprivateendpoint_controller_test.go @@ -398,10 +398,10 @@ func TestEnsureCustomResource(t *testing.T) { Build() r := &AtlasPrivateEndpointReconciler{ AtlasReconciler: reconciler.AtlasReconciler{ - Client: fakeClient, - Log: zap.New(core).Sugar(), + Client: fakeClient, + Log: zap.New(core).Sugar(), + AtlasProvider: tt.provider, }, - AtlasProvider: tt.provider, EventRecorder: record.NewFakeRecorder(10), } result, err := r.ensureCustomResource(ctx, tt.atlasPrivateEndpoint) diff --git a/internal/controller/atlasthirdpartyintegrations/handler.go b/internal/controller/atlasthirdpartyintegrations/handler.go new file mode 100644 index 0000000000..98c38908ce --- /dev/null +++ b/internal/controller/atlasthirdpartyintegrations/handler.go @@ -0,0 +1,200 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package integrations + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/thirdpartyintegration" + ctrlstate "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/controller/state" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/result" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/state" +) + +const ( + AnnotationContentHash = "mongodb.com/content-hash" +) + +func (h *AtlasThirdPartyIntegrationHandler) HandleInitial(ctx context.Context, integration *akov2next.AtlasThirdPartyIntegration) (ctrlstate.Result, error) { + return h.upsert(ctx, state.StateInitial, state.StateCreated, integration) +} + +func (h *AtlasThirdPartyIntegrationHandler) HandleCreated(ctx context.Context, integration *akov2next.AtlasThirdPartyIntegration) (ctrlstate.Result, error) { + return h.upsert(ctx, state.StateCreated, state.StateCreated, integration) +} + +func (h *AtlasThirdPartyIntegrationHandler) HandleUpdated(ctx context.Context, integration *akov2next.AtlasThirdPartyIntegration) (ctrlstate.Result, error) { + return h.upsert(ctx, state.StateUpdated, state.StateUpdated, integration) +} + +func (h *AtlasThirdPartyIntegrationHandler) HandleDeletionRequested(ctx context.Context, integration *akov2next.AtlasThirdPartyIntegration) (ctrlstate.Result, error) { + req, err := h.newReconcileRequest(ctx, integration) + if err != nil { + // TODO is this good for all cases? 
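+		// Building the reconcile request failed (for example, the credentials or the
+		// referenced project could not be resolved); fall back to unmanaging the
+		// resource so that its deletion is not blocked.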
+ return h.unmanage(integration.Spec.Type) + } + + if !h.deletionProtection { + return h.delete(ctx, req, integration.Spec.Type) + } + return h.unmanage(integration.Spec.Type) +} + +func (h *AtlasThirdPartyIntegrationHandler) upsert(ctx context.Context, currentState, nextState state.ResourceState, integration *akov2next.AtlasThirdPartyIntegration) (ctrlstate.Result, error) { + req, err := h.newReconcileRequest(ctx, integration) + if err != nil { + return result.Error(currentState, fmt.Errorf("failed to build reconcile request: %w", err)) + } + + integrationSpec, err := h.populateIntegration(ctx, integration) + if err != nil { + return result.Error(currentState, fmt.Errorf("failed to populate integration: %w", err)) + } + atlasIntegration, err := req.Service.Get(ctx, req.Project.ID, integrationSpec.Type) + if errors.Is(err, thirdpartyintegration.ErrNotFound) { + return h.create(ctx, currentState, req, integrationSpec) + } + if err != nil { + return result.Error( + currentState, + fmt.Errorf("Error getting %s Atlas Integration for project %s: %w", + integrationSpec.Type, req.Project.ID, err), + ) + } + atlas := atlasIntegration.Comparable() + spec := integrationSpec.Comparable() + secretChanged, err := h.secretChanged(ctx, integration) + if err != nil { + return result.Error( + currentState, + fmt.Errorf("Error evaluating secret changes for %s Atlas Integration for project %s: %w", + integrationSpec.Type, req.Project.ID, err), + ) + } + if secretChanged || !reflect.DeepEqual(atlas, spec) { + return h.update(ctx, currentState, req, integrationSpec) + } + return result.NextState( + nextState, + fmt.Sprintf("%s Atlas Third Party Integration for %s", integrationSpec.Type, req.Project.ID), + ) +} + +func (h *AtlasThirdPartyIntegrationHandler) create(ctx context.Context, currentState state.ResourceState, req *reconcileRequest, integrationSpec *thirdpartyintegration.ThirdPartyIntegration) (ctrlstate.Result, error) { + newIntegration, err := req.Service.Create(ctx, req.Project.ID, integrationSpec) + if err != nil { + return result.Error(currentState, fmt.Errorf("failed to create %s Atlas Third Party Integration for project %s: %w", + integrationSpec.Type, req.Project.ID, err)) + } + req.integration.Status.ID = newIntegration.ID + if err := h.patchNonConditionStatus(ctx, req); err != nil { + return result.Error(currentState, fmt.Errorf("failed to record id for %s Atlas Third Party Integration for project %s: %w", + integrationSpec.Type, req.Project.ID, err)) + } + settled := true + ensureReadyCondition(req.integration, "Resource Settled", settled) + if err := h.ensureSecretHash(ctx, req.integration); err != nil { + return result.Error(currentState, fmt.Errorf("failed to ensure secret is hashed to detect further changes "+ + "for %s Atlas Third Party Integration for project %s: %w", + integrationSpec.Type, req.Project.ID, err)) + } + return result.NextState( + state.StateCreated, + fmt.Sprintf("Created Atlas Third Party Integration for %s", integrationSpec.Type), + ) +} + +func (h *AtlasThirdPartyIntegrationHandler) update(ctx context.Context, currentState state.ResourceState, req *reconcileRequest, integrationSpec *thirdpartyintegration.ThirdPartyIntegration) (ctrlstate.Result, error) { + _, err := req.Service.Update(ctx, req.Project.ID, integrationSpec) + if err != nil { + return result.Error(currentState, fmt.Errorf("failed to update %s Atlas Third Party Integration for project %s: %w", + integrationSpec.Type, req.Project.ID, err)) + } + settled := true + ensureReadyCondition(req.integration, 
"Resource Settled", settled) + return result.NextState( + state.StateUpdated, + fmt.Sprintf("Updated Atlas Third Party Integration for %s", integrationSpec.Type), + ) +} + +func (h *AtlasThirdPartyIntegrationHandler) delete(ctx context.Context, req *reconcileRequest, integrationType string) (ctrlstate.Result, error) { + err := req.Service.Delete(ctx, req.Project.ID, integrationType) + if errors.Is(err, thirdpartyintegration.ErrNotFound) { + return h.unmanage(integrationType) + } + if err != nil { + return result.Error( + state.StateDeletionRequested, + fmt.Errorf("Error deleting %s Atlas Integration for project %s: %w", integrationType, req.Project.ID, err), + ) + } + return h.unmanage(integrationType) +} + +func (h *AtlasThirdPartyIntegrationHandler) unmanage(integrationType string) (ctrlstate.Result, error) { + return result.NextState( + state.StateDeleted, + fmt.Sprintf("Deleted Atlas Third Party Integration for %s", integrationType), + ) +} + +func (h *AtlasThirdPartyIntegrationHandler) patchNonConditionStatus(ctx context.Context, req *reconcileRequest) error { + statusJSON, err := json.Marshal(req.integration) + if err != nil { + return fmt.Errorf("failed to marshal status: %w", err) + } + if err := h.Client.Status().Patch(ctx, req.integration, client.RawPatch(types.MergePatchType, statusJSON)); err != nil { + return fmt.Errorf("failed to patch: %w", err) + } + return nil +} + +func ensureReadyCondition(integration *akov2next.AtlasThirdPartyIntegration, msg string, ready bool) { + s := metav1.ConditionFalse + if ready { + s = metav1.ConditionTrue + } + + meta.SetStatusCondition(&integration.Status.Conditions, metav1.Condition{ + Type: "Ready", + Status: s, + LastTransitionTime: metav1.NewTime(time.Now()), + Message: msg, + }) +} + +func (h *AtlasThirdPartyIntegrationHandler) populateIntegration(ctx context.Context, integration *akov2next.AtlasThirdPartyIntegration) (*thirdpartyintegration.ThirdPartyIntegration, error) { + secrets, err := fetchIntegrationSecrets(ctx, h.Client, integration) + if err != nil { + return nil, fmt.Errorf("failed to fetch integration secrets: %w", err) + } + internalIntegration, err := thirdpartyintegration.NewFromSpec(integration, secrets) + if err != nil { + return nil, fmt.Errorf("failed to populate integration: %w", err) + } + return internalIntegration, err +} diff --git a/internal/controller/atlasthirdpartyintegrations/secret.go b/internal/controller/atlasthirdpartyintegrations/secret.go new file mode 100644 index 0000000000..32d694a671 --- /dev/null +++ b/internal/controller/atlasthirdpartyintegrations/secret.go @@ -0,0 +1,141 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package integrations + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "sort" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/thirdpartyintegration" +) + +func (h *AtlasThirdPartyIntegrationHandler) secretChanged(ctx context.Context, integration *akov2next.AtlasThirdPartyIntegration) (bool, error) { + secretName, err := secretName(integration) // at this point the secret should have filed at populateIntegration + if err != nil { + return false, fmt.Errorf("failed to check for secret changes: %w", err) + } + secret, err := fetchSecret(ctx, h.Client, secretName, integration.Namespace) + if err != nil { + return false, fmt.Errorf("failed to retrieve secret %s to evaluate changes: %w", secretName, err) + } + currentValue := hashSecret(secret.Data) + for k, v := range secret.Annotations { + if k == AnnotationContentHash { + if v == currentValue { + return false, nil + } + } + } + if err := patchSecretAnnotation(ctx, h.Client, secret, AnnotationContentHash, currentValue); err != nil { + return false, fmt.Errorf("failed to record current secret %s value hash: %w", secretName, err) + } + return true, nil +} + +func (h *AtlasThirdPartyIntegrationHandler) ensureSecretHash(ctx context.Context, integration *akov2next.AtlasThirdPartyIntegration) error { + _, err := h.secretChanged(ctx, integration) + return fmt.Errorf("failed to ensure secret hash annotation: %w", err) +} + +func hashSecret(secretData map[string][]byte) string { + keys := make([]string, 0, len(secretData)) + for k := range secretData { + keys = append(keys, k) + } + sort.Strings(keys) + h := sha256.New() + for _, key := range keys { + h.Write(([]byte)(key)) + h.Write(secretData[key]) + } + return hex.EncodeToString(h.Sum(nil)) +} + +func fetchIntegrationSecrets(ctx context.Context, kubeClient client.Client, integration *akov2next.AtlasThirdPartyIntegration) (map[string][]byte, error) { + name, err := secretName(integration) + if err != nil { + return nil, fmt.Errorf("failed to solve integration secret name: %w", err) + } + return fetchSecretData(ctx, kubeClient, name, integration.Namespace) +} + +func secretName(integration *akov2next.AtlasThirdPartyIntegration) (string, error) { + switch integration.Spec.Type { + case "DATADOG": + return integration.Spec.Datadog.APIKeySecretRef.Name, nil + case "MICROSOFT_TEAMS": + return integration.Spec.MicrosoftTeams.URLSecretRef.Name, nil + case "NEW_RELIC": + return integration.Spec.NewRelic.CredentialsSecretRef.Name, nil + case "OPS_GENIE": + return integration.Spec.OpsGenie.APIKeySecretRef.Name, nil + case "PAGER_DUTY": + return integration.Spec.PagerDuty.ServiceKeySecretRef.Name, nil + case "PROMETHEUS": + return integration.Spec.Prometheus.PrometheusCredentialsSecretRef.Name, nil + case "SLACK": + return integration.Spec.Slack.APITokenSecretRef.Name, nil + case "VICTOR_OPS": + return integration.Spec.VictorOps.APIKeySecretRef.Name, nil + case "WEBHOOK": + return integration.Spec.Webhook.URLSecretRef.Name, nil + default: + return "", fmt.Errorf("%w %v", thirdpartyintegration.ErrUnsupportedIntegrationType, integration.Spec.Type) + } +} + +func fetchSecret(ctx context.Context, kubeClient client.Client, name, namespace string) (*v1.Secret, error) { + secret := v1.Secret{} + err := kubeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: 
namespace}, &secret) + if err != nil { + return nil, fmt.Errorf("failed to fetch secret %q: %w", name, err) + } + return &secret, nil +} + +func patchSecretAnnotation(ctx context.Context, kubeClient client.Client, secret *v1.Secret, annotation, value string) error { + updatedSecret := secret.DeepCopy() + if updatedSecret.Annotations == nil { + updatedSecret.Annotations = map[string]string{} + } + updatedSecret.Annotations[annotation] = value + + secretJSON, err := json.Marshal(updatedSecret) + if err != nil { + return fmt.Errorf("failed to marshal secret: %w", err) + } + patchErr := kubeClient.Patch(ctx, secret, client.RawPatch(types.MergePatchType, secretJSON)) + if patchErr != nil { + return fmt.Errorf("failed to patch secret: %w", patchErr) + } + return nil +} + +func fetchSecretData(ctx context.Context, kubeClient client.Client, name, namespace string) (map[string][]byte, error) { + secret, err := fetchSecret(ctx, kubeClient, name, namespace) + if err != nil { + return nil, fmt.Errorf("failed to retrieve secret value: %w", err) + } + return secret.Data, nil +} diff --git a/internal/controller/atlasthirdpartyintegrations/setup.go b/internal/controller/atlasthirdpartyintegrations/setup.go new file mode 100644 index 0000000000..cceb62638f --- /dev/null +++ b/internal/controller/atlasthirdpartyintegrations/setup.go @@ -0,0 +1,186 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
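+// This file wires AtlasThirdPartyIntegration into controller-runtime: it constructs
+// the state handler, registers the controller with watches on AtlasProject and
+// Secret objects, and assembles the per-reconcile request (Atlas client set,
+// integration service and resolved project).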
+ +package integrations + +import ( + "context" + "fmt" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + controllerruntime "sigs.k8s.io/controller-runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + ctrlrtbuilder "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/reconciler" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/indexer" + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/project" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/thirdpartyintegration" + ctrlstate "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/controller/state" + mckpredicate "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/predicate" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/ratelimit" +) + +type AtlasThirdPartyIntegrationHandler struct { + ctrlstate.StateHandler[akov2next.AtlasThirdPartyIntegration] + reconciler.AtlasReconciler + deletionProtection bool +} + +func NewAtlasThirdPartyIntegrationsReconciler( + c cluster.Cluster, + atlasProvider atlas.Provider, + deletionProtection bool, + logger *zap.Logger, + globalSecretRef client.ObjectKey, + reapplySupport bool, +) *ctrlstate.Reconciler[akov2next.AtlasThirdPartyIntegration] { + handler := &AtlasThirdPartyIntegrationHandler{ + AtlasReconciler: reconciler.AtlasReconciler{ + Client: c.GetClient(), + AtlasProvider: atlasProvider, + Log: logger.Named("controllers").Named("AtlasThirdPartyIntegration").Sugar(), + GlobalSecretRef: globalSecretRef, + }, + deletionProtection: deletionProtection, + } + return ctrlstate.NewStateReconciler( + handler, + ctrlstate.WithCluster[akov2next.AtlasThirdPartyIntegration](c), + ctrlstate.WithReapplySupport[akov2next.AtlasThirdPartyIntegration](reapplySupport), + ) +} + +// For prepares the controller for its target Custom Resource; AtlasThirdPartyIntegration +func (r *AtlasThirdPartyIntegrationHandler) For() (client.Object, builder.Predicates) { + return &akov2next.AtlasThirdPartyIntegration{}, builder.WithPredicates(predicate.GenerationChangedPredicate{}) +} + +func (h *AtlasThirdPartyIntegrationHandler) SetupWithManager(mgr ctrl.Manager, rec reconcile.Reconciler, opts ...ctrlstate.SetupManagerOption) error { + h.Client = mgr.GetClient() + obj := &akov2next.AtlasThirdPartyIntegration{} + builder := controllerruntime.NewControllerManagedBy(mgr). + For( + obj, + ctrlrtbuilder.WithPredicates( + predicate.Or( + mckpredicate.AnnotationChanged("mongodb.com/reapply-period"), + predicate.GenerationChangedPredicate{}, + ), + mckpredicate.IgnoreDeletedPredicate[client.Object](), + ), + ). + Watches( + &akov2.AtlasProject{}, + handler.EnqueueRequestsFromMapFunc(h.integrationForProjectMapFunc()), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). 
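+		// Secret events are fanned out to integrations through two indexes, the
+		// "by secrets" index and the credentials index (see integrationForSecretMapFunc).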
+ Watches( + &corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(h.integrationForSecretMapFunc()), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + WithOptions(controller.Options{ + RateLimiter: ratelimit.NewRateLimiter[reconcile.Request](), + }) + return ctrlstate.ApplyOptions(builder, opts...).Complete(rec) +} + +func (h *AtlasThirdPartyIntegrationHandler) integrationForProjectMapFunc() handler.MapFunc { + return indexer.ProjectsIndexMapperFunc( + string(indexer.AtlasThirdPartyIntegrationByProjectIndex), + func() *akov2next.AtlasThirdPartyIntegrationList { return &akov2next.AtlasThirdPartyIntegrationList{} }, + indexer.AtlasThirdPartyIntegrationRequests, + h.Client, + h.Log, + ) +} + +func (h *AtlasThirdPartyIntegrationHandler) integrationForSecretMapFunc() handler.MapFunc { + return func(ctx context.Context, obj client.Object) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + h.Log.Warnf("watching Secret but got %T", obj) + return nil + } + + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector( + indexer.AtlasThirdPartyIntegrationBySecretsIndex, + client.ObjectKeyFromObject(secret).String(), + ), + } + list1 := &akov2next.AtlasThirdPartyIntegrationList{} + err := h.Client.List(ctx, list1, listOpts) + if err != nil { + h.Log.Errorf("failed to list from indexer %s: %v", + indexer.AtlasThirdPartyIntegrationBySecretsIndex, err) + return nil + } + requests1 := indexer.AtlasThirdPartyIntegrationRequests(list1) + + listOpts = &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector( + indexer.AtlasThirdPartyIntegrationCredentialsIndex, + client.ObjectKeyFromObject(secret).String(), + ), + } + list2 := &akov2next.AtlasThirdPartyIntegrationList{} + err = h.Client.List(ctx, list2, listOpts) + if err != nil { + h.Log.Errorf("failed to list from indexer %s: %v", + indexer.AtlasThirdPartyIntegrationCredentialsIndex, err) + return nil + } + requests2 := indexer.AtlasThirdPartyIntegrationRequests(list2) + + return append(requests1, requests2...) + } +} + +type reconcileRequest struct { + ClientSet *atlas.ClientSet + Project *project.Project + Service thirdpartyintegration.ThirdPartyIntegrationService + integration *akov2next.AtlasThirdPartyIntegration +} + +func (h *AtlasThirdPartyIntegrationHandler) newReconcileRequest(ctx context.Context, integration *akov2next.AtlasThirdPartyIntegration) (*reconcileRequest, error) { + req := reconcileRequest{} + sdkClientSet, err := h.ResolveSDKClientSet(ctx, integration) + if err != nil { + return nil, fmt.Errorf("failed to resolve connection config: %w", err) + } + req.ClientSet = sdkClientSet + req.Service = thirdpartyintegration.NewThirdPartyIntegrationServiceFromClientSet(sdkClientSet) + project, err := h.ResolveProject(ctx, sdkClientSet.SdkClient20231115008, integration) + if err != nil { + return nil, fmt.Errorf("failed to fetch referenced project: %w", err) + } + req.Project = project + req.integration = integration + return &req, nil +} diff --git a/internal/controller/reconciler/clientset.go b/internal/controller/reconciler/clientset.go new file mode 100644 index 0000000000..45df80b59e --- /dev/null +++ b/internal/controller/reconciler/clientset.go @@ -0,0 +1,35 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package reconciler + +import ( + "context" + "fmt" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/project" +) + +func (r *AtlasReconciler) ResolveSDKClientSet(ctx context.Context, referrer project.ProjectReferrerObject) (*atlas.ClientSet, error) { + connectionConfig, err := r.ResolveConnectionConfig(ctx, referrer) + if err != nil { + return nil, fmt.Errorf("failed to resolve connection config: %w", err) + } + sdkClientSet, err := r.AtlasProvider.SdkClientSet(ctx, connectionConfig.Credentials, r.Log) + if err != nil { + return nil, fmt.Errorf("failed to instantiate client set: %w", err) + } + return sdkClientSet, nil +} diff --git a/internal/controller/reconciler/reconciler.go b/internal/controller/reconciler/reconciler.go index 8f2fe9b250..d5f81780fa 100644 --- a/internal/controller/reconciler/reconciler.go +++ b/internal/controller/reconciler/reconciler.go @@ -23,11 +23,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/customresource" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/workflow" ) type AtlasReconciler struct { + AtlasProvider atlas.Provider Client client.Client Log *zap.SugaredLogger GlobalSecretRef client.ObjectKey diff --git a/internal/controller/registry.go b/internal/controller/registry.go index e7626d6d39..5962e206ee 100644 --- a/internal/controller/registry.go +++ b/internal/controller/registry.go @@ -23,6 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -40,11 +41,16 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasproject" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlassearchindexconfig" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasstream" + integrations "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlasthirdpartyintegrations" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/watch" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/dryrun" "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/featureflags" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" + ctrlstate "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/controller/state" ) +const DefaultReapplySupport = true + type Reconciler interface { reconcile.Reconciler For() (client.Object, builder.Predicates) @@ -57,19 +63,24 @@ type Registry struct { independentSyncPeriod time.Duration featureFlags *featureflags.FeatureFlags - logger *zap.Logger - reconcilers []Reconciler - globalSecretRef client.ObjectKey + logger *zap.Logger + reconcilers []Reconciler + 
globalSecretRef client.ObjectKey + experimentalReconcilers bool + + reapplySupport bool } -func NewRegistry(predicates []predicate.Predicate, deletionProtection bool, logger *zap.Logger, independentSyncPeriod time.Duration, featureFlags *featureflags.FeatureFlags, globalSecretRef client.ObjectKey) *Registry { +func NewRegistry(predicates []predicate.Predicate, deletionProtection bool, logger *zap.Logger, independentSyncPeriod time.Duration, featureFlags *featureflags.FeatureFlags, globalSecretRef client.ObjectKey, experimentalReconcilers bool) *Registry { return &Registry{ - sharedPredicates: predicates, - deletionProtection: deletionProtection, - logger: logger, - independentSyncPeriod: independentSyncPeriod, - featureFlags: featureFlags, - globalSecretRef: globalSecretRef, + sharedPredicates: predicates, + deletionProtection: deletionProtection, + logger: logger, + independentSyncPeriod: independentSyncPeriod, + featureFlags: featureFlags, + globalSecretRef: globalSecretRef, + experimentalReconcilers: experimentalReconcilers, + reapplySupport: DefaultReapplySupport, } } @@ -114,9 +125,27 @@ func (r *Registry) registerControllers(c cluster.Cluster, ap atlas.Provider) { reconcilers = append(reconcilers, atlasipaccesslist.NewAtlasIPAccessListReconciler(c, r.defaultPredicates(), ap, r.deletionProtection, r.independentSyncPeriod, r.logger, r.globalSecretRef)) reconcilers = append(reconcilers, atlasnetworkcontainer.NewAtlasNetworkContainerReconciler(c, r.defaultPredicates(), ap, r.deletionProtection, r.logger, r.independentSyncPeriod, r.globalSecretRef)) reconcilers = append(reconcilers, atlasnetworkpeering.NewAtlasNetworkPeeringsReconciler(c, r.defaultPredicates(), ap, r.deletionProtection, r.logger, r.independentSyncPeriod, r.globalSecretRef)) + if r.experimentalReconcilers { + reconcilers = r.appendExperimentalReconcilers(reconcilers, c, ap) + } r.reconcilers = reconcilers } +func (r *Registry) appendExperimentalReconcilers(reconcilers []Reconciler, c cluster.Cluster, ap atlas.Provider) []Reconciler { + // TODO cluster.Cluster needed in initialization + integrationsReconciler := integrations.NewAtlasThirdPartyIntegrationsReconciler( + c, + ap, + r.deletionProtection, + r.logger, + r.globalSecretRef, + r.reapplySupport, + ) + compatibleIntegrationsReconciler := newCtrlStateReconciler(*integrationsReconciler) + reconcilers = append(reconcilers, compatibleIntegrationsReconciler) + return reconcilers +} + // deprecatedPredicates are to be phased out in favor of defaultPredicates func (r *Registry) deprecatedPredicates() []predicate.Predicate { return append(r.sharedPredicates, watch.DeprecatedCommonPredicates()) @@ -127,3 +156,22 @@ func (r *Registry) deprecatedPredicates() []predicate.Predicate { func (r *Registry) defaultPredicates() []predicate.Predicate { return append(r.sharedPredicates, watch.DefaultPredicates[client.Object]()) } + +type ctrlStateReconciler[T any] struct { + ctrlstate.Reconciler[T] +} + +func newCtrlStateReconciler[T any](r ctrlstate.Reconciler[T]) *ctrlStateReconciler[T] { + return &ctrlStateReconciler[T]{Reconciler: r} +} + +func (nr *ctrlStateReconciler[T]) SetupWithManager(mgr ctrl.Manager, skipNameValidation bool) error { + skipNameOptionFn := func(skipNameValidation bool) ctrlstate.SetupManagerOption { + return func(builder *ctrlstate.ControllerSetupBuilder) *ctrlstate.ControllerSetupBuilder { + return builder.WithOptions(controller.TypedOptions[reconcile.Request]{ + SkipNameValidation: pointer.MakePtr(skipNameValidation), + }) + } + } + return 
nr.Reconciler.SetupWithManager(mgr, skipNameOptionFn(skipNameValidation)) +} diff --git a/internal/indexer/atlasthirdpartyintegrationcredentials.go b/internal/indexer/atlasthirdpartyintegrationcredentials.go new file mode 100644 index 0000000000..0ca991ce7d --- /dev/null +++ b/internal/indexer/atlasthirdpartyintegrationcredentials.go @@ -0,0 +1,38 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package indexer + +import ( + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" +) + +const ( + AtlasThirdPartyIntegrationCredentialsIndex = "atlasthirdpartyintegrations.credentials" +) + +func NewAtlasThirdPartyIntegrationByCredentialIndexer(logger *zap.Logger) *LocalCredentialIndexer { + return NewLocalCredentialsIndexer(AtlasThirdPartyIntegrationCredentialsIndex, &akov2next.AtlasThirdPartyIntegration{}, logger) +} + +func AtlasThirdPartyIntegrationRequests(list *akov2next.AtlasThirdPartyIntegrationList) []reconcile.Request { + requests := make([]reconcile.Request, 0, len(list.Items)) + for _, item := range list.Items { + requests = append(requests, toRequest(&item)) + } + return requests +} diff --git a/internal/indexer/atlasthirdpartyintegrationsecrets.go b/internal/indexer/atlasthirdpartyintegrationsecrets.go new file mode 100644 index 0000000000..3c666d50dc --- /dev/null +++ b/internal/indexer/atlasthirdpartyintegrationsecrets.go @@ -0,0 +1,99 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
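Usage note on the credentials index introduced in atlasthirdpartyintegrationcredentials.go above: it is meant to be consumed through the shared CredentialsIndexMapperFunc helper, whose call shape can be seen in the localcredentials_test.go changes further down. A minimal sketch of that wiring follows; the enclosing package, kubeClient and logger are placeholders and not part of this patch.

package example // illustrative only; the real wiring lives in the controller packages

import (
	"go.uber.org/zap"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"

	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/indexer"
	akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1"
)

// thirdPartyIntegrationSecretMapFunc maps a changed credentials Secret to
// reconcile requests for every AtlasThirdPartyIntegration referencing it,
// by querying the AtlasThirdPartyIntegrationCredentialsIndex.
func thirdPartyIntegrationSecretMapFunc(kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc {
	return indexer.CredentialsIndexMapperFunc(
		indexer.AtlasThirdPartyIntegrationCredentialsIndex,
		func() *akov2next.AtlasThirdPartyIntegrationList { return &akov2next.AtlasThirdPartyIntegrationList{} },
		indexer.AtlasThirdPartyIntegrationRequests,
		kubeClient,
		logger,
	)
}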
+ +package indexer + +import ( + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/client" + + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" +) + +const ( + AtlasThirdPartyIntegrationBySecretsIndex = "atlasthirdpartyintegration.spec.secrets" +) + +type AtlasThirdPartyIntegrationBySecretsIndexer struct { + logger *zap.SugaredLogger +} + +func NewAtlasThirdPartyIntegrationBySecretsIndexer(logger *zap.Logger) *AtlasThirdPartyIntegrationBySecretsIndexer { + return &AtlasThirdPartyIntegrationBySecretsIndexer{ + logger: logger.Named(AtlasThirdPartyIntegrationBySecretsIndex).Sugar(), + } +} + +func (*AtlasThirdPartyIntegrationBySecretsIndexer) Object() client.Object { + return &akov2next.AtlasThirdPartyIntegration{} +} + +func (*AtlasThirdPartyIntegrationBySecretsIndexer) Name() string { + return AtlasThirdPartyIntegrationBySecretsIndex +} + +func (a *AtlasThirdPartyIntegrationBySecretsIndexer) Keys(object client.Object) []string { + tpi, ok := object.(*akov2next.AtlasThirdPartyIntegration) + if !ok { + a.logger.Errorf("expected %T but got %T", &akov2next.AtlasThirdPartyIntegration{}, object) + return nil + } + name := keyName(tpi) + if name == "" { + return nil + } + return []string{client.ObjectKey{Name: name, Namespace: tpi.Namespace}.String()} +} + +func keyName(tpi *akov2next.AtlasThirdPartyIntegration) string { + switch tpi.Spec.Type { + case "DATADOG": + if tpi.Spec.Datadog != nil && tpi.Spec.Datadog.APIKeySecretRef.Name != "" { + return tpi.Spec.Datadog.APIKeySecretRef.Name + } + case "MICROSOFT_TEAMS": + if tpi.Spec.MicrosoftTeams != nil && tpi.Spec.MicrosoftTeams.URLSecretRef.Name != "" { + return tpi.Spec.MicrosoftTeams.URLSecretRef.Name + } + case "NEW_RELIC": + if tpi.Spec.NewRelic != nil && tpi.Spec.NewRelic.CredentialsSecretRef.Name != "" { + return tpi.Spec.NewRelic.CredentialsSecretRef.Name + } + case "OPS_GENIE": + if tpi.Spec.OpsGenie != nil && tpi.Spec.OpsGenie.APIKeySecretRef.Name != "" { + return tpi.Spec.OpsGenie.APIKeySecretRef.Name + } + case "PAGER_DUTY": + if tpi.Spec.PagerDuty != nil && tpi.Spec.PagerDuty.ServiceKeySecretRef.Name != "" { + return tpi.Spec.PagerDuty.ServiceKeySecretRef.Name + } + case "PROMETHEUS": + if tpi.Spec.Prometheus != nil && tpi.Spec.Prometheus.PrometheusCredentialsSecretRef.Name != "" { + return tpi.Spec.Prometheus.PrometheusCredentialsSecretRef.Name + } + case "SLACK": + if tpi.Spec.Slack != nil && tpi.Spec.Slack.APITokenSecretRef.Name != "" { + return tpi.Spec.Slack.APITokenSecretRef.Name + } + case "VICTOR_OPS": + if tpi.Spec.VictorOps != nil && tpi.Spec.VictorOps.APIKeySecretRef.Name != "" { + return tpi.Spec.VictorOps.APIKeySecretRef.Name + } + case "WEBHOOK": + if tpi.Spec.Webhook != nil && tpi.Spec.Webhook.URLSecretRef.Name != "" { + return tpi.Spec.Webhook.URLSecretRef.Name + } + } + return "" +} diff --git a/internal/indexer/atlasthirdpartyintegrationsecrets_test.go b/internal/indexer/atlasthirdpartyintegrationsecrets_test.go new file mode 100644 index 0000000000..204909fc16 --- /dev/null +++ b/internal/indexer/atlasthirdpartyintegrationsecrets_test.go @@ -0,0 +1,212 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package indexer
+
+import (
+	"sort"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap/zaptest"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api"
+	akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1"
+	akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1"
+)
+
+func TestAtlasThirdPartyIntegrationBySecretsIndexer(t *testing.T) {
+	for _, tc := range []struct {
+		name     string
+		object   client.Object
+		wantKeys []string
+	}{
+		{
+			name:   "should return nil on wrong type",
+			object: &akov2.AtlasProject{},
+		},
+		{
+			name:   "should return nil when there are no references",
+			object: &akov2next.AtlasThirdPartyIntegration{},
+		},
+		{
+			name: "should return nil when there is an empty reference",
+			object: &akov2next.AtlasThirdPartyIntegration{
+				Spec: akov2next.AtlasThirdPartyIntegrationSpec{
+					Type: "DATADOG",
+					Datadog: &akov2next.DatadogIntegration{
+						APIKeySecretRef: api.LocalObjectReference{},
+					},
+				},
+			},
+		},
+		{
+			name: "should return the datadog secret name",
+			object: &akov2next.AtlasThirdPartyIntegration{
+				ObjectMeta: v1.ObjectMeta{Namespace: "ns"},
+				Spec: akov2next.AtlasThirdPartyIntegrationSpec{
+					Type: "DATADOG",
+					Datadog: &akov2next.DatadogIntegration{
+						APIKeySecretRef: api.LocalObjectReference{
+							Name: "datadogSecret",
+						},
+					},
+				},
+			},
+			wantKeys: []string{"ns/datadogSecret"},
+		},
+		{
+			name: "should return the microsoft teams secret name",
+			object: &akov2next.AtlasThirdPartyIntegration{
+				ObjectMeta: v1.ObjectMeta{Namespace: "ns"},
+				Spec: akov2next.AtlasThirdPartyIntegrationSpec{
+					Type: "MICROSOFT_TEAMS",
+					MicrosoftTeams: &akov2next.MicrosoftTeamsIntegration{
+						URLSecretRef: api.LocalObjectReference{
+							Name: "microsoftTeamsSecret",
+						},
+					},
+				},
+			},
+			wantKeys: []string{"ns/microsoftTeamsSecret"},
+		},
+		{
+			name: "should return the new relic secret name",
+			object: &akov2next.AtlasThirdPartyIntegration{
+				ObjectMeta: v1.ObjectMeta{Namespace: "ns"},
+				Spec: akov2next.AtlasThirdPartyIntegrationSpec{
+					Type: "NEW_RELIC",
+					NewRelic: &akov2next.NewRelicIntegration{
+						CredentialsSecretRef: api.LocalObjectReference{
+							Name: "newRelicSecret",
+						},
+					},
+				},
+			},
+			wantKeys: []string{"ns/newRelicSecret"},
+		},
+		{
+			name: "should return the ops genie secret name",
+			object: &akov2next.AtlasThirdPartyIntegration{
+				ObjectMeta: v1.ObjectMeta{Namespace: "ns"},
+				Spec: akov2next.AtlasThirdPartyIntegrationSpec{
+					Type: "OPS_GENIE",
+					OpsGenie: &akov2next.OpsGenieIntegration{
+						APIKeySecretRef: api.LocalObjectReference{
+							Name: "opsGenieSecret",
+						},
+					},
+				},
+			},
+			wantKeys: []string{"ns/opsGenieSecret"},
+		},
+		{
+			name: "should return the pager duty secret name",
+			object: &akov2next.AtlasThirdPartyIntegration{
+				ObjectMeta: v1.ObjectMeta{Namespace: "ns"},
+				Spec: akov2next.AtlasThirdPartyIntegrationSpec{
+					Type: "PAGER_DUTY",
+					PagerDuty: &akov2next.PagerDutyIntegration{
+						ServiceKeySecretRef: api.LocalObjectReference{
+							Name: "pagerDutySecret",
+						},
+					},
+				},
+			},
+			wantKeys:
[]string{"ns/pagerDutySecret"}, + }, + { + name: "should return the prometheus secret name", + object: &akov2next.AtlasThirdPartyIntegration{ + ObjectMeta: v1.ObjectMeta{Namespace: "ns"}, + Spec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "PROMETHEUS", + Prometheus: &akov2next.PrometheusIntegration{ + PrometheusCredentialsSecretRef: api.LocalObjectReference{ + Name: "prometheusSecret", + }, + }, + }, + }, + wantKeys: []string{"ns/prometheusSecret"}, + }, + { + name: "should return the slack secret name", + object: &akov2next.AtlasThirdPartyIntegration{ + ObjectMeta: v1.ObjectMeta{Namespace: "ns"}, + Spec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "SLACK", + Slack: &akov2next.SlackIntegration{ + APITokenSecretRef: api.LocalObjectReference{ + Name: "slackSecret", + }, + }, + }, + }, + wantKeys: []string{"ns/slackSecret"}, + }, + { + name: "should return the victor ops secret name", + object: &akov2next.AtlasThirdPartyIntegration{ + ObjectMeta: v1.ObjectMeta{Namespace: "ns"}, + Spec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "VICTOR_OPS", + VictorOps: &akov2next.VictorOpsIntegration{ + APIKeySecretRef: api.LocalObjectReference{ + Name: "victorOpsSecret", + }, + }, + }, + }, + wantKeys: []string{"ns/victorOpsSecret"}, + }, + { + name: "should return the webhook api key name", + object: &akov2next.AtlasThirdPartyIntegration{ + ObjectMeta: v1.ObjectMeta{Namespace: "ns"}, + Spec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "WEBHOOK", + Webhook: &akov2next.WebhookIntegration{ + URLSecretRef: api.LocalObjectReference{ + Name: "webhookSecret", + }, + }, + }, + }, + wantKeys: []string{"ns/webhookSecret"}, + }, + { + name: "wrong type returns nothing", + object: &akov2next.AtlasThirdPartyIntegration{ + Spec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "DATADOG", + Webhook: &akov2next.WebhookIntegration{ + URLSecretRef: api.LocalObjectReference{ + Name: "webhookSecret", + }, + }, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + indexer := NewAtlasThirdPartyIntegrationBySecretsIndexer(zaptest.NewLogger(t)) + keys := indexer.Keys(tc.object) + sort.Strings(keys) + assert.Equal(t, tc.wantKeys, keys) + }) + } +} diff --git a/internal/indexer/atlasthirdpartyintegrationsprojects.go b/internal/indexer/atlasthirdpartyintegrationsprojects.go new file mode 100644 index 0000000000..2500a843fc --- /dev/null +++ b/internal/indexer/atlasthirdpartyintegrationsprojects.go @@ -0,0 +1,44 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
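The by-project indexer added below stores keys in the "<namespace>/<name>" form exercised by its test. A sketch of a field-selector lookup against that index; the function and its caller are illustrative, not part of this patch.

package example // illustrative only

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/indexer"
	akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1"
)

// listIntegrationsForProject lists every AtlasThirdPartyIntegration whose
// projectRef points at the given project, using the by-project field index.
// The "<namespace>/<name>" key format matches the indexer tests.
func listIntegrationsForProject(ctx context.Context, kubeClient client.Client, projectKey string) (*akov2next.AtlasThirdPartyIntegrationList, error) {
	list := &akov2next.AtlasThirdPartyIntegrationList{}
	if err := kubeClient.List(ctx, list, client.MatchingFields{
		indexer.AtlasThirdPartyIntegrationByProjectIndex: projectKey, // e.g. "default/project-1"
	}); err != nil {
		return nil, err
	}
	return list, nil
}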
+ +//nolint:dupl +package indexer + +import ( + "go.uber.org/zap" + "sigs.k8s.io/controller-runtime/pkg/client" + + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" +) + +const ( + AtlasThirdPartyIntegrationByProjectIndex = "atlasThirdPartyIntegration.spec.projectRef" +) + +type AtlasThirdPartyIntegrationByProjectIndexer struct { + AtlasReferrerByProjectIndexerBase +} + +func NewAtlasThirdPartyIntegrationByProjectIndexer(logger *zap.Logger) *AtlasThirdPartyIntegrationByProjectIndexer { + return &AtlasThirdPartyIntegrationByProjectIndexer{ + AtlasReferrerByProjectIndexerBase: *NewAtlasReferrerByProjectIndexer( + logger, + AtlasThirdPartyIntegrationByProjectIndex, + ), + } +} + +func (*AtlasThirdPartyIntegrationByProjectIndexer) Object() client.Object { + return &akov2next.AtlasThirdPartyIntegration{} +} diff --git a/internal/indexer/atlasthirdpartyintegrationsprojects_test.go b/internal/indexer/atlasthirdpartyintegrationsprojects_test.go new file mode 100644 index 0000000000..555a017c03 --- /dev/null +++ b/internal/indexer/atlasthirdpartyintegrationsprojects_test.go @@ -0,0 +1,61 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package indexer + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" + + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common" + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" +) + +func TestAtlasThirdPartyIntegrationByProjectIndices(t *testing.T) { + t.Run("should return nil when instance has no project associated to it", func(t *testing.T) { + pe := &akov2next.AtlasThirdPartyIntegration{ + Spec: akov2next.AtlasThirdPartyIntegrationSpec{}, + } + + indexer := NewAtlasThirdPartyIntegrationByProjectIndexer(zaptest.NewLogger(t)) + keys := indexer.Keys(pe) + assert.Nil(t, keys) + }) + + t.Run("should return indexes slice when instance has project associated to it", func(t *testing.T) { + pe := &akov2next.AtlasThirdPartyIntegration{ + Spec: akov2next.AtlasThirdPartyIntegrationSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ProjectRef: &common.ResourceRefNamespaced{ + Name: "project-1", + Namespace: "default", + }, + }, + }, + } + + indexer := NewAtlasThirdPartyIntegrationByProjectIndexer(zaptest.NewLogger(t)) + keys := indexer.Keys(pe) + assert.Equal( + t, + []string{ + "default/project-1", + }, + keys, + ) + }) +} diff --git a/internal/indexer/indexer.go b/internal/indexer/indexer.go index 38a80b2e60..7b10b5388a 100644 --- a/internal/indexer/indexer.go +++ b/internal/indexer/indexer.go @@ -21,6 +21,8 @@ import ( "go.uber.org/zap" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/version" ) type Indexer interface { @@ -34,7 +36,15 @@ type Indexer interface { // passing that to each indexer. 
func RegisterAll(ctx context.Context, c cluster.Cluster, logger *zap.Logger) error { logger = logger.Named("indexer") - return Register(ctx, c, + indexers := productionIndexers([]Indexer{}, ctx, c, logger) + if version.IsExperimental() { + indexers = experimentalIndexers(indexers, ctx, c, logger) + } + return Register(ctx, c, indexers...) +} + +func productionIndexers(indexers []Indexer, ctx context.Context, c cluster.Cluster, logger *zap.Logger) []Indexer { + return append(indexers, NewAtlasBackupScheduleByBackupPolicyIndexer(logger), NewAtlasDeploymentByBackupScheduleIndexer(logger), NewAtlasDeploymentBySearchIndexIndexer(logger), @@ -64,6 +74,14 @@ func RegisterAll(ctx context.Context, c cluster.Cluster, logger *zap.Logger) err ) } +func experimentalIndexers(indexers []Indexer, _ context.Context, _ cluster.Cluster, logger *zap.Logger) []Indexer { + return append(indexers, + NewAtlasThirdPartyIntegrationByProjectIndexer(logger), + NewAtlasThirdPartyIntegrationByCredentialIndexer(logger), + NewAtlasThirdPartyIntegrationBySecretsIndexer(logger), + ) +} + // Register registers the given indexers to the given manager's field indexer. func Register(ctx context.Context, c cluster.Cluster, indexers ...Indexer) error { for _, indexer := range indexers { diff --git a/internal/indexer/localcredentials_test.go b/internal/indexer/localcredentials_test.go index 4159346693..c51ba96a1b 100644 --- a/internal/indexer/localcredentials_test.go +++ b/internal/indexer/localcredentials_test.go @@ -256,7 +256,7 @@ func TestCredentialsIndexMapperFunc(t *testing.T) { index: AtlasDeploymentCredentialsIndex, output: &akov2.AtlasDeployment{}, mapperFn: func(kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return CredentialsIndexMapperFunc[*akov2.AtlasDeploymentList]( + return CredentialsIndexMapperFunc( AtlasDeploymentCredentialsIndex, func() *akov2.AtlasDeploymentList { return &akov2.AtlasDeploymentList{} }, DeploymentRequests, @@ -292,7 +292,7 @@ func TestCredentialsIndexMapperFunc(t *testing.T) { index: AtlasCustomRoleCredentialsIndex, output: &akov2.AtlasCustomRole{}, mapperFn: func(kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return CredentialsIndexMapperFunc[*akov2.AtlasCustomRoleList]( + return CredentialsIndexMapperFunc( AtlasCustomRoleCredentialsIndex, func() *akov2.AtlasCustomRoleList { return &akov2.AtlasCustomRoleList{} }, CustomRoleRequests, @@ -328,7 +328,7 @@ func TestCredentialsIndexMapperFunc(t *testing.T) { index: AtlasPrivateEndpointCredentialsIndex, output: &akov2.AtlasPrivateEndpoint{}, mapperFn: func(kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return CredentialsIndexMapperFunc[*akov2.AtlasPrivateEndpointList]( + return CredentialsIndexMapperFunc( AtlasPrivateEndpointCredentialsIndex, func() *akov2.AtlasPrivateEndpointList { return &akov2.AtlasPrivateEndpointList{} }, PrivateEndpointRequests, @@ -364,7 +364,7 @@ func TestCredentialsIndexMapperFunc(t *testing.T) { index: AtlasNetworkContainerCredentialsIndex, output: &akov2.AtlasNetworkContainer{}, mapperFn: func(kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return CredentialsIndexMapperFunc[*akov2.AtlasNetworkContainerList]( + return CredentialsIndexMapperFunc( AtlasNetworkContainerCredentialsIndex, func() *akov2.AtlasNetworkContainerList { return &akov2.AtlasNetworkContainerList{} }, NetworkContainerRequests, @@ -400,7 +400,7 @@ func TestCredentialsIndexMapperFunc(t *testing.T) { index: AtlasNetworkPeeringCredentialsIndex, output: 
&akov2.AtlasNetworkPeering{}, mapperFn: func(kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return CredentialsIndexMapperFunc[*akov2.AtlasNetworkPeeringList]( + return CredentialsIndexMapperFunc( AtlasNetworkPeeringCredentialsIndex, func() *akov2.AtlasNetworkPeeringList { return &akov2.AtlasNetworkPeeringList{} }, NetworkPeeringRequests, @@ -519,7 +519,7 @@ func newTestSecret(name string) *corev1.Secret { } func dbUserMapperFunc(kubeClient client.Client, logger *zap.SugaredLogger) handler.MapFunc { - return CredentialsIndexMapperFunc[*akov2.AtlasDatabaseUserList]( + return CredentialsIndexMapperFunc( AtlasDatabaseUserCredentialsIndex, func() *akov2.AtlasDatabaseUserList { return &akov2.AtlasDatabaseUserList{} }, DatabaseUserRequests, @@ -539,5 +539,6 @@ func testIndexers(t *testing.T) map[string]*LocalCredentialIndexer { indexers[AtlasPrivateEndpointCredentialsIndex] = NewAtlasPrivateEndpointByCredentialIndexer(logger) indexers[AtlasNetworkContainerCredentialsIndex] = NewAtlasNetworkContainerByCredentialIndexer(logger) indexers[AtlasNetworkPeeringCredentialsIndex] = NewAtlasNetworkPeeringByCredentialIndexer(logger) + indexers[AtlasThirdPartyIntegrationCredentialsIndex] = NewAtlasThirdPartyIntegrationByCredentialIndexer(logger) return indexers } diff --git a/internal/next-crds/atlas.nextapi.mongodb.com_atlasthirdpartyintegrations.yaml b/internal/next-crds/atlas.nextapi.mongodb.com_atlasthirdpartyintegrations.yaml new file mode 100644 index 0000000000..2ef8a3378b --- /dev/null +++ b/internal/next-crds/atlas.nextapi.mongodb.com_atlasthirdpartyintegrations.yaml @@ -0,0 +1,424 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.2 + name: atlasthirdpartyintegrations.atlas.nextapi.mongodb.com +spec: + group: atlas.nextapi.mongodb.com + names: + categories: + - atlas + kind: AtlasThirdPartyIntegration + listKind: AtlasThirdPartyIntegrationList + plural: atlasthirdpartyintegrations + shortNames: + - atpi + singular: atlasthirdpartyintegration + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + name: v1 + schema: + openAPIV3Schema: + description: AtlasThirdPartyIntegration is the Schema for the atlas 3rd party + integrations API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AtlasThirdPartyIntegrationSpec contains the expected configuration + for an integration + properties: + connectionSecret: + description: Name of the secret containing Atlas API private and public + keys + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + datadog: + description: Datadog contains the config fields for Datadog's Integration + properties: + apiKeySecretRef: + description: APIKeySecretRef holds the name of a secret containing + the datadog api key + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + region: + description: Region is the Datadog region + type: string + sendCollectionLatencyMetrics: + default: disabled + description: SendCollectionLatencyMetrics toggles sending collection + latency metrics + enum: + - enabled + - disabled + type: string + sendDatabaseMetrics: + default: disabled + description: |- + SendDatabaseMetrics toggles sending database metrics, + including database and collection names + enum: + - enabled + - disabled + type: string + required: + - apiKeySecretRef + - region + type: object + externalProjectRef: + description: |- + "externalProjectRef" holds the parent Atlas project ID. + Mutually exclusive with the "projectRef" field + properties: + id: + description: ID is the Atlas project ID + type: string + required: + - id + type: object + microsoftTeams: + description: MicrosoftTeams contains the config fields for Microsoft + Teams's Integration + properties: + urlSecretRef: + description: URLSecretRef holds the name of a secret containing + the microsoft teams secret URL + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + required: + - urlSecretRef + type: object + newRelic: + description: NewRelic contains the config fields for New Relic's Integration + properties: + credentialsSecretRef: + description: |- + CredentialsSecretRef holds the name of a secret containing new relic's credentials: + account id, license key, read and write tokens + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + required: + - credentialsSecretRef + type: object + opsGenie: + description: OpsGenie contains the config fields for Ops Genie's Integration + properties: + apiKeySecretRef: + description: APIKeySecretRef holds the name of a secret containing + Ops Genie's API key + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + region: + description: Region is the Ops Genie region + type: string + required: + - apiKeySecretRef + - region + type: object + pagerDuty: + description: PagerDuty contains the config fields for PagerDuty's + Integration + 
properties: + region: + description: Region is the Pager Duty region + type: string + serviceKeySecretRef: + description: ServiceKeySecretRef holds the name of a secret containing + Pager Duty service key + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + required: + - region + - serviceKeySecretRef + type: object + projectRef: + description: |- + "projectRef" is a reference to the parent AtlasProject resource. + Mutually exclusive with the "externalProjectRef" field + properties: + name: + description: Name is the name of the Kubernetes Resource + type: string + namespace: + description: Namespace is the namespace of the Kubernetes Resource + type: string + required: + - name + type: object + prometheus: + description: Prometheus contains the config fields for Prometheus's + Integration + properties: + enabled: + description: Enabled is true when Prometheus integration is enabled + type: string + prometheusCredentialsSecretRef: + description: |- + PrometheusCredentialsSecretRef holds the name of a secret containing the Prometheus + username & password + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + serviceDiscovery: + description: ServiceDiscovery to be used by Prometheus + enum: + - file + - http + type: string + required: + - enabled + - prometheusCredentialsSecretRef + - serviceDiscovery + type: object + slack: + description: Slack contains the config fields for Slack's Integration + properties: + apiTokenSecretRef: + description: APITokenSecretRef holds the name of a secret containing + the Slack API token + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + channelName: + description: ChannelName to be used by Prometheus + type: string + teamName: + description: TeamName flags whether or not Prometheus integration + is enabled + type: string + required: + - apiTokenSecretRef + - channelName + - teamName + type: object + type: + description: Type of the integration + enum: + - DATADOG + - MICROSOFT_TEAMS + - NEW_RELIC + - OPS_GENIE + - PAGER_DUTY + - PROMETHEUS + - SLACK + - VICTOR_OPS + - WEBHOOK + type: string + victorOps: + description: VictorOps contains the config fields for VictorOps's + Integration + properties: + apiKeySecretRef: + description: APIKeySecretRef is the name of a secret containing + Victor Ops API key + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + required: + - name + type: object + routingKey: + description: RoutingKey holds VictorOps routing key + type: string + required: + - apiKeySecretRef + - routingKey + type: object + webhook: + description: Webhook contains the config fields for Webhook's Integration + properties: + urlSecretRef: + description: URLSecretRef holds the name of a secret containing + Webhook URL and secret + properties: + name: + description: |- + Name of the resource being referred to + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: 
string + required: + - name + type: object + required: + - urlSecretRef + type: object + required: + - type + type: object + x-kubernetes-validations: + - message: must define only one project reference through externalProjectRef + or projectRef + rule: (has(self.externalProjectRef) && !has(self.projectRef)) || (!has(self.externalProjectRef) + && has(self.projectRef)) + - message: must define a local connection secret when referencing an external + project + rule: (has(self.externalProjectRef) && has(self.connectionSecret)) || + !has(self.externalProjectRef) + - message: must define a type of integration + rule: has(self.type) && self.type.size() != 0 + - message: only DATADOG type may set datadog fields + rule: '!has(self.datadog) || (self.type == ''DATADOG'' && has(self.datadog))' + - message: only MICROSOFT_TEAMS type may set microsoftTeams fields + rule: '!has(self.microsoftTeams) || (self.type == ''MICROSOFT_TEAMS'' + && has(self.microsoftTeams))' + - message: only NEW_RELIC type may set newRelic fields + rule: '!has(self.newRelic) || (self.type == ''NEW_RELIC'' && has(self.newRelic))' + - message: only OPS_GENIE type may set opsGenie fields + rule: '!has(self.opsGenie) || (self.type == ''OPS_GENIE'' && has(self.opsGenie))' + - message: only PROMETHEUS type may set prometheus fields + rule: '!has(self.prometheus) || (self.type == ''PROMETHEUS'' && has(self.prometheus))' + - message: only PAGER_DUTY type may set pagerDuty fields + rule: '!has(self.pagerDuty) || (self.type == ''PAGER_DUTY'' && has(self.pagerDuty))' + - message: only SLACK type may set slack fields + rule: '!has(self.slack) || (self.type == ''SLACK'' && has(self.slack))' + - message: only VICTOR_OPS type may set victorOps fields + rule: '!has(self.victorOps) || (self.type == ''VICTOR_OPS'' && has(self.victorOps))' + - message: only WEBHOOK type may set webhook fields + rule: '!has(self.webhook) || (self.type == ''WEBHOOK'' && has(self.webhook))' + status: + description: AtlasThirdPartyIntegrationStatus holds the status of an integration + properties: + conditions: + description: Conditions holding the status details + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + id: + description: ID of the third party integration resource in Atlas + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/internal/nextapi/v1/atlasthirdpartyintegration_types.go b/internal/nextapi/v1/atlasthirdpartyintegration_types.go new file mode 100644 index 0000000000..9ac45eba06 --- /dev/null +++ b/internal/nextapi/v1/atlasthirdpartyintegration_types.go @@ -0,0 +1,224 @@ +// Copyright 2025 MongoDB. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1/status" +) + +func init() { + SchemeBuilder.Register(&AtlasThirdPartyIntegration{}, &AtlasThirdPartyIntegrationList{}) +} + +// AtlasThirdPartyIntegration is the Schema for the atlas 3rd party integrations API. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status` +// +kubebuilder:subresource:status +// +groupName:=atlas.nextapi.mongodb.com +// +kubebuilder:resource:categories=atlas,shortName=atpi +type AtlasThirdPartyIntegration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AtlasThirdPartyIntegrationSpec `json:"spec,omitempty"` + Status status.AtlasThirdPartyIntegrationStatus `json:"status,omitempty"` +} + +func (np *AtlasThirdPartyIntegration) Credentials() *api.LocalObjectReference { + return np.Spec.ConnectionSecret +} + +func (atpi *AtlasThirdPartyIntegration) GetConditions() []metav1.Condition { + return atpi.Status.Conditions +} + +// +k8s:deepcopy-gen=true + +// AtlasThirdPartyIntegrationSpec contains the expected configuration for an integration +// +kubebuilder:validation:XValidation:rule="(has(self.externalProjectRef) && !has(self.projectRef)) || (!has(self.externalProjectRef) && has(self.projectRef))",message="must define only one project reference through externalProjectRef or projectRef" +// +kubebuilder:validation:XValidation:rule="(has(self.externalProjectRef) && has(self.connectionSecret)) || !has(self.externalProjectRef)",message="must define a local connection secret when referencing an external project" +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type.size() != 0",message="must define a type of integration" +// +kubebuilder:validation:XValidation:rule="!has(self.datadog) || (self.type == 'DATADOG' && has(self.datadog))",message="only DATADOG type may set datadog fields" +// +kubebuilder:validation:XValidation:rule="!has(self.microsoftTeams) || (self.type == 'MICROSOFT_TEAMS' && has(self.microsoftTeams))",message="only MICROSOFT_TEAMS type may set microsoftTeams fields" +// +kubebuilder:validation:XValidation:rule="!has(self.newRelic) || (self.type == 'NEW_RELIC' && has(self.newRelic))",message="only NEW_RELIC type may set newRelic fields" +// +kubebuilder:validation:XValidation:rule="!has(self.opsGenie) || (self.type == 'OPS_GENIE' && has(self.opsGenie))",message="only OPS_GENIE type may set opsGenie fields" +// +kubebuilder:validation:XValidation:rule="!has(self.prometheus) || (self.type == 'PROMETHEUS' && has(self.prometheus))",message="only PROMETHEUS type may set prometheus fields" +// +kubebuilder:validation:XValidation:rule="!has(self.pagerDuty) || (self.type == 'PAGER_DUTY' && has(self.pagerDuty))",message="only PAGER_DUTY type may set pagerDuty fields" +// +kubebuilder:validation:XValidation:rule="!has(self.slack) || (self.type == 'SLACK' && has(self.slack))",message="only SLACK type may set slack fields" +// +kubebuilder:validation:XValidation:rule="!has(self.victorOps) || (self.type == 'VICTOR_OPS' && has(self.victorOps))",message="only VICTOR_OPS type may set victorOps fields" +// +kubebuilder:validation:XValidation:rule="!has(self.webhook) || (self.type == 'WEBHOOK' && has(self.webhook))",message="only WEBHOOK type may set webhook fields" +type AtlasThirdPartyIntegrationSpec struct { + akov2.ProjectDualReference `json:",inline"` + + // Type of the integration + // +kubebuilder:validation:Enum:=DATADOG;MICROSOFT_TEAMS;NEW_RELIC;OPS_GENIE;PAGER_DUTY;PROMETHEUS;SLACK;VICTOR_OPS;WEBHOOK + // +kubebuilder:validation:Required + Type string `json:"type"` + + // Datadog contains the config fields for Datadog's Integration + // 
+kubebuilder:validation:Optional + Datadog *DatadogIntegration `json:"datadog,omitempty"` + + // MicrosoftTeams contains the config fields for Microsoft Teams's Integration + // +kubebuilder:validation:Optional + MicrosoftTeams *MicrosoftTeamsIntegration `json:"microsoftTeams,omitempty"` + + // NewRelic contains the config fields for New Relic's Integration + // +kubebuilder:validation:Optional + NewRelic *NewRelicIntegration `json:"newRelic,omitempty"` + + // OpsGenie contains the config fields for Ops Genie's Integration + // +kubebuilder:validation:Optional + OpsGenie *OpsGenieIntegration `json:"opsGenie,omitempty"` + + // PagerDuty contains the config fields for PagerDuty's Integration + // +kubebuilder:validation:Optional + PagerDuty *PagerDutyIntegration `json:"pagerDuty,omitempty"` + + // Prometheus contains the config fields for Prometheus's Integration + // +kubebuilder:validation:Optional + Prometheus *PrometheusIntegration `json:"prometheus,omitempty"` + + // Slack contains the config fields for Slack's Integration + // +kubebuilder:validation:Optional + Slack *SlackIntegration `json:"slack,omitempty"` + + // VictorOps contains the config fields for VictorOps's Integration + // +kubebuilder:validation:Optional + VictorOps *VictorOpsIntegration `json:"victorOps,omitempty"` + + // Webhook contains the config fields for Webhook's Integration + // +kubebuilder:validation:Optional + Webhook *WebhookIntegration `json:"webhook,omitempty"` +} + +type DatadogIntegration struct { + // APIKeySecretRef holds the name of a secret containing the datadog api key + // +kubebuilder:validation:Required + APIKeySecretRef api.LocalObjectReference `json:"apiKeySecretRef"` + + // Region is the Datadog region + // +kubebuilder:validation:Required + Region string `json:"region"` + + // SendCollectionLatencyMetrics toggles sending collection latency metrics + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum=enabled;disabled + // +kubebuilder:default:=disabled + SendCollectionLatencyMetrics *string `json:"sendCollectionLatencyMetrics"` + + // SendDatabaseMetrics toggles sending database metrics, + // including database and collection names + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum=enabled;disabled + // +kubebuilder:default:=disabled + SendDatabaseMetrics *string `json:"sendDatabaseMetrics"` +} + +type MicrosoftTeamsIntegration struct { + // URLSecretRef holds the name of a secret containing the microsoft teams secret URL + // +kubebuilder:validation:Required + URLSecretRef api.LocalObjectReference `json:"urlSecretRef"` +} + +type NewRelicIntegration struct { + // CredentialsSecretRef holds the name of a secret containing new relic's credentials: + // account id, license key, read and write tokens + // +kubebuilder:validation:Required + CredentialsSecretRef api.LocalObjectReference `json:"credentialsSecretRef"` +} + +type OpsGenieIntegration struct { + // APIKeySecretRef holds the name of a secret containing Ops Genie's API key + // +kubebuilder:validation:Required + APIKeySecretRef api.LocalObjectReference `json:"apiKeySecretRef"` + + // Region is the Ops Genie region + // +kubebuilder:validation:Required + Region string `json:"region"` +} + +type PagerDutyIntegration struct { + // ServiceKeySecretRef holds the name of a secret containing Pager Duty service key + // +kubebuilder:validation:Required + ServiceKeySecretRef api.LocalObjectReference `json:"serviceKeySecretRef"` + + // Region is the Pager Duty region + // +kubebuilder:validation:Required + 
Region string `json:"region"`
+}
+
+type PrometheusIntegration struct {
+	// Enabled is true when Prometheus integration is enabled
+	Enabled *string `json:"enabled"`
+
+	// PrometheusCredentialsSecretRef holds the name of a secret containing the Prometheus
+	// username & password
+	// +kubebuilder:validation:Required
+	PrometheusCredentialsSecretRef api.LocalObjectReference `json:"prometheusCredentialsSecretRef"`
+
+	// ServiceDiscovery to be used by Prometheus
+	// +kubebuilder:validation:Enum:=file;http
+	// +kubebuilder:validation:Required
+	ServiceDiscovery string `json:"serviceDiscovery"`
+}
+
+type SlackIntegration struct {
+	// APITokenSecretRef holds the name of a secret containing the Slack API token
+	// +kubebuilder:validation:Required
+	APITokenSecretRef api.LocalObjectReference `json:"apiTokenSecretRef"`
+
+	// ChannelName is the name of the Slack channel to send notifications to
+	// +kubebuilder:validation:Required
+	ChannelName string `json:"channelName"`
+
+	// TeamName is the name of the Slack team
+	// +kubebuilder:validation:Required
+	TeamName string `json:"teamName"`
+}
+
+type VictorOpsIntegration struct {
+	// RoutingKey holds VictorOps routing key
+	// +kubebuilder:validation:Required
+	RoutingKey string `json:"routingKey"`
+
+	// APIKeySecretRef is the name of a secret containing Victor Ops API key
+	// +kubebuilder:validation:Required
+	APIKeySecretRef api.LocalObjectReference `json:"apiKeySecretRef"`
+}
+
+type WebhookIntegration struct {
+	// URLSecretRef holds the name of a secret containing Webhook URL and secret
+	// +kubebuilder:validation:Required
+	URLSecretRef api.LocalObjectReference `json:"urlSecretRef"`
+}
+
+// +kubebuilder:object:root=true
+
+// AtlasThirdPartyIntegrationList contains a list of Atlas Integrations.
+type AtlasThirdPartyIntegrationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []AtlasThirdPartyIntegration `json:"items"`
+}
+
+func (i *AtlasThirdPartyIntegration) ProjectDualRef() *akov2.ProjectDualReference {
+	return &i.Spec.ProjectDualReference
+}
diff --git a/internal/nextapi/v1/atlasthirdpartyintegration_types_test.go b/internal/nextapi/v1/atlasthirdpartyintegration_types_test.go
new file mode 100644
index 0000000000..e82c90d92e
--- /dev/null
+++ b/internal/nextapi/v1/atlasthirdpartyintegration_types_test.go
@@ -0,0 +1,215 @@
+// Copyright 2025 MongoDB.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
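For orientation before the CEL validation tests below: a spec is valid when it carries exactly one project reference, a declared type, and only the configuration block matching that type. A minimal sketch of such an object follows; the package clause, resource and secret names are placeholders, not part of this patch.

package v1 // illustrative snippet only

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api"
	akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1"
	"github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common"
)

// minimalDatadogIntegration returns an object that passes the spec-level CEL
// rules: one project reference, a declared type, and only the matching config
// section. Resource and secret names are placeholders.
func minimalDatadogIntegration() *AtlasThirdPartyIntegration {
	return &AtlasThirdPartyIntegration{
		ObjectMeta: metav1.ObjectMeta{Name: "datadog-integration", Namespace: "default"},
		Spec: AtlasThirdPartyIntegrationSpec{
			ProjectDualReference: akov2.ProjectDualReference{
				ProjectRef: &common.ResourceRefNamespaced{Name: "my-project"},
			},
			Type: "DATADOG",
			Datadog: &DatadogIntegration{
				APIKeySecretRef: api.LocalObjectReference{Name: "datadog-api-key"},
				Region:          "US",
			},
		},
	}
}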
+ +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1/common" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/cel" +) + +func TestIntegrationCELChecks(t *testing.T) { + for _, tc := range []struct { + title string + obj *AtlasThirdPartyIntegration + expectedErrors []string + }{ + { + title: "fails with no type", + obj: &AtlasThirdPartyIntegration{}, + expectedErrors: []string{"spec: Invalid value: \"object\": must define a type of integration"}, + }, + { + title: "Datadog works", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "DATADOG", + Datadog: &DatadogIntegration{ + APIKeySecretRef: api.LocalObjectReference{ + Name: "api-key-secretname", + }, + Region: "US", + }, + }, + }, + }, + { + title: "Microsoft Teams works", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "MICROSOFT_TEAMS", + MicrosoftTeams: &MicrosoftTeamsIntegration{ + URLSecretRef: api.LocalObjectReference{ + Name: "url-secretname", + }, + }, + }, + }, + }, + { + title: "New Relic works", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "NEW_RELIC", + NewRelic: &NewRelicIntegration{ + CredentialsSecretRef: api.LocalObjectReference{ + Name: "credentials-secretname", + }, + }, + }, + }, + }, + { + title: "Ops Genie works", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "OPS_GENIE", + OpsGenie: &OpsGenieIntegration{ + APIKeySecretRef: api.LocalObjectReference{ + Name: "api-key-secretname", + }, + Region: "US", + }, + }, + }, + }, + { + title: "Pager Duty works", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "PAGER_DUTY", + PagerDuty: &PagerDutyIntegration{ + ServiceKeySecretRef: api.LocalObjectReference{ + Name: "service-key-secretname", + }, + Region: "US", + }, + }, + }, + }, + { + title: "Prometheus duty works", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "PROMETHEUS", + Prometheus: &PrometheusIntegration{ + PrometheusCredentialsSecretRef: api.LocalObjectReference{ + Name: "prometheus-credentials", + }, + ServiceDiscovery: "http", + }, + }, + }, + }, + { + title: "Slack works", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "SLACK", + Slack: &SlackIntegration{ + APITokenSecretRef: api.LocalObjectReference{ + Name: "api-tooken-secretname", + }, + ChannelName: "channel", + TeamName: "team", + }, + }, + }, + }, + { + title: "Victor ops works", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "VICTOR_OPS", + VictorOps: &VictorOpsIntegration{ + RoutingKey: "routing-key", + APIKeySecretRef: api.LocalObjectReference{ + Name: "keys-secetname", + }, + }, + }, + }, + }, + { + title: "Webhook works", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "WEBHOOK", + Webhook: &WebhookIntegration{ + URLSecretRef: api.LocalObjectReference{ + Name: "url-secretname", + }, + }, + }, + }, + }, + { + title: "Prometheus on Pager Duty type fails", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "PAGER_DUTY", + PagerDuty: &PagerDutyIntegration{}, + Prometheus: &PrometheusIntegration{ + PrometheusCredentialsSecretRef: api.LocalObjectReference{ + Name: 
"prometheus-credentials", + }, + ServiceDiscovery: "http", + }, + }, + }, + expectedErrors: []string{"spec: Invalid value: \"object\": only PROMETHEUS type may set prometheus fields"}, + }, + { + title: "Datadog on Webhook type fails", + obj: &AtlasThirdPartyIntegration{ + Spec: AtlasThirdPartyIntegrationSpec{ + Type: "WEBHOOK", + Datadog: &DatadogIntegration{ + APIKeySecretRef: api.LocalObjectReference{ + Name: "api-key-secretname", + }, + Region: "US", + }, + Webhook: &WebhookIntegration{ + URLSecretRef: api.LocalObjectReference{ + Name: "url-secretname", + }, + }, + }, + }, + expectedErrors: []string{"spec: Invalid value: \"object\": only DATADOG type may set datadog fields"}, + }, + } { + t.Run(tc.title, func(t *testing.T) { + // inject a project to avoid other CEL validations being hit + tc.obj.Spec.ProjectRef = &common.ResourceRefNamespaced{Name: "some-project"} + unstructuredObject, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&tc.obj) + require.NoError(t, err) + + crdPath := "../../next-crds/atlas.nextapi.mongodb.com_atlasthirdpartyintegrations.yaml" + validator, err := cel.VersionValidatorFromFile(t, crdPath, "v1") + assert.NoError(t, err) + errs := validator(unstructuredObject, nil) + + require.Equal(t, tc.expectedErrors, cel.ErrorListAsStrings(errs)) + }) + } +} diff --git a/internal/nextapi/v1/groupversion_info.go b/internal/nextapi/v1/groupversion_info.go new file mode 100644 index 0000000000..3f04c94726 --- /dev/null +++ b/internal/nextapi/v1/groupversion_info.go @@ -0,0 +1,33 @@ +// Copyright 2020 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +kubebuilder:object:generate=true +// +groupName=atlas.nextapi.mongodb.com +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "atlas.nextapi.mongodb.com", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/internal/nextapi/v1/status/atlasthirdpartyintegration.go b/internal/nextapi/v1/status/atlasthirdpartyintegration.go new file mode 100644 index 0000000000..1c36c2e501 --- /dev/null +++ b/internal/nextapi/v1/status/atlasthirdpartyintegration.go @@ -0,0 +1,43 @@ +// Copyright 2025 MongoDB. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package status + +// +k8s:deepcopy-gen=true + +// AtlasThirdPartyIntegrationStatus holds the status of an integration +type AtlasThirdPartyIntegrationStatus struct { + UnifiedStatus `json:",inline"` + + // ID of the third party integration resource in Atlas + ID string `json:"id,omitempty"` +} + +// +k8s:deepcopy-gen=false + +type IntegrationStatusOption func(status *AtlasThirdPartyIntegrationStatus) + +func NewAtlasThirdPartyIntegrationStatus(options ...IntegrationStatusOption) AtlasThirdPartyIntegrationStatus { + result := &AtlasThirdPartyIntegrationStatus{} + for i := range options { + options[i](result) + } + return *result +} + +func WithIntegrationID(id string) IntegrationStatusOption { + return func(i *AtlasThirdPartyIntegrationStatus) { + i.ID = id + } +} diff --git a/internal/nextapi/v1/status/unifiedstatus.go b/internal/nextapi/v1/status/unifiedstatus.go new file mode 100644 index 0000000000..727b16176a --- /dev/null +++ b/internal/nextapi/v1/status/unifiedstatus.go @@ -0,0 +1,27 @@ +// Copyright 2025 MongoDB. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package status + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen=true + +// UnifiedStatus is shared by CRDs with Unified State Machine support +type UnifiedStatus struct { + // Conditions holding the status details + Conditions []metav1.Condition `json:"conditions,omitempty"` +} diff --git a/internal/nextapi/v1/status/zz_generated.deepcopy.go b/internal/nextapi/v1/status/zz_generated.deepcopy.go new file mode 100644 index 0000000000..0d840bb901 --- /dev/null +++ b/internal/nextapi/v1/status/zz_generated.deepcopy.go @@ -0,0 +1,61 @@ +//go:build !ignore_autogenerated + +//Copyright 2025 MongoDB Inc +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. + +package status + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasThirdPartyIntegrationStatus) DeepCopyInto(out *AtlasThirdPartyIntegrationStatus) { + *out = *in + in.UnifiedStatus.DeepCopyInto(&out.UnifiedStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasThirdPartyIntegrationStatus. 
+func (in *AtlasThirdPartyIntegrationStatus) DeepCopy() *AtlasThirdPartyIntegrationStatus { + if in == nil { + return nil + } + out := new(AtlasThirdPartyIntegrationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnifiedStatus) DeepCopyInto(out *UnifiedStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnifiedStatus. +func (in *UnifiedStatus) DeepCopy() *UnifiedStatus { + if in == nil { + return nil + } + out := new(UnifiedStatus) + in.DeepCopyInto(out) + return out +} diff --git a/internal/nextapi/v1/zz_generated.deepcopy.go b/internal/nextapi/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a3a2761722 --- /dev/null +++ b/internal/nextapi/v1/zz_generated.deepcopy.go @@ -0,0 +1,302 @@ +//go:build !ignore_autogenerated + +//Copyright 2025 MongoDB Inc +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasThirdPartyIntegration) DeepCopyInto(out *AtlasThirdPartyIntegration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasThirdPartyIntegration. +func (in *AtlasThirdPartyIntegration) DeepCopy() *AtlasThirdPartyIntegration { + if in == nil { + return nil + } + out := new(AtlasThirdPartyIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AtlasThirdPartyIntegration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasThirdPartyIntegrationList) DeepCopyInto(out *AtlasThirdPartyIntegrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AtlasThirdPartyIntegration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasThirdPartyIntegrationList. 
+func (in *AtlasThirdPartyIntegrationList) DeepCopy() *AtlasThirdPartyIntegrationList { + if in == nil { + return nil + } + out := new(AtlasThirdPartyIntegrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AtlasThirdPartyIntegrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AtlasThirdPartyIntegrationSpec) DeepCopyInto(out *AtlasThirdPartyIntegrationSpec) { + *out = *in + in.ProjectDualReference.DeepCopyInto(&out.ProjectDualReference) + if in.Datadog != nil { + in, out := &in.Datadog, &out.Datadog + *out = new(DatadogIntegration) + (*in).DeepCopyInto(*out) + } + if in.MicrosoftTeams != nil { + in, out := &in.MicrosoftTeams, &out.MicrosoftTeams + *out = new(MicrosoftTeamsIntegration) + **out = **in + } + if in.NewRelic != nil { + in, out := &in.NewRelic, &out.NewRelic + *out = new(NewRelicIntegration) + **out = **in + } + if in.OpsGenie != nil { + in, out := &in.OpsGenie, &out.OpsGenie + *out = new(OpsGenieIntegration) + **out = **in + } + if in.PagerDuty != nil { + in, out := &in.PagerDuty, &out.PagerDuty + *out = new(PagerDutyIntegration) + **out = **in + } + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(PrometheusIntegration) + (*in).DeepCopyInto(*out) + } + if in.Slack != nil { + in, out := &in.Slack, &out.Slack + *out = new(SlackIntegration) + **out = **in + } + if in.VictorOps != nil { + in, out := &in.VictorOps, &out.VictorOps + *out = new(VictorOpsIntegration) + **out = **in + } + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookIntegration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AtlasThirdPartyIntegrationSpec. +func (in *AtlasThirdPartyIntegrationSpec) DeepCopy() *AtlasThirdPartyIntegrationSpec { + if in == nil { + return nil + } + out := new(AtlasThirdPartyIntegrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatadogIntegration) DeepCopyInto(out *DatadogIntegration) { + *out = *in + out.APIKeySecretRef = in.APIKeySecretRef + if in.SendCollectionLatencyMetrics != nil { + in, out := &in.SendCollectionLatencyMetrics, &out.SendCollectionLatencyMetrics + *out = new(string) + **out = **in + } + if in.SendDatabaseMetrics != nil { + in, out := &in.SendDatabaseMetrics, &out.SendDatabaseMetrics + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatadogIntegration. +func (in *DatadogIntegration) DeepCopy() *DatadogIntegration { + if in == nil { + return nil + } + out := new(DatadogIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MicrosoftTeamsIntegration) DeepCopyInto(out *MicrosoftTeamsIntegration) { + *out = *in + out.URLSecretRef = in.URLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicrosoftTeamsIntegration. 
+func (in *MicrosoftTeamsIntegration) DeepCopy() *MicrosoftTeamsIntegration { + if in == nil { + return nil + } + out := new(MicrosoftTeamsIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NewRelicIntegration) DeepCopyInto(out *NewRelicIntegration) { + *out = *in + out.CredentialsSecretRef = in.CredentialsSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewRelicIntegration. +func (in *NewRelicIntegration) DeepCopy() *NewRelicIntegration { + if in == nil { + return nil + } + out := new(NewRelicIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpsGenieIntegration) DeepCopyInto(out *OpsGenieIntegration) { + *out = *in + out.APIKeySecretRef = in.APIKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpsGenieIntegration. +func (in *OpsGenieIntegration) DeepCopy() *OpsGenieIntegration { + if in == nil { + return nil + } + out := new(OpsGenieIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PagerDutyIntegration) DeepCopyInto(out *PagerDutyIntegration) { + *out = *in + out.ServiceKeySecretRef = in.ServiceKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PagerDutyIntegration. +func (in *PagerDutyIntegration) DeepCopy() *PagerDutyIntegration { + if in == nil { + return nil + } + out := new(PagerDutyIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusIntegration) DeepCopyInto(out *PrometheusIntegration) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(string) + **out = **in + } + out.PrometheusCredentialsSecretRef = in.PrometheusCredentialsSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusIntegration. +func (in *PrometheusIntegration) DeepCopy() *PrometheusIntegration { + if in == nil { + return nil + } + out := new(PrometheusIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlackIntegration) DeepCopyInto(out *SlackIntegration) { + *out = *in + out.APITokenSecretRef = in.APITokenSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackIntegration. +func (in *SlackIntegration) DeepCopy() *SlackIntegration { + if in == nil { + return nil + } + out := new(SlackIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VictorOpsIntegration) DeepCopyInto(out *VictorOpsIntegration) { + *out = *in + out.APIKeySecretRef = in.APIKeySecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VictorOpsIntegration. 
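Aside: the generated helpers matter because several spec fields are string pointers (the "enabled"/"disabled" toggles), so a plain struct assignment aliases them. A small illustrative sketch of the difference, using the types above with placeholder values:

```go
package example

import (
	akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1"
	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer"
)

// aliasingDemo shows why callers deep-copy before mutating a fetched object.
func aliasingDemo() (orig, deep *akov2next.DatadogIntegration) {
	orig = &akov2next.DatadogIntegration{
		Region:              "US",
		SendDatabaseMetrics: pointer.MakePtr("enabled"),
	}

	shallow := *orig                          // copies the *string pointer itself
	*shallow.SendDatabaseMetrics = "disabled" // ...so this also changes orig

	deep = orig.DeepCopy()                // fresh backing values
	*deep.SendDatabaseMetrics = "enabled" // orig is unaffected by this write
	return orig, deep
}
```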
+func (in *VictorOpsIntegration) DeepCopy() *VictorOpsIntegration { + if in == nil { + return nil + } + out := new(VictorOpsIntegration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookIntegration) DeepCopyInto(out *WebhookIntegration) { + *out = *in + out.URLSecretRef = in.URLSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookIntegration. +func (in *WebhookIntegration) DeepCopy() *WebhookIntegration { + if in == nil { + return nil + } + out := new(WebhookIntegration) + in.DeepCopyInto(out) + return out +} diff --git a/internal/operator/builder.go b/internal/operator/builder.go index 07a059d997..54955f1c8f 100644 --- a/internal/operator/builder.go +++ b/internal/operator/builder.go @@ -81,14 +81,15 @@ type Builder struct { leaderElection bool leaderElectionID string - atlasDomain string - predicates []predicate.Predicate - apiSecret client.ObjectKey - atlasProvider atlas.Provider - featureFlags *featureflags.FeatureFlags - deletionProtection bool - skipNameValidation bool - dryRun bool + atlasDomain string + predicates []predicate.Predicate + apiSecret client.ObjectKey + atlasProvider atlas.Provider + featureFlags *featureflags.FeatureFlags + deletionProtection bool + skipNameValidation bool + dryRun bool + experimentalReconcilers bool } func (b *Builder) WithConfig(config *rest.Config) *Builder { @@ -175,6 +176,11 @@ func (b *Builder) WithDryRun(dryRun bool) *Builder { return b } +func (b *Builder) WithExperimentalReconcilers(experimentalReconcilers bool) *Builder { + b.experimentalReconcilers = experimentalReconcilers + return b +} + // Build builds the cluster object and configures operator controllers func (b *Builder) Build(ctx context.Context) (cluster.Cluster, error) { mergeDefaults(b) @@ -202,7 +208,15 @@ func (b *Builder) Build(ctx context.Context) (cluster.Cluster, error) { } } - controllerRegistry := controller.NewRegistry(b.predicates, b.deletionProtection, b.logger, b.independentSyncPeriod, b.featureFlags, b.apiSecret) + controllerRegistry := controller.NewRegistry( + b.predicates, + b.deletionProtection, + b.logger, + b.independentSyncPeriod, + b.featureFlags, + b.apiSecret, + b.experimentalReconcilers, + ) var akoCluster cluster.Cluster if b.dryRun { diff --git a/internal/translation/networkpeering/conversion_test.go b/internal/translation/networkpeering/conversion_test.go index 4a26e1083d..5180ddac70 100644 --- a/internal/translation/networkpeering/conversion_test.go +++ b/internal/translation/networkpeering/conversion_test.go @@ -76,7 +76,7 @@ func FuzzConvertListOfConnections(f *testing.F) { func fuzzPeer(fuzzer *gofuzz.Fuzzer, index uint, peer *NetworkPeer) { fuzzer.NilChance(0).Fuzz(peer) - peer.ID = "" // ID is provided by Atlas, cannoy complete a roundtrip + peer.ID = "" // ID is provided by Atlas, cannot complete a roundtrip peer.Provider = providerNames[index%3] // provider can only be one of 3 AWS, AZURE or GCP switch peer.Provider { // only the selected provider config is expected case string(provider.ProviderAWS): diff --git a/internal/translation/thirdpartyintegration/conversion.go b/internal/translation/thirdpartyintegration/conversion.go new file mode 100644 index 0000000000..342a40a39c --- /dev/null +++ b/internal/translation/thirdpartyintegration/conversion.go @@ -0,0 +1,345 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package thirdpartyintegration + +import ( + "errors" + "fmt" + "strings" + + "go.mongodb.org/atlas-sdk/v20250312002/admin" + + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" +) + +var ( + // ErrUnsupportedIntegrationType when the integration type is not supported + ErrUnsupportedIntegrationType = errors.New("unsupported integration type") +) + +func NewFromSpec(crd *akov2next.AtlasThirdPartyIntegration, secrets map[string][]byte) (*ThirdPartyIntegration, error) { + tpi := ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: crd.Spec, + ID: crd.Status.ID, + } + switch tpi.Type { + case "DATADOG": + tpi.DatadogSecrets = &DatadogSecrets{ + APIKey: string(secrets["apiKey"]), + } + case "MICROSOFT_TEAMS": + tpi.MicrosoftTeamsSecrets = &MicrosoftTeamsSecrets{ + WebhookUrl: string(secrets["webhookURL"]), + } + case "NEW_RELIC": + tpi.NewRelicSecrets = &NewRelicSecrets{ + AccountID: string(secrets["accountId"]), + LicenseKey: string(secrets["licenseKey"]), + ReadToken: string(secrets["readToken"]), + WriteToken: string(secrets["writeToken"]), + } + case "OPS_GENIE": + tpi.OpsGenieSecrets = &OpsGenieSecrets{ + APIKey: string(secrets["apiKey"]), + } + case "PAGER_DUTY": + tpi.PagerDutySecrets = &PagerDutySecrets{ + ServiceKey: string(secrets["serviceKey"]), + } + case "PROMETHEUS": + tpi.PrometheusSecrets = &PrometheusSecrets{ + Username: string(secrets["username"]), + Password: string(secrets["password"]), + } + case "SLACK": + tpi.SlackSecrets = &SlackSecrets{ + APIToken: string(secrets["apiToken"]), + } + case "VICTOR_OPS": + tpi.VictorOpsSecrets = &VictorOpsSecrets{ + APIKey: string(secrets["apiKey"]), + } + case "WEBHOOK": + tpi.WebhookSecrets = &WebhookSecrets{ + URL: string(secrets["url"]), + Secret: string(secrets["secret"]), + } + default: + return nil, fmt.Errorf("%w %v", ErrUnsupportedIntegrationType, tpi.Type) + } + return &tpi, nil +} + +type ThirdPartyIntegration struct { + akov2next.AtlasThirdPartyIntegrationSpec + ID string + DatadogSecrets *DatadogSecrets + MicrosoftTeamsSecrets *MicrosoftTeamsSecrets + NewRelicSecrets *NewRelicSecrets + OpsGenieSecrets *OpsGenieSecrets + PagerDutySecrets *PagerDutySecrets + PrometheusSecrets *PrometheusSecrets + SlackSecrets *SlackSecrets + VictorOpsSecrets *VictorOpsSecrets + WebhookSecrets *WebhookSecrets +} + +// Comparable returns a copy of ThirdPartyIntegration without secrets, +// so that it is comparable +func (tpi *ThirdPartyIntegration) Comparable() *ThirdPartyIntegration { + comparable := &ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: *tpi.AtlasThirdPartyIntegrationSpec.DeepCopy(), + ID: tpi.ID, + } + comparable.AtlasThirdPartyIntegrationSpec.ConnectionSecret = nil + comparable.AtlasThirdPartyIntegrationSpec.ExternalProjectRef = nil + comparable.AtlasThirdPartyIntegrationSpec.ProjectRef = nil + if comparable.AtlasThirdPartyIntegrationSpec.Datadog != nil { + comparable.AtlasThirdPartyIntegrationSpec.Datadog.APIKeySecretRef.Name 
= "" + } + if comparable.AtlasThirdPartyIntegrationSpec.MicrosoftTeams != nil { + comparable.AtlasThirdPartyIntegrationSpec.MicrosoftTeams.URLSecretRef.Name = "" + } + if comparable.AtlasThirdPartyIntegrationSpec.NewRelic != nil { + comparable.AtlasThirdPartyIntegrationSpec.NewRelic.CredentialsSecretRef.Name = "" + } + if comparable.AtlasThirdPartyIntegrationSpec.OpsGenie != nil { + comparable.AtlasThirdPartyIntegrationSpec.OpsGenie.APIKeySecretRef.Name = "" + } + if comparable.AtlasThirdPartyIntegrationSpec.PagerDuty != nil { + comparable.AtlasThirdPartyIntegrationSpec.PagerDuty.ServiceKeySecretRef.Name = "" + } + if comparable.AtlasThirdPartyIntegrationSpec.Prometheus != nil { + comparable.AtlasThirdPartyIntegrationSpec.Prometheus.PrometheusCredentialsSecretRef.Name = "" + } + if comparable.AtlasThirdPartyIntegrationSpec.Slack != nil { + comparable.AtlasThirdPartyIntegrationSpec.Slack.APITokenSecretRef.Name = "" + } + if comparable.AtlasThirdPartyIntegrationSpec.VictorOps != nil { + comparable.AtlasThirdPartyIntegrationSpec.VictorOps.APIKeySecretRef.Name = "" + } + if comparable.AtlasThirdPartyIntegrationSpec.Webhook != nil { + comparable.AtlasThirdPartyIntegrationSpec.Webhook.URLSecretRef.Name = "" + } + return comparable +} + +type DatadogSecrets struct { + APIKey string +} + +type MicrosoftTeamsSecrets struct { + WebhookUrl string +} + +type NewRelicSecrets struct { + AccountID string + LicenseKey string + ReadToken string + WriteToken string +} + +type OpsGenieSecrets struct { + APIKey string +} + +type PagerDutySecrets struct { + ServiceKey string +} + +type PrometheusSecrets struct { + Username string + Password string +} + +type SlackSecrets struct { + APIToken string +} + +type VictorOpsSecrets struct { + APIKey string +} + +type WebhookSecrets struct { + URL string + Secret string +} + +func toAtlas(tpi *ThirdPartyIntegration) (*admin.ThirdPartyIntegration, error) { + ai := &admin.ThirdPartyIntegration{ + Id: &tpi.ID, + Type: &tpi.Type, + } + switch tpi.Type { + case "DATADOG": + if tpi.Datadog == nil || tpi.DatadogSecrets == nil { + return nil, errors.New("missing Datadog settings") + } + ai.ApiKey = &tpi.DatadogSecrets.APIKey + ai.Region = &tpi.Datadog.Region + ai.SendCollectionLatencyMetrics = pointer.MakePtr(isEnabled(tpi.Datadog.SendCollectionLatencyMetrics)) + ai.SendDatabaseMetrics = pointer.MakePtr(isEnabled(tpi.Datadog.SendDatabaseMetrics)) + case "MICROSOFT_TEAMS": + if tpi.MicrosoftTeams == nil || tpi.MicrosoftTeamsSecrets == nil { + return nil, errors.New("missing Microsoft teams settings") + } + ai.MicrosoftTeamsWebhookUrl = &tpi.MicrosoftTeamsSecrets.WebhookUrl + case "NEW_RELIC": + if tpi.NewRelic == nil || tpi.NewRelicSecrets == nil { + return nil, errors.New("missing New Relic settings") + } + ai.AccountId = &tpi.NewRelicSecrets.AccountID + ai.LicenseKey = &tpi.NewRelicSecrets.LicenseKey + ai.ReadToken = &tpi.NewRelicSecrets.ReadToken + ai.WriteToken = &tpi.NewRelicSecrets.WriteToken + case "OPS_GENIE": + if tpi.OpsGenie == nil || tpi.OpsGenieSecrets == nil { + return nil, errors.New("missing OpsGenie settings") + } + ai.ApiKey = &tpi.OpsGenieSecrets.APIKey + ai.Region = &tpi.OpsGenie.Region + case "PAGER_DUTY": + if tpi.PagerDuty == nil || tpi.PagerDutySecrets == nil { + return nil, errors.New("missing Pager Duty settings") + } + ai.ServiceKey = &tpi.PagerDutySecrets.ServiceKey + ai.Region = &tpi.PagerDuty.Region + case "PROMETHEUS": + if tpi.Prometheus == nil || tpi.PrometheusSecrets == nil { + return nil, errors.New("missing Prometheus settings") + } + 
ai.Enabled = pointer.MakePtr(isEnabled(tpi.Prometheus.Enabled)) + ai.Username = &tpi.PrometheusSecrets.Username + ai.Password = &tpi.PrometheusSecrets.Password + ai.ServiceDiscovery = &tpi.Prometheus.ServiceDiscovery + case "SLACK": + if tpi.Slack == nil || tpi.SlackSecrets == nil { + return nil, errors.New("missing Slack settings") + } + ai.ApiToken = &tpi.SlackSecrets.APIToken + ai.ChannelName = &tpi.Slack.ChannelName + ai.TeamName = &tpi.Slack.TeamName + case "VICTOR_OPS": + if tpi.VictorOps == nil || tpi.VictorOpsSecrets == nil { + return nil, errors.New("missing Victor Ops settings") + } + ai.ApiKey = &tpi.VictorOpsSecrets.APIKey + ai.RoutingKey = &tpi.VictorOps.RoutingKey + case "WEBHOOK": + if tpi.Webhook == nil || tpi.WebhookSecrets == nil { + return nil, errors.New("missing Webhook settings") + } + ai.Url = &tpi.WebhookSecrets.URL + ai.Secret = &tpi.WebhookSecrets.Secret + default: + return nil, fmt.Errorf("%w %v", ErrUnsupportedIntegrationType, tpi.Type) + } + return ai, nil +} + +func fromAtlas(ai *admin.ThirdPartyIntegration) (*ThirdPartyIntegration, error) { + tpi := &ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: ai.GetType(), + }, + ID: ai.GetId(), + } + switch ai.GetType() { + case "DATADOG": + tpi.DatadogSecrets = &DatadogSecrets{ + APIKey: ai.GetApiKey(), + } + tpi.Datadog = &akov2next.DatadogIntegration{ + Region: ai.GetRegion(), + SendCollectionLatencyMetrics: encodeEnabled(ai.GetSendCollectionLatencyMetrics()), + SendDatabaseMetrics: encodeEnabled(ai.GetSendDatabaseMetrics()), + } + case "MICROSOFT_TEAMS": + tpi.MicrosoftTeamsSecrets = &MicrosoftTeamsSecrets{ + WebhookUrl: ai.GetMicrosoftTeamsWebhookUrl(), + } + tpi.MicrosoftTeams = &akov2next.MicrosoftTeamsIntegration{} + case "NEW_RELIC": + tpi.NewRelicSecrets = &NewRelicSecrets{ + AccountID: ai.GetAccountId(), + LicenseKey: ai.GetLicenseKey(), + ReadToken: ai.GetReadToken(), + WriteToken: ai.GetWriteToken(), + } + tpi.NewRelic = &akov2next.NewRelicIntegration{} + case "OPS_GENIE": + tpi.OpsGenieSecrets = &OpsGenieSecrets{ + APIKey: ai.GetApiKey(), + } + tpi.OpsGenie = &akov2next.OpsGenieIntegration{ + Region: ai.GetRegion(), + } + case "PAGER_DUTY": + tpi.PagerDutySecrets = &PagerDutySecrets{ + ServiceKey: ai.GetServiceKey(), + } + tpi.PagerDuty = &akov2next.PagerDutyIntegration{ + Region: ai.GetRegion(), + } + case "PROMETHEUS": + tpi.Prometheus = &akov2next.PrometheusIntegration{ + Enabled: encodeEnabled(ai.GetEnabled()), + ServiceDiscovery: ai.GetServiceDiscovery(), + } + tpi.PrometheusSecrets = &PrometheusSecrets{ + Username: ai.GetUsername(), + Password: ai.GetPassword(), + } + case "SLACK": + tpi.Slack = &akov2next.SlackIntegration{ + ChannelName: ai.GetChannelName(), + TeamName: ai.GetTeamName(), + } + tpi.SlackSecrets = &SlackSecrets{ + APIToken: ai.GetApiToken(), + } + case "VICTOR_OPS": + tpi.VictorOps = &akov2next.VictorOpsIntegration{ + RoutingKey: ai.GetRoutingKey(), + } + tpi.VictorOpsSecrets = &VictorOpsSecrets{ + APIKey: ai.GetApiKey(), + } + case "WEBHOOK": + tpi.Webhook = &akov2next.WebhookIntegration{} + tpi.WebhookSecrets = &WebhookSecrets{ + URL: ai.GetUrl(), + Secret: ai.GetSecret(), + } + default: + return nil, fmt.Errorf("%w %v", ErrUnsupportedIntegrationType, tpi.Type) + } + return tpi, nil +} + +func isEnabled(field *string) bool { + if field == nil { + return false + } + return strings.EqualFold(*field, "enabled") +} + +func encodeEnabled(on bool) *string { + if on { + return pointer.MakePtr("enabled") + } + return 
pointer.MakePtr("disabled") +} diff --git a/internal/translation/thirdpartyintegration/conversion_test.go b/internal/translation/thirdpartyintegration/conversion_test.go new file mode 100644 index 0000000000..17de2b7308 --- /dev/null +++ b/internal/translation/thirdpartyintegration/conversion_test.go @@ -0,0 +1,137 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package thirdpartyintegration + +import ( + "fmt" + "testing" + + gofuzz "github.com/google/gofuzz" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" +) + +const fuzzIterations = 100 + +var integrationTypes = []string{ + "DATADOG", + "MICROSOFT_TEAMS", + "NEW_RELIC", + "OPS_GENIE", + "PAGER_DUTY", + "PROMETHEUS", + "SLACK", + "VICTOR_OPS", + "WEBHOOK", +} + +var enabledValues = []*string{ + pointer.MakePtr("enabled"), + pointer.MakePtr("disabled"), +} + +func FuzzConvertIntegrations(f *testing.F) { + for i := uint(0); i < fuzzIterations; i++ { + f.Add(([]byte)(fmt.Sprintf("seed sample %x", i)), i) + } + f.Fuzz(func(t *testing.T, data []byte, index uint) { + integration := ThirdPartyIntegration{} + fuzzIntegration(gofuzz.NewFromGoFuzz(data), index, &integration) + atlasIntegration, err := toAtlas(&integration) + require.NoError(t, err) + result, err := fromAtlas(atlasIntegration) + require.NoError(t, err) + assert.Equal(t, &integration, result, "failed for index=%d", index) + }) +} + +func fuzzIntegration(fuzzer *gofuzz.Fuzzer, index uint, integration *ThirdPartyIntegration) { + fuzzer.NilChance(0).Fuzz(integration) + integration.ID = "" // ID is provided by Atlas, cannot complete a roundtrip + integration.ProjectDualReference.ExternalProjectRef = nil + integration.ProjectDualReference.ProjectRef = nil + integration.ProjectDualReference.ConnectionSecret = nil + + integration.Type = integrationTypes[index%uint(len(integrationTypes))] // type by index + + if integration.Type == "DATADOG" { + index2 := index + 1 + integration.Datadog.SendCollectionLatencyMetrics = enabledValues[index%uint(len(enabledValues))] + integration.Datadog.SendDatabaseMetrics = enabledValues[index2%uint(len(enabledValues))] + integration.Datadog.APIKeySecretRef.Name = "" // not part of the atlas conversion roundtrip + } else { + integration.Datadog = nil + integration.DatadogSecrets = nil + } + + if integration.Type == "MICROSOFT_TEAMS" { + integration.MicrosoftTeams.URLSecretRef.Name = "" // not part of the atlas conversion roundtrip + } else { + integration.MicrosoftTeams = nil + integration.MicrosoftTeamsSecrets = nil + } + + if integration.Type == "NEW_RELIC" { + integration.NewRelic.CredentialsSecretRef.Name = "" // not part of the atlas conversion roundtrip + } else { + integration.NewRelic = nil + integration.NewRelicSecrets = nil + } + + if integration.Type == "OPS_GENIE" { + integration.OpsGenie.APIKeySecretRef.Name = "" // not part of the atlas conversion roundtrip + } else { + integration.OpsGenie = nil + 
integration.OpsGenieSecrets = nil + } + + if integration.Type == "PAGER_DUTY" { + integration.PagerDuty.ServiceKeySecretRef.Name = "" // not part of the atlas conversion roundtrip + } else { + integration.PagerDuty = nil + integration.PagerDutySecrets = nil + } + + if integration.Type == "PROMETHEUS" { + integration.Prometheus.Enabled = enabledValues[index%uint(len(enabledValues))] + integration.Prometheus.PrometheusCredentialsSecretRef.Name = "" // not part of the atlas conversion roundtrip + } else { + integration.Prometheus = nil + integration.PrometheusSecrets = nil + } + + if integration.Type == "SLACK" { + integration.Slack.APITokenSecretRef.Name = "" // not part of the atlas conversion roundtrip + } else { + integration.Slack = nil + integration.SlackSecrets = nil + } + + if integration.Type == "VICTOR_OPS" { + integration.VictorOps.APIKeySecretRef.Name = "" // not part of the atlas conversion roundtrip + } else { + integration.VictorOps = nil + integration.VictorOpsSecrets = nil + } + + if integration.Type == "WEBHOOK" { + integration.Webhook.URLSecretRef.Name = "" // not part of the atlas conversion roundtrip + } else { + integration.Webhook = nil + integration.WebhookSecrets = nil + } +} diff --git a/internal/translation/thirdpartyintegration/thirdpartyintegration.go b/internal/translation/thirdpartyintegration/thirdpartyintegration.go new file mode 100644 index 0000000000..7837e98c00 --- /dev/null +++ b/internal/translation/thirdpartyintegration/thirdpartyintegration.go @@ -0,0 +1,117 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
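For context on the conversion layer above: `NewFromSpec` pairs the CR spec with the raw data of the referenced Kubernetes secret, keyed by well-known names per integration type. A minimal sketch (names and values are placeholders; the `"apiKey"` key follows the DATADOG case of `NewFromSpec`):

```go
package example

import (
	"fmt"

	akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1"
	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/thirdpartyintegration"
)

// example builds the translation-layer model from a CR and its secret data.
func example() error {
	crd := &akov2next.AtlasThirdPartyIntegration{
		Spec: akov2next.AtlasThirdPartyIntegrationSpec{
			Type:    "DATADOG",
			Datadog: &akov2next.DatadogIntegration{Region: "US"},
		},
	}
	secretData := map[string][]byte{"apiKey": []byte("placeholder-datadog-key")}

	tpi, err := thirdpartyintegration.NewFromSpec(crd, secretData)
	if err != nil {
		// An unknown Spec.Type surfaces ErrUnsupportedIntegrationType here.
		return err
	}
	fmt.Println(tpi.Type, tpi.DatadogSecrets.APIKey)
	return nil
}
```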
+
+package thirdpartyintegration
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "go.mongodb.org/atlas-sdk/v20250312002/admin"
+
+ "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas"
+)
+
+var (
+ // ErrNotFound is returned when the expected integration is not found
+ ErrNotFound = errors.New("integration not found")
+)
+
+type ThirdPartyIntegrationService interface {
+ Create(ctx context.Context, projectID string, integration *ThirdPartyIntegration) (*ThirdPartyIntegration, error)
+ Get(ctx context.Context, projectID, integrationType string) (*ThirdPartyIntegration, error)
+ Update(ctx context.Context, projectID string, integration *ThirdPartyIntegration) (*ThirdPartyIntegration, error)
+ Delete(ctx context.Context, projectID, integrationType string) error
+}
+
+func NewThirdPartyIntegrationServiceFromClientSet(clientSet *atlas.ClientSet) ThirdPartyIntegrationService {
+ return NewThirdPartyIntegrationService(clientSet.SdkClient20250312002.ThirdPartyIntegrationsApi)
+}
+
+func NewThirdPartyIntegrationService(integrationsAPI admin.ThirdPartyIntegrationsApi) ThirdPartyIntegrationService {
+ return &thirdPartyIntegration{integrationsAPI: integrationsAPI}
+}
+
+type thirdPartyIntegration struct {
+ integrationsAPI admin.ThirdPartyIntegrationsApi
+}
+
+func (tpi *thirdPartyIntegration) Create(ctx context.Context, projectID string, integration *ThirdPartyIntegration) (*ThirdPartyIntegration, error) {
+ atlasIntegration, err := toAtlas(integration)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert integration to Atlas: %w", err)
+ }
+ integrationPages, _, err := tpi.integrationsAPI.CreateThirdPartyIntegration(
+ ctx, atlasIntegration.GetType(), projectID, atlasIntegration).Execute()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create integration from config %v: %w", integration, err)
+ }
+ if len(integrationPages.GetResults()) != 1 {
+ return nil, fmt.Errorf("expected an integration result reply but got %d", len(*integrationPages.Results))
+ }
+
+ newIntegration, err := fromAtlas(&integrationPages.GetResults()[0])
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert integration from Atlas: %w", err)
+ }
+ return newIntegration, nil
+}
+
+func (tpi *thirdPartyIntegration) Get(ctx context.Context, projectID, integrationType string) (*ThirdPartyIntegration, error) {
+ atlasIntegration, _, err := tpi.integrationsAPI.GetThirdPartyIntegration(ctx, projectID, integrationType).Execute()
+ if err != nil {
+ if admin.IsErrorCode(err, "INTEGRATION_NOT_CONFIGURED") {
+ return nil, ErrNotFound
+ }
+ return nil, fmt.Errorf("failed to get integration type %v for project %v: %w", integrationType, projectID, err)
+ }
+ newIntegration, err := fromAtlas(atlasIntegration)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert integration from Atlas: %w", err)
+ }
+ return newIntegration, nil
+}
+
+func (tpi *thirdPartyIntegration) Update(ctx context.Context, projectID string, integration *ThirdPartyIntegration) (*ThirdPartyIntegration, error) {
+ atlasIntegration, err := toAtlas(integration)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert integration to Atlas: %w", err)
+ }
+ integrationPages, _, err := tpi.integrationsAPI.UpdateThirdPartyIntegration(
+ ctx, atlasIntegration.GetType(), projectID, atlasIntegration).Execute()
+ if err != nil {
+ return nil, fmt.Errorf("failed to update integration with config %v: %w", integration, err)
+ }
+ if len(integrationPages.GetResults()) != 1 {
+ return nil, fmt.Errorf("expected an integration result reply but got %d",
len(*integrationPages.Results)) + } + + newIntegration, err := fromAtlas(&integrationPages.GetResults()[0]) + if err != nil { + return nil, fmt.Errorf("failed to convert integration from Atlas: %w", err) + } + return newIntegration, nil +} + +func (tpi *thirdPartyIntegration) Delete(ctx context.Context, projectID, integrationType string) error { + _, err := tpi.integrationsAPI.DeleteThirdPartyIntegration(ctx, integrationType, projectID).Execute() + if err != nil { + if admin.IsErrorCode(err, "INTEGRATION_NOT_CONFIGURED") { + return ErrNotFound + } + return fmt.Errorf("failed to delete integration type %s: %w", integrationType, err) + } + return nil +} diff --git a/internal/translation/thirdpartyintegration/thirdpartyintegration_test.go b/internal/translation/thirdpartyintegration/thirdpartyintegration_test.go new file mode 100644 index 0000000000..d88362edb1 --- /dev/null +++ b/internal/translation/thirdpartyintegration/thirdpartyintegration_test.go @@ -0,0 +1,475 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package thirdpartyintegration_test + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.mongodb.org/atlas-sdk/v20250312002/admin" + "go.mongodb.org/atlas-sdk/v20250312002/mockadmin" + + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" + integration "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/thirdpartyintegration" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/utils" +) + +const ( + testProjectID = "fake-project" + + testID = "fake-id" + + testRegion = "fake-region" + + testIntegrationType = "PAGER_DUTY" + + testServiceKey = "fake-service-key" + + testAccount = "fake-account-id" + + testLicenseKey = "fake-license-key" + + testReadToken = "fake-read-token" + + testWriteToken = "fake-write-token" +) + +var ( + ErrFakeFailure = errors.New("fake failure") +) + +func TestIntegrationsCreate(t *testing.T) { + testAPIKey := utils.RandomName("fake-apy-key") + for _, tc := range []struct { + title string + integration *integration.ThirdPartyIntegration + api admin.ThirdPartyIntegrationsApi + expected *integration.ThirdPartyIntegration + expectedError error + }{ + { + title: "successful api create", + integration: &integration.ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "DATADOG", + Datadog: &akov2next.DatadogIntegration{ + Region: testRegion, + SendCollectionLatencyMetrics: pointer.MakePtr("enabled"), + SendDatabaseMetrics: pointer.MakePtr("disabled"), + }, + }, + DatadogSecrets: &integration.DatadogSecrets{ + APIKey: testAPIKey, + }, + }, + api: testCreateIntegrationAPI( + &admin.ThirdPartyIntegration{ + Id: pointer.MakePtr(string(testID)), + Type: pointer.MakePtr("DATADOG"), + ApiKey: pointer.MakePtr(testAPIKey), + Region: pointer.MakePtr(string(testRegion)), + 
SendCollectionLatencyMetrics: pointer.MakePtr(true), + }, + nil, + ), + expected: &integration.ThirdPartyIntegration{ + ID: testID, + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "DATADOG", + Datadog: &akov2next.DatadogIntegration{ + Region: testRegion, + SendCollectionLatencyMetrics: pointer.MakePtr("enabled"), + SendDatabaseMetrics: pointer.MakePtr("disabled"), + }, + }, + DatadogSecrets: &integration.DatadogSecrets{ + APIKey: testAPIKey, + }, + }, + expectedError: nil, + }, + + { + title: "API failure gets passed through", + integration: &integration.ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "DATADOG", + Datadog: &akov2next.DatadogIntegration{ + Region: testRegion, + SendCollectionLatencyMetrics: pointer.MakePtr("enabled"), + SendDatabaseMetrics: pointer.MakePtr("disabled"), + }, + }, + DatadogSecrets: &integration.DatadogSecrets{ + APIKey: testAPIKey, + }, + }, + api: testCreateIntegrationAPI( + nil, + ErrFakeFailure, + ), + expected: nil, + expectedError: ErrFakeFailure, + }, + + { + title: "failure to parse config returns before calling API", + integration: &integration.ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "BLAH", + Datadog: &akov2next.DatadogIntegration{ + Region: testRegion, + SendCollectionLatencyMetrics: pointer.MakePtr("enabled"), + SendDatabaseMetrics: pointer.MakePtr("disabled"), + }, + }, + DatadogSecrets: &integration.DatadogSecrets{ + APIKey: testAPIKey, + }, + }, + expected: nil, + expectedError: integration.ErrUnsupportedIntegrationType, + }, + + { + title: "failure to parse API reply", + integration: &integration.ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "DATADOG", + Datadog: &akov2next.DatadogIntegration{ + Region: testRegion, + SendCollectionLatencyMetrics: pointer.MakePtr("enabled"), + SendDatabaseMetrics: pointer.MakePtr("disabled"), + }, + }, + DatadogSecrets: &integration.DatadogSecrets{ + APIKey: testAPIKey, + }, + }, + api: testCreateIntegrationAPI( + &admin.ThirdPartyIntegration{ + Id: pointer.MakePtr(string(testID)), + Type: pointer.MakePtr("BLAH"), + ApiKey: pointer.MakePtr(testAPIKey), + Region: pointer.MakePtr(string(testRegion)), + SendCollectionLatencyMetrics: pointer.MakePtr(true), + }, + nil, + ), + expected: nil, + expectedError: integration.ErrUnsupportedIntegrationType, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := integration.NewThirdPartyIntegrationService(tc.api) + newIntegrations, err := s.Create(ctx, testProjectID, tc.integration) + assert.Equal(t, tc.expected, newIntegrations) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func TestIntegrationsgGet(t *testing.T) { + for _, tc := range []struct { + title string + api admin.ThirdPartyIntegrationsApi + expected *integration.ThirdPartyIntegration + expectedError error + }{ + { + title: "successful api get returns success", + api: testGetIntegrationAPI( + &admin.ThirdPartyIntegration{ + Id: pointer.MakePtr(string(testID)), + Type: pointer.MakePtr(string(testIntegrationType)), + ServiceKey: pointer.MakePtr(testServiceKey), + Region: pointer.MakePtr(string(testRegion)), + SendCollectionLatencyMetrics: pointer.MakePtr(true), + }, + nil, + ), + expected: &integration.ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: testIntegrationType, + PagerDuty: 
&akov2next.PagerDutyIntegration{ + Region: testRegion, + }, + }, + ID: testID, + PagerDutySecrets: &integration.PagerDutySecrets{ + ServiceKey: testServiceKey, + }, + }, + expectedError: nil, + }, + + { + title: "generic API failure passes though", + api: testGetIntegrationAPI( + nil, + ErrFakeFailure, + ), + expected: nil, + expectedError: ErrFakeFailure, + }, + + { + title: "failure to parse API reply", + api: testGetIntegrationAPI( + &admin.ThirdPartyIntegration{ + Id: pointer.MakePtr(string(testID)), + Type: pointer.MakePtr("BLAH"), + ServiceKey: pointer.MakePtr(testServiceKey), + Region: pointer.MakePtr(string(testRegion)), + SendCollectionLatencyMetrics: pointer.MakePtr(true), + }, + nil, + ), + expected: nil, + expectedError: integration.ErrUnsupportedIntegrationType, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := integration.NewThirdPartyIntegrationService(tc.api) + integrations, err := s.Get(ctx, testProjectID, testIntegrationType) + assert.Equal(t, tc.expected, integrations) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func TestIntegrationsUpdate(t *testing.T) { + for _, tc := range []struct { + title string + integration *integration.ThirdPartyIntegration + api admin.ThirdPartyIntegrationsApi + expected *integration.ThirdPartyIntegration + expectedError error + }{ + { + title: "successful api update returns success", + integration: &integration.ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "NEW_RELIC", + NewRelic: &akov2next.NewRelicIntegration{}, + }, + NewRelicSecrets: &integration.NewRelicSecrets{ + AccountID: testAccount, + LicenseKey: testLicenseKey, + ReadToken: testReadToken, + WriteToken: testWriteToken, + }, + }, + api: testUpdateIntegrationAPI( + &admin.ThirdPartyIntegration{ + Id: pointer.MakePtr(string(testID)), + Type: pointer.MakePtr("NEW_RELIC"), + AccountId: pointer.MakePtr(string(testAccount)), + LicenseKey: pointer.MakePtr(string(testLicenseKey)), + ReadToken: pointer.MakePtr(string(testReadToken)), + WriteToken: pointer.MakePtr(string(testWriteToken)), + }, + nil, + ), + expected: &integration.ThirdPartyIntegration{ + ID: testID, + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "NEW_RELIC", + NewRelic: &akov2next.NewRelicIntegration{}, + }, + NewRelicSecrets: &integration.NewRelicSecrets{ + AccountID: testAccount, + LicenseKey: testLicenseKey, + ReadToken: testReadToken, + WriteToken: testWriteToken, + }, + }, + expectedError: nil, + }, + + { + title: "API failure gets passed through", + integration: &integration.ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "NEW_RELIC", + NewRelic: &akov2next.NewRelicIntegration{}, + }, + NewRelicSecrets: &integration.NewRelicSecrets{ + AccountID: testAccount, + LicenseKey: testLicenseKey, + ReadToken: testReadToken, + WriteToken: testWriteToken, + }, + }, + api: testUpdateIntegrationAPI( + nil, + ErrFakeFailure, + ), + expected: nil, + expectedError: ErrFakeFailure, + }, + + { + title: "failure to parse config returns before calling API", + integration: &integration.ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "BLAH", + Datadog: &akov2next.DatadogIntegration{ + Region: testRegion, + SendCollectionLatencyMetrics: pointer.MakePtr("true"), + SendDatabaseMetrics: nil, + }, + }, + DatadogSecrets: &integration.DatadogSecrets{ + APIKey: "", + }, + }, + expected: 
nil, + expectedError: integration.ErrUnsupportedIntegrationType, + }, + + { + title: "failure to parse API reply", + integration: &integration.ThirdPartyIntegration{ + AtlasThirdPartyIntegrationSpec: akov2next.AtlasThirdPartyIntegrationSpec{ + Type: "NEW_RELIC", + NewRelic: &akov2next.NewRelicIntegration{}, + }, + NewRelicSecrets: &integration.NewRelicSecrets{ + AccountID: testAccount, + LicenseKey: testLicenseKey, + ReadToken: testReadToken, + WriteToken: testWriteToken, + }, + }, + api: testUpdateIntegrationAPI( + &admin.ThirdPartyIntegration{ + Id: pointer.MakePtr(string(testID)), + Type: pointer.MakePtr("BLAH"), + Region: pointer.MakePtr(string(testRegion)), + SendCollectionLatencyMetrics: pointer.MakePtr(true), + }, + nil, + ), + expected: nil, + expectedError: integration.ErrUnsupportedIntegrationType, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := integration.NewThirdPartyIntegrationService(tc.api) + updatedIntegrations, err := s.Update(ctx, testProjectID, tc.integration) + assert.Equal(t, tc.expected, updatedIntegrations) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func TestIntegrationDelete(t *testing.T) { + for _, tc := range []struct { + title string + api admin.ThirdPartyIntegrationsApi + expectedError error + }{ + { + title: "successful api delete returns success", + api: testDeleteIntegrationAPI(nil), + expectedError: nil, + }, + + { + title: "generic API failure passes though", + api: testDeleteIntegrationAPI(ErrFakeFailure), + expectedError: ErrFakeFailure, + }, + } { + ctx := context.Background() + t.Run(tc.title, func(t *testing.T) { + s := integration.NewThirdPartyIntegrationService(tc.api) + err := s.Delete(ctx, testProjectID, testIntegrationType) + assert.ErrorIs(t, err, tc.expectedError) + }) + } +} + +func testCreateIntegrationAPI(integration *admin.ThirdPartyIntegration, err error) admin.ThirdPartyIntegrationsApi { + var apiMock mockadmin.ThirdPartyIntegrationsApi + + apiMock.EXPECT().CreateThirdPartyIntegration( + mock.Anything, mock.Anything, testProjectID, mock.Anything, + ).Return(admin.CreateThirdPartyIntegrationApiRequest{ApiService: &apiMock}) + + paginatedIntegration := &admin.PaginatedIntegration{} + if integration != nil { + paginatedIntegration.Results = &[]admin.ThirdPartyIntegration{ + *integration, + } + } + apiMock.EXPECT().CreateThirdPartyIntegrationExecute( + mock.AnythingOfType("admin.CreateThirdPartyIntegrationApiRequest"), + ).Return(paginatedIntegration, nil, err) + return &apiMock +} + +func testGetIntegrationAPI(integration *admin.ThirdPartyIntegration, err error) admin.ThirdPartyIntegrationsApi { + var apiMock mockadmin.ThirdPartyIntegrationsApi + + apiMock.EXPECT().GetThirdPartyIntegration( + mock.Anything, testProjectID, testIntegrationType, + ).Return(admin.GetThirdPartyIntegrationApiRequest{ApiService: &apiMock}) + + apiMock.EXPECT().GetThirdPartyIntegrationExecute( + mock.AnythingOfType("admin.GetThirdPartyIntegrationApiRequest"), + ).Return(integration, nil, err) + return &apiMock +} + +func testUpdateIntegrationAPI(integration *admin.ThirdPartyIntegration, err error) admin.ThirdPartyIntegrationsApi { + var apiMock mockadmin.ThirdPartyIntegrationsApi + + apiMock.EXPECT().UpdateThirdPartyIntegration( + mock.Anything, mock.Anything, testProjectID, mock.Anything, + ).Return(admin.UpdateThirdPartyIntegrationApiRequest{ApiService: &apiMock}) + + paginatedIntegration := &admin.PaginatedIntegration{} + if integration != nil { + paginatedIntegration.Results = &[]admin.ThirdPartyIntegration{ 
+ *integration, + } + } + apiMock.EXPECT().UpdateThirdPartyIntegrationExecute( + mock.AnythingOfType("admin.UpdateThirdPartyIntegrationApiRequest"), + ).Return(paginatedIntegration, nil, err) + return &apiMock +} + +func testDeleteIntegrationAPI(err error) admin.ThirdPartyIntegrationsApi { + var apiMock mockadmin.ThirdPartyIntegrationsApi + + apiMock.EXPECT().DeleteThirdPartyIntegration( + mock.Anything, testIntegrationType, testProjectID, + ).Return(admin.DeleteThirdPartyIntegrationApiRequest{ApiService: &apiMock}) + + apiMock.EXPECT().DeleteThirdPartyIntegrationExecute( + mock.AnythingOfType("admin.DeleteThirdPartyIntegrationApiRequest"), + ).Return(nil, err) + return &apiMock +} diff --git a/internal/version/version.go b/internal/version/version.go index e657fc79aa..c847bf0acc 100644 --- a/internal/version/version.go +++ b/internal/version/version.go @@ -24,7 +24,14 @@ const DefaultVersion = "unknown" // Version set by the linker during link time. var Version = DefaultVersion +// Experimental enables unreleased features +var Experimental = "false" + func IsRelease(v string) bool { return v != DefaultVersion && regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+[-certified]*$`).Match([]byte(strings.TrimSpace(v))) } + +func IsExperimental() bool { + return Experimental == "true" +} diff --git a/licenses.csv b/licenses.csv index 7cd6a9be24..f8202d2597 100644 --- a/licenses.csv +++ b/licenses.csv @@ -99,6 +99,7 @@ github.com/youmark/pkcs8,https://github.com/youmark/pkcs8/blob/a2c0da244d78/LICE go.mongodb.org/atlas-sdk/v20231115004,https://github.com/mongodb/atlas-sdk-go/blob/v20231115004.1.0/LICENSE,Apache-2.0 go.mongodb.org/atlas-sdk/v20231115008,https://github.com/mongodb/atlas-sdk-go/blob/v20231115008.5.0/LICENSE,Apache-2.0 go.mongodb.org/atlas-sdk/v20241113001,https://github.com/mongodb/atlas-sdk-go/blob/v20241113001.0.0/LICENSE,Apache-2.0 +go.mongodb.org/atlas-sdk/v20250312002,https://github.com/mongodb/atlas-sdk-go/blob/v20250312002.0.0/LICENSE,Apache-2.0 go.mongodb.org/atlas/mongodbatlas,https://github.com/mongodb/go-client-mongodb-atlas/blob/v0.38.0/LICENSE,Apache-2.0 go.mongodb.org/mongo-driver,https://github.com/mongodb/mongo-go-driver/blob/v1.17.3/LICENSE,Apache-2.0 go.opentelemetry.io/auto/sdk,https://github.com/open-telemetry/opentelemetry-go-instrumentation/blob/sdk/v1.1.0/sdk/LICENSE,Apache-2.0 @@ -114,7 +115,7 @@ golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/8a7402ab:LICENSE,BSD-3- golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.40.0:LICENSE,BSD-3-Clause golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/v0.30.0:LICENSE,BSD-3-Clause golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.14.0:LICENSE,BSD-3-Clause -golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE,BSD-3-Clause +golang.org/x/sys/unix,https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE,BSD-3-Clause golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.32.0:LICENSE,BSD-3-Clause golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.25.0:LICENSE,BSD-3-Clause golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.11.0:LICENSE,BSD-3-Clause diff --git a/pkg/controller/state/reapply.go b/pkg/controller/state/reapply.go new file mode 100644 index 0000000000..c6e2482ef8 --- /dev/null +++ b/pkg/controller/state/reapply.go @@ -0,0 +1,122 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
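Note on the third-party integration service defined above: a hedged sketch of the call pattern a controller could use with it (Get, Create on `ErrNotFound`, Update when the secret-free `Comparable` views differ). `clientSet`, `projectID`, and `desired` are assumed inputs; a real reconciler would also normalize the Atlas-assigned ID before comparing.

```go
package example

import (
	"context"
	"errors"
	"reflect"

	"github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/controller/atlas"
	integration "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/translation/thirdpartyintegration"
)

// ensureIntegration is a hypothetical helper, not part of this change set.
func ensureIntegration(ctx context.Context, clientSet *atlas.ClientSet, projectID string, desired *integration.ThirdPartyIntegration) (*integration.ThirdPartyIntegration, error) {
	svc := integration.NewThirdPartyIntegrationServiceFromClientSet(clientSet)

	current, err := svc.Get(ctx, projectID, desired.Type)
	if errors.Is(err, integration.ErrNotFound) {
		return svc.Create(ctx, projectID, desired)
	}
	if err != nil {
		return nil, err
	}
	// Comparable strips secrets and project references before the diff.
	if !reflect.DeepEqual(current.Comparable(), desired.Comparable()) {
		return svc.Update(ctx, projectID, desired)
	}
	return current, nil
}
```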
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const AnnotationReapplyTimestamp = "mongodb.internal.com/reapply-timestamp" + +func ShouldReapply(obj metav1.Object) (bool, error) { + timestamp, hasTimestamp, err := ReapplyTimestamp(obj) + if err != nil { + return false, err + } + + if !hasTimestamp { + return false, nil + } + + period, hasPeriod, err := ReapplyPeriod(obj) + if err != nil { + return false, err + } + + if !hasPeriod { + return false, nil + } + + diff := timestamp.Add(period).Sub(time.Now()) + + return diff <= 0, nil +} + +func ReapplyPeriod(obj metav1.Object) (time.Duration, bool, error) { + annotationPeriod, ok := obj.GetAnnotations()["mongodb.com/reapply-period"] + if !ok { + return 0, false, nil + } + + period, err := time.ParseDuration(annotationPeriod) + if err != nil { + return 0, false, fmt.Errorf("failed to parse reapply period: %w", err) + } + + if period < 60*time.Second { + return 0, false, errors.New("reapply period is invalid: must be greater than 60m") + } + + return period, true, nil +} + +func ReapplyTimestamp(obj metav1.Object) (time.Time, bool, error) { + annotationTimestamp, ok := obj.GetAnnotations()[AnnotationReapplyTimestamp] + if !ok { + return time.Time{}, false, nil + } + + timestampMillis, err := strconv.ParseInt(annotationTimestamp, 10, 0) + if err != nil { + return time.Time{}, false, fmt.Errorf("failed to parse reapply timestamp: %w", err) + } + + return time.UnixMilli(timestampMillis), true, nil +} + +func PatchReapplyTimestamp(ctx context.Context, kubeClient client.Client, obj client.Object) (time.Duration, error) { + period, hasPeriod, err := ReapplyPeriod(obj) + if err != nil { + return 0, err + } + + if !hasPeriod { + return 0, nil + } + + timestamp, hasTimestamp, err := ReapplyTimestamp(obj) + if err != nil { + return 0, err + } + + now := time.Now() + diff := timestamp.Add(period).Sub(now) + if hasTimestamp && diff > 0 { + return diff, nil + } + + escapedAnnotation := strings.ReplaceAll(AnnotationReapplyTimestamp, "/", "~1") + + patch := []byte(fmt.Sprintf(`[{ + "op": "replace", + "path": "/metadata/annotations/%v", + "value": "%v" +}]`, escapedAnnotation, now.UnixMilli())) + + if err := kubeClient.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, patch)); err != nil { + return 0, fmt.Errorf("failed to patch object: %w", err) + } + + return period, nil +} diff --git a/pkg/controller/state/reapply_test.go b/pkg/controller/state/reapply_test.go new file mode 100644 index 0000000000..3a91fd6284 --- /dev/null +++ b/pkg/controller/state/reapply_test.go @@ -0,0 +1,291 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
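For orientation on the reapply helpers above: the user sets `mongodb.com/reapply-period` (at least 60 seconds per the check in `ReapplyPeriod`), while the timestamp annotation is bookkeeping written back by `PatchReapplyTimestamp`. A minimal illustrative sketch with placeholder values:

```go
package example

import (
	"fmt"
	"strconv"
	"time"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/controller/state"
)

func example() {
	obj := &unstructured.Unstructured{}
	obj.SetAnnotations(map[string]string{
		// How often to force a reconcile of an otherwise idle resource.
		"mongodb.com/reapply-period": "2h",
		// Milliseconds since epoch of the last reapply; the "/" in this key is
		// escaped as "~1" (RFC 6901) when PatchReapplyTimestamp builds the JSON Patch path.
		state.AnnotationReapplyTimestamp: strconv.FormatInt(time.Now().Add(-3*time.Hour).UnixMilli(), 10),
	})

	due, err := state.ShouldReapply(obj)
	fmt.Println(due, err) // true <nil>: last reapply was 3h ago, past the 2h period
}
```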
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "context" + "errors" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" +) + +func TestReapplyPeriod(t *testing.T) { + tests := []struct { + name string + annotations map[string]string + want time.Duration + wantOk bool + wantErr string + }{ + { + name: "valid period", + annotations: map[string]string{"mongodb.com/reapply-period": "2h"}, + want: 2 * time.Hour, + wantOk: true, + }, + { + name: "period missing", + annotations: map[string]string{}, + want: 0, + wantOk: false, + }, + { + name: "period invalid format", + annotations: map[string]string{"mongodb.com/reapply-period": "not-a-period"}, + want: 0, + wantOk: false, + wantErr: "invalid duration", + }, + { + name: "period too short", + annotations: map[string]string{"mongodb.com/reapply-period": "30s"}, + want: 0, + wantOk: false, + wantErr: "must be greater than 60m", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + obj := newUnstructuredObj(tc.annotations) + got, ok, err := ReapplyPeriod(obj) + if tc.wantErr != "" { + assert.ErrorContains(t, err, tc.wantErr) + } + assert.Equal(t, tc.wantOk, ok) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestReapplyTimestamp(t *testing.T) { + now := time.Now().UnixMilli() + tests := []struct { + name string + annotations map[string]string + want int64 + wantOk bool + wantErr string + }{ + { + name: "valid timestamp", + annotations: map[string]string{AnnotationReapplyTimestamp: strconv.FormatInt(now, 10)}, + want: now, + wantOk: true, + }, + { + name: "timestamp missing", + annotations: map[string]string{}, + want: 0, + wantOk: false, + }, + { + name: "invalid timestamp", + annotations: map[string]string{AnnotationReapplyTimestamp: "not-a-number"}, + want: 0, + wantOk: false, + wantErr: "parsing \"not-a-number\": invalid syntax", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + obj := newUnstructuredObj(tc.annotations) + got, ok, err := ReapplyTimestamp(obj) + assertErrContains(t, tc.wantErr, err) + assert.Equal(t, tc.wantOk, ok) + if tc.wantOk { + assert.Equal(t, tc.want, got.UnixMilli()) + } + }) + } +} + +func TestShouldReapply(t *testing.T) { + past := time.Now().Add(-2 * time.Hour).UnixMilli() + future := time.Now().Add(2 * time.Hour).UnixMilli() + + tests := []struct { + name string + annotations map[string]string + want bool + wantErr string + }{ + { + name: "should reapply (past+1h < now)", + annotations: map[string]string{ + AnnotationReapplyTimestamp: strconv.FormatInt(past, 10), + "mongodb.com/reapply-period": "1h", + }, + want: true, + wantErr: "", + }, + { + name: "should not reapply (future+1h > now)", + annotations: map[string]string{ + AnnotationReapplyTimestamp: strconv.FormatInt(future, 10), + "mongodb.com/reapply-period": "1h", 
+ }, + want: false, + wantErr: "", + }, + { + name: "missing period", + annotations: map[string]string{ + AnnotationReapplyTimestamp: strconv.FormatInt(past, 10), + }, + want: false, + wantErr: "", + }, + { + name: "missing timestamp", + annotations: map[string]string{"mongodb.com/reapply-period": "1h"}, + want: false, + wantErr: "", + }, + { + name: "invalid period", + annotations: map[string]string{ + AnnotationReapplyTimestamp: strconv.FormatInt(past, 10), + "mongodb.com/reapply-period": "bad", + }, + want: false, + wantErr: "invalid duration", + }, + { + name: "invalid timestamp", + annotations: map[string]string{ + AnnotationReapplyTimestamp: "bad", + "mongodb.com/reapply-period": "1h", + }, + want: false, + wantErr: "invalid syntax", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + obj := newUnstructuredObj(tc.annotations) + got, err := ShouldReapply(obj) + assertErrContains(t, tc.wantErr, err) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestPatchReapplyTimestamp(t *testing.T) { + now := time.Now() + pastMillis := strconv.FormatInt(now.Add(-2*time.Hour).UnixMilli(), 10) + + tests := []struct { + name string + annotations map[string]string + patchErr error + want time.Duration + wantErr string // substring to match in the error message + wantPatched bool // true if we expect the annotation to be updated + }{ + { + name: "patch performed", + annotations: map[string]string{ + AnnotationReapplyTimestamp: pastMillis, + "mongodb.com/reapply-period": "1h", + }, + want: time.Hour, + wantErr: "", + wantPatched: true, + }, + { + name: "patch not needed (no period)", + annotations: map[string]string{}, + want: 0, + wantErr: "", + wantPatched: false, + }, + { + name: "patch error", + annotations: map[string]string{ + AnnotationReapplyTimestamp: pastMillis, + "mongodb.com/reapply-period": "1h", + }, + patchErr: errors.New("fail"), + want: 0, + wantErr: "fail", + wantPatched: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + Namespace: "default", + Annotations: tc.annotations, + }, + } + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + patchFn := func(_ context.Context, _ client.WithWatch, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { + return tc.patchErr + } + if tc.patchErr == nil { + patchFn = nil + } + c := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(obj.DeepCopy()). + WithInterceptorFuncs(interceptor.Funcs{Patch: patchFn}). + Build() + ctx := context.Background() + + period, err := PatchReapplyTimestamp(ctx, c, obj) + assertErrContains(t, tc.wantErr, err) + assert.Equal(t, tc.want, period) + + fetched := &corev1.Pod{} + _ = c.Get(ctx, client.ObjectKeyFromObject(obj), fetched) + + annot := fetched.GetAnnotations() + _, patched := annot[AnnotationReapplyTimestamp] + + assert.Equal(t, tc.wantPatched, patched, "Annotation patched?") + }) + } +} + +// Helper to create an Unstructured object with annotations. 
+func newUnstructuredObj(annotations map[string]string) *unstructured.Unstructured { + obj := &unstructured.Unstructured{} + obj.SetAnnotations(annotations) + return obj +} + +func assertErrContains(t *testing.T, wantErr string, err error) { + if wantErr == "" { + assert.NoError(t, err) + } else { + assert.ErrorContains(t, err, wantErr) + } +} diff --git a/pkg/controller/state/reconciler.go b/pkg/controller/state/reconciler.go new file mode 100644 index 0000000000..efbffeb56b --- /dev/null +++ b/pkg/controller/state/reconciler.go @@ -0,0 +1,369 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "context" + "fmt" + "strings" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + ctrlrtbuilder "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/finalizer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/state" +) + +type Result struct { + reconcile.Result + NextState state.ResourceState + StateMsg string +} + +type StateHandler[T any] interface { + SetupWithManager(ctrl.Manager, reconcile.Reconciler, ...SetupManagerOption) error + For() (client.Object, builder.Predicates) + HandleInitial(context.Context, *T) (Result, error) + HandleImportRequested(context.Context, *T) (Result, error) + HandleImported(context.Context, *T) (Result, error) + HandleCreating(context.Context, *T) (Result, error) + HandleCreated(context.Context, *T) (Result, error) + HandleUpdating(context.Context, *T) (Result, error) + HandleUpdated(context.Context, *T) (Result, error) + HandleDeletionRequested(context.Context, *T) (Result, error) + HandleDeleting(context.Context, *T) (Result, error) + // Deleted, not handled as it is a terminal state +} + +const ( + ReadyReasonError = "Error" + ReadyReasonPending = "Pending" + ReadyReasonSettled = "Settled" +) + +type Reconciler[T any] struct { + cluster cluster.Cluster + reconciler StateHandler[T] + unstructuredGVK schema.GroupVersionKind + supportReapply bool +} + +type ReconcilerOptionFn[T any] func(*Reconciler[T]) + +func WithCluster[T any](c cluster.Cluster) ReconcilerOptionFn[T] { + return func(r *Reconciler[T]) { + r.cluster = c + } +} + +func WithReapplySupport[T any](supportReapply bool) ReconcilerOptionFn[T] { + return func(r *Reconciler[T]) { + r.supportReapply = supportReapply + } +} + +type UnstructuredStateReconciler = StateHandler[unstructured.Unstructured] + +type ControllerSetupBuilder = ctrlrtbuilder.TypedBuilder[reconcile.Request] + +type SetupManagerOption func(builder 
*ControllerSetupBuilder) *ControllerSetupBuilder + +func NewStateReconciler[T any](target StateHandler[T], options ...ReconcilerOptionFn[T]) *Reconciler[T] { + r := &Reconciler[T]{ + reconciler: target, + } + for _, opt := range options { + opt(r) + } + return r +} + +func NewUnstructuredStateReconciler(target UnstructuredStateReconciler, gvk schema.GroupVersionKind) *Reconciler[unstructured.Unstructured] { + return &Reconciler[unstructured.Unstructured]{ + reconciler: target, + unstructuredGVK: gvk, + } +} + +func (r *Reconciler[T]) SetupWithManager(mgr ctrl.Manager, options ...SetupManagerOption) error { + r.cluster = mgr + return r.reconciler.SetupWithManager(mgr, r, options...) +} + +func (r *Reconciler[T]) For() (client.Object, builder.Predicates) { + return r.reconciler.For() +} + +func ApplyOptions(builder *ControllerSetupBuilder, options ...SetupManagerOption) *ControllerSetupBuilder { + for _, optionFn := range options { + builder = optionFn(builder) + } + return builder +} + +func (r *Reconciler[T]) Reconcile(ctx context.Context, req ctrl.Request) (reconcile.Result, error) { + logger := log.FromContext(ctx).WithName("state") + logger.Info("reconcile started", "req", req) + + t := new(T) + obj := any(t).(StatusObject) + clientObj := any(t).(client.Object) + if u, ok := clientObj.(*unstructured.Unstructured); ok { + u.SetGroupVersionKind(r.unstructuredGVK) + } + + err := r.cluster.GetClient().Get(ctx, req.NamespacedName, clientObj) + if apierrors.IsNotFound(err) { + // object is already gone, nothing to do. + return reconcile.Result{}, nil + } + if err != nil { + return ctrl.Result{}, fmt.Errorf("unable to get object: %w", err) + } + + currentStatus := newStatusObject(obj) + currentState := state.GetState(currentStatus.Status.Conditions) + + logger.Info("reconcile started", "currentState", currentState) + if err := finalizer.EnsureFinalizers(ctx, r.cluster.GetClient(), clientObj, "mongodb.com/finalizer"); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to manage finalizers: %w", err) + } + + result, reconcileErr := r.ReconcileState(ctx, t) + stateStatus := true + if reconcileErr != nil { + // error message will be displayed in Ready state. 
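+		// stateStatus=false marks the State condition as not met; the Ready
+		// condition assembled below carries the handler error text.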
+ stateStatus = false + } + + newStatus := newStatusObject(obj) + observedGeneration := getObservedGeneration(clientObj, currentStatus.Status.Conditions, result.NextState) + newStatusConditions := newStatus.Status.Conditions + state.EnsureState(&newStatusConditions, observedGeneration, result.NextState, result.StateMsg, stateStatus) + + logger.Info("reconcile finished", "nextState", result.NextState) + + if result.NextState == state.StateDeleted { + if err := finalizer.UnsetFinalizers(ctx, r.cluster.GetClient(), clientObj, "mongodb.com/finalizer"); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to unset finalizer: %w", err) + } + + return result.Result, reconcileErr + } + + ready := NewReadyCondition(result) + ready.ObservedGeneration = observedGeneration + + if reconcileErr != nil { + ready.Status = metav1.ConditionFalse + ready.Reason = ReadyReasonError + ready.Message = reconcileErr.Error() + } + + meta.SetStatusCondition(&newStatusConditions, ready) + newStatus.Status.Conditions = newStatusConditions + if err := patchStatus(ctx, r.cluster.GetClient(), clientObj, newStatus); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to patch status: %w", err) + } + + return result.Result, reconcileErr +} + +func NewReadyCondition(result Result) metav1.Condition { + var ( + readyReason, msg string + cond metav1.ConditionStatus + ) + + switch result.NextState { + case state.StateInitial: + cond = metav1.ConditionFalse + readyReason = ReadyReasonPending + msg = "Resource is in initial state." + + case state.StateImportRequested: + cond = metav1.ConditionFalse + readyReason = ReadyReasonPending + msg = "Resource is being imported." + + case state.StateCreating: + cond = metav1.ConditionFalse + readyReason = ReadyReasonPending + msg = "Resource is pending." + + case state.StateUpdating: + cond = metav1.ConditionFalse + readyReason = ReadyReasonPending + msg = "Resource is pending." + + case state.StateDeleting: + cond = metav1.ConditionFalse + readyReason = ReadyReasonPending + msg = "Resource is pending." + + case state.StateDeletionRequested: + cond = metav1.ConditionFalse + readyReason = ReadyReasonPending + msg = "Resource is pending." + + case state.StateImported: + cond = metav1.ConditionTrue + readyReason = ReadyReasonSettled + msg = "Resource is imported." + + case state.StateCreated: + cond = metav1.ConditionTrue + readyReason = ReadyReasonSettled + msg = "Resource is settled." + + case state.StateUpdated: + cond = metav1.ConditionTrue + readyReason = ReadyReasonSettled + msg = "Resource is settled." 
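+
+	// StateDeleted is intentionally absent: Reconcile returns right after the
+	// finalizer is removed, so anything that falls through to default here is
+	// unexpected.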
+ + default: + cond = metav1.ConditionFalse + readyReason = ReadyReasonError + msg = fmt.Sprintf("unknown state: %s", result.NextState) + } + + return metav1.Condition{ + Type: state.ReadyCondition, + Status: cond, + LastTransitionTime: metav1.NewTime(time.Now()), + Reason: readyReason, + Message: msg, + } +} + +func (r *Reconciler[T]) ReconcileState(ctx context.Context, t *T) (Result, error) { + obj := any(t).(client.Object) + + var ( + result = Result{ + Result: reconcile.Result{}, + NextState: state.StateInitial, + } + + err error + ) + statusObj := newStatusObject(any(t).(StatusObject)) + currentState := state.GetState(statusObj.Status.Conditions) + + if currentState == state.StateInitial { + for key := range obj.GetAnnotations() { + if strings.HasPrefix(key, "mongodb.com/external-") { + currentState = state.StateImportRequested + } + } + } + + if !obj.GetDeletionTimestamp().IsZero() && currentState != state.StateDeleting { + currentState = state.StateDeletionRequested + } + + switch currentState { + case state.StateInitial: + result, err = r.reconciler.HandleInitial(ctx, t) + case state.StateImportRequested: + result, err = r.reconciler.HandleImportRequested(ctx, t) + case state.StateImported: + result, err = r.reconciler.HandleImported(ctx, t) + case state.StateCreating: + result, err = r.reconciler.HandleCreating(ctx, t) + case state.StateCreated: + result, err = r.reconciler.HandleCreated(ctx, t) + case state.StateUpdating: + result, err = r.reconciler.HandleUpdating(ctx, t) + case state.StateUpdated: + result, err = r.reconciler.HandleUpdated(ctx, t) + case state.StateDeletionRequested: + result, err = r.reconciler.HandleDeletionRequested(ctx, t) + case state.StateDeleting: + result, err = r.reconciler.HandleDeleting(ctx, t) + default: + return Result{}, fmt.Errorf("unsupported state %q", currentState) + } + + if result.NextState == "" { + result.NextState = state.StateInitial + } + + if r.supportReapply { + err := r.reconcileReapply(ctx, obj, result, err) + if err != nil { + return Result{}, fmt.Errorf("failed to reconcile reapply: %w", err) + } + } + + return result, err +} + +func (r *Reconciler[T]) reconcileReapply(ctx context.Context, obj client.Object, result Result, err error) error { + isReapplyState := result.NextState == state.StateImported || + result.NextState == state.StateCreated || + result.NextState == state.StateUpdated + + if isReapplyState && result.RequeueAfter == 0 && err == nil { + requeueAfter, err := PatchReapplyTimestamp(ctx, r.cluster.GetClient(), obj) + if err != nil { + return fmt.Errorf("failed to patch reapply timestamp: %w", err) + } + + result.RequeueAfter = requeueAfter + } + return nil +} + +func getObservedGeneration(obj client.Object, prevStatusConditions []metav1.Condition, nextState state.ResourceState) int64 { + observedGeneration := obj.GetGeneration() + prevState := state.GetState(prevStatusConditions) + + if prevCondition := meta.FindStatusCondition(prevStatusConditions, state.StateCondition); prevCondition != nil { + from := prevState + to := nextState + + // don't change observed generation if we are: + // - creating/updating/deleting + // - just finished creating/updating/deleting + observedGeneration = prevCondition.ObservedGeneration + switch { + case from == state.StateUpdating && to == state.StateUpdating: // polling update + case from == state.StateUpdating && to == state.StateUpdated: // finished updating + + case from == state.StateCreating && to == state.StateCreating: // polling creation + case from == state.StateCreating && to 
== state.StateCreated: // finished creating + + case from == state.StateDeletionRequested && to == state.StateDeleting: // started deletion + case from == state.StateDeleting && to == state.StateDeleting: // polling deletion + case from == state.StateDeleting && to == state.StateDeleted: // finshed deletion + default: + observedGeneration = obj.GetGeneration() + } + } + + return observedGeneration +} diff --git a/pkg/controller/state/reconciler_test.go b/pkg/controller/state/reconciler_test.go new file mode 100644 index 0000000000..7c205f2353 --- /dev/null +++ b/pkg/controller/state/reconciler_test.go @@ -0,0 +1,581 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/state" +) + +func TestGetObservedGeneration(t *testing.T) { + type args struct { + obj client.Object + prevStatus StatusObject + nextState state.ResourceState + } + tests := []struct { + name string + args args + want int64 + }{ + { + name: "No previous state, returns current generation", + args: args{ + obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 3}}, + prevStatus: newDummyObject(metav1.ObjectMeta{}, nil), + nextState: state.StateInitial, + }, + want: 3, + }, + { + name: "Switch from Creating to Created, uses observed generation", + args: args{ + obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 2}}, + prevStatus: prevStatusObject(state.StateCreating, 7), + nextState: state.StateCreated, + }, + want: 7, + }, + { + name: "Switch from Updating to Updated, uses observed generation", + args: args{ + obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 3}}, + prevStatus: prevStatusObject(state.StateUpdating, 9), + nextState: state.StateUpdated, + }, + want: 9, + }, + { + name: "Switch from Deleting to Deleted, uses observed generation", + args: args{ + obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, + prevStatus: prevStatusObject(state.StateDeleting, 2), + nextState: state.StateDeleted, + }, + want: 2, + }, + { + name: "Polling from Creating, uses observed generation", + args: args{ + obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 2}}, + prevStatus: prevStatusObject(state.StateCreating, 7), + nextState: state.StateCreating, + }, + want: 7, + }, + { + name: "Polling 
Updating, uses observed generation", + args: args{ + obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 3}}, + prevStatus: prevStatusObject(state.StateUpdating, 9), + nextState: state.StateUpdating, + }, + want: 9, + }, + { + name: "Polling Deleting, uses observed generation", + args: args{ + obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, + prevStatus: prevStatusObject(state.StateDeleting, 2), + nextState: state.StateDeleting, + }, + want: 2, + }, + { + name: "Start Deleting, uses observed generation", + args: args{ + obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 1}}, + prevStatus: prevStatusObject(state.StateDeletionRequested, 2), + nextState: state.StateDeleting, + }, + want: 2, + }, + { + name: "Irrelevant state change, returns obj generation", + args: args{ + obj: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 8}}, + prevStatus: prevStatusObject(state.StateInitial, 4), + nextState: state.StateInitial, + }, + want: 8, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getObservedGeneration(tt.args.obj, tt.args.prevStatus.GetConditions(), tt.args.nextState) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestNewReadyCondition(t *testing.T) { + tests := []struct { + name string + nextState state.ResourceState + wantCond metav1.ConditionStatus + wantReason string + wantMsg string + }{ + { + name: "Initial - Pending", + nextState: state.StateInitial, + wantCond: metav1.ConditionFalse, + wantReason: ReadyReasonPending, + wantMsg: "Resource is in initial state.", + }, + { + name: "ImportRequested - Pending", + nextState: state.StateImportRequested, + wantCond: metav1.ConditionFalse, + wantReason: ReadyReasonPending, + wantMsg: "Resource is being imported.", + }, + { + name: "Creating - Pending", + nextState: state.StateCreating, + wantCond: metav1.ConditionFalse, + wantReason: ReadyReasonPending, + wantMsg: "Resource is pending.", + }, + { + name: "Updating - Pending", + nextState: state.StateUpdating, + wantCond: metav1.ConditionFalse, + wantReason: ReadyReasonPending, + wantMsg: "Resource is pending.", + }, + { + name: "Deleting - Pending", + nextState: state.StateDeleting, + wantCond: metav1.ConditionFalse, + wantReason: ReadyReasonPending, + wantMsg: "Resource is pending.", + }, + { + name: "DeletionRequested - Pending", + nextState: state.StateDeletionRequested, + wantCond: metav1.ConditionFalse, + wantReason: ReadyReasonPending, + wantMsg: "Resource is pending.", + }, + { + name: "Imported - Settled", + nextState: state.StateImported, + wantCond: metav1.ConditionTrue, + wantReason: ReadyReasonSettled, + wantMsg: "Resource is imported.", + }, + { + name: "Created - Settled", + nextState: state.StateCreated, + wantCond: metav1.ConditionTrue, + wantReason: ReadyReasonSettled, + wantMsg: "Resource is settled.", + }, + { + name: "Updated - Settled", + nextState: state.StateUpdated, + wantCond: metav1.ConditionTrue, + wantReason: ReadyReasonSettled, + wantMsg: "Resource is settled.", + }, + { + name: "Unknown state - Error", + nextState: "nonexistent", + wantCond: metav1.ConditionFalse, + wantReason: ReadyReasonError, + wantMsg: "unknown state: nonexistent", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := Result{NextState: tt.nextState} + cond := NewReadyCondition(result) + assert.Equal(t, tt.wantCond, cond.Status) + assert.Equal(t, tt.wantReason, cond.Reason) + assert.Equal(t, state.ReadyCondition, cond.Type) + assert.Equal(t, tt.wantMsg, cond.Message) + }) + } +} + +func 
TestReconcile(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + addKnownTestTypes(scheme) + + basePod := &dummyObject{ + Pod: corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod", + Namespace: "default", + Generation: 1, + }, + }, + } + podKey := types.NamespacedName{Name: "mypod", Namespace: "default"} + + tests := []struct { + name string + existingObj client.Object + interceptors *interceptor.Funcs + handleState func(context.Context, *dummyObject) (Result, error) + wantErr string + wantResult reconcile.Result + }{ + { + name: "get object error", + existingObj: basePod, + interceptors: &interceptor.Funcs{ + Get: func(ctx context.Context, c client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + return errors.New("simulated get error") + }, + }, + handleState: func(ctx context.Context, do *dummyObject) (Result, error) { + return Result{NextState: "Initial"}, nil + }, + wantErr: "unable to get object: simulated get error", + }, + { + name: "object removed is fine", + existingObj: basePod, + interceptors: &interceptor.Funcs{ + Get: func(ctx context.Context, c client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + return apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "pods"}, key.Name) + }, + }, + handleState: func(ctx context.Context, do *dummyObject) (Result, error) { + return Result{NextState: "Initial"}, nil + }, + wantResult: reconcile.Result{}, + }, + { + name: "failed to set finalizer", + existingObj: basePod, + handleState: func(ctx context.Context, do *dummyObject) (Result, error) { + return Result{NextState: "Initial"}, nil + }, + interceptors: &interceptor.Funcs{ + Patch: func(ctx context.Context, c client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return errors.New("simulated patch error") + }, + }, + wantErr: "failed to manage finalizers: simulated patch error", + }, + { + name: "check state", + existingObj: basePod, + handleState: func(ctx context.Context, do *dummyObject) (Result, error) { + return Result{NextState: "Initial"}, nil + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + builder := fake.NewClientBuilder().WithScheme(scheme) + if tc.existingObj != nil { + builder = builder.WithObjects(tc.existingObj) + } + if tc.interceptors != nil { + builder = builder.WithInterceptorFuncs(*tc.interceptors) + } + c := builder.Build() + dummyReconciler := &dummyPodReconciler{handleState: tc.handleState} + r := &Reconciler[dummyObject]{ + cluster: &fakeCluster{cli: c}, + reconciler: dummyReconciler, + } + + req := ctrl.Request{NamespacedName: podKey} + result, err := r.Reconcile(context.Background(), req) + assertErrContains(t, tc.wantErr, err) + assert.Equal(t, tc.wantResult, result) + }) + } +} + +func TestReconcileState(t *testing.T) { + scheme := runtime.NewScheme() + _ = v1.AddToScheme(scheme) + addKnownTestTypes(scheme) + + ctx := context.Background() + + tests := []struct { + name string + initialObj *dummyObject + handleFn func(context.Context, *dummyObject) (Result, error) + modify func(t *dummyObject) + wantResult Result + wantErr string + }{ + { + name: "simulate error", + initialObj: newDummyObject( + metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, + []metav1.Condition{newStateCondition(state.StateCreated, metav1.ConditionTrue, 1)}, + ), + modify: func(t *dummyObject) { + // Simulate a state that should cause an error in ReconcileState + 
t.conditions[0].Reason = "" + }, + wantErr: "unsupported state \"\"", + }, + { + name: "initial state", + initialObj: newDummyObject(metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, nil), + handleFn: func(context.Context, *dummyObject) (Result, error) { + return Result{Result: reconcile.Result{Requeue: false}}, nil + }, + wantResult: Result{Result: reconcile.Result{Requeue: false}}, + }, + { + name: "creating", + initialObj: newDummyObject( + metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, + []metav1.Condition{newStateCondition(state.StateCreating, metav1.ConditionFalse, 1)}, + ), + handleFn: func(context.Context, *dummyObject) (Result, error) { + return Result{Result: reconcile.Result{Requeue: true}}, nil + }, + wantResult: Result{Result: reconcile.Result{Requeue: true}}, + }, + { + name: "created", + initialObj: newDummyObject( + metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, + []metav1.Condition{newStateCondition(state.StateCreated, metav1.ConditionTrue, 1)}, + ), + handleFn: func(context.Context, *dummyObject) (Result, error) { + return Result{Result: reconcile.Result{Requeue: false}}, nil + }, + wantResult: Result{Result: reconcile.Result{Requeue: false}}, + }, + { + name: "updating", + initialObj: newDummyObject( + metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, + []metav1.Condition{newStateCondition(state.StateUpdating, metav1.ConditionFalse, 1)}, + ), + handleFn: func(context.Context, *dummyObject) (Result, error) { + return Result{Result: reconcile.Result{Requeue: true}}, nil + }, + wantResult: Result{Result: reconcile.Result{Requeue: true}}, + }, + { + name: "Updated", + initialObj: newDummyObject( + metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, + []metav1.Condition{newStateCondition(state.StateUpdated, metav1.ConditionTrue, 1)}, + ), + handleFn: func(context.Context, *dummyObject) (Result, error) { + return Result{Result: reconcile.Result{Requeue: false}}, nil + }, + wantResult: Result{Result: reconcile.Result{Requeue: false}}, + }, + { + name: "delete request", + initialObj: newDummyObject( + metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, + []metav1.Condition{newStateCondition(state.StateDeletionRequested, metav1.ConditionFalse, 1)}, + ), + handleFn: func(context.Context, *dummyObject) (Result, error) { + return Result{Result: reconcile.Result{Requeue: false}}, nil + }, + wantResult: Result{Result: reconcile.Result{Requeue: false}}, + }, + { + name: "deleting", + initialObj: newDummyObject( + metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, + []metav1.Condition{newStateCondition(state.StateDeleting, metav1.ConditionFalse, 1)}, + ), + handleFn: func(context.Context, *dummyObject) (Result, error) { + return Result{Result: reconcile.Result{Requeue: true}}, nil + }, + wantResult: Result{Result: reconcile.Result{Requeue: true}}, + }, + { + name: "import request", + initialObj: newDummyObject( + metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, + []metav1.Condition{newStateCondition(state.StateImportRequested, metav1.ConditionFalse, 1)}, + ), + handleFn: func(context.Context, *dummyObject) (Result, error) { + return Result{Result: reconcile.Result{Requeue: false}}, nil + }, + wantResult: Result{Result: reconcile.Result{Requeue: false}}, + }, + { + name: "importing", + initialObj: newDummyObject( + metav1.ObjectMeta{Namespace: "default", Name: "myobj"}, + []metav1.Condition{newStateCondition(state.StateImported, metav1.ConditionTrue, 1)}, + ), + handleFn: func(context.Context, *dummyObject) (Result, error) { + return 
Result{Result: reconcile.Result{Requeue: false}}, nil + }, + wantResult: Result{Result: reconcile.Result{Requeue: false}}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + builder := fake.NewClientBuilder().WithScheme(scheme) + if tc.initialObj != nil { + builder = builder.WithObjects(tc.initialObj) + } + c := builder.Build() + dummyReconciler := &dummyPodReconciler{handleState: tc.handleFn} + r := &Reconciler[dummyObject]{ + cluster: &fakeCluster{cli: c}, + reconciler: dummyReconciler, + } + obj := tc.initialObj.DeepCopy() + if tc.modify != nil { + tc.modify(obj) + } + + gotResult, err := r.ReconcileState(ctx, obj) + if tc.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.wantErr) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.wantResult.Requeue, gotResult.Requeue) + } + }) + } +} + +func addKnownTestTypes(sch *runtime.Scheme) { + sch.AddKnownTypes( + schema.GroupVersion{Group: "test.dummy.example.com", Version: "v1"}, + &dummyObject{}, + ) +} + +type dummyObject struct { + corev1.Pod + conditions []metav1.Condition +} + +func newDummyObject(objMeta metav1.ObjectMeta, conditions []metav1.Condition) *dummyObject { + return &dummyObject{ + Pod: corev1.Pod{ObjectMeta: objMeta}, + conditions: conditions, + } +} + +func (do *dummyObject) GetConditions() []metav1.Condition { + return do.conditions +} + +func (do *dummyObject) DeepCopy() *dummyObject { + if do == nil { + return nil + } + conditions := make([]metav1.Condition, 0, len(do.conditions)) + for _, condition := range do.conditions { + conditions = append(conditions, *condition.DeepCopy()) + } + return &dummyObject{ + Pod: *do.Pod.DeepCopy(), + conditions: conditions, + } +} + +func prevStatusObject(state state.ResourceState, observedGen int64) StatusObject { + return newDummyObject(metav1.ObjectMeta{}, []metav1.Condition{ + newStateCondition(state, metav1.ConditionTrue, observedGen), + }) +} + +func newStateCondition(reason state.ResourceState, status metav1.ConditionStatus, observedGen int64) metav1.Condition { + return metav1.Condition{ + Type: "State", + Status: status, + Reason: string(reason), + ObservedGeneration: observedGen, + } +} + +// Dummy reconciler implementing StateReconciler[do *dummyObject] +type dummyPodReconciler struct { + handleState func(context.Context, *dummyObject) (Result, error) +} + +func (d *dummyPodReconciler) SetupWithManager(_ ctrl.Manager, _ reconcile.Reconciler, _ ...SetupManagerOption) error { + return nil +} +func (d *dummyPodReconciler) For() (client.Object, builder.Predicates) { + return nil, builder.Predicates{} +} +func (d *dummyPodReconciler) HandleInitial(ctx context.Context, do *dummyObject) (Result, error) { + return d.handleState(ctx, do) +} +func (d *dummyPodReconciler) HandleImportRequested(ctx context.Context, do *dummyObject) (Result, error) { + return d.handleState(ctx, do) +} +func (d *dummyPodReconciler) HandleImported(ctx context.Context, do *dummyObject) (Result, error) { + return d.handleState(ctx, do) +} +func (d *dummyPodReconciler) HandleCreating(ctx context.Context, do *dummyObject) (Result, error) { + return d.handleState(ctx, do) +} +func (d *dummyPodReconciler) HandleCreated(ctx context.Context, do *dummyObject) (Result, error) { + return d.handleState(ctx, do) +} +func (d *dummyPodReconciler) HandleUpdating(ctx context.Context, do *dummyObject) (Result, error) { + return d.handleState(ctx, do) +} +func (d *dummyPodReconciler) HandleUpdated(ctx context.Context, do *dummyObject) (Result, error) { + return 
d.handleState(ctx, do) +} +func (d *dummyPodReconciler) HandleDeletionRequested(ctx context.Context, do *dummyObject) (Result, error) { + return d.handleState(ctx, do) +} +func (d *dummyPodReconciler) HandleDeleting(ctx context.Context, do *dummyObject) (Result, error) { + return d.handleState(ctx, do) +} + +type fakeCluster struct { + cluster.Cluster + cli client.Client +} + +func (f *fakeCluster) GetClient() client.Client { return f.cli } +func (f *fakeCluster) GetScheme() *runtime.Scheme { return f.cli.Scheme() } diff --git a/pkg/controller/state/status.go b/pkg/controller/state/status.go new file mode 100644 index 0000000000..9a18d6f62f --- /dev/null +++ b/pkg/controller/state/status.go @@ -0,0 +1,53 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "context" + "encoding/json" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type StatusObject interface { + GetConditions() []metav1.Condition +} + +type resource struct { + Status statusResource `json:"status,omitempty"` +} + +type statusResource struct { + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +func newStatusObject(statusObj StatusObject) *resource { + return &resource{Status: statusResource{Conditions: statusObj.GetConditions()}} +} + +func patchStatus(ctx context.Context, c client.Client, obj client.Object, status any) error { + statusJSON, err := json.Marshal(status) + if err != nil { + return fmt.Errorf("failed to marshal status: %w", err) + } + patchErr := c.Status().Patch(ctx, obj, client.RawPatch(types.MergePatchType, statusJSON)) + if patchErr != nil { + return fmt.Errorf("failed to patch status: %w", patchErr) + } + return nil +} diff --git a/pkg/finalizer/finalizer.go b/pkg/finalizer/finalizer.go new file mode 100644 index 0000000000..e288d59ca1 --- /dev/null +++ b/pkg/finalizer/finalizer.go @@ -0,0 +1,68 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
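+
+// The helpers in this file add or remove finalizers on an object and persist
+// the change with a JSON patch on /metadata/finalizers, as the state reconciler
+// does via finalizer.EnsureFinalizers(ctx, c, obj, "mongodb.com/finalizer").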
+ +package finalizer + +import ( + "context" + "encoding/json" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func UnsetFinalizers(ctx context.Context, c client.Client, o client.Object, finalizer ...string) error { + for _, f := range finalizer { + controllerutil.RemoveFinalizer(o, f) + } + + data, err := json.Marshal([]map[string]interface{}{{ + "op": "replace", + "path": "/metadata/finalizers", + "value": o.GetFinalizers(), + }}) + if err != nil { + return err + } + + return c.Patch(ctx, o, client.RawPatch(types.JSONPatchType, data)) +} + +func EnsureFinalizers(ctx context.Context, c client.Client, o client.Object, finalizer ...string) error { + hasAllFinalizers := true + for _, f := range finalizer { + if !controllerutil.ContainsFinalizer(o, f) { + hasAllFinalizers = false + } + } + if hasAllFinalizers { + return nil + } + + for _, f := range finalizer { + controllerutil.AddFinalizer(o, f) + } + + data, err := json.Marshal([]map[string]interface{}{{ + "op": "replace", + "path": "/metadata/finalizers", + "value": o.GetFinalizers(), + }}) + if err != nil { + return err + } + + return c.Patch(ctx, o, client.RawPatch(types.JSONPatchType, data)) +} diff --git a/pkg/finalizer/finalizer_test.go b/pkg/finalizer/finalizer_test.go new file mode 100644 index 0000000000..d5a41b6fab --- /dev/null +++ b/pkg/finalizer/finalizer_test.go @@ -0,0 +1,74 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package finalizer_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/finalizer" +) + +func TestUnsetFinalizers(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-obj", + Finalizers: []string{"finalizer1", "finalizer2"}, + }, + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(obj).Build() + ctx := context.TODO() + + err := finalizer.UnsetFinalizers(ctx, fakeClient, obj, "finalizer1") + + require.NoError(t, err) + patched := corev1.Pod{} + require.NoError(t, fakeClient.Get(ctx, client.ObjectKeyFromObject(obj), &patched)) + assert.NotContains(t, patched.GetFinalizers(), "finalizer1") +} + +func TestEnsureFinalizers(t *testing.T) { + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-obj", + Finalizers: []string{}, + }, + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ WithObjects(obj).Build() + ctx := context.TODO() + + err := finalizer.EnsureFinalizers(ctx, fakeClient, obj, "finalizer1") + + require.NoError(t, err) + patched := corev1.Pod{} + require.NoError(t, fakeClient.Get(ctx, client.ObjectKeyFromObject(obj), &patched)) + require.Contains(t, patched.GetFinalizers(), "finalizer1") +} diff --git a/pkg/predicate/annotationchanged.go b/pkg/predicate/annotationchanged.go new file mode 100644 index 0000000000..33c90a4df0 --- /dev/null +++ b/pkg/predicate/annotationchanged.go @@ -0,0 +1,39 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package predicate + +import ( + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +func AnnotationChanged(key string) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldAnn := e.ObjectOld.GetAnnotations() + newAnn := e.ObjectNew.GetAnnotations() + + if oldAnn == nil && newAnn == nil { + return false + } + + oldValue, oldExists := oldAnn[key] + newValue, newExists := newAnn[key] + + result := oldExists != newExists || oldValue != newValue + return result + }, + } +} diff --git a/pkg/predicate/annotationchanged_test.go b/pkg/predicate/annotationchanged_test.go new file mode 100644 index 0000000000..9e47aed158 --- /dev/null +++ b/pkg/predicate/annotationchanged_test.go @@ -0,0 +1,92 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package predicate_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/predicate" +) + +func TestAnnotationChanged(t *testing.T) { + key := "test-key" + p := predicate.AnnotationChanged(key) + + tests := []struct { + name string + old map[string]string + new map[string]string + want bool + }{ + { + name: "No annotations on both objects", + old: nil, + new: nil, + want: false, + }, + { + name: "Annotation added", + old: nil, + new: map[string]string{key: "value"}, + want: true, + }, + { + name: "Annotation removed", + old: map[string]string{key: "value"}, + new: nil, + want: true, + }, + { + name: "Annotation value changed", + old: map[string]string{key: "old-value"}, + new: map[string]string{key: "new-value"}, + want: true, + }, + { + name: "Annotation unchanged", + old: map[string]string{key: "value"}, + new: map[string]string{key: "value"}, + want: false, + }, + { + name: "Different annotation key changed", + old: map[string]string{"other-key": "value"}, + new: map[string]string{"other-key": "new-value"}, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + oldObj := &unstructured.Unstructured{} + oldObj.SetAnnotations(tt.old) + + newObj := &unstructured.Unstructured{} + newObj.SetAnnotations(tt.new) + + e := event.UpdateEvent{ + ObjectOld: oldObj, + ObjectNew: newObj, + } + + result := p.Update(e) + assert.Equal(t, tt.want, result) + }) + } +} diff --git a/pkg/predicate/ignoredelete_test.go b/pkg/predicate/ignoredelete_test.go new file mode 100644 index 0000000000..2e96ea56ba --- /dev/null +++ b/pkg/predicate/ignoredelete_test.go @@ -0,0 +1,36 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package predicate_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/event" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/predicate" +) + +func TestIgnoreDeletedPredicate(t *testing.T) { + p := predicate.IgnoreDeletedPredicate[*unstructured.Unstructured]() + obj := &unstructured.Unstructured{} + e := event.TypedDeleteEvent[*unstructured.Unstructured]{ + Object: obj, + } + + result := p.Delete(e) + assert.False(t, result, "DeleteFunc should always return false") +} diff --git a/pkg/predicate/ignoredeleted.go b/pkg/predicate/ignoredeleted.go new file mode 100644 index 0000000000..858028c0b9 --- /dev/null +++ b/pkg/predicate/ignoredeleted.go @@ -0,0 +1,29 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package predicate + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +func IgnoreDeletedPredicate[T metav1.Object]() predicate.TypedPredicate[T] { + return predicate.TypedFuncs[T]{ + DeleteFunc: func(e event.TypedDeleteEvent[T]) bool { + return false + }, + } +} diff --git a/pkg/ratelimit/ratelimit.go b/pkg/ratelimit/ratelimit.go new file mode 100644 index 0000000000..3b7593b66a --- /dev/null +++ b/pkg/ratelimit/ratelimit.go @@ -0,0 +1,30 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ratelimit + +import ( + "time" + + "golang.org/x/time/rate" + "k8s.io/client-go/util/workqueue" +) + +func NewRateLimiter[T comparable]() workqueue.TypedRateLimiter[T] { + return workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[T](15*time.Second, time.Minute), + // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) + &workqueue.TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ) +} diff --git a/pkg/ratelimit/ratelimit_test.go b/pkg/ratelimit/ratelimit_test.go new file mode 100644 index 0000000000..366cc6fdf4 --- /dev/null +++ b/pkg/ratelimit/ratelimit_test.go @@ -0,0 +1,29 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ratelimit_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/ratelimit" +) + +func TestNewRateLimiter(t *testing.T) { + limiter := ratelimit.NewRateLimiter[reconcile.Request]() + assert.NotNil(t, limiter, "NewRateLimiter should return a non-nil RateLimiter") +} diff --git a/pkg/result/result.go b/pkg/result/result.go new file mode 100644 index 0000000000..2b2b02e419 --- /dev/null +++ b/pkg/result/result.go @@ -0,0 +1,93 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package result + +import ( + "fmt" + "strings" + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + ctrlstate "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/controller/state" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/state" +) + +const ( + DefaultRequeueTIme = 15 * time.Second +) + +func NextState(s state.ResourceState, msg string) (ctrlstate.Result, error) { + if len(msg) > 0 && !strings.HasSuffix(msg, ".") { + msg = msg + "." + } + + switch s { + case state.StateCreated: + return ctrlstate.Result{NextState: s, StateMsg: msg}, nil + + case state.StateImported: + return ctrlstate.Result{NextState: s, StateMsg: msg}, nil + + case state.StateUpdated: + return ctrlstate.Result{NextState: s, StateMsg: msg}, nil + + case state.StateDeleted: + return ctrlstate.Result{NextState: s, StateMsg: msg}, nil + + case state.StateInitial: + return ctrlstate.Result{NextState: s, StateMsg: msg}, nil + + case state.StateImportRequested: + return ctrlstate.Result{NextState: s, StateMsg: msg}, nil + + case state.StateCreating: + return ctrlstate.Result{ + Result: reconcile.Result{RequeueAfter: DefaultRequeueTIme}, + NextState: s, + StateMsg: msg, + }, nil + + case state.StateUpdating: + return ctrlstate.Result{ + Result: reconcile.Result{RequeueAfter: DefaultRequeueTIme}, + NextState: s, + StateMsg: msg, + }, nil + + case state.StateDeleting: + return ctrlstate.Result{ + Result: reconcile.Result{RequeueAfter: DefaultRequeueTIme}, + NextState: s, + StateMsg: msg, + }, nil + + case state.StateDeletionRequested: + return ctrlstate.Result{ + Result: reconcile.Result{RequeueAfter: DefaultRequeueTIme}, + NextState: s, + StateMsg: msg, + }, nil + + default: + return ctrlstate.Result{}, fmt.Errorf("unknown state %v", s) + } +} + +func Error(s state.ResourceState, err error) (ctrlstate.Result, error) { + return ctrlstate.Result{ + NextState: s, + }, err +} diff --git a/pkg/result/result_test.go b/pkg/result/result_test.go new file mode 100644 index 0000000000..38cc24a0f2 --- /dev/null +++ b/pkg/result/result_test.go @@ -0,0 +1,169 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package result + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + ctrlstate "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/controller/state" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/state" +) + +var defaultRequeueResult = reconcile.Result{RequeueAfter: 15 * time.Second} + +func TestNextState(t *testing.T) { + tests := []struct { + name string + state state.ResourceState + msg string + expected ctrlstate.Result + expectedErr error + }{ + { + name: "StateInitial", + state: state.StateInitial, + msg: "", + expected: ctrlstate.Result{ + NextState: state.StateInitial, + StateMsg: "", + }, + }, + { + name: "StateCreated", + state: state.StateCreated, + msg: "Resource created", + expected: ctrlstate.Result{ + NextState: state.StateCreated, + StateMsg: "Resource created.", + }, + }, + { + name: "StateCreating with requeue", + state: state.StateCreating, + msg: "Creating resource", + expected: ctrlstate.Result{ + Result: defaultRequeueResult, + NextState: state.StateCreating, + StateMsg: "Creating resource.", + }, + }, + { + name: "StateUpdated", + state: state.StateUpdated, + msg: "Resource updated", + expected: ctrlstate.Result{ + NextState: state.StateUpdated, + StateMsg: "Resource updated.", + }, + }, + { + name: "StateUpdating with requeue", + state: state.StateUpdating, + msg: "Updating resource", + expected: ctrlstate.Result{ + Result: defaultRequeueResult, + NextState: state.StateUpdating, + StateMsg: "Updating resource.", + }, + }, + { + name: "StateDeleted", + state: state.StateDeleted, + msg: "Resource deleted", + expected: ctrlstate.Result{ + NextState: state.StateDeleted, + StateMsg: "Resource deleted.", + }, + }, + { + name: "StateDeletionRequested", + state: state.StateDeletionRequested, + msg: "Resource delete", + expected: ctrlstate.Result{ + Result: defaultRequeueResult, + NextState: state.StateDeletionRequested, + StateMsg: "Resource delete.", + }, + }, + { + name: "StateDeleting", + state: state.StateDeleting, + msg: "Deleting resource", + expected: ctrlstate.Result{ + Result: defaultRequeueResult, + NextState: state.StateDeleting, + StateMsg: "Deleting resource.", + }, + }, + { + name: "StateImported", + state: state.StateImported, + msg: "Resource imported", + expected: ctrlstate.Result{ + NextState: state.StateImported, + StateMsg: "Resource imported.", + }, + }, + { + name: "StateImportRequested", + state: state.StateImportRequested, + msg: "Resource import", + expected: ctrlstate.Result{ + NextState: state.StateImportRequested, + StateMsg: "Resource import.", + }, + }, + { + name: "Unknown state", + state: state.ResourceState("Unknown"), + msg: "Unknown state", + expectedErr: fmt.Errorf("unknown state %v", state.ResourceState("Unknown")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := NextState(tt.state, tt.msg) + if tt.expectedErr != nil { + require.EqualError(t, err, tt.expectedErr.Error()) + } else { + require.NoError(t, err) + require.Equal(t, tt.expected, result) + } + }) + } +} + +func TestError(t 
*testing.T) { + err := fmt.Errorf("an error occurred") + st := state.StateCreating + + s, returnedErr := Error(st, err) + + require.Equal(t, ctrlstate.Result{ + Result: reconcile.Result{ + Requeue: false, + RequeueAfter: 0, + }, + NextState: st, + StateMsg: "", + }, s) + require.EqualError(t, returnedErr, err.Error()) +} diff --git a/pkg/state/state.go b/pkg/state/state.go new file mode 100644 index 0000000000..a7725b47ae --- /dev/null +++ b/pkg/state/state.go @@ -0,0 +1,73 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package state + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + StateCondition = "State" + ReadyCondition = "Ready" +) + +type ResourceState string + +const ( + StateInitial ResourceState = "Initial" + + StateImportRequested ResourceState = "Importing" + StateImported ResourceState = "Imported" + + StateCreating ResourceState = "Creating" + StateCreated ResourceState = "Created" + + StateUpdating ResourceState = "Updating" + StateUpdated ResourceState = "Updated" + + StateDeletionRequested ResourceState = "DeletionRequested" + StateDeleting ResourceState = "Deleting" + + // Note: StateDeleted this is a terminal state. + // Finalizers will be unset and no state handler will be invoked. + StateDeleted ResourceState = "Deleted" +) + +func GetState(conditions []metav1.Condition) ResourceState { + c := meta.FindStatusCondition(conditions, StateCondition) + if c == nil { + return StateInitial + } + return ResourceState(c.Reason) +} + +func EnsureState(conditions *[]metav1.Condition, observedGeneration int64, state ResourceState, msg string, status bool) { + s := metav1.ConditionFalse + if status { + s = metav1.ConditionTrue + } + + meta.SetStatusCondition(conditions, metav1.Condition{ + Type: "State", + Status: s, + ObservedGeneration: observedGeneration, + LastTransitionTime: metav1.NewTime(time.Now()), + Reason: string(state), + Message: msg, + }) +} diff --git a/pkg/state/state_test.go b/pkg/state/state_test.go new file mode 100644 index 0000000000..846911013c --- /dev/null +++ b/pkg/state/state_test.go @@ -0,0 +1,137 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package state_test + +import ( + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/state" +) + +func TestGetState(t *testing.T) { + tests := []struct { + name string + conds []metav1.Condition + wantState state.ResourceState + }{ + { + name: "no conditions returns initial", + conds: nil, + wantState: state.StateInitial, + }, + { + name: "empty conditions returns initial", + conds: []metav1.Condition{}, + wantState: state.StateInitial, + }, + { + name: "unrelated condition returns initial", + conds: []metav1.Condition{ + {Type: "Other", Reason: "Ignored"}, + }, + wantState: state.StateInitial, + }, + { + name: "state condition returns correct state", + conds: []metav1.Condition{ + {Type: state.StateCondition, Reason: string(state.StateCreated)}, + }, + wantState: state.StateCreated, + }, + { + name: "multiple conditions picks state condition", + conds: []metav1.Condition{ + {Type: "Other", Reason: "Whatever"}, + {Type: state.StateCondition, Reason: string(state.StateDeleted)}, + }, + wantState: state.StateDeleted, + }, + { + name: "state condition with empty reason returns empty string state", + conds: []metav1.Condition{ + {Type: state.StateCondition, Reason: ""}, + }, + wantState: "", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := state.GetState(tc.conds) + if got != tc.wantState { + t.Errorf("GetState() = %v, want %v", got, tc.wantState) + } + }) + } +} + +func TestEnsureState(t *testing.T) { + now := time.Now() + tests := []struct { + name string + status bool + expectedCondition metav1.ConditionStatus + state state.ResourceState + msg string + }{ + { + name: "sets ConditionTrue", + status: true, + expectedCondition: metav1.ConditionTrue, + state: state.StateImported, + msg: "Import successful", + }, + { + name: "sets ConditionFalse", + status: false, + expectedCondition: metav1.ConditionFalse, + state: state.StateDeleting, + msg: "Deletion in progress", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var conds []metav1.Condition + observedGen := int64(123) + state.EnsureState(&conds, observedGen, tc.state, tc.msg, tc.status) + if len(conds) != 1 { + t.Fatalf("expected 1 condition, got %d", len(conds)) + } + got := conds[0] + if got.Type != state.StateCondition { + t.Errorf("Condition Type = %v, want %v", got.Type, state.StateCondition) + } + if got.Status != tc.expectedCondition { + t.Errorf("Condition Status = %v, want %v", got.Status, tc.expectedCondition) + } + if got.Reason != string(tc.state) { + t.Errorf("Condition Reason = %v, want %v", got.Reason, tc.state) + } + if got.Message != tc.msg { + t.Errorf("Condition Message = %v, want %v", got.Message, tc.msg) + } + if got.ObservedGeneration != observedGen { + t.Errorf("ObservedGeneration = %v, want %v", got.ObservedGeneration, observedGen) + } + if got.LastTransitionTime.Time.Before(now) { + t.Errorf("LastTransitionTime = %v, should be >= test start time", got.LastTransitionTime) + } + }) + } +} diff --git a/test/e2e/dry_run_test.go b/test/e2e/dry_run_test.go index 1a2fbc1e3f..8db4ece05c 100644 --- a/test/e2e/dry_run_test.go +++ b/test/e2e/dry_run_test.go @@ -39,7 +39,7 @@ import ( "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/api/atlas" e2e_config "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/config" "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/model" - "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/operator" + 
"github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/operator" ) type ( diff --git a/test/e2e2/configs/datadog.sample.yml b/test/e2e2/configs/datadog.sample.yml new file mode 100644 index 0000000000..d9951622d8 --- /dev/null +++ b/test/e2e2/configs/datadog.sample.yml @@ -0,0 +1,32 @@ +apiVersion: atlas.mongodb.com/v1 +kind: AtlasProject +metadata: + name: atlas-project-test-datadog +spec: + name: atlas-project-test-datadog +--- +apiVersion: v1 +kind: Secret +metadata: + name: datadog-secret + namespace: + labels: + atlas.mongodb.com/type: credentials +stringData: + apiKey: 1117e51ce6725368c37c3535959a3a75 +--- +apiVersion: atlas.nextapi.mongodb.com/v1 +kind: AtlasThirdPartyIntegration +metadata: + name: atlas-datadog-integ +spec: + projectRef: + name: atlas-project-test-datadog + type: DATADOG + datadog: + apiKeySecretRef: + name: datadog-secret + region: US + sendCollectionLatencyMetrics: enabled + sendDatabaseMetrics: enabled + diff --git a/test/e2e2/integration_test.go b/test/e2e2/integration_test.go new file mode 100644 index 0000000000..e0a27bed85 --- /dev/null +++ b/test/e2e2/integration_test.go @@ -0,0 +1,94 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e2_test + +import ( + "context" + "embed" + "os" + "testing" + "time" + + "github.com/go-logr/zapr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" + + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/control" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/kube" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/operator" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/yml" +) + +//go:embed configs/* +var configs embed.FS + +const ( + AtlasThirdPartyIntegrationsCRD = "atlasthirdpartyintegrations.atlas.nextapi.mongodb.com" +) + +func TestAtlasThirdPartyIntegrationsCreate(t *testing.T) { + control.SkipTestUnless(t, "AKO_E2E2_TEST") + ns := control.MustEnvVar("OPERATOR_NAMESPACE") + + ako := runTestAKO() + ako.Start(t) + defer ako.Stop(t) + ctx := context.Background() + logger := zaptest.NewLogger(t) + logrLogger := zapr.NewLogger(logger) + ctrllog.SetLogger(logrLogger.WithName("test")) + for _, tc := range []struct { + name string + objs []client.Object + wantReady string + }{ + { + name: "simple datadog sample", + objs: yml.MustParseCRs(yml.MustOpen(configs, "configs/datadog.sample.yml")), + wantReady: "atlas-datadog-integ", + }, + } { + t.Run(tc.name, func(t *testing.T) { + kubeClient, err := kube.NewK8sTest(ctx, AtlasThirdPartyIntegrationsCRD) + require.NoError(t, err, "Kubernetes test env is not available") + + require.NoError(t, kube.Apply(ctx, kubeClient, ns, tc.objs...)) + integration := 
akov2next.AtlasThirdPartyIntegration{
+				ObjectMeta: v1.ObjectMeta{
+					Name:      tc.wantReady,
+					Namespace: ns,
+				},
+			}
+			key := client.ObjectKeyFromObject(&integration)
+			assert.NoError(t, kube.WaitConditionOrFailure(time.Minute, func() (bool, error) {
+				return kube.AssertObjReady(ctx, kubeClient, key, &integration)
+			}))
+		})
+	}
+}
+
+func runTestAKO() *operator.Operator {
+	os.Setenv("EXPERIMENTAL", "true")
+	return operator.NewOperator(control.MustEnvVar("OPERATOR_NAMESPACE"), os.Stdout, os.Stderr,
+		"--log-level=-9",
+		"--global-api-secret-name=mongodb-atlas-operator-api-key",
+		`--atlas-domain=https://cloud-qa.mongodb.com`,
+	)
+}
diff --git a/test/helper/control/enable.go b/test/helper/control/enable.go
index 707118c1f5..32d348ffd6 100644
--- a/test/helper/control/enable.go
+++ b/test/helper/control/enable.go
@@ -15,14 +15,23 @@ package control
 
 import (
+	"fmt"
 	"os"
-	"strings"
+	"strconv"
 	"testing"
 )
 
 func Enabled(envvar string) bool {
-	value := strings.ToLower(os.Getenv(envvar))
-	return value == "1"
+	envSet, _ := strconv.ParseBool(os.Getenv(envvar))
+	return envSet
+}
+
+func MustEnvVar(envvar string) string {
+	value, ok := os.LookupEnv(envvar)
+	if !ok {
+		panic(fmt.Errorf("missing required environment variable: %v", envvar))
+	}
+	return value
 }
 
 func SkipTestUnless(t *testing.T, envvar string) {
diff --git a/test/helper/e2e2/kube/kube.go b/test/helper/e2e2/kube/kube.go
new file mode 100644
index 0000000000..3095bbb656
--- /dev/null
+++ b/test/helper/e2e2/kube/kube.go
@@ -0,0 +1,176 @@
+// Copyright 2025 MongoDB Inc
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package kube
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1"
+	akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1"
+)
+
+const (
+	Pause = time.Second
+)
+
+type ObjectWithStatus interface {
+	client.Object
+	GetConditions() []metav1.Condition
+}
+
+// NewK8sTest initializes a test environment on Kubernetes.
+// It requires:
+// - A running Kubernetes cluster with a local configuration bound to it.
+// - The given set of CRDs installed in that cluster.
+func NewK8sTest(ctx context.Context, crds ...string) (client.Client, error) {
+	kubeClient, err := TestKubeClient()
+	if err != nil {
+		return nil, fmt.Errorf("failed to setup Kubernetes test env client: %w", err)
+	}
+
+	for _, targetCRD := range crds {
+		if err := assertCRD(ctx, kubeClient, targetCRD); err != nil {
+			return nil, fmt.Errorf("failed to assert for test-required CRD: %w", err)
+		}
+	}
+	return kubeClient, nil
+}
+
+// TestKubeClient returns a Kubernetes client for tests.
+// It requires a running Kubernetes cluster and a local configuration bound to it.
+// It supports core Kubernetes types as well as production and experimental CRDs.
+func TestKubeClient() (client.Client, error) {
+	testScheme, err := getTestScheme(
+		corev1.AddToScheme,
+		apiextensionsv1.AddToScheme,
+		akov2.AddToScheme,
+		akov2next.AddToScheme)
+	if err != nil {
+		return nil, fmt.Errorf("failed to setup Kubernetes test env scheme: %w", err)
+	}
+	return getKubeClient(testScheme)
+}
+
+func Apply(ctx context.Context, kubeClient client.Client, defaultNamespace string, objs ...client.Object) error {
+	for i, obj := range objs {
+		if obj.GetNamespace() == "" {
+			obj = obj.DeepCopyObject().(client.Object)
+			obj.SetNamespace(defaultNamespace)
+		}
+		if err := apply(ctx, kubeClient, obj); err != nil {
+			return fmt.Errorf("failed to apply object %d: %w", (i + 1), err)
+		}
+	}
+	return nil
+}
+
+func apply(ctx context.Context, kubeClient client.Client, obj client.Object) error {
+	key := client.ObjectKeyFromObject(obj)
+	old := obj.DeepCopyObject().(client.Object)
+	err := kubeClient.Get(ctx, key, old)
+	switch {
+	case err == nil:
+		obj = obj.DeepCopyObject().(client.Object)
+		obj.SetResourceVersion(old.GetResourceVersion())
+		if err := kubeClient.Update(ctx, obj); err != nil {
+			return fmt.Errorf("failed to update %s: %w", key, err)
+		}
+	case apierrors.IsNotFound(err):
+		if err := kubeClient.Create(ctx, obj); err != nil {
+			return fmt.Errorf("failed to create %s: %w", key, err)
+		}
+	default:
+		return fmt.Errorf("failed to apply %s: %w", key, err)
+	}
+	return nil
+}
+
+type OKOrFailureFunc func() (bool, error)
+
+func WaitConditionOrFailure(timeout time.Duration, okOrFailFn OKOrFailureFunc) error {
+	start := time.Now()
+	for {
+		ok, err := okOrFailFn()
+		if ok {
+			return nil
+		}
+		if err != nil {
+			return fmt.Errorf("failed to check condition: %w", err)
+		}
+		if time.Since(start) > timeout {
+			return errors.New("wait condition timed out")
+		}
+		time.Sleep(Pause)
+	}
+}
+
+func AssertObjReady(ctx context.Context, kubeClient client.Client, key client.ObjectKey, obj ObjectWithStatus) (bool, error) {
+	err := kubeClient.Get(ctx, key, obj)
+	if err != nil {
+		return false, fmt.Errorf("failed to get object %v: %w", key, err)
+	}
+	for _, condition := range obj.GetConditions() {
+		if condition.Type == "Ready" && condition.Status == metav1.ConditionTrue {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+func getTestScheme(addToSchemeFunctions ...func(*runtime.Scheme) error) (*runtime.Scheme, error) {
+	testScheme := runtime.NewScheme()
+	for _, addToSchemeFn := range addToSchemeFunctions {
+		if err := addToSchemeFn(testScheme); err != nil {
+			return nil, fmt.Errorf("failed to add to testScheme: %w", err)
+		}
+	}
+	return testScheme, nil
+}
+
+func getKubeClient(scheme *runtime.Scheme) (client.Client, error) {
+	restCfg, err := ctrl.GetConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Kubernetes config (is cluster configured?): %w", err)
+	}
+	kubeClient, err := client.New(restCfg, client.Options{Scheme: scheme})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Kubernetes client (is cluster up?): %w", err)
+	}
+	return kubeClient, nil
+}
+
+func assertCRD(ctx context.Context, kubeClient client.Client, targetCRD string) error {
+	crds := apiextensionsv1.CustomResourceDefinitionList{}
+	if err := kubeClient.List(ctx, &crds, &client.ListOptions{}); err != nil {
+		return fmt.Errorf("failed to list CRDs: %w", err)
+	}
+	for _, crd := range crds.Items {
+		if crd.Name == targetCRD {
+			return nil
+		}
+	}
+	return fmt.Errorf("%s not found", targetCRD)
+}
diff --git a/test/helper/e2e/operator/dir.go b/test/helper/e2e2/operator/dir.go
similarity index 100%
rename from test/helper/e2e/operator/dir.go
rename to test/helper/e2e2/operator/dir.go
diff --git a/test/helper/e2e/operator/operator.go b/test/helper/e2e2/operator/operator.go
similarity index 70%
rename from test/helper/e2e/operator/operator.go
rename to test/helper/e2e2/operator/operator.go
index 0387873fda..70dcf557ed 100644
--- a/test/helper/e2e/operator/operator.go
+++ b/test/helper/e2e2/operator/operator.go
@@ -25,6 +25,10 @@ import (
 	"syscall"
 )
 
+const (
+	DefaultDelveListen = ":2345"
+)
+
 func NoGoRunEnvSet() bool {
 	envSet, _ := strconv.ParseBool(os.Getenv("NO_GORUN"))
 	return envSet
@@ -37,13 +41,28 @@ func RunDelveEnvSet() bool {
 
 func operatorCommand() []string {
 	if RunDelveEnvSet() {
-		return []string{"dlv", "exec", "--api-version=2", "--headless=true", "--listen=:2345", filepath.Join(repositoryDir(), "bin", "manager"), "--"}
+		return []string{
+			"dlv", "exec",
+			"--api-version=2",
+			"--headless=true",
+			delveListenFlag(),
+			filepath.Join(repositoryDir(), operatorBinary()),
+			"--",
+		}
 	}
 	if NoGoRunEnvSet() {
-		return []string{filepath.Join(repositoryDir(), "bin", "manager")}
+		return []string{filepath.Join(repositoryDir(), operatorBinary())}
 	}
+	if os.Getenv("EXPERIMENTAL") == "true" {
+		return []string{
+			"go",
+			"run",
+			"-ldflags=-X github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/version.Experimental=true",
+			filepath.Join(repositoryDir(), "cmd"),
+		}
+	}
 	return []string{"go", "run", filepath.Join(repositoryDir(), "cmd")}
 }
 
@@ -108,13 +127,38 @@ func (o *Operator) Stop(t testingT) {
 		pid = -o.cmd.Process.Pid
 	}
 
+	terminated := false
 	if pid != 0 {
 		if err := syscall.Kill(pid, syscall.SIGTERM); err != nil {
 			t.Errorf("error trying to kill command: %v", err)
 		}
+		terminated = true
 	}
 
 	if err := o.cmd.Wait(); err != nil {
-		t.Errorf("error stopping operator: %v", err)
+		if terminated {
+			if waitStatus, ok := (o.cmd.ProcessState.Sys()).(syscall.WaitStatus); ok {
+				if waitStatus.Signaled() && waitStatus.Signal() == syscall.SIGTERM {
+					return // ignore the SIGTERM exit status if we sent SIGTERM ourselves
+				}
+			}
+		}
+		t.Errorf("error stopping operator terminated=%v : %+#v", terminated, err)
+	}
+}
+
+func operatorBinary() string {
+	return envVarOrDefault("AKO_BINARY", filepath.Join("bin", "manager"))
+}
+
+func delveListenFlag() string {
+	return fmt.Sprintf("--listen=%s", envVarOrDefault("DELVE_LISTEN", DefaultDelveListen))
+}
+
+func envVarOrDefault(name, defaultValue string) string {
+	value, ok := os.LookupEnv(name)
+	if ok {
+		return value
 	}
+	return defaultValue
 }
diff --git a/test/helper/e2e2/yml/samples/sample.yml b/test/helper/e2e2/yml/samples/sample.yml
new file mode 100644
index 0000000000..1d24c8e82d
--- /dev/null
+++ b/test/helper/e2e2/yml/samples/sample.yml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: datadog-secret
+  labels:
+    atlas.mongodb.com/type: credentials
+stringData:
+  apiKey: 1117e51ce6725368c37c3535959a3a75
+---
+apiVersion: atlas.nextapi.mongodb.com/v1
+kind: AtlasThirdPartyIntegration
+metadata:
+  name: my-atlas-integ
+spec:
+  externalProjectRef: # Either this or a projectRef as usual
+    id: 68359c51ce672533a751117e
+  connectionSecret: # Optional ref when using projectRef instead
+    name: mongodb-atlas-operator-api-key
+  type: DATADOG
+  datadog:
+    apiKeySecretRef:
+      name: datadog-secret # secret contains Datadog's ApiKey
+    region: US
+    sendCollectionLatencyMetrics: enabled
+    sendDatabaseMetrics: enabled
+
diff --git a/test/helper/e2e2/yml/yml.go b/test/helper/e2e2/yml/yml.go
new file mode 100644
index 0000000000..ff25f18ec9
--- /dev/null
+++ b/test/helper/e2e2/yml/yml.go
@@ -0,0 +1,164 @@
+// Copyright 2025 MongoDB Inc
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yml
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"log"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/scale/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1"
+	akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1"
+)
+
+var (
+	// ErrNoCR indicates the parsed YAML is not a valid CR
+	ErrNoCR = errors.New("YAML definition is not a CR")
+)
+
+type autoCloser struct {
+	io.ReadCloser
+	closed bool
+}
+
+func (ac *autoCloser) Read(b []byte) (int, error) {
+	if ac.closed {
+		return 0, io.EOF
+	}
+	n, err := ac.ReadCloser.Read(b)
+	if err == io.EOF {
+		ac.closed = true
+		if err := ac.ReadCloser.Close(); err != nil {
+			log.Printf("autoCloser failed to close %v: %v", ac.ReadCloser, err)
+		}
+	}
+	return n, err
+}
+
+func MustOpen(fsys fs.FS, path string) io.Reader {
+	f, err := fsys.Open(path)
+	if err != nil {
+		panic(fmt.Errorf("Fatal: could not open virtual file system path %q: %w", path, err))
+	}
+	return &autoCloser{ReadCloser: f}
+}
+
+func MustParseCRs(ymls io.Reader) []client.Object {
+	objs, err := ParseCRs(ymls)
+	if err != nil {
+		panic(fmt.Errorf("Fatal: could not parse CRs: %w", err))
+	}
+	return objs
+}
+
+func ParseCRs(ymls io.Reader) ([]client.Object, error) {
+	var buffer bytes.Buffer
+	scanner := bufio.NewScanner(ymls)
+	objs := []client.Object{}
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if strings.TrimSpace(line) == "---" {
+			if len(strings.TrimSpace(buffer.String())) > 0 {
+				obj, err := DecodeCR(buffer.Bytes())
+				if errors.Is(err, ErrNoCR) {
+					buffer.Reset()
+					continue
+				}
+				if err != nil {
+					return nil, err
+				}
+				objs = append(objs, obj)
+				buffer.Reset()
+			}
+			continue
+		}
+		if strings.HasPrefix(line, "#") {
+			continue
+		}
+		buffer.WriteString(line + "\n")
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read input: %w", err)
+	}
+
+	if buffer.Len() > 0 {
+		obj, err := DecodeCR(buffer.Bytes())
+		if errors.Is(err, ErrNoCR) {
+			return objs, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+		objs = append(objs, obj)
+	}
+	return objs, nil
+}
+
+func DecodeCR(content []byte) (client.Object, error) {
+	sch := runtime.NewScheme()
+
+	for _, addOrRegisterFn := range []func(*runtime.Scheme) error{
+		scheme.AddToScheme,
+		apiextensions.AddToScheme,
+		apiextensionsv1.AddToScheme,
+		apiextensionsv1.RegisterConversions,
+		apiextensionsv1beta1.AddToScheme,
+
apiextensionsv1beta1.RegisterConversions, + corev1.AddToScheme, + } { + if err := addOrRegisterFn(sch); err != nil { + return nil, fmt.Errorf("failed to add API extension scheme or register conversion: %w", err) + } + } + + for _, addToSchemeFn := range []func(*runtime.Scheme) error{ + akov2.AddToScheme, + akov2next.AddToScheme, + } { + if err := addToSchemeFn(sch); err != nil { + return nil, fmt.Errorf("failed to add Operator scheme: %w", err) + } + } + + decode := serializer.NewCodecFactory(sch).UniversalDeserializer().Decode + + rtObj, _, err := decode(content, nil, nil) + if err != nil { + return nil, fmt.Errorf("failed to decode YAML: %w", err) + } + + obj, ok := rtObj.(client.Object) + if !ok { + return nil, fmt.Errorf("decoded object is not a client.Object: %T", rtObj) + } + + return obj, nil +} diff --git a/test/helper/e2e2/yml/yml_test.go b/test/helper/e2e2/yml/yml_test.go new file mode 100644 index 0000000000..a8242a344c --- /dev/null +++ b/test/helper/e2e2/yml/yml_test.go @@ -0,0 +1,73 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yml_test + +import ( + "embed" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/api" + akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + akov2next "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/yml" +) + +//go:embed samples/* +var samples embed.FS + +func TestParseCRs(t *testing.T) { + in, err := samples.Open("samples/sample.yml") + require.NoError(t, err) + defer in.Close() + + objs, err := yml.ParseCRs(in) + require.NoError(t, err) + assert.Len(t, objs, 2) + assert.IsType(t, &corev1.Secret{}, objs[0]) + assert.Equal(t, &akov2next.AtlasThirdPartyIntegration{ + TypeMeta: metav1.TypeMeta{ + Kind: "AtlasThirdPartyIntegration", + APIVersion: "atlas.nextapi.mongodb.com/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-atlas-integ", + }, + Spec: akov2next.AtlasThirdPartyIntegrationSpec{ + ProjectDualReference: akov2.ProjectDualReference{ + ExternalProjectRef: &akov2.ExternalProjectReference{ + ID: "68359c51ce672533a751117e", + }, + ConnectionSecret: &api.LocalObjectReference{ + Name: "mongodb-atlas-operator-api-key", + }, + }, + Type: "DATADOG", + Datadog: &akov2next.DatadogIntegration{ + APIKeySecretRef: api.LocalObjectReference{ + Name: "datadog-secret", + }, + Region: "US", + SendCollectionLatencyMetrics: pointer.MakePtr("enabled"), + SendDatabaseMetrics: pointer.MakePtr("enabled"), + }, + }, + }, objs[1]) +}