diff --git a/docs/OpenShiftPipelinesAsCode.md b/docs/OpenShiftPipelinesAsCode.md index 9197fa9ab0..399f242046 100644 --- a/docs/OpenShiftPipelinesAsCode.md +++ b/docs/OpenShiftPipelinesAsCode.md @@ -20,6 +20,13 @@ It is recommended that you install OpenShiftPipelinesAsCode through [TektonConfi metadata: name: pipelines-as-code spec: + additionalPACControllers: + controllername: + enable: true + configMapName: + secretName: + settings: + enable: true settings: application-name: Pipelines as Code CI auto-configure-new-github-repo: "false" @@ -57,12 +64,61 @@ The recommended way to update the OpenShiftPipelinesAsCode CR is using [TektonCo Set this field to provide the namespace in which you want to install the PipelinesAsCode component. -### Properties +### Properties (Optional) The fields have default values so even if the user has not passed them in CR, operator will add them. The user can later change them as per their need. Details of the field can be found in [OpenShift Pipelines As Code Settings][pac-config] +#### Additional Pipelines As Code Controller (Optional) + +If users want to deploy an additional Pipelines As Code controller on their cluster along with the default Pipelines As Code +controller then they need to provide the `additionalPACControllers` field in the `pipelinesAsCode` section. + +Example: + +```yaml +pipelinesAsCode: + additionalPACControllers: # can provide a list of controllers + controllername: + enable: + configMapName: + secretName: + settings: +``` + +- `controllerName` is the unique name of the new controller, should not be more than 25 characters and should follow k8s naming rules. + +- `enable` is optional with default value of true. You can use this field to disable the additional PAC controller + without removing the details from the CR. + +- `configMapName` is optional and is to provide the ConfigMap name of additional PAC Controller. 
If user doesn't + provide any value then Operator will add controllerName + `-pipelines-as-code-configmap` as default value. If user + provides configMap name as `pipelines-as-code` for additional Pipelines As Code controller, then operator will not create + the configMap and the default `pipelines-as-code` configMap will be used with default settings. + +- `secretName` is optional and is to provide the secret name of additional PAC Controller. If user does not provide any + value then operator will add controllerName + `-pipelines-as-code-secret` as default value to be added to deployment env. + +- `settings` is optional and used to set the settings in the configMap of additional PAC Controller. For the fields which + are not provided, default value will be used. You can check them [here](https://pipelinesascode.com/docs/install/settings/#pipelines-as-code-configuration-settings). + Also, if configmap name is provided as `pipelines-as-code` then these settings will not be taken. + +> **NOTE:** Users can deploy multiple additional PAC Controllers by providing multiple entries in `additionalPACControllers` field. 
+ +Example: + +```yaml +pipelinesAsCode: + additionalPACControllers: + firstcontroller: + enable: true + secondcontroller: + enable: true + configMapName: second-config + secretName: second-secret +``` + [PipelinesAsCode]:https://github.com/openshift-pipelines/pipelines-as-code [pac-config]:https://pipelinesascode.com/docs/install/settings/#pipelines-as-code-configuration-settings diff --git a/docs/TektonConfig.md b/docs/TektonConfig.md index cc46d5d48a..1645ab23af 100644 --- a/docs/TektonConfig.md +++ b/docs/TektonConfig.md @@ -111,6 +111,12 @@ The TektonConfig CR provides the following features platforms: openshift: pipelinesAsCode: + additionalPACControllers: + : + enable: true + configMapName: + secretName: + settings: enable: true settings: application-name: Pipelines as Code CI @@ -417,6 +423,12 @@ Example: platforms: openshift: pipelinesAsCode: + additionalPACControllers: + controllername: + enable: true + configMapName: + secretName: + settings: enable: true settings: application-name: Pipelines as Code CI @@ -440,12 +452,6 @@ platforms: **NOTE**: OpenShiftPipelinesAsCode is currently available for the OpenShift Platform only. -[node-selector]:https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector -[tolerations]:https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ -[schedule]:https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax -[priorityClassName]: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#pod-priority -[priorityClass]: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass - ### Additional fields as `options` There is a field called `options` available in all the components.
@@ -636,3 +642,9 @@ The following fields are supported in `HorizontalPodAutoscaler` (aka HPA) * `scaleDown` - replaces scaleDown with this, if not empty **NOTE**: If a Deployment or StatefulSet has a Horizontal Pod Autoscaling (HPA) and is in active state, Operator will not control the replicas to that resource. However if `status.desiredReplicas` and `spec.minReplicas` not present in HPA, operator takes the control. Also if HPA disabled, operator takes control. Even though the operator takes the control, the replicas value will be adjusted to the hpa's scaling range. + +[node-selector]:https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector +[tolerations]:https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +[schedule]:https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#cron-schedule-syntax +[priorityClassName]: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#pod-priority +[priorityClass]: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass diff --git a/pkg/apis/operator/v1alpha1/const.go b/pkg/apis/operator/v1alpha1/const.go index 20b92dee52..b533570492 100644 --- a/pkg/apis/operator/v1alpha1/const.go +++ b/pkg/apis/operator/v1alpha1/const.go @@ -43,7 +43,7 @@ const ( LastAppliedHashKey = "operator.tekton.dev/last-applied-hash" CreatedByKey = "operator.tekton.dev/created-by" ReleaseVersionKey = "operator.tekton.dev/release-version" - Component = "operator.tekton.dev/component" // Used in case a component has sub-components eg TektonHub + ComponentKey = "operator.tekton.dev/component" // Used in case a component has sub-components eg OpenShiftPipelineAsCode ReleaseMinorVersionKey = "operator.tekton.dev/release-minor-version" TargetNamespaceKey = "operator.tekton.dev/target-namespace" InstallerSetType = "operator.tekton.dev/type" diff --git a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_defaults.go 
b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_defaults.go index 368660578d..7b093c6543 100644 --- a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_defaults.go +++ b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_defaults.go @@ -18,20 +18,42 @@ package v1alpha1 import ( "context" + "fmt" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/settings" + "knative.dev/pkg/ptr" ) func (pac *OpenShiftPipelinesAsCode) SetDefaults(ctx context.Context) { if pac.Spec.PACSettings.Settings == nil { pac.Spec.PACSettings.Settings = map[string]string{} } - setPACDefaults(pac.Spec.PACSettings) + if pac.Spec.PACSettings.AdditionalPACControllers == nil { + pac.Spec.PACSettings.AdditionalPACControllers = map[string]AdditionalPACControllerConfig{} + } + pac.Spec.PACSettings.setPACDefaults() } -func setPACDefaults(set PACSettings) { +func (set *PACSettings) setPACDefaults() { if set.Settings == nil { set.Settings = map[string]string{} } settings.SetDefaults(set.Settings) + setAdditionalPACControllerDefault(set.AdditionalPACControllers) +} + +// Set the default values for additional PAc controller resources +func setAdditionalPACControllerDefault(additionalPACController map[string]AdditionalPACControllerConfig) { + for name, additionalPACInfo := range additionalPACController { + if additionalPACInfo.Enable == nil { + additionalPACInfo.Enable = ptr.Bool(true) + } + if additionalPACInfo.ConfigMapName == "" { + additionalPACInfo.ConfigMapName = fmt.Sprintf("%s-pipelines-as-code-configmap", name) + } + if additionalPACInfo.SecretName == "" { + additionalPACInfo.SecretName = fmt.Sprintf("%s-pipelines-as-code-secret", name) + } + additionalPACController[name] = additionalPACInfo + } } diff --git a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_defaults_test.go b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_defaults_test.go new file mode 100644 index 0000000000..9109f35be2 --- /dev/null +++ 
b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_defaults_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2024 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/ptr" +) + +func TestSetAdditionalPACControllerDefault(t *testing.T) { + opacCR := &OpenShiftPipelinesAsCode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: OpenShiftPipelinesAsCodeSpec{ + PACSettings: PACSettings{ + Settings: map[string]string{}, + AdditionalPACControllers: map[string]AdditionalPACControllerConfig{ + "test": {}, + }, + }, + }, + } + + opacCR.Spec.PACSettings.setPACDefaults() + + assert.Equal(t, true, *opacCR.Spec.PACSettings.AdditionalPACControllers["test"].Enable) + assert.Equal(t, "test-pipelines-as-code-configmap", opacCR.Spec.PACSettings.AdditionalPACControllers["test"].ConfigMapName) + assert.Equal(t, "test-pipelines-as-code-secret", opacCR.Spec.PACSettings.AdditionalPACControllers["test"].SecretName) +} + +func TestSetAdditionalPACControllerDefaultHavingAdditionalPACController(t *testing.T) { + opacCR := &OpenShiftPipelinesAsCode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: OpenShiftPipelinesAsCodeSpec{ + PACSettings: PACSettings{ + Settings: map[string]string{}, + AdditionalPACControllers: map[string]AdditionalPACControllerConfig{ + "test": { + Enable: 
ptr.Bool(false), + ConfigMapName: "test-configmap", + SecretName: "test-secret", + Settings: map[string]string{ + "application-name": "Additional PACController CI", + "custom-console-name": "custom", + "custom-console-url": "https://custom.com", + }, + }, + }, + }, + }, + } + + opacCR.Spec.PACSettings.setPACDefaults() + + assert.Equal(t, false, *opacCR.Spec.PACSettings.AdditionalPACControllers["test"].Enable) + assert.Equal(t, "Additional PACController CI", opacCR.Spec.PACSettings.AdditionalPACControllers["test"].Settings["application-name"]) + assert.Equal(t, "custom", opacCR.Spec.PACSettings.AdditionalPACControllers["test"].Settings["custom-console-name"]) + assert.Equal(t, "https://custom.com", opacCR.Spec.PACSettings.AdditionalPACControllers["test"].Settings["custom-console-url"]) +} diff --git a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_lifecycle.go b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_lifecycle.go index e75f19caa8..b2aca8c425 100644 --- a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_lifecycle.go +++ b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_lifecycle.go @@ -29,10 +29,15 @@ var ( PreReconciler, InstallerSetAvailable, InstallerSetReady, + AdditionalPACControllerInstalled, PostReconciler, ) ) +const ( + AdditionalPACControllerInstalled apis.ConditionType = "AdditionalPACControllerInstalled" +) + func (pac *OpenShiftPipelinesAsCode) GroupVersionKind() schema.GroupVersionKind { return SchemeGroupVersion.WithKind(KindOpenShiftPipelinesAsCode) } @@ -65,6 +70,10 @@ func (pac *OpenShiftPipelinesAsCodeStatus) MarkInstallerSetReady() { opacCondSet.Manage(pac).MarkTrue(InstallerSetReady) } +func (pac *OpenShiftPipelinesAsCodeStatus) MarkAdditionalPACControllerComplete() { + opacCondSet.Manage(pac).MarkTrue(AdditionalPACControllerInstalled) +} + func (pac *OpenShiftPipelinesAsCodeStatus) MarkPostReconcilerComplete() { opacCondSet.Manage(pac).MarkTrue(PostReconciler) } diff --git 
a/pkg/apis/operator/v1alpha1/openshiftpipelinesaascode_lifecycle_test.go b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_lifecycle_test.go similarity index 86% rename from pkg/apis/operator/v1alpha1/openshiftpipelinesaascode_lifecycle_test.go rename to pkg/apis/operator/v1alpha1/openshiftpipelinesascode_lifecycle_test.go index 603e12e0a8..04c655985b 100644 --- a/pkg/apis/operator/v1alpha1/openshiftpipelinesaascode_lifecycle_test.go +++ b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_lifecycle_test.go @@ -43,6 +43,7 @@ func TestOpenShiftPipelinesAsCodeHappyPath(t *testing.T) { apistest.CheckConditionOngoing(pac, PreReconciler, t) apistest.CheckConditionOngoing(pac, InstallerSetAvailable, t) apistest.CheckConditionOngoing(pac, InstallerSetReady, t) + apistest.CheckConditionOngoing(pac, AdditionalPACControllerInstalled, t) apistest.CheckConditionOngoing(pac, PostReconciler, t) // Dependencies installed @@ -61,10 +62,15 @@ func TestOpenShiftPipelinesAsCodeHappyPath(t *testing.T) { pac.MarkInstallerSetNotReady("waiting for deployments") apistest.CheckConditionFailed(pac, InstallerSetReady, t) - // InstallerSet and then PostReconciler become ready and we're good. + // InstallerSet is ready pac.MarkInstallerSetReady() apistest.CheckConditionSucceeded(pac, InstallerSetReady, t) + // AdditionalInstallerSet is complete + pac.MarkAdditionalPACControllerComplete() + apistest.CheckConditionSucceeded(pac, AdditionalPACControllerInstalled, t) + + // PostReconciler become ready and we're good. 
pac.MarkPostReconcilerComplete() apistest.CheckConditionSucceeded(pac, PostReconciler, t) @@ -81,6 +87,7 @@ func TestOpenShiftPipelinesAsCodeErrorPath(t *testing.T) { apistest.CheckConditionOngoing(pac, PreReconciler, t) apistest.CheckConditionOngoing(pac, InstallerSetAvailable, t) apistest.CheckConditionOngoing(pac, InstallerSetReady, t) + apistest.CheckConditionOngoing(pac, AdditionalPACControllerInstalled, t) apistest.CheckConditionOngoing(pac, PostReconciler, t) // Dependencies installed @@ -99,10 +106,15 @@ func TestOpenShiftPipelinesAsCodeErrorPath(t *testing.T) { pac.MarkInstallerSetNotReady("waiting for deployments") apistest.CheckConditionFailed(pac, InstallerSetReady, t) - // InstallerSet and then PostReconciler become ready and we're good. + // InstallerSet is ready pac.MarkInstallerSetReady() apistest.CheckConditionSucceeded(pac, InstallerSetReady, t) + // AdditionalInstallerSet is complete + pac.MarkAdditionalPACControllerComplete() + apistest.CheckConditionSucceeded(pac, AdditionalPACControllerInstalled, t) + + // PostReconciler become ready and we're good. 
pac.MarkPostReconcilerComplete() apistest.CheckConditionSucceeded(pac, PostReconciler, t) diff --git a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_types.go b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_types.go index 890e7186fa..575c305e0c 100644 --- a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_types.go +++ b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_types.go @@ -70,6 +70,25 @@ type OpenShiftPipelinesAsCodeList struct { type PACSettings struct { Settings map[string]string `json:"settings,omitempty"` + // AdditionalPACControllers allows to deploy additional PAC controller + // +optional + AdditionalPACControllers map[string]AdditionalPACControllerConfig `json:"additionalPACControllers,omitempty"` // options holds additions fields and these fields will be updated on the manifests Options AdditionalOptions `json:"options"` } + +// AdditionalPACControllerConfig contains config for additionalPACControllers +type AdditionalPACControllerConfig struct { + // Enable or disable this additional pipelines as code instance by changing this bool + // +optional + Enable *bool `json:"enable,omitempty"` + // Name of the additional controller configMap + // +optional + ConfigMapName string `json:"configMapName,omitempty"` + // Name of the additional controller Secret + // +optional + SecretName string `json:"secretName,omitempty"` + // Setting will contains the configMap data + // +optional + Settings map[string]string `json:"settings,omitempty"` +} diff --git a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_validation.go b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_validation.go index ce6c96b767..68130b1a16 100644 --- a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_validation.go +++ b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_validation.go @@ -18,11 +18,17 @@ package v1alpha1 import ( "context" + "fmt" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/settings" + kubernetesValidation 
"k8s.io/apimachinery/pkg/util/validation" "knative.dev/pkg/apis" ) +// limit is 25 because this name goes in the installerset name which already have 38 characters, so additional length we +// can have for name is 25, as the kubernetes have restriction for 63 +const additionalPACControllerNameMaxLength = 25 + func (pac *OpenShiftPipelinesAsCode) Validate(ctx context.Context) *apis.FieldError { if apis.IsInDelete(ctx) { return nil @@ -33,16 +39,79 @@ func (pac *OpenShiftPipelinesAsCode) Validate(ctx context.Context) *apis.FieldEr // execute common spec validations errs = errs.Also(pac.Spec.CommonSpec.validate("spec")) - errs = errs.Also(validatePACSetting(pac.Spec.PACSettings)) + errs = errs.Also(pac.Spec.PACSettings.validate("spec")) return errs } -func validatePACSetting(pacSettings PACSettings) *apis.FieldError { +func (pacSettings *PACSettings) validate(path string) *apis.FieldError { var errs *apis.FieldError if err := settings.Validate(pacSettings.Settings); err != nil { - errs = errs.Also(apis.ErrInvalidValue(err, "spec.platforms.openshift.pipelinesAsCode")) + errs = errs.Also(apis.ErrInvalidValue(err, fmt.Sprintf("%s.settings", path))) + } + + for name, additionalPACControllerConfig := range pacSettings.AdditionalPACControllers { + if err := validateAdditionalPACControllerName(name); err != nil { + errs = errs.Also(apis.ErrInvalidValue(err, fmt.Sprintf("%s.additionalPACControllers", path))) + } + + errs = errs.Also(additionalPACControllerConfig.validate(fmt.Sprintf("%s.additionalPACControllers", path))) + } + + return errs +} + +func (additionalPACControllerConfig AdditionalPACControllerConfig) validate(path string) *apis.FieldError { + var errs *apis.FieldError + + if err := validateKubernetesName(additionalPACControllerConfig.ConfigMapName); err != nil { + errs = errs.Also(apis.ErrInvalidValue(err, fmt.Sprintf("%s.configMapName", path))) + } + + if err := validateKubernetesName(additionalPACControllerConfig.SecretName); err != nil { + errs = 
errs.Also(apis.ErrInvalidValue(err, fmt.Sprintf("%s.secretName", path))) } + + if err := settings.Validate(additionalPACControllerConfig.Settings); err != nil { + errs = errs.Also(apis.ErrInvalidValue(err, fmt.Sprintf("%s.settings", path))) + } + return errs } + +// validates the name of the controller resource is valid kubernetes name +func validateAdditionalPACControllerName(name string) *apis.FieldError { + if err := kubernetesValidation.IsDNS1123Subdomain(name); len(err) > 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("invalid resource name %q: must be a valid DNS label", name), + Paths: []string{"name"}, + } + } + + if len(name) > additionalPACControllerNameMaxLength { + return &apis.FieldError{ + Message: fmt.Sprintf("invalid resource name %q: length must be no more than %d characters", name, additionalPACControllerNameMaxLength), + Paths: []string{"name"}, + } + } + return nil +} + +// validates the name of the resource is valid kubernetes name +func validateKubernetesName(name string) *apis.FieldError { + if err := kubernetesValidation.IsDNS1123Subdomain(name); len(err) > 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("invalid resource name %q: must be a valid DNS label", name), + Paths: []string{"name"}, + } + } + + if len(name) > kubernetesValidation.DNS1123LabelMaxLength { + return &apis.FieldError{ + Message: fmt.Sprintf("invalid resource name %q: length must be no more than %d characters", name, kubernetesValidation.DNS1123LabelMaxLength), + Paths: []string{"name"}, + } + } + return nil +} diff --git a/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_validation_test.go b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_validation_test.go new file mode 100644 index 0000000000..33be332102 --- /dev/null +++ b/pkg/apis/operator/v1alpha1/openshiftpipelinesascode_validation_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2024 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except 
in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + "testing" + + "gotest.tools/v3/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestValidateAddtionalPACControllerInvalidName(t *testing.T) { + opacCR := &OpenShiftPipelinesAsCode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: OpenShiftPipelinesAsCodeSpec{ + CommonSpec: CommonSpec{ + TargetNamespace: "openshift-pipelines", + }, + PACSettings: PACSettings{ + Settings: map[string]string{}, + AdditionalPACControllers: map[string]AdditionalPACControllerConfig{ + "Test": { + ConfigMapName: "test-configmap", + SecretName: "test-secret", + Settings: map[string]string{ + "application-name": "Additional PACController CI", + }, + }, + }, + }, + }, + } + err := opacCR.Validate(context.TODO()) + assert.Equal(t, fmt.Sprintf("invalid value: invalid resource name %q: must be a valid DNS label: name: spec.additionalPACControllers", "Test"), err.Error()) +} + +func TestValidateAddtionalPACControllerInvalidConfigMapName(t *testing.T) { + opacCR := &OpenShiftPipelinesAsCode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: OpenShiftPipelinesAsCodeSpec{ + CommonSpec: CommonSpec{ + TargetNamespace: "openshift-pipelines", + }, + PACSettings: PACSettings{ + Settings: map[string]string{}, + AdditionalPACControllers: map[string]AdditionalPACControllerConfig{ + "test": { + ConfigMapName: "Test-configmap", + SecretName: "test-secret", + Settings: map[string]string{ + "application-name": "Additional PACController 
CI", + }, + }, + }, + }, + }, + } + err := opacCR.Validate(context.TODO()) + assert.Equal(t, fmt.Sprintf("invalid value: invalid resource name %q: must be a valid DNS label: name: spec.additionalPACControllers.configMapName", "Test-configmap"), err.Error()) +} + +func TestValidateAddtionalPACControllerInvalidNameLength(t *testing.T) { + opacCR := &OpenShiftPipelinesAsCode{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + }, + Spec: OpenShiftPipelinesAsCodeSpec{ + CommonSpec: CommonSpec{ + TargetNamespace: "Openshift-Pipelines", + }, + PACSettings: PACSettings{ + Settings: map[string]string{}, + AdditionalPACControllers: map[string]AdditionalPACControllerConfig{ + "testlengthwhichexceedsthemaximumlength": { + ConfigMapName: "test-configmap", + SecretName: "test-secret", + Settings: map[string]string{ + "application-name": "Additional PACController CI", + }, + }, + }, + }, + }, + } + err := opacCR.Validate(context.TODO()) + assert.Equal(t, fmt.Sprintf("invalid value: invalid resource name %q: length must be no more than 25 characters: name: spec.additionalPACControllers", "testlengthwhichexceedsthemaximumlength"), err.Error()) +} diff --git a/pkg/apis/operator/v1alpha1/tektonconfig_defaults.go b/pkg/apis/operator/v1alpha1/tektonconfig_defaults.go index b320e9a661..70ae129f67 100644 --- a/pkg/apis/operator/v1alpha1/tektonconfig_defaults.go +++ b/pkg/apis/operator/v1alpha1/tektonconfig_defaults.go @@ -51,7 +51,7 @@ func (tc *TektonConfig) SetDefaults(ctx context.Context) { // pac defaulting if *tc.Spec.Platforms.OpenShift.PipelinesAsCode.Enable { - setPACDefaults(tc.Spec.Platforms.OpenShift.PipelinesAsCode.PACSettings) + tc.Spec.Platforms.OpenShift.PipelinesAsCode.PACSettings.setPACDefaults() } // SCC defaulting diff --git a/pkg/apis/operator/v1alpha1/tektonconfig_validation.go b/pkg/apis/operator/v1alpha1/tektonconfig_validation.go index 6689bef3bd..958f5fb00e 100644 --- a/pkg/apis/operator/v1alpha1/tektonconfig_validation.go +++ 
b/pkg/apis/operator/v1alpha1/tektonconfig_validation.go @@ -47,6 +47,10 @@ func (tc *TektonConfig) Validate(ctx context.Context) (errs *apis.FieldError) { } } + if IsOpenShiftPlatform() && tc.Spec.Platforms.OpenShift.PipelinesAsCode != nil { + errs = errs.Also(tc.Spec.Platforms.OpenShift.PipelinesAsCode.PACSettings.validate("spec.platforms.openshift.pipelinesAsCode")) + } + // validate SCC config if IsOpenShiftPlatform() && tc.Spec.Platforms.OpenShift.SCC != nil { defaultSCC := PipelinesSCC diff --git a/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go index 5dba146a86..79aa60955e 100644 --- a/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go @@ -73,6 +73,34 @@ func (in *AdditionalOptions) DeepCopy() *AdditionalOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalPACControllerConfig) DeepCopyInto(out *AdditionalPACControllerConfig) { + *out = *in + if in.Enable != nil { + in, out := &in.Enable, &out.Enable + *out = new(bool) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalPACControllerConfig. +func (in *AdditionalPACControllerConfig) DeepCopy() *AdditionalPACControllerConfig { + if in == nil { + return nil + } + out := new(AdditionalPACControllerConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Addon) DeepCopyInto(out *Addon) { *out = *in @@ -538,6 +566,13 @@ func (in *PACSettings) DeepCopyInto(out *PACSettings) { (*out)[key] = val } } + if in.AdditionalPACControllers != nil { + in, out := &in.AdditionalPACControllers, &out.AdditionalPACControllers + *out = make(map[string]AdditionalPACControllerConfig, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } in.Options.DeepCopyInto(&out.Options) return } diff --git a/pkg/reconciler/kubernetes/tektoninstallerset/client/create.go b/pkg/reconciler/kubernetes/tektoninstallerset/client/create.go index 7f59d9ad5c..f63bdcfc03 100644 --- a/pkg/reconciler/kubernetes/tektoninstallerset/client/create.go +++ b/pkg/reconciler/kubernetes/tektoninstallerset/client/create.go @@ -30,7 +30,7 @@ import ( "knative.dev/pkg/logging" ) -func (i *InstallerSetClient) create(ctx context.Context, comp v1alpha1.TektonComponent, manifest *mf.Manifest, filterAndTransform FilterAndTransform, isType string) ([]v1alpha1.TektonInstallerSet, error) { +func (i *InstallerSetClient) create(ctx context.Context, comp v1alpha1.TektonComponent, manifest *mf.Manifest, filterAndTransform FilterAndTransform, isType string, customLabels map[string]string) ([]v1alpha1.TektonInstallerSet, error) { logger := logging.FromContext(ctx).With("kind", i.resourceKind, "type", isType) if isType == InstallerTypeMain { @@ -45,7 +45,7 @@ func (i *InstallerSetClient) create(ctx context.Context, comp v1alpha1.TektonCom kind := strings.ToLower(strings.TrimPrefix(i.resourceKind, "Tekton")) isName := fmt.Sprintf("%s-%s-", kind, isType) - iS, err := i.makeInstallerSet(ctx, comp, manifest, filterAndTransform, isName, isType) + iS, err := i.makeInstallerSet(ctx, comp, manifest, filterAndTransform, isName, isType, customLabels) if err != nil { return nil, err } @@ -64,7 +64,7 @@ func (i *InstallerSetClient) makeMainSets(ctx context.Context, comp v1alpha1.Tek kind := strings.ToLower(strings.TrimPrefix(i.resourceKind, "Tekton")) staticName 
:= fmt.Sprintf("%s-%s-%s-", kind, InstallerTypeMain, InstallerSubTypeStatic) - staticIS, err := i.makeInstallerSet(ctx, comp, &staticManifest, filterAndTransform, staticName, InstallerTypeMain) + staticIS, err := i.makeInstallerSet(ctx, comp, &staticManifest, filterAndTransform, staticName, InstallerTypeMain, nil) if err != nil { return nil, err } @@ -78,7 +78,7 @@ func (i *InstallerSetClient) makeMainSets(ctx context.Context, comp v1alpha1.Tek } deployName := fmt.Sprintf("%s-%s-%s-", kind, InstallerTypeMain, InstallerSubTypeDeployment) - deploymentIS, err := i.makeInstallerSet(ctx, comp, &deploymentManifest, filterAndTransform, deployName, InstallerTypeMain) + deploymentIS, err := i.makeInstallerSet(ctx, comp, &deploymentManifest, filterAndTransform, deployName, InstallerTypeMain, nil) if err != nil { return nil, err } @@ -109,7 +109,7 @@ func (i *InstallerSetClient) waitForStatus(ctx context.Context, set *v1alpha1.Te return nil } -func (i *InstallerSetClient) makeInstallerSet(ctx context.Context, comp v1alpha1.TektonComponent, manifest *mf.Manifest, filterAndTransform FilterAndTransform, isName, isType string) (*v1alpha1.TektonInstallerSet, error) { +func (i *InstallerSetClient) makeInstallerSet(ctx context.Context, comp v1alpha1.TektonComponent, manifest *mf.Manifest, filterAndTransform FilterAndTransform, isName, isType string, customLabels map[string]string) (*v1alpha1.TektonInstallerSet, error) { specHash, err := hash.Compute(comp.GetSpec()) if err != nil { return nil, err @@ -120,15 +120,18 @@ func (i *InstallerSetClient) makeInstallerSet(ctx context.Context, comp v1alpha1 return nil, err } + // get default labels of installerset + labels := i.getDefaultLabels(isType) + // append custom labels + for key, value := range customLabels { + labels[key] = value + } + ownerRef := *metav1.NewControllerRef(comp, v1alpha1.SchemeGroupVersion.WithKind(i.resourceKind)) return &v1alpha1.TektonInstallerSet{ ObjectMeta: metav1.ObjectMeta{ GenerateName: isName, - Labels: 
map[string]string{ - v1alpha1.CreatedByKey: i.resourceKind, - v1alpha1.ReleaseVersionKey: i.releaseVersion, - v1alpha1.InstallerSetType: isType, - }, + Labels: labels, Annotations: map[string]string{ v1alpha1.TargetNamespaceKey: comp.GetSpec().GetTargetNamespace(), v1alpha1.LastAppliedHashKey: specHash, @@ -140,3 +143,11 @@ func (i *InstallerSetClient) makeInstallerSet(ctx context.Context, comp v1alpha1 }, }, nil } + +func (i *InstallerSetClient) getDefaultLabels(isType string) map[string]string { + labels := map[string]string{} + labels[v1alpha1.CreatedByKey] = i.resourceKind + labels[v1alpha1.ReleaseVersionKey] = i.releaseVersion + labels[v1alpha1.InstallerSetType] = isType + return labels +} diff --git a/pkg/reconciler/kubernetes/tektoninstallerset/client/create_test.go b/pkg/reconciler/kubernetes/tektoninstallerset/client/create_test.go index 5ebea064f4..193f243610 100644 --- a/pkg/reconciler/kubernetes/tektoninstallerset/client/create_test.go +++ b/pkg/reconciler/kubernetes/tektoninstallerset/client/create_test.go @@ -111,7 +111,7 @@ func TestInstallerSetClient_Create(t *testing.T) { client = NewInstallerSetClient(fakeClient, releaseVersion, "test-version", v1alpha1.KindTektonTrigger, &testMetrics{}) } - iSs, gotErr := client.create(ctx, comp, &manifest, filterAndTransform(common.NoExtension(ctx)), tt.setType) + iSs, gotErr := client.create(ctx, comp, &manifest, filterAndTransform(common.NoExtension(ctx)), tt.setType, nil) if tt.wantErr != nil { assert.Equal(t, gotErr, tt.wantErr) diff --git a/pkg/reconciler/kubernetes/tektoninstallerset/client/custom_versioned_clustertask.go b/pkg/reconciler/kubernetes/tektoninstallerset/client/custom_versioned_clustertask.go index d4553f23e9..d30ba8efa6 100644 --- a/pkg/reconciler/kubernetes/tektoninstallerset/client/custom_versioned_clustertask.go +++ b/pkg/reconciler/kubernetes/tektoninstallerset/client/custom_versioned_clustertask.go @@ -35,7 +35,7 @@ var ( // VersionedClusterTaskSet this is an exception case where we 
create one installer set for one minor version // not for patch version and we don't remove older installer sets on upgrade, hence keeping it different // from custom set otherwise code becomes unnecessarily complex to handle this case -func (i *InstallerSetClient) VersionedClusterTaskSet(ctx context.Context, comp v1alpha1.TektonComponent, customName string, manifest *mf.Manifest, filterAndTransform FilterAndTransform) error { +func (i *InstallerSetClient) VersionedClusterTaskSet(ctx context.Context, comp v1alpha1.TektonComponent, manifest *mf.Manifest, filterAndTransform FilterAndTransform) error { logger := logging.FromContext(ctx) setType := InstallerTypeCustom + "-" + strings.ToLower(versionedClusterTaskInstallerSet) @@ -55,7 +55,7 @@ func (i *InstallerSetClient) VersionedClusterTaskSet(ctx context.Context, comp v } if len(is.Items) == 0 { - vctSet, err := i.makeInstallerSet(ctx, comp, manifest, filterAndTransform, "addon-versioned-clustertasks", setType) + vctSet, err := i.makeInstallerSet(ctx, comp, manifest, filterAndTransform, "addon-versioned-clustertasks", setType, nil) if err != nil { return err } diff --git a/pkg/reconciler/kubernetes/tektoninstallerset/client/list.go b/pkg/reconciler/kubernetes/tektoninstallerset/client/list.go new file mode 100644 index 0000000000..79ab9536b8 --- /dev/null +++ b/pkg/reconciler/kubernetes/tektoninstallerset/client/list.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + + "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/logging" +) + +// ListCustomSet returns the list of custom installer sets matching the provided labelSelector +func (i *InstallerSetClient) ListCustomSet(ctx context.Context, labelSelector string) (*v1alpha1.TektonInstallerSetList, error) { + logger := logging.FromContext(ctx) + logger.Debugf("%v: checking installer sets with labels: %v", i.resourceKind, labelSelector) + + is, err := i.clientSet.List(ctx, v1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return nil, err + } + if len(is.Items) == 0 { + logger.Debugf("%v: no installer sets found with labels: %v", i.resourceKind, labelSelector) + } + return is, nil +} diff --git a/pkg/reconciler/kubernetes/tektoninstallerset/client/main_set.go b/pkg/reconciler/kubernetes/tektoninstallerset/client/main_set.go index 8861e25dfc..d0d1ebfeb7 100644 --- a/pkg/reconciler/kubernetes/tektoninstallerset/client/main_set.go +++ b/pkg/reconciler/kubernetes/tektoninstallerset/client/main_set.go @@ -44,7 +44,7 @@ func (i *InstallerSetClient) MainSet(ctx context.Context, comp v1alpha1.TektonCo switch err { case ErrNotFound: logger.Infof("%v/%v: installer set not found, creating", i.resourceKind, setType) - sets, err = i.create(ctx, comp, manifest, filterAndTransform, setType) + sets, err = i.create(ctx, comp, manifest, filterAndTransform, setType, nil) if err != nil { logger.Errorf("%v/%v: failed to create main installer set: %v", i.resourceKind, setType, err) return err diff --git a/pkg/reconciler/kubernetes/tektoninstallerset/client/pre_post_custom_set.go b/pkg/reconciler/kubernetes/tektoninstallerset/client/pre_post_custom_set.go index 25e13c877d..6cf684f36a 100644 --- a/pkg/reconciler/kubernetes/tektoninstallerset/client/pre_post_custom_set.go +++ b/pkg/reconciler/kubernetes/tektoninstallerset/client/pre_post_custom_set.go @@ -26,19 +26,19 @@ import ( ) func 
(i *InstallerSetClient) PostSet(ctx context.Context, comp v1alpha1.TektonComponent, manifest *mf.Manifest, filterAndTransform FilterAndTransform) error { - return i.createSet(ctx, comp, InstallerTypePost, manifest, filterAndTransform) + return i.createSet(ctx, comp, InstallerTypePost, manifest, filterAndTransform, nil) } func (i *InstallerSetClient) PreSet(ctx context.Context, comp v1alpha1.TektonComponent, manifest *mf.Manifest, filterAndTransform FilterAndTransform) error { - return i.createSet(ctx, comp, InstallerTypePre, manifest, filterAndTransform) + return i.createSet(ctx, comp, InstallerTypePre, manifest, filterAndTransform, nil) } -func (i *InstallerSetClient) CustomSet(ctx context.Context, comp v1alpha1.TektonComponent, customName string, manifest *mf.Manifest, filterAndTransform FilterAndTransform) error { +func (i *InstallerSetClient) CustomSet(ctx context.Context, comp v1alpha1.TektonComponent, customName string, manifest *mf.Manifest, filterAndTransform FilterAndTransform, customLabels map[string]string) error { setType := InstallerTypeCustom + "-" + strings.ToLower(customName) - return i.createSet(ctx, comp, setType, manifest, filterAndTransform) + return i.createSet(ctx, comp, setType, manifest, filterAndTransform, customLabels) } -func (i *InstallerSetClient) createSet(ctx context.Context, comp v1alpha1.TektonComponent, setType string, manifest *mf.Manifest, filterAndTransform FilterAndTransform) error { +func (i *InstallerSetClient) createSet(ctx context.Context, comp v1alpha1.TektonComponent, setType string, manifest *mf.Manifest, filterAndTransform FilterAndTransform, customLabels map[string]string) error { logger := logging.FromContext(ctx) sets, err := i.checkSet(ctx, comp, setType) @@ -49,7 +49,7 @@ func (i *InstallerSetClient) createSet(ctx context.Context, comp v1alpha1.Tekton switch err { case ErrNotFound: logger.Infof("%v/%v: installer set not found, creating", i.resourceKind, setType) - sets, err = i.create(ctx, comp, manifest, 
filterAndTransform, setType) + sets, err = i.create(ctx, comp, manifest, filterAndTransform, setType, customLabels) if err != nil { logger.Errorf("%v/%v: failed to create installer set: %v", i.resourceKind, setType, err) return err diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/controller.go b/pkg/reconciler/openshift/openshiftpipelinesascode/controller.go index c9e3259927..10b0b6ab87 100644 --- a/pkg/reconciler/openshift/openshiftpipelinesascode/controller.go +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/controller.go @@ -66,11 +66,12 @@ func NewExtendedController(generator common.ExtensionGenerator) injection.Contro } c := &Reconciler{ - pipelineInformer: tektonPipelineinformer.Get(ctx), - installerSetClient: client.NewInstallerSetClient(tisClient, operatorVer, pacVersion, v1alpha1.KindOpenShiftPipelinesAsCode, metrics), - extension: generator(ctx), - manifest: manifest, - pacVersion: pacVersion, + pipelineInformer: tektonPipelineinformer.Get(ctx), + installerSetClient: client.NewInstallerSetClient(tisClient, operatorVer, pacVersion, v1alpha1.KindOpenShiftPipelinesAsCode, metrics), + extension: generator(ctx), + manifest: manifest, + additionalPACManifest: filterAdditionalControllerManifest(manifest), + pacVersion: pacVersion, } impl := pacreconciler.NewImpl(ctx, c) diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/pipelinerun_templates.go b/pkg/reconciler/openshift/openshiftpipelinesascode/pipelinerun_templates.go index 421ecfd634..4eb53c1c5b 100644 --- a/pkg/reconciler/openshift/openshiftpipelinesascode/pipelinerun_templates.go +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/pipelinerun_templates.go @@ -17,6 +17,7 @@ limitations under the License. 
package openshiftpipelinesascode import ( + "fmt" "os" "path/filepath" "strings" @@ -80,7 +81,7 @@ func pipelineRunToConfigMapConverter(prManifests *mf.Manifest) (*mf.Manifest, er // set metadata prname := res.GetName() - cm.SetName("pipelines-as-code-" + prname) + cm.SetName(fmt.Sprintf("pipelines-as-code-%s", prname)) cm.Labels[pacRuntimeLabel] = strings.TrimPrefix(prname, "pipelinerun-") unstrObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cm) diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/reconcile.go b/pkg/reconciler/openshift/openshiftpipelinesascode/reconcile.go index 1cf4bb4fdd..68124c083d 100644 --- a/pkg/reconciler/openshift/openshiftpipelinesascode/reconcile.go +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/reconcile.go @@ -19,6 +19,7 @@ package openshiftpipelinesascode import ( "context" "fmt" + "strings" mf "github.com/manifestival/manifestival" "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" @@ -26,10 +27,17 @@ import ( pacreconciler "github.com/tektoncd/operator/pkg/client/injection/reconciler/operator/v1alpha1/openshiftpipelinesascode" "github.com/tektoncd/operator/pkg/reconciler/common" "github.com/tektoncd/operator/pkg/reconciler/kubernetes/tektoninstallerset/client" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" "knative.dev/pkg/logging" pkgreconciler "knative.dev/pkg/reconciler" ) +const ( + // additionalPACController installerset label value + additionalPACControllerComponentLabelValue = "AdditionalPACController" +) + // Reconciler implements controller.Reconciler for OpenShiftPipelinesAsCode resources. 
type Reconciler struct { // installer Set client to do CRUD operations for components @@ -43,6 +51,8 @@ type Reconciler struct { extension common.Extension // version of PipelinesAsCode which we are installing pacVersion string + // additionalPACManifest has the source manifest for the additional Openshift Pipelines As Code Controller + additionalPACManifest mf.Manifest } // Check that our Reconciler implements controller.Reconciler @@ -95,6 +105,60 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, pac *v1alpha1.OpenShiftP return nil } + // create an additionalPACController for every entry provided + for name, pacInfo := range pac.Spec.PACSettings.AdditionalPACControllers { + // if it is not enabled then skip creating the additionalPACController + if !*pacInfo.Enable { + continue + } + + additionalPACControllerManifest := r.additionalPACManifest + // if the configMap name is pipelines-as-code, do not create a new configmap + if pacInfo.ConfigMapName == pipelinesAsCodeCM { + additionalPACControllerManifest = additionalPACControllerManifest.Filter(mf.Not(mf.ByKind("ConfigMap"))) + } + + // create a custom installerset for the additionalPACController + if err := r.installerSetClient.CustomSet(ctx, pac, name, &additionalPACControllerManifest, additionalControllerTransform(r.extension, name), additionalPacControllerLabels()); err != nil { + msg := fmt.Sprintf("Additional PACController %s Reconciliation failed: %s", name, err.Error()) + logger.Error(msg) + if err == v1alpha1.REQUEUE_EVENT_AFTER { + return err + } + pac.Status.MarkInstallerSetNotReady(msg) + return nil + } + } + + // Handle the deletion of obsolete installersets of additionalController + labelSelector := additionalPacControllerLabelSelector() + logger.Debugf("checking custom installer sets with labels: %v", labelSelector) + is, err := r.installerSetClient.ListCustomSet(ctx, labelSelector) + if err != nil { + msg := fmt.Sprintf("Additional PACController Reconciliation failed: %s", err.Error()) + 
logger.Error(msg) + if err == v1alpha1.REQUEUE_EVENT_AFTER { + return err + } + } + // iterate over all available custom installersets and delete those which have been removed or disabled + for _, i := range is.Items { + // get the value of setType label which will be custom- + setTypeValue := i.GetLabels()[v1alpha1.InstallerSetType] + // remove the prefix custom- to get the name + name := strings.TrimPrefix(setTypeValue, fmt.Sprintf("%s-", client.InstallerTypeCustom)) + // check if the name exists in the CR spec + additionalPACinfo, ok := pac.Spec.PACSettings.AdditionalPACControllers[name] + // if no entry with the same name exists, or it is marked disabled, delete the installerset + if !ok || !*additionalPACinfo.Enable { + if err := r.installerSetClient.CleanupCustomSet(ctx, name); err != nil { + return err + } + } + } + + pac.Status.MarkAdditionalPACControllerComplete() + if err := r.extension.PostReconcile(ctx, pac); err != nil { msg := fmt.Sprintf("PostReconciliation failed: %s", err.Error()) logger.Error(msg) @@ -109,3 +173,24 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, pac *v1alpha1.OpenShiftP pac.Status.MarkPostReconcilerComplete() return nil } + +// custom labels to be added to the additionalPACController installerset +func additionalPacControllerLabels() map[string]string { + labels := map[string]string{} + labels[v1alpha1.ComponentKey] = additionalPACControllerComponentLabelValue + return labels +} + +// labelSelector to filter the custom sets of additionalPACController +func additionalPacControllerLabelSelector() string { + labelSelector := labels.NewSelector() + createdReq, _ := labels.NewRequirement(v1alpha1.CreatedByKey, selection.Equals, []string{v1alpha1.KindOpenShiftPipelinesAsCode}) + if createdReq != nil { + labelSelector = labelSelector.Add(*createdReq) + } + componentReq, _ := labels.NewRequirement(v1alpha1.ComponentKey, selection.Equals, []string{additionalPACControllerComponentLabelValue}) + if componentReq != nil { + labelSelector = 
labelSelector.Add(*componentReq) + } + return labelSelector.String() +} diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-additional-pac-cm.yaml b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-additional-pac-cm.yaml new file mode 100644 index 0000000000..7e491388e7 --- /dev/null +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-additional-pac-cm.yaml @@ -0,0 +1,132 @@ +apiVersion: v1 +data: + # The application name, you can customize this label. If using the Github App you will need to customize the label on the github app setting as well. + application-name: "Pipelines as Code CI" + + # Whether to automatically create a secret with the token to be use by git-clone + secret-auto-create: "true" + + # By default we only generate token scoped to the repository from where the + # payload come from. + # We do this because if the github apps is installed on an github organisation + # + # and there is a mix of public and private repositories in there + # where some users on that org does not have access. + # + # If you trust every users on your organisations to access any repos there or + # not planning to install your github application globally on a Github Organisation + # then you can safely set this option to false. + secret-github-app-token-scoped: "true" + + # If you don't want to completely disable the scoping of the token, but still + # wants some other repos (on the same installation id) available from the + # token, then you can add an extra owner/repo here. 
+ # + # You can have multiple owner/repositories separated by commas: + # i.e: "owner/private-repo1, org/repo2" + secret-github-app-scope-extra-repos: "" + + # Tekton HUB API urls + hub-url: "https://api.hub.tekton.dev/v1" + + # Tekton HUB catalog name + hub-catalog-name: "tekton" + + # Additional Hub Catalogs is supported, for example: + # + # catalog-1-id: anotherhub + # catalog-1-name: tekton + # catalog-1-url: https://api.other.com/v1 + # + # this configuration will have a new catalog named anotherhub on https://api.other.com/v1 endpoint and catalog name tekton + # to be used by a user in their templates like this: + # pipelinesascode.tekton.dev/task: "anotherhub://task" + # + # Increase the number of the catalog to add more of them + + # Allow fetching remote tasks + remote-tasks: "true" + + # Using the URL of the Tekton dashboard, Pipelines-as-Code generates a URL to the + # PipelineRun on the Tekton dashboard + tekton-dashboard-url: "" + + # Enable or disable the feature to show a log snippet of the failed task when there is + # an error in a Pipeline + # + # It will show the last 3 lines of the first container of the first task + # that has error in the pipeline. + # + # you may want to disable this if you think your pipeline may leak some value + error-log-snippet: "true" + + # Enable or disable the inspection of container logs to detect error message + # and expose them as annotations on Pull Request. Only Github apps is supported + error-detection-from-container-logs: "true" + + # How many lines to grab from the container when inspecting the + # logs for error-detection. Increasing this value may increase the watcher + # memory usage. Use -1 for unlimited lines. 
+ error-detection-max-number-of-lines: "50" + + # The default regexp used when we use the simple error detection + error-detection-simple-regexp: | + ^(?P[^:]*):(?P[0-9]+):(?P[0-9]+)?([ ]*)?(?P.*) + + # Since public bitbucket doesn't have the concept of Secret, we need to be + # able to secure the request by querying https://ip-ranges.atlassian.com/, + # this only happen for public bitbucket (ie: when provider.url is not set in + # repository spec). If you want to override this, you need to bear in mind + # this could be a security issue, a malicious user can send a PR to your repo + # with a modification to your PipelineRun that would grab secrets, tunnel or + # others and then send a malicious webhook payload to the controller which + # look like a authorized owner has send the PR to run it.. + bitbucket-cloud-check-source-ip: "true" + + # Add extra IPS (ie: 127.0.0.1) or networks (127.0.0.0/16) separated by commas. + bitbucket-cloud-additional-source-ip: "" + + # max-keep-run-upper-limit defines the upper limit for max-keep-run annotation + # value which a user can set on pipelineRun. the value set on annotation + # should be less than or equal to the upper limit otherwise the upper limit + # will be used while cleaning up + max-keep-run-upper-limit: "" + + # if defined then applies to all pipelineRun who doesn't have max-keep-runs annotation + default-max-keep-runs: "" + + # Whether to auto configure newly created repositories, this will create a new + # namespace and repository CR, supported only with GitHub App + auto-configure-new-github-repo: "false" + + # add a template to generate name for namespace for your auto configured + # github repo supported fields are repo_owner, repo_name eg. 
if defined as + # `{{repo_owner}}-{{repo_name}}-ci`, then namespace generated for repository + # https://github.com/owner/repo will be `owner-repo-ci` + auto-configure-repo-namespace-template: "" + + # Enable or disable the feature to rerun the CI if push event happens on + # a pull request + # + # By default it is true and CI will be re-run in case of push/amend on the + # pull request if ok-to-test is done once + # + # you may want to disable this if ok-to-test should be done on each iteration + remember-ok-to-test: "true" + + # Configure a custom console here, the driver support custom parameters from + # Repo CR along a few other template variable, see documentation for more + # details + # + # custom-console-name: Console Name + # custom-console-url: https://url + # custom-console-url-pr-details: https://url/ns/{{ namespace }}/{{ pr }} + # custom-console-url-pr-tasklog: https://url/ns/{{ namespace }}/{{ pr }}/logs/{{ task }} + +kind: ConfigMap +metadata: + name: test-config + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-expected-additional-pac-cm.yaml b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-expected-additional-pac-cm.yaml new file mode 100644 index 0000000000..0da087787c --- /dev/null +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-expected-additional-pac-cm.yaml @@ -0,0 +1,131 @@ +apiVersion: v1 +data: + # The application name, you can customize this label. If using the Github App you will need to customize the label on the github app setting as well. + application-name: "Test CI application" + + # Whether to automatically create a secret with the token to be use by git-clone + secret-auto-create: "true" + + # By default we only generate token scoped to the repository from where the + # payload come from. 
+ # We do this because if the github apps is installed on an github organisation + # + # and there is a mix of public and private repositories in there + # where some users on that org does not have access. + # + # If you trust every users on your organisations to access any repos there or + # not planning to install your github application globally on a Github Organisation + # then you can safely set this option to false. + secret-github-app-token-scoped: "true" + + # If you don't want to completely disable the scoping of the token, but still + # wants some other repos (on the same installation id) available from the + # token, then you can add an extra owner/repo here. + # + # You can have multiple owner/repositories separated by commas: + # i.e: "owner/private-repo1, org/repo2" + secret-github-app-scope-extra-repos: "" + + # Tekton HUB API urls + hub-url: "https://custom-hub.com" + + # Tekton HUB catalog name + hub-catalog-name: "tekton" + + # Additional Hub Catalogs is supported, for example: + # + # catalog-1-id: anotherhub + # catalog-1-name: tekton + # catalog-1-url: https://api.other.com/v1 + # + # this configuration will have a new catalog named anotherhub on https://api.other.com/v1 endpoint and catalog name tekton + # to be used by a user in their templates like this: + # pipelinesascode.tekton.dev/task: "anotherhub://task" + # + # Increase the number of the catalog to add more of them + + # Allow fetching remote tasks + remote-tasks: "true" + + # Using the URL of the Tekton dashboard, Pipelines-as-Code generates a URL to the + # PipelineRun on the Tekton dashboard + tekton-dashboard-url: "" + + # Enable or disable the feature to show a log snippet of the failed task when there is + # an error in a Pipeline + # + # It will show the last 3 lines of the first container of the first task + # that has error in the pipeline. 
+ # + # you may want to disable this if you think your pipeline may leak some value + error-log-snippet: "true" + + # Enable or disable the inspection of container logs to detect error message + # and expose them as annotations on Pull Request. Only Github apps is supported + error-detection-from-container-logs: "true" + + # How many lines to grab from the container when inspecting the + # logs for error-detection. Increasing this value may increase the watcher + # memory usage. Use -1 for unlimited lines. + error-detection-max-number-of-lines: "50" + + # The default regexp used when we use the simple error detection + error-detection-simple-regexp: "^(?P[^:]*):(?P[0-9]+):(?P[0-9]+):([ ]*)?(?P.*)" + + # Since public bitbucket doesn't have the concept of Secret, we need to be + # able to secure the request by querying https://ip-ranges.atlassian.com/, + # this only happen for public bitbucket (ie: when provider.url is not set in + # repository spec). If you want to override this, you need to bear in mind + # this could be a security issue, a malicious user can send a PR to your repo + # with a modification to your PipelineRun that would grab secrets, tunnel or + # others and then send a malicious webhook payload to the controller which + # look like a authorized owner has send the PR to run it.. + bitbucket-cloud-check-source-ip: "true" + + # Add extra IPS (ie: 127.0.0.1) or networks (127.0.0.0/16) separated by commas. + bitbucket-cloud-additional-source-ip: "" + + # max-keep-run-upper-limit defines the upper limit for max-keep-run annotation + # value which a user can set on pipelineRun. 
the value set on annotation + # should be less than or equal to the upper limit otherwise the upper limit + # will be used while cleaning up + max-keep-run-upper-limit: "" + + # if defined then applies to all pipelineRun who doesn't have max-keep-runs annotation + default-max-keep-runs: "" + + # Whether to auto configure newly created repositories, this will create a new + # namespace and repository CR, supported only with GitHub App + auto-configure-new-github-repo: "false" + + # add a template to generate name for namespace for your auto configured + # github repo supported fields are repo_owner, repo_name eg. if defined as + # `{{repo_owner}}-{{repo_name}}-ci`, then namespace generated for repository + # https://github.com/owner/repo will be `owner-repo-ci` + auto-configure-repo-namespace-template: "" + + # Enable or disable the feature to rerun the CI if push event happens on + # a pull request + # + # By default it is true and CI will be re-run in case of push/amend on the + # pull request if ok-to-test is done once + # + # you may want to disable this if ok-to-test should be done on each iteration + remember-ok-to-test: "true" + + # Configure a custom console here, the driver support custom parameters from + # Repo CR along a few other template variable, see documentation for more + # details + # + custom-console-name: "" + custom-console-url: "" + custom-console-url-pr-details: "" + custom-console-url-pr-tasklog: "" + +kind: ConfigMap +metadata: + name: test-config + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-expected-additional-pac-dep.yaml b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-expected-additional-pac-dep.yaml new file mode 100644 index 0000000000..ecd27f15af --- /dev/null +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-expected-additional-pac-dep.yaml 
@@ -0,0 +1,97 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-pac-controller + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: test-pac-controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + template: + metadata: + labels: + app: test-pac-controller + app.kubernetes.io/name: test-pac-controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.23.0" + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: pipelines-as-code-controller + containers: + - name: test-pac-controller + image: "ghcr.io/openshift-pipelines/pipelines-as-code-controller:v0.23.0" + imagePullPolicy: Always + ports: + - name: api + containerPort: 8080 + - name: metrics + containerPort: 9090 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readinessProbe: + failureThreshold: 3 + httpGet: + path: /live + port: api + scheme: HTTP + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 1 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /live + port: api + scheme: HTTP + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 1 + env: + - name: CONFIG_LOGGING_NAME + value: pac-config-logging + - name: TLS_KEY + value: "key" + - name: TLS_CERT + value: "cert" + - name: TLS_SECRET_NAME + value: "pipelines-as-code-tls-secret" + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K_METRICS_CONFIG + value: '{"Domain":"pipelinesascode.tekton.dev/controller","Component":"pac_controller","PrometheusPort":9090,"ConfigMap":{"name":"pipelines-as-code-config-observability"}}' + - name: K_TRACING_CONFIG + value: 
'{"backend":"prometheus","debug":"false","sample-rate":"0"}' + - name: K_SINK_TIMEOUT + value: "30" + - name: PAC_CONTROLLER_LABEL + value: "test-pac-controller" + - name: PAC_CONTROLLER_SECRET + value: "test-secret" + - name: PAC_CONTROLLER_CONFIGMAP + value: "test-configmap" + volumeMounts: + - mountPath: "/etc/pipelines-as-code/tls" + readOnly: true + name: tls + volumes: + - name: tls + secret: + secretName: pipelines-as-code-tls-secret + optional: true diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-expected-additional-pac-route.yaml b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-expected-additional-pac-route.yaml new file mode 100644 index 0000000000..976c971056 --- /dev/null +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-expected-additional-pac-route.yaml @@ -0,0 +1,23 @@ +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + labels: + app: test-pac-controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.23.0" + pipelines-as-code/route: test-pac-controller + name: test-pac-controller + namespace: pipelines-as-code +spec: + port: + targetPort: http-listener + tls: + insecureEdgeTerminationPolicy: Redirect + termination: edge + to: + kind: Service + name: test-pac-controller + weight: 100 + wildcardPolicy: None diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-filter-manifest.yaml b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-filter-manifest.yaml new file mode 100644 index 0000000000..5b0857a062 --- /dev/null +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/testdata/test-filter-manifest.yaml @@ -0,0 +1,771 @@ +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# See https://pipelinesascode.com/docs/install/settings/ for the complete
+# documentation of all settings.
+
+apiVersion: v1
+data:
+  # The application name, you can customize this label. If using the GitHub App you will need to customize the label on the GitHub App setting as well.
+  application-name: "Pipelines as Code CI"
+
+  # Whether to automatically create a secret with the token to be used by git-clone
+  secret-auto-create: "true"
+
+  # By default we only generate a token scoped to the repository from where the
+  # payload comes from.
+  # We do this because if the GitHub App is installed on a GitHub organisation
+  #
+  # and there is a mix of public and private repositories in there
+  # where some users on that org do not have access.
+  #
+  # If you trust every user on your organisation to access any repos there, or
+  # are not planning to install your GitHub application globally on a GitHub Organisation,
+  # then you can safely set this option to false.
+  secret-github-app-token-scoped: "true"
+
+  # If you don't want to completely disable the scoping of the token, but still
+  # want some other repos (on the same installation id) available from the
+  # token, then you can add an extra owner/repo here.
+ # + # You can have multiple owner/repositories separated by commas: + # i.e: "owner/private-repo1, org/repo2" + secret-github-app-scope-extra-repos: "" + + # Tekton HUB API urls + hub-url: "https://api.hub.tekton.dev/v1" + + # Tekton HUB catalog name + hub-catalog-name: "tekton" + + # Additional Hub Catalogs is supported, for example: + # + # catalog-1-id: anotherhub + # catalog-1-name: tekton + # catalog-1-url: https://api.other.com/v1 + # + # this configuration will have a new catalog named anotherhub on https://api.other.com/v1 endpoint and catalog name tekton + # to be used by a user in their templates like this: + # pipelinesascode.tekton.dev/task: "anotherhub://task" + # + # Increase the number of the catalog to add more of them + + # Allow fetching remote tasks + remote-tasks: "true" + + # Using the URL of the Tekton dashboard, Pipelines-as-Code generates a URL to the + # PipelineRun on the Tekton dashboard + tekton-dashboard-url: "" + + # Enable or disable the feature to show a log snippet of the failed task when there is + # an error in a Pipeline + # + # It will show the last 3 lines of the first container of the first task + # that has error in the pipeline. + # + # you may want to disable this if you think your pipeline may leak some value + error-log-snippet: "true" + + # Enable or disable the inspection of container logs to detect error message + # and expose them as annotations on Pull Request. Only Github apps is supported + error-detection-from-container-logs: "true" + + # How many lines to grab from the container when inspecting the + # logs for error-detection. Increasing this value may increase the watcher + # memory usage. Use -1 for unlimited lines. 
+  error-detection-max-number-of-lines: "50"
+
+  # The default regexp used when we use the simple error detection
+  error-detection-simple-regexp: |
+    ^(?P<filename>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+)?([ ]*)?(?P<error>.*)
+
+  # Since public bitbucket doesn't have the concept of Secret, we need to be
+  # able to secure the request by querying https://ip-ranges.atlassian.com/,
+  # this only happens for public bitbucket (ie: when provider.url is not set in
+  # repository spec). If you want to override this, you need to bear in mind
+  # this could be a security issue, a malicious user can send a PR to your repo
+  # with a modification to your PipelineRun that would grab secrets, tunnel or
+  # others and then send a malicious webhook payload to the controller which
+  # looks like an authorized owner has sent the PR to run it.
+  bitbucket-cloud-check-source-ip: "true"
+
+  # Add extra IPS (ie: 127.0.0.1) or networks (127.0.0.0/16) separated by commas.
+  bitbucket-cloud-additional-source-ip: ""
+
+  # max-keep-run-upper-limit defines the upper limit for max-keep-run annotation
+  # value which a user can set on pipelineRun. the value set on annotation
+  # should be less than or equal to the upper limit otherwise the upper limit
+  # will be used while cleaning up
+  max-keep-run-upper-limit: ""
+
+  # if defined then applies to all pipelineRuns that don't have the max-keep-runs annotation
+  default-max-keep-runs: ""
+
+  # Whether to auto configure newly created repositories, this will create a new
+  # namespace and repository CR, supported only with GitHub App
+  auto-configure-new-github-repo: "false"
+
+  # add a template to generate name for namespace for your auto configured
+  # github repo supported fields are repo_owner, repo_name eg. 
if defined as + # `{{repo_owner}}-{{repo_name}}-ci`, then namespace generated for repository + # https://github.com/owner/repo will be `owner-repo-ci` + auto-configure-repo-namespace-template: "" + + # Enable or disable the feature to rerun the CI if push event happens on + # a pull request + # + # By default it is true and CI will be re-run in case of push/amend on the + # pull request if ok-to-test is done once + # + # you may want to disable this if ok-to-test should be done on each iteration + remember-ok-to-test: "true" + + # Configure a custom console here, the driver support custom parameters from + # Repo CR along a few other template variable, see documentation for more + # details + # + # custom-console-name: Console Name + # custom-console-url: https://url + # custom-console-url-pr-details: https://url/ns/{{ namespace }}/{{ pr }} + # custom-console-url-pr-tasklog: https://url/ns/{{ namespace }}/{{ pr }}/logs/{{ task }} + +kind: ConfigMap +metadata: + name: pipelines-as-code + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +--- + +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This configmap is filled by bootstrap command +# GitHub App is added as provider and later this is checked +# before configuring a new GitHub App so that we don't +# configure more than one App + +apiVersion: v1 +data: + # pipelines as code controller version + version: "v0.23.0" + + # controller url to be used for configuring webhook using cli + controller-url: "" + + # display the configured provider on the platform + # only one provider type to be configured at a time + # eg. if GitHub App is configured, then webhooks should not be configured + provider: "" + +kind: ConfigMap +metadata: + name: pipelines-as-code-info + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +--- + +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+apiVersion: v1 +kind: Secret +metadata: + name: pipelines-as-code-webhook-certs + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +# The data is populated at install time +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation.pipelinesascode.tekton.dev + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +webhooks: + - admissionReviewVersions: ["v1"] + clientConfig: + service: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + failurePolicy: Fail + sideEffects: None + name: validation.pipelinesascode.tekton.dev +--- + +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: pipelines-as-code-config-observability + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. 
+ # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # metrics.backend-destination field specifies the system metrics destination. + # It supports either prometheus (the default) or stackdriver. + # Note: Using Stackdriver will incur additional charges. + metrics.backend-destination: prometheus + # metrics.stackdriver-project-id field specifies the Stackdriver project ID. This + # field is optional. When running on GCE, application default credentials will be + # used and metrics will be sent to the cluster's project if this field is + # not provided. + metrics.stackdriver-project-id: "" + # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed + # to send metrics to Stackdriver using "global" resource type and custom + # metric type. Setting this flag to "true" could cause extra Stackdriver + # charge. If metrics.backend-destination is not Stackdriver, this is + # ignored. + metrics.allow-stackdriver-custom-metrics: "false" +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: pac-watcher-config-leader-election + namespace: pipelines-as-code + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # lease-duration is how long non-leaders will wait to try to acquire the + # lock; 15 seconds is the value used by core kubernetes controllers. + lease-duration: "60s" + # renew-deadline is how long a leader will try to renew the lease before + # giving up; 10 seconds is the value used by core kubernetes controllers. + renew-deadline: "40s" + # retry-period is how long the leader election client waits between tries of + # actions; 2 seconds is the value used by core kubernetes controllers. + retry-period: "10s" + # buckets is the number of buckets used to partition key space of each + # Reconciler. If this number is M and the replica number of the controller + # is N, the N replicas will compete for the M buckets. The owner of a + # bucket will take care of the reconciling for the keys partitioned into + # that bucket. + buckets: "1" +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: pac-webhook-config-leader-election + namespace: pipelines-as-code + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # lease-duration is how long non-leaders will wait to try to acquire the + # lock; 15 seconds is the value used by core kubernetes controllers. + lease-duration: "60s" + # renew-deadline is how long a leader will try to renew the lease before + # giving up; 10 seconds is the value used by core kubernetes controllers. + renew-deadline: "40s" + # retry-period is how long the leader election client waits between tries of + # actions; 2 seconds is the value used by core kubernetes controllers. + retry-period: "10s" + # buckets is the number of buckets used to partition key space of each + # Reconciler. If this number is M and the replica number of the controller + # is N, the N replicas will compete for the M buckets. The owner of a + # bucket will take care of the reconciling for the keys partitioned into + # that bucket. 
+ buckets: "1" +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pipelines-as-code-controller + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + template: + metadata: + labels: + app: pipelines-as-code-controller + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.23.0" + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: pipelines-as-code-controller + containers: + - name: pac-controller + image: "ghcr.io/openshift-pipelines/pipelines-as-code-controller:v0.23.0" + imagePullPolicy: Always + ports: + - name: api + containerPort: 8080 + - name: metrics + containerPort: 9090 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readinessProbe: + failureThreshold: 3 + httpGet: + path: /live + port: api + scheme: HTTP + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 1 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /live + port: api + scheme: HTTP + 
periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 1 + env: + - name: CONFIG_LOGGING_NAME + value: pac-config-logging + - name: TLS_KEY + value: "key" + - name: TLS_CERT + value: "cert" + - name: TLS_SECRET_NAME + value: "pipelines-as-code-tls-secret" + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K_METRICS_CONFIG + value: '{"Domain":"pipelinesascode.tekton.dev/controller","Component":"pac_controller","PrometheusPort":9090,"ConfigMap":{"name":"pipelines-as-code-config-observability"}}' + - name: K_TRACING_CONFIG + value: '{"backend":"prometheus","debug":"false","sample-rate":"0"}' + - name: K_SINK_TIMEOUT + value: "30" + - name: PAC_CONTROLLER_LABEL + value: "default" + - name: PAC_CONTROLLER_SECRET + value: "pipelines-as-code-secret" + - name: PAC_CONTROLLER_CONFIGMAP + value: "pipelines-as-code" + volumeMounts: + - mountPath: "/etc/pipelines-as-code/tls" + readOnly: true + name: tls + volumes: + - name: tls + secret: + secretName: pipelines-as-code-tls-secret + optional: true +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: v1 +kind: Service +metadata: + name: pipelines-as-code-controller + namespace: pipelines-as-code + labels: + app: pipelines-as-code-controller + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + ports: + - name: http-listener + port: 8080 + protocol: TCP + targetPort: 8080 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pipelines-as-code-watcher + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: watcher + app.kubernetes.io/component: watcher + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + template: + metadata: + labels: + app.kubernetes.io/name: watcher + app.kubernetes.io/component: watcher + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.23.0" + app: pipelines-as-code-watcher + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: pipelines-as-code-watcher + containers: + - name: pac-watcher + image: "ghcr.io/openshift-pipelines/pipelines-as-code-watcher:v0.23.0" + imagePullPolicy: Always + env: + - name: CONFIG_LOGGING_NAME + value: pac-config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: METRICS_DOMAIN + value: tekton.dev/pipelinesascode + - name: CONFIG_OBSERVABILITY_NAME + value: pipelines-as-code-config-observability + - name: CONFIG_LEADERELECTION_NAME + value: pac-watcher-config-leader-election + ports: + - name: probes + containerPort: 8080 + - name: metrics + containerPort: 9090 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readinessProbe: + httpGet: + path: /live + port: probes + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + livenessProbe: + httpGet: + path: /live + port: probes + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: v1 +kind: Service +metadata: + name: pipelines-as-code-watcher + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code + app: pipelines-as-code-watcher +spec: + ports: + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/name: watcher + app.kubernetes.io/component: watcher + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +# Copyright 2024 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: v1 +kind: Service +metadata: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + ports: + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +# Copyright 2024 Red Hat +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.23.0" + pipelines-as-code/route: controller + name: pipelines-as-code-controller + namespace: pipelines-as-code +spec: + port: + targetPort: http-listener + tls: + insecureEdgeTerminationPolicy: Redirect + termination: edge + to: + kind: Service + name: pipelines-as-code-controller + weight: 100 + wildcardPolicy: None +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: pipelines-as-code-monitor + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code + annotations: + networkoperator.openshift.io/ignore-errors: "" +spec: + endpoints: + - interval: 10s + port: http-metrics + jobLabel: app + namespaceSelector: + matchNames: + - pipelines-as-code + selector: + matchLabels: + app: pipelines-as-code-watcher +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: pipelines-as-code-controller-monitor + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.23.0" + app.kubernetes.io/part-of: pipelines-as-code + annotations: + networkoperator.openshift.io/ignore-errors: "" +spec: + endpoints: + - interval: 10s + port: http-metrics + jobLabel: app + namespaceSelector: + matchNames: + - pipelines-as-code + selector: + matchLabels: + app: pipelines-as-code-controller diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/transform.go b/pkg/reconciler/openshift/openshiftpipelinesascode/transform.go index 2910322ff9..43cdb41228 100644 --- a/pkg/reconciler/openshift/openshiftpipelinesascode/transform.go +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/transform.go @@ -18,16 +18,26 @@ package openshiftpipelinesascode import ( "context" + "fmt" mf "github.com/manifestival/manifestival" + 
"github.com/openshift-pipelines/pipelines-as-code/pkg/params/settings" + routev1 "github.com/openshift/api/route/v1" "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" "github.com/tektoncd/operator/pkg/reconciler/common" "github.com/tektoncd/operator/pkg/reconciler/kubernetes/tektoninstallerset/client" "github.com/tektoncd/operator/pkg/reconciler/openshift" occommon "github.com/tektoncd/operator/pkg/reconciler/openshift/common" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" ) -const pipelinesAsCodeCM = "pipelines-as-code" +const ( + pipelinesAsCodeCM = "pipelines-as-code" + additionalPACControllerNameSuffix = "-pac-controller" +) func filterAndTransform(extension common.Extension) client.FilterAndTransform { return func(ctx context.Context, manifest *mf.Manifest, comp v1alpha1.TektonComponent) (*mf.Manifest, error) { @@ -62,3 +72,246 @@ func filterAndTransform(extension common.Extension) client.FilterAndTransform { return &pacManifest, nil } } + +func additionalControllerTransform(extension common.Extension, name string) client.FilterAndTransform { + return func(ctx context.Context, manifest *mf.Manifest, comp v1alpha1.TektonComponent) (*mf.Manifest, error) { + pac := comp.(*v1alpha1.OpenShiftPipelinesAsCode) + additionalPACControllerConfig := pac.Spec.PACSettings.AdditionalPACControllers[name] + + images := common.ToLowerCaseKeys(common.ImagesFromEnv(common.PacImagePrefix)) + // Run transformers + tfs := []mf.Transformer{ + common.InjectOperandNameLabelOverwriteExisting(openshift.OperandOpenShiftPipelineAsCode), + common.DeploymentImages(images), + common.AddConfiguration(pac.Spec.Config), + occommon.ApplyCABundles, + occommon.UpdateServiceMonitorTargetNamespace(pac.Spec.TargetNamespace), + updateAdditionControllerDeployment(additionalPACControllerConfig, name), + updateAdditionControllerService(name), + 
updateAdditionControllerConfigMap(additionalPACControllerConfig), + updateAdditionControllerRoute(name), + updateAdditionControllerServiceMonitor(name), + } + + allTfs := append(tfs, extension.Transformers(pac)...) + if err := common.Transform(ctx, manifest, pac, allTfs...); err != nil { + return &mf.Manifest{}, err + } + + // additional options transformer + // always execute as last transformer, so that the values in options will be final update values on the manifests + if err := common.ExecuteAdditionalOptionsTransformer(ctx, manifest, pac.Spec.GetTargetNamespace(), pac.Spec.Options); err != nil { + return &mf.Manifest{}, err + } + + return manifest, nil + } +} + +// This returns all resources to deploy for the additional PACController +func filterAdditionalControllerManifest(manifest mf.Manifest) mf.Manifest { + // filter deployment + deploymentManifest := manifest.Filter(mf.All(mf.ByName("pipelines-as-code-controller"), mf.ByKind("Deployment"))) + + // filter service + serviceManifest := manifest.Filter(mf.All(mf.ByName("pipelines-as-code-controller"), mf.ByKind("Service"))) + + // filter route + routeManifest := manifest.Filter(mf.All(mf.ByName("pipelines-as-code-controller"), mf.ByKind("Route"))) + + // filter configmap + cmManifest := manifest.Filter(mf.All(mf.ByName("pipelines-as-code"), mf.ByKind("ConfigMap"))) + + // filter serviceMonitor + serviceMonitorManifest := manifest.Filter(mf.All(mf.ByName("pipelines-as-code-controller-monitor"), mf.ByKind("ServiceMonitor"))) + + filteredManifest := mf.Manifest{} + filteredManifest = filteredManifest.Append(cmManifest, deploymentManifest, serviceManifest, serviceMonitorManifest, routeManifest) + return filteredManifest +} + +// This updates additional PACController deployment +func updateAdditionControllerDeployment(config v1alpha1.AdditionalPACControllerConfig, name string) mf.Transformer { + return func(u *unstructured.Unstructured) error { + if u.GetKind() != "Deployment" { + return nil + } + + 
u.SetName(fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix)) + + d := &appsv1.Deployment{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, d) + if err != nil { + return err + } + + d.Spec.Selector.MatchLabels["app.kubernetes.io/name"] = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + + d.Spec.Template.Labels["app"] = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + d.Spec.Template.Labels["app.kubernetes.io/name"] = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + + for i, container := range d.Spec.Template.Spec.Containers { + container.Name = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + containerEnvs := d.Spec.Template.Spec.Containers[i].Env + d.Spec.Template.Spec.Containers[i].Env = replaceEnvInDeployment(containerEnvs, config, name) + d.Spec.Template.Spec.Containers[i] = container + } + + unstrObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(d) + if err != nil { + return err + } + u.SetUnstructuredContent(unstrObj) + + return nil + } +} + +// This updates additional PACController Service +func updateAdditionControllerService(name string) mf.Transformer { + return func(u *unstructured.Unstructured) error { + if u.GetKind() != "Service" { + return nil + } + u.SetName(fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix)) + + service := &corev1.Service{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, service) + if err != nil { + return err + } + + labels := service.Labels + if labels == nil { + labels = map[string]string{} + } + labels["app"] = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + labels["app.kubernetes.io/name"] = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + service.SetLabels(labels) + + labelSelector := service.Spec.Selector + if labelSelector == nil { + labelSelector = map[string]string{} + } + labelSelector["app.kubernetes.io/name"] = fmt.Sprintf("%s%s", name, 
additionalPACControllerNameSuffix) + service.Spec.Selector = labelSelector + + unstrObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(service) + if err != nil { + return err + } + + u.SetUnstructuredContent(unstrObj) + return nil + } +} + +// This updates additional PACController configMap and sets settings data to configMap data +func updateAdditionControllerConfigMap(config v1alpha1.AdditionalPACControllerConfig) mf.Transformer { + return func(u *unstructured.Unstructured) error { + // set the name + // set the namespace + // set the data from settings + if u.GetKind() != "ConfigMap" { + return nil + } + + u.SetName(config.ConfigMapName) + + // apply the defaults here, we are not adding the defaults in CR + if config.Settings == nil { + config.Settings = map[string]string{} + } + settings.SetDefaults(config.Settings) + + cm := &corev1.ConfigMap{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, cm) + if err != nil { + return err + } + + if cm.Data == nil { + cm.Data = map[string]string{} + } + + for key, value := range config.Settings { + cm.Data[key] = value + } + unstrObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cm) + if err != nil { + return err + } + + u.SetUnstructuredContent(unstrObj) + return nil + + } +} + +// This updates additional PACController route +func updateAdditionControllerRoute(name string) mf.Transformer { + return func(u *unstructured.Unstructured) error { + if u.GetKind() != "Route" { + return nil + } + u.SetName(fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix)) + + route := &routev1.Route{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, route) + if err != nil { + return err + } + + route.Spec.To.Name = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + labels := route.Labels + if labels == nil { + labels = map[string]string{} + } + labels["app"] = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + 
labels["pipelines-as-code/route"] = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + route.SetLabels(labels) + + unstrObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(route) + if err != nil { + return err + } + + u.SetUnstructuredContent(unstrObj) + + return nil + } +} + +// This updates additional PACController ServiceMonitor +func updateAdditionControllerServiceMonitor(name string) mf.Transformer { + return func(u *unstructured.Unstructured) error { + if u.GetKind() != "ServiceMonitor" { + return nil + } + + u.SetName(fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix)) + err := unstructured.SetNestedMap(u.Object, map[string]interface{}{ + "app": fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix), + }, "spec", "selector", "matchLabels") + if err != nil { + return err + } + return nil + } +} + +// This replaces additional PACController deployment's container env +func replaceEnvInDeployment(envs []corev1.EnvVar, envInfo v1alpha1.AdditionalPACControllerConfig, name string) []corev1.EnvVar { + for i, e := range envs { + if e.Name == "PAC_CONTROLLER_CONFIGMAP" { + envs[i].Value = envInfo.ConfigMapName + } + if e.Name == "PAC_CONTROLLER_SECRET" { + envs[i].Value = envInfo.SecretName + } + if e.Name == "PAC_CONTROLLER_LABEL" { + envs[i].Value = fmt.Sprintf("%s%s", name, additionalPACControllerNameSuffix) + } + } + return envs +} diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/transform_test.go b/pkg/reconciler/openshift/openshiftpipelinesascode/transform_test.go new file mode 100644 index 0000000000..afe9473776 --- /dev/null +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/transform_test.go @@ -0,0 +1,186 @@ +/* +Copyright 2024 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openshiftpipelinesascode + +import ( + "path" + "testing" + + "github.com/google/go-cmp/cmp" + mf "github.com/manifestival/manifestival" + routev1 "github.com/openshift/api/route/v1" + "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" + "github.com/tektoncd/pipeline/test/diff" + "gotest.tools/v3/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestFilterAdditionalControllerManifest(t *testing.T) { + testData := path.Join("testdata", "test-filter-manifest.yaml") + manifest, err := mf.ManifestFrom(mf.Recursive(testData)) + assert.NilError(t, err) + + filteredManifest := filterAdditionalControllerManifest(manifest) + assert.DeepEqual(t, len(filteredManifest.Resources()), 5) + + deployment := filteredManifest.Filter(mf.All(mf.ByKind("Deployment"))) + assert.DeepEqual(t, deployment.Resources()[0].GetName(), "pipelines-as-code-controller") +} + +func TestUpdateAdditionControllerDeployment(t *testing.T) { + testData := path.Join("testdata", "test-filter-manifest.yaml") + manifest, err := mf.ManifestFrom(mf.Recursive(testData)) + assert.NilError(t, err) + manifest = manifest.Filter(mf.All(mf.ByName("pipelines-as-code-controller"), mf.ByKind("Deployment"))) + + additionalPACConfig := v1alpha1.AdditionalPACControllerConfig{ + ConfigMapName: "test-configmap", + SecretName: "test-secret", + } + updatedDeployment, err := manifest.Transform(updateAdditionControllerDeployment(additionalPACConfig, "test")) + assert.NilError(t, err) + assert.DeepEqual(t, updatedDeployment.Resources()[0].GetName(), 
"test-pac-controller") + + expectedData := path.Join("testdata", "test-expected-additional-pac-dep.yaml") + expectedManifest, err := mf.ManifestFrom(mf.Recursive(expectedData)) + assert.NilError(t, err) + + expected := &appsv1.Deployment{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured(expectedManifest.Resources()[0].Object, expected) + if err != nil { + assert.NilError(t, err) + } + + got := &appsv1.Deployment{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured(updatedDeployment.Resources()[0].Object, got) + if err != nil { + assert.NilError(t, err) + } + + if d := cmp.Diff(got, expected); d != "" { + t.Errorf("failed to update additional pac controller deployment %s", diff.PrintWantGot(d)) + } + +} + +func TestUpdateAdditionControllerService(t *testing.T) { + testData := path.Join("testdata", "test-filter-manifest.yaml") + manifest, err := mf.ManifestFrom(mf.Recursive(testData)) + assert.NilError(t, err) + manifest = manifest.Filter(mf.All(mf.ByName("pipelines-as-code-controller"), mf.ByKind("Service"))) + + updatedManifest, err := manifest.Transform(updateAdditionControllerService("test")) + assert.NilError(t, err) + assert.DeepEqual(t, updatedManifest.Resources()[0].GetName(), "test-pac-controller") +} + +func TestUpdateAdditionControllerRoute(t *testing.T) { + testData := path.Join("testdata", "test-filter-manifest.yaml") + manifest, err := mf.ManifestFrom(mf.Recursive(testData)) + assert.NilError(t, err) + manifest = manifest.Filter(mf.All(mf.ByName("pipelines-as-code-controller"), mf.ByKind("Route"))) + + updatedManifest, err := manifest.Transform(updateAdditionControllerRoute("test")) + if err != nil { + assert.NilError(t, err) + } + + route := &routev1.Route{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured(updatedManifest.Resources()[0].Object, route) + if err != nil { + assert.NilError(t, err) + } + expectedData := path.Join("testdata", "test-expected-additional-pac-route.yaml") + expectedManifest, err := 
mf.ManifestFrom(mf.Recursive(expectedData)) + assert.NilError(t, err) + + expectedRoute := &routev1.Route{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured(expectedManifest.Resources()[0].Object, expectedRoute) + if err != nil { + assert.NilError(t, err) + } + + if d := cmp.Diff(route, expectedRoute); d != "" { + t.Errorf("failed to update additional pac controller route %s", diff.PrintWantGot(d)) + } + +} + +func TestUpdateAdditionControllerServiceMonitor(t *testing.T) { + testData := path.Join("testdata", "test-filter-manifest.yaml") + manifest, err := mf.ManifestFrom(mf.Recursive(testData)) + assert.NilError(t, err) + manifest = manifest.Filter(mf.All(mf.ByName("pipelines-as-code-controller-monitor"), mf.ByKind("ServiceMonitor"))) + + updatedManifest, err := manifest.Transform(updateAdditionControllerServiceMonitor("test")) + assert.NilError(t, err) + assert.DeepEqual(t, updatedManifest.Resources()[0].GetName(), "test-pac-controller") +} + +func TestUpdateAdditionControllerConfigMapWithDefaultCM(t *testing.T) { + testData := path.Join("testdata", "test-filter-manifest.yaml") + manifest, err := mf.ManifestFrom(mf.Recursive(testData)) + assert.NilError(t, err) + manifest = manifest.Filter(mf.All(mf.ByName("pipelines-as-code"), mf.ByKind("ConfigMap"))) + + additionalPACConfig := v1alpha1.AdditionalPACControllerConfig{ + ConfigMapName: "pipelines-as-code", + SecretName: "test-secret", + } + updatedManifest, err := manifest.Transform(updateAdditionControllerConfigMap(additionalPACConfig)) + assert.NilError(t, err) + assert.DeepEqual(t, updatedManifest.Resources()[0].GetName(), "pipelines-as-code") +} + +func TestUpdateAdditionControllerConfigMap(t *testing.T) { + testData := path.Join("testdata", "test-additional-pac-cm.yaml") + manifest, err := mf.ManifestFrom(mf.Recursive(testData)) + assert.NilError(t, err) + + additionalPACConfig := v1alpha1.AdditionalPACControllerConfig{ + ConfigMapName: "test-config", + SecretName: "test-secret", + Settings: 
map[string]string{"application-name": "Test CI application", "hub-url": "https://custom-hub.com"}, + } + + updatedManifest, err := manifest.Transform(updateAdditionControllerConfigMap(additionalPACConfig)) + if err != nil { + assert.NilError(t, err) + } + + cm := &corev1.ConfigMap{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured(updatedManifest.Resources()[0].Object, cm) + if err != nil { + assert.NilError(t, err) + } + + expectedTestData := path.Join("testdata", "test-expected-additional-pac-cm.yaml") + expectedManifest, err := mf.ManifestFrom(mf.Recursive(expectedTestData)) + assert.NilError(t, err) + expectedCM := &corev1.ConfigMap{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured(expectedManifest.Resources()[0].Object, expectedCM) + if err != nil { + assert.NilError(t, err) + } + assert.NilError(t, err) + + if d := cmp.Diff(cm, expectedCM); d != "" { + t.Errorf("failed to update additional pac controller route %s", diff.PrintWantGot(d)) + } +} diff --git a/pkg/reconciler/openshift/openshiftpipelinesascode/update_route_in_configmap.go b/pkg/reconciler/openshift/openshiftpipelinesascode/update_route_in_configmap.go index 6d2cd5d7f7..a048b50926 100644 --- a/pkg/reconciler/openshift/openshiftpipelinesascode/update_route_in_configmap.go +++ b/pkg/reconciler/openshift/openshiftpipelinesascode/update_route_in_configmap.go @@ -79,7 +79,7 @@ func updateInfoConfigMap(route string, pacManifest *mf.Manifest, targetNs string if err != nil { return err } - routeURL := "https://" + route + routeURL := fmt.Sprintf("https://%s", route) // set controller url if not the same if cm.Data["controller-url"] == routeURL { diff --git a/pkg/reconciler/openshift/tektonaddon/clustertTask.go b/pkg/reconciler/openshift/tektonaddon/clustertTask.go index 9247ce2b50..59f3bcc077 100644 --- a/pkg/reconciler/openshift/tektonaddon/clustertTask.go +++ b/pkg/reconciler/openshift/tektonaddon/clustertTask.go @@ -30,7 +30,7 @@ import ( func (r *Reconciler) 
EnsureClusterTask(ctx context.Context, enable string, ta *v1alpha1.TektonAddon) error { manifest := *r.clusterTaskManifest if enable == "true" { - if err := r.installerSetClient.CustomSet(ctx, ta, ClusterTaskInstallerSet, &manifest, filterAndTransformClusterTask()); err != nil { + if err := r.installerSetClient.CustomSet(ctx, ta, ClusterTaskInstallerSet, &manifest, filterAndTransformClusterTask(), nil); err != nil { return err } } else { diff --git a/pkg/reconciler/openshift/tektonaddon/clustertTaskVersioned.go b/pkg/reconciler/openshift/tektonaddon/clustertTaskVersioned.go index 890c487121..c7676ed738 100644 --- a/pkg/reconciler/openshift/tektonaddon/clustertTaskVersioned.go +++ b/pkg/reconciler/openshift/tektonaddon/clustertTaskVersioned.go @@ -28,7 +28,7 @@ import ( func (r *Reconciler) EnsureVersionedClusterTask(ctx context.Context, enable string, ta *v1alpha1.TektonAddon) error { manifest := *r.clusterTaskManifest if enable == "true" { - if err := r.installerSetClient.VersionedClusterTaskSet(ctx, ta, VersionedClusterTaskInstallerSet, &manifest, filterAndTransformVersionedClusterTask(r.operatorVersion)); err != nil { + if err := r.installerSetClient.VersionedClusterTaskSet(ctx, ta, &manifest, filterAndTransformVersionedClusterTask(r.operatorVersion)); err != nil { return err } } else { diff --git a/pkg/reconciler/openshift/tektonaddon/communityClusterTasks.go b/pkg/reconciler/openshift/tektonaddon/communityClusterTasks.go index 8b09e246a7..b1664af013 100644 --- a/pkg/reconciler/openshift/tektonaddon/communityClusterTasks.go +++ b/pkg/reconciler/openshift/tektonaddon/communityClusterTasks.go @@ -43,7 +43,7 @@ func (r *Reconciler) EnsureCommunityClusterTask(ctx context.Context, enable stri } manifest := *r.communityClusterTaskManifest if enable == "true" { - if err := r.installerSetClient.CustomSet(ctx, ta, CommunityClusterTaskInstallerSet, &manifest, filterAndTransformCommunityClusterTask()); err != nil { + if err := r.installerSetClient.CustomSet(ctx, ta, 
CommunityClusterTaskInstallerSet, &manifest, filterAndTransformCommunityClusterTask(), nil); err != nil { return err } } else { diff --git a/pkg/reconciler/openshift/tektonaddon/consolecli.go b/pkg/reconciler/openshift/tektonaddon/consolecli.go index d88dcb1a20..0f483dca36 100644 --- a/pkg/reconciler/openshift/tektonaddon/consolecli.go +++ b/pkg/reconciler/openshift/tektonaddon/consolecli.go @@ -41,7 +41,7 @@ func (r *Reconciler) EnsureConsoleCLI(ctx context.Context, ta *v1alpha1.TektonAd if err := consoleCLITransform(ctx, &manifest, routeHost); err != nil { return err } - if err := r.installerSetClient.CustomSet(ctx, ta, ConsoleCLIInstallerSet, &manifest, filterAndTransformOCPResources()); err != nil { + if err := r.installerSetClient.CustomSet(ctx, ta, ConsoleCLIInstallerSet, &manifest, filterAndTransformOCPResources(), nil); err != nil { return err } return nil diff --git a/pkg/reconciler/openshift/tektonaddon/openshiftConsole.go b/pkg/reconciler/openshift/tektonaddon/openshiftConsole.go index 825b6bd194..27a5cb5c21 100644 --- a/pkg/reconciler/openshift/tektonaddon/openshiftConsole.go +++ b/pkg/reconciler/openshift/tektonaddon/openshiftConsole.go @@ -58,7 +58,7 @@ func (r *Reconciler) EnsureOpenShiftConsoleResources(ctx context.Context, ta *v1 if len(filteredManifest.Resources()) == 0 { return nil, consoleCLIDownloadExist } - if err := r.installerSetClient.CustomSet(ctx, ta, OpenShiftConsoleInstallerSet, &filteredManifest, filterAndTransformOCPResources()); err != nil { + if err := r.installerSetClient.CustomSet(ctx, ta, OpenShiftConsoleInstallerSet, &filteredManifest, filterAndTransformOCPResources(), nil); err != nil { return err, consoleCLIDownloadExist } return nil, consoleCLIDownloadExist diff --git a/pkg/reconciler/openshift/tektonaddon/pipelinetemplate.go b/pkg/reconciler/openshift/tektonaddon/pipelinetemplate.go index b0e15fef95..cf5c399cae 100644 --- a/pkg/reconciler/openshift/tektonaddon/pipelinetemplate.go +++ 
b/pkg/reconciler/openshift/tektonaddon/pipelinetemplate.go @@ -30,7 +30,7 @@ import ( func (r *Reconciler) EnsurePipelineTemplates(ctx context.Context, enable string, ta *v1alpha1.TektonAddon) error { manifest := *r.pipelineTemplateManifest if enable == "true" { - if err := r.installerSetClient.CustomSet(ctx, ta, PipelinesTemplateInstallerSet, &manifest, filterAndTransformCommon()); err != nil { + if err := r.installerSetClient.CustomSet(ctx, ta, PipelinesTemplateInstallerSet, &manifest, filterAndTransformCommon(), nil); err != nil { return err } } else { diff --git a/pkg/reconciler/openshift/tektonaddon/triggers.go b/pkg/reconciler/openshift/tektonaddon/triggers.go index 32b4ad66d2..1a7ecd8ec7 100644 --- a/pkg/reconciler/openshift/tektonaddon/triggers.go +++ b/pkg/reconciler/openshift/tektonaddon/triggers.go @@ -26,7 +26,7 @@ import ( func (r *Reconciler) EnsureTriggersResources(ctx context.Context, ta *v1alpha1.TektonAddon) error { manifest := *r.triggersResourcesManifest - if err := r.installerSetClient.CustomSet(ctx, ta, TriggersResourcesInstallerSet, &manifest, filterAndTransformCommon()); err != nil { + if err := r.installerSetClient.CustomSet(ctx, ta, TriggersResourcesInstallerSet, &manifest, filterAndTransformCommon(), nil); err != nil { return err } return nil diff --git a/pkg/reconciler/openshift/tektonconfig/extension/pipelinesascode.go b/pkg/reconciler/openshift/tektonconfig/extension/pipelinesascode.go index df1c254203..904e168e4b 100644 --- a/pkg/reconciler/openshift/tektonconfig/extension/pipelinesascode.go +++ b/pkg/reconciler/openshift/tektonconfig/extension/pipelinesascode.go @@ -74,7 +74,8 @@ func createOPAC(ctx context.Context, clients op.OpenShiftPipelinesAsCodeInterfac }, Config: config.Spec.Config, PACSettings: v1alpha1.PACSettings{ - Settings: config.Spec.Platforms.OpenShift.PipelinesAsCode.Settings, + Settings: config.Spec.Platforms.OpenShift.PipelinesAsCode.Settings, + AdditionalPACControllers: 
config.Spec.Platforms.OpenShift.PipelinesAsCode.PACSettings.AdditionalPACControllers, }, }, } @@ -114,6 +115,16 @@ func updateOPAC(ctx context.Context, opacCR *v1alpha1.OpenShiftPipelinesAsCode, updated = true } + if !reflect.DeepEqual(opacCR.Spec.PACSettings.AdditionalPACControllers, config.Spec.Platforms.OpenShift.PipelinesAsCode.PACSettings.AdditionalPACControllers) { + opacCR.Spec.PACSettings.AdditionalPACControllers = config.Spec.Platforms.OpenShift.PipelinesAsCode.PACSettings.AdditionalPACControllers + updated = true + } + + if !reflect.DeepEqual(opacCR.Spec.PACSettings.Options, config.Spec.Platforms.OpenShift.PipelinesAsCode.PACSettings.Options) { + opacCR.Spec.PACSettings.Options = config.Spec.Platforms.OpenShift.PipelinesAsCode.PACSettings.Options + updated = true + } + if opacCR.ObjectMeta.OwnerReferences == nil { ownerRef := *metav1.NewControllerRef(config, config.GroupVersionKind()) opacCR.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ownerRef} diff --git a/pkg/reconciler/openshift/tektonconfig/extension/pipelinesascode_test.go b/pkg/reconciler/openshift/tektonconfig/extension/pipelinesascode_test.go index df7d27a2df..ca3659828f 100644 --- a/pkg/reconciler/openshift/tektonconfig/extension/pipelinesascode_test.go +++ b/pkg/reconciler/openshift/tektonconfig/extension/pipelinesascode_test.go @@ -97,6 +97,7 @@ func markOPACReady(t *testing.T, ctx context.Context, c op.OpenShiftPipelinesAsC opac.Status.MarkPreReconcilerComplete() opac.Status.MarkInstallerSetAvailable() opac.Status.MarkInstallerSetReady() + opac.Status.MarkAdditionalPACControllerComplete() opac.Status.MarkPostReconcilerComplete() _, err = c.UpdateStatus(ctx, opac, metav1.UpdateOptions{}) util.AssertEqual(t, err, nil) diff --git a/test/e2e/openshift/openshiftpipelinesascode_test.go b/test/e2e/openshift/openshiftpipelinesascode_test.go new file mode 100644 index 0000000000..44a6fa1c3d --- /dev/null +++ b/test/e2e/openshift/openshiftpipelinesascode_test.go @@ -0,0 +1,131 @@ +//go:build 
e2e
+// +build e2e
+
+/*
+Copyright 2024 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openshift
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/tektoncd/operator/test/client"
+ "github.com/tektoncd/operator/test/resources"
+ "github.com/tektoncd/operator/test/utils"
+)
+
+const (
+ interval = 5 * time.Second
+ timeout = 5 * time.Minute
+ deploymentName = "additional-test-pac-controller"
+)
+
+// TestOpenshiftPipelinesAsCode verifies the OpenShiftPipelinesAsCode CR creation, additional controller creation and
+deletion, and OpenShiftPipelinesAsCode CR deletion. 
+func TestOpenshiftPipelinesAsCode(t *testing.T) {
+ crNames := utils.ResourceNames{
+ TektonConfig: "config",
+ TektonPipeline: "pipeline",
+ OpenShiftPipelinesAsCode: "pipelines-as-code",
+ Namespace: "",
+ TargetNamespace: "openshift-pipelines",
+ }
+
+ clients := client.Setup(t, crNames.TargetNamespace)
+
+ if os.Getenv("TARGET") == "openshift" {
+ crNames.TargetNamespace = "openshift-pipelines"
+ }
+
+ utils.CleanupOnInterrupt(func() { utils.TearDownPipeline(clients, crNames.OpenShiftPipelinesAsCode) })
+ utils.CleanupOnInterrupt(func() { utils.TearDownPipeline(clients, crNames.TektonPipeline) })
+ utils.CleanupOnInterrupt(func() { utils.TearDownNamespace(clients, crNames.TargetNamespace) })
+
+ defer utils.TearDownNamespace(clients, crNames.OpenShiftPipelinesAsCode)
+ defer utils.TearDownPipeline(clients, crNames.TektonPipeline)
+ defer utils.TearDownNamespace(clients, crNames.TargetNamespace)
+
+ resources.EnsureNoTektonConfigInstance(t, clients, crNames)
+
+ // Create a TektonPipeline
+ if _, err := resources.EnsureTektonPipelineExists(clients.TektonPipeline(), crNames); err != nil {
+ t.Fatalf("TektonPipeline %q failed to create: %v", crNames.TektonPipeline, err)
+ }
+
+ // Test if TektonPipeline can reach the READY status
+ t.Run("create-pipeline", func(t *testing.T) {
+ resources.AssertTektonPipelineCRReadyStatus(t, clients, crNames)
+ })
+
+ // Create the OpenShift Pipelines As Code
+ if _, err := resources.EnsureOpenShiftPipelinesAsCodeExists(clients.OpenShiftPipelinesAsCode(), crNames); err != nil {
+ t.Fatalf("OpenShiftPipelinesAsCode %q failed to create: %v", crNames.OpenShiftPipelinesAsCode, err)
+ }
+
+ // Test if OpenShiftPipelinesAsCode can reach the READY status
+ t.Run("create-openshift-pipelines-as-code", func(t *testing.T) {
+ resources.AssertOpenShiftPipelinesAsCodeCRReadyStatus(t, clients, crNames)
+ })
+
+ // Create the additional Pipelines As Code controller in the OpenShiftPipelinesAsCode CR
+ if _, err := resources.CreateAdditionalPipelinesAsCodeController(clients.OpenShiftPipelinesAsCode(), crNames); err != nil {
+ t.Fatalf("failed to create additional pipelines as code controller in %q: %v", crNames.OpenShiftPipelinesAsCode, err)
+ }
+
+ // Test if OpenShiftPipelinesAsCode can reach the READY status after deploying additional controller
+ t.Run("create-additional-pipelines-as-code-controller", func(t *testing.T) {
+ resources.AssertOpenShiftPipelinesAsCodeCRReadyStatus(t, clients, crNames)
+ })
+
+ // Wait for additional pipelines as code controller deployment gets ready
+ if err := resources.WaitForDeploymentReady(clients.KubeClient, deploymentName, crNames.TargetNamespace, interval, timeout); err != nil {
+ t.Fatalf("failed to check ready status of additional pipelines as code deployment with name %q: %v", deploymentName, err)
+ }
+
+ // Check whether the additional Pipelines As Code deployment is available
+ if err := resources.WaitForDeploymentAvailable(clients.KubeClient, deploymentName, crNames.TargetNamespace, interval, timeout); err != nil {
+ t.Fatalf("failed to check if additional pipelines as code deployment %q is available: %v", deploymentName, err)
+ }
+
+ // Remove the additional Pipelines As Code controller from the OpenShiftPipelinesAsCode CR
+ if _, err := resources.RemoveAdditionalPipelinesAsCodeController(clients.OpenShiftPipelinesAsCode(), crNames); err != nil {
+ t.Fatalf("failed to remove additional pipelines as code controller in %q: %v", crNames.OpenShiftPipelinesAsCode, err)
+ }
+
+ // Test if OpenShiftPipelinesAsCode can reach the READY status after removing additional controller
+ t.Run("remove-additional-controller-pipelines-as-code", func(t *testing.T) {
+ resources.AssertOpenShiftPipelinesAsCodeCRReadyStatus(t, clients, crNames)
+ })
+
+ // Wait for additional pipelines as code controller deployment gets deleted
+ if err := resources.WaitForDeploymentDeletion(clients.KubeClient, deploymentName, crNames.TargetNamespace, interval, timeout); err != 
nil { + t.Fatalf("failed to check if additional pipelines as code deployment %q is deleted: %v", deploymentName, err) + } + + // Delete the OpenShiftPipelinesAsCode CR instance + t.Run("delete-openshift-pipelines-as-code", func(t *testing.T) { + resources.AssertOpenShiftPipelinesAsCodeCRReadyStatus(t, clients, crNames) + resources.OpenShiftPipelinesAsCodeCRDelete(t, clients, crNames) + }) + + // Delete the TektonPipeline CR instance + t.Run("delete-pipeline", func(t *testing.T) { + resources.AssertTektonPipelineCRReadyStatus(t, clients, crNames) + resources.TektonPipelineCRDelete(t, clients, crNames) + }) +} diff --git a/test/resources/deployment.go b/test/resources/deployment.go index 99d43da524..87ceebfcee 100644 --- a/test/resources/deployment.go +++ b/test/resources/deployment.go @@ -100,6 +100,18 @@ func WaitForDeploymentReady(kubeClient kubernetes.Interface, name, namespace str return wait.PollImmediate(interval, timeout, verifyFunc) } +func WaitForDeploymentAvailable(kubeClient kubernetes.Interface, name, namespace string, interval, timeout time.Duration) error { + verifyFunc := func() (bool, error) { + dep, err := kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil && !apierrs.IsNotFound(err) { + return false, err + } + return IsDeploymentAvailable(dep) + } + + return wait.PollImmediate(interval, timeout, verifyFunc) +} + func WaitForDeploymentDeletion(kubeClient kubernetes.Interface, name, namespace string, interval, timeout time.Duration) error { verifyFunc := func() (bool, error) { _, err := kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/test/resources/openshiftpipelinesascode.go b/test/resources/openshiftpipelinesascode.go new file mode 100644 index 0000000000..ad5d8a326f --- /dev/null +++ b/test/resources/openshiftpipelinesascode.go @@ -0,0 +1,134 @@ +/* +Copyright 2024 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the 
"License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resources + +import ( + "context" + "fmt" + "testing" + + "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" + typedv1alpha1 "github.com/tektoncd/operator/pkg/client/clientset/versioned/typed/operator/v1alpha1" + "github.com/tektoncd/operator/test/utils" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/test/logging" +) + +// EnsureOpenShiftPipelinesAsCodeExists creates a OpenShiftPipelinesAsCode with the name names.OpenShiftPipelinesAsCode, if it does not exist. +func EnsureOpenShiftPipelinesAsCodeExists(clients typedv1alpha1.OpenShiftPipelinesAsCodeInterface, names utils.ResourceNames) (*v1alpha1.OpenShiftPipelinesAsCode, error) { + // If this function is called by the upgrade tests, we only create the custom resource, if it does not exist. 
+ ks, err := clients.Get(context.TODO(), names.OpenShiftPipelinesAsCode, metav1.GetOptions{}) + if apierrs.IsNotFound(err) { + ks := &v1alpha1.OpenShiftPipelinesAsCode{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.OpenShiftPipelinesAsCode, + }, + Spec: v1alpha1.OpenShiftPipelinesAsCodeSpec{ + CommonSpec: v1alpha1.CommonSpec{ + TargetNamespace: names.TargetNamespace, + }, + PACSettings: v1alpha1.PACSettings{ + Settings: map[string]string{}, + }, + }, + } + return clients.Create(context.TODO(), ks, metav1.CreateOptions{}) + } + return ks, err +} + +// WaitForOpenshiftPipelinesAsCodeState polls the status of the OpenShift Pipelines As Code called name +// from client every `interval` until `inState` returns `true` indicating it +// is done, returns an error or timeout. +func WaitForOpenshiftPipelinesAsCodeState(clients typedv1alpha1.OpenShiftPipelinesAsCodeInterface, name string, + inState func(s *v1alpha1.OpenShiftPipelinesAsCode, err error) (bool, error)) (*v1alpha1.OpenShiftPipelinesAsCode, error) { + span := logging.GetEmitableSpan(context.Background(), fmt.Sprintf("WaitForOpenShiftPipelinesAsCodeState/%s/%s", name, "TektonOpenShiftPipelinesASCodeIsReady")) + defer span.End() + + var lastState *v1alpha1.OpenShiftPipelinesAsCode + waitErr := wait.PollImmediate(utils.Interval, utils.Timeout, func() (bool, error) { + lastState, err := clients.Get(context.TODO(), name, metav1.GetOptions{}) + return inState(lastState, err) + }) + + if waitErr != nil { + return lastState, fmt.Errorf("OpenShiftPipelinesAsCode %s is not in desired state, got: %+v: %w", name, lastState, waitErr) + } + return lastState, nil +} + +// IsOpenShiftPipelinesAsCodeReady will check the status conditions of the OpenShiftPipelinesAsCode and return true if the OpenShiftPipelinesAsCode is ready. 
+func IsOpenShiftPipelinesAsCodeReady(s *v1alpha1.OpenShiftPipelinesAsCode, err error) (bool, error) { + return s.Status.IsReady(), err +} + +// AssertOpenShiftPipelinesAsCodeCRReadyStatus verifies if the OpenShiftPipelinesAsCode reaches the READY status. +func AssertOpenShiftPipelinesAsCodeCRReadyStatus(t *testing.T, clients *utils.Clients, names utils.ResourceNames) { + if _, err := WaitForOpenshiftPipelinesAsCodeState(clients.OpenShiftPipelinesAsCode(), names.OpenShiftPipelinesAsCode, IsOpenShiftPipelinesAsCodeReady); err != nil { + t.Fatalf("OpenShiftPipelinesAsCodeCR %q failed to get to the READY status: %v", names.OpenShiftPipelinesAsCode, err) + } +} + +// Fetch the OpenShiftPipelinesAsCode CR and update the spec to create additional controller of PAC +func CreateAdditionalPipelinesAsCodeController(clients typedv1alpha1.OpenShiftPipelinesAsCodeInterface, names utils.ResourceNames) (*v1alpha1.OpenShiftPipelinesAsCode, error) { + // fetch the OpenShiftPipelinesAsCode CR + opacCR, err := clients.Get(context.TODO(), names.OpenShiftPipelinesAsCode, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // update the OpenshiftPipelines CR to add the additional Pipelines As Code Controller + opacCR.Spec.PACSettings.AdditionalPACControllers = map[string]v1alpha1.AdditionalPACControllerConfig{ + "additional-test": { + ConfigMapName: "additional-test-configmap", + SecretName: "additional-test-secret", + }, + } + return clients.Update(context.TODO(), opacCR, metav1.UpdateOptions{}) +} + +// Fetch the OpenShiftPipelinesAsCode CR and delete the additional pipelines as code config +func RemoveAdditionalPipelinesAsCodeController(clients typedv1alpha1.OpenShiftPipelinesAsCodeInterface, names utils.ResourceNames) (*v1alpha1.OpenShiftPipelinesAsCode, error) { + // fetch the OpenShiftPipelinesAsCode CR + opacCR, err := clients.Get(context.TODO(), names.OpenShiftPipelinesAsCode, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // update the 
 OpenshiftPipelines CR to remove the additional Pipelines As Code Controller + opacCR.Spec.PACSettings.AdditionalPACControllers = map[string]v1alpha1.AdditionalPACControllerConfig{} + return clients.Update(context.TODO(), opacCR, metav1.UpdateOptions{}) +} + +// OpenShiftPipelinesAsCodeCRDelete deletes the OpenShiftPipelinesAsCode to see if all resources will be deleted +func OpenShiftPipelinesAsCodeCRDelete(t *testing.T, clients *utils.Clients, crNames utils.ResourceNames) { + if err := clients.OpenShiftPipelinesAsCode().Delete(context.TODO(), crNames.OpenShiftPipelinesAsCode, metav1.DeleteOptions{}); err != nil { + t.Fatalf("OpenShiftPipelinesAsCode %q failed to delete: %v", crNames.OpenShiftPipelinesAsCode, err) + } + err := wait.PollImmediate(utils.Interval, utils.Timeout, func() (bool, error) { + _, err := clients.OpenShiftPipelinesAsCode().Get(context.TODO(), crNames.OpenShiftPipelinesAsCode, metav1.GetOptions{}) + if apierrs.IsNotFound(err) { + return true, nil + } + return false, err + }) + if err != nil { + t.Fatal("Timed out waiting on OpenShiftPipelinesAsCode to delete", err) + } +} diff --git a/test/utils/clients.go b/test/utils/clients.go index 168cb266a8..cc6ad67093 100644 --- a/test/utils/clients.go +++ b/test/utils/clients.go @@ -178,3 +178,7 @@ func (c *Clients) TektonInstallerSet() operatorv1alpha1.TektonInstallerSetInterf return c.Operator.TektonInstallerSets() } + +func (c *Clients) OpenShiftPipelinesAsCode() operatorv1alpha1.OpenShiftPipelinesAsCodeInterface { + return c.Operator.OpenShiftPipelinesAsCodes() +} diff --git a/test/utils/names.go b/test/utils/names.go index a358e92dfc..8cdac9f0df 100644 --- a/test/utils/names.go +++ b/test/utils/names.go @@ -31,6 +31,7 @@ type ResourceNames struct { Namespace string TargetNamespace string OperatorPodSelectorLabel string + OpenShiftPipelinesAsCode string } func GetResourceNames() ResourceNames { @@ -43,6 +44,7 @@ 
func GetResourceNames() ResourceNames { TektonResult: v1alpha1.ResultResourceName, TektonChain: v1alpha1.ChainResourceName, TektonHub: v1alpha1.HubResourceName, + OpenShiftPipelinesAsCode: v1alpha1.OpenShiftPipelinesAsCodeName, Namespace: "tekton-operator", TargetNamespace: "tekton-pipelines", OperatorPodSelectorLabel: "name=tekton-operator",