diff --git a/.github/workflows/crons.yaml b/.github/workflows/crons.yaml index 1fa3fe38a2..784e8bca78 100644 --- a/.github/workflows/crons.yaml +++ b/.github/workflows/crons.yaml @@ -19,15 +19,15 @@ jobs: include: # these helper comments are needed by the dev branch workflow # please do not edit them unless you're changing the version as well - # current=129.0 - - name: "Nightly for MetalK8s 129.0" + # current=130.0 + - name: "Nightly for MetalK8s 130.0" cron: "0 1 * * 1-5" - branch: "development/129.0" + branch: "development/130.0" workflow: "nightly.yaml" - # old=128.0 - - name: "Nightly for MetalK8s 128.0" + # old=129.0 + - name: "Nightly for MetalK8s 129.0" cron: "0 2 * * 1-5" - branch: "development/128.0" + branch: "development/129.0" workflow: "nightly.yaml" steps: - name: Checkout diff --git a/CHANGELOG.md b/CHANGELOG.md index 19a08239b3..a86e75b2f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # CHANGELOG +## Release 130.0.0 (in development) + +### Enhancements + +- Bump Kubernetes version to + [1.30.7](https://github.com/kubernetes/kubernetes/releases/tag/v1.30.7) + (PR[#4497](https://github.com/scality/metalk8s/pull/4497)) + +- Bump etcd version to [3.5.16](https://github.com/etcd-io/etcd/releases/tag/v3.5.16) + (PR[#4497](https://github.com/scality/metalk8s/pull/4497)) + ## Release 129.0.0 (in development) ### Removals diff --git a/VERSION b/VERSION index 182bfa4878..6496aa4f3d 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ -VERSION_MAJOR=129 +VERSION_MAJOR=130 VERSION_MINOR=0 VERSION_PATCH=0 VERSION_SUFFIX=-dev diff --git a/buildchain/buildchain/codegen.py b/buildchain/buildchain/codegen.py index 3ae6071cb0..366ee17327 100644 --- a/buildchain/buildchain/codegen.py +++ b/buildchain/buildchain/codegen.py @@ -125,60 +125,6 @@ def codegen_chart_fluent_bit() -> types.TaskDict: } -def codegen_chart_ingress_nginx() -> types.TaskDict: - """Generate the SLS file for NGINX Ingress using the chart render script.""" - chart_dir = 
constants.CHART_ROOT / "ingress-nginx" - ingress_nginx_namespace = "metalk8s-ingress" - actions = [] - file_dep = list(utils.git_ls(chart_dir)) - file_dep.append(constants.CHART_RENDER_SCRIPT) - - # Workload Plane Ingress - target_sls = ( - constants.ROOT / "salt/metalk8s/addons/nginx-ingress/deployed/chart.sls" - ) - chart_name = "ingress-nginx" - value_file = constants.CHART_ROOT / f"{chart_name}.yaml" - actions.append( - doit.action.CmdAction( - f"{constants.CHART_RENDER_CMD} {chart_name} {value_file} {chart_dir} " - f"--namespace {ingress_nginx_namespace} --remove-manifest ConfigMap " - f"{chart_name}-controller " - f"--output {target_sls}", - cwd=constants.ROOT, - ) - ) - file_dep.append(value_file) - - # Control Plane Ingress - target_sls = ( - constants.ROOT - / "salt/metalk8s/addons/nginx-ingress-control-plane" - / "deployed/chart.sls" - ) - chart_name = "ingress-nginx-control-plane" - value_file = constants.CHART_ROOT / f"{chart_name}.yaml" - actions.append( - doit.action.CmdAction( - f"{constants.CHART_RENDER_CMD} {chart_name} {value_file} {chart_dir} " - f"--namespace {ingress_nginx_namespace} --remove-manifest ConfigMap " - f"{chart_name}-controller " - f"--output {target_sls}", - cwd=constants.ROOT, - ) - ) - file_dep.append(value_file) - - return { - "name": "chart_ingress-nginx", - "title": utils.title_with_subtask_name("CODEGEN"), - "doc": codegen_chart_ingress_nginx.__doc__, - "actions": actions, - "file_dep": file_dep, - "task_dep": ["check_for:tox", "check_for:helm"], - } - - def codegen_chart_kube_prometheus_stack() -> types.TaskDict: """Generate the SLS file for Kube Prometheus Stack using the chart render script.""" target_sls = ( @@ -333,18 +279,35 @@ def codegen_chart_cert_manager() -> types.TaskDict: } +def codegen_olm() -> types.TaskDict: + """Generate the SLS file for OLMv1 using the render script.""" + target_sls = constants.ROOT / "salt/metalk8s/addons/olm/deployed/chart.sls" + cmd = f"{constants.OLM_RENDER_CMD} " f"--output 
{target_sls}" + + file_dep = [constants.OLM_RENDER_SCRIPT] + + return { + "name": "olm", + "title": utils.title_with_subtask_name("CODEGEN"), + "doc": codegen_olm.__doc__, + "actions": [doit.action.CmdAction(cmd, cwd=constants.ROOT)], + "file_dep": file_dep, + "task_dep": ["check_for:tox"], + } + + # List of available code generation tasks. CODEGEN: Tuple[Callable[[], types.TaskDict], ...] = ( codegen_storage_operator, codegen_metalk8s_operator, codegen_chart_dex, codegen_chart_fluent_bit, - codegen_chart_ingress_nginx, codegen_chart_kube_prometheus_stack, codegen_chart_loki, codegen_chart_prometheus_adapter, codegen_chart_thanos, codegen_chart_cert_manager, + codegen_olm, ) diff --git a/buildchain/buildchain/constants.py b/buildchain/buildchain/constants.py index 945105f988..414b6a8567 100644 --- a/buildchain/buildchain/constants.py +++ b/buildchain/buildchain/constants.py @@ -32,6 +32,7 @@ PROMETHEUS_REPOSITORY: str = "quay.io/prometheus" THANOS_REPOSITORY: str = "quay.io/thanos" CERT_MANAGER_REPOSITORY: str = "quay.io/jetstack" +OPERATOR_FRAMEWORK_REPOSITORYT: str = "quay.io/operator-framework" # Paths {{{ @@ -57,12 +58,16 @@ STATIC_CONTAINER_REGISTRY: Path = Path(ROOT, "buildchain/static-container-registry") # Path to the MetalK8s operator source directory METALK8S_OPERATOR_ROOT: Path = ROOT / "operator" +# Path to the nginx-operator source directory. +NGINX_OPERATOR_ROOT: Path = ROOT / "nginx-operator" # Path to the storage-operator source directory. STORAGE_OPERATOR_ROOT: Path = ROOT / "storage-operator" # Path to the UI build root directory. UI_BUILD_ROOT: Path = config.BUILD_ROOT / "ui" # Path to the shell-ui build root directory. SHELL_UI_BUILD_ROOT: Path = config.BUILD_ROOT / "shell-ui" +# Path to the MetalK8s Catalog Source root directory +CATALOG_SOURCE_ROOT: Path = ROOT / "catalog-source" # Docker entrypoints. 
REDHAT_ENTRYPOINT: Path = ROOT / "packages/redhat/common/entrypoint.sh" @@ -76,6 +81,8 @@ CHART_ROOT: Path = ROOT / "charts" CHART_RENDER_SCRIPT: Path = CHART_ROOT / "render.py" +OLM_RENDER_SCRIPT: Path = ROOT / "olm/render.py" + # }}} # Vagrant parameters {{{ @@ -145,6 +152,7 @@ def git_ref() -> Optional[str]: ] CHART_RENDER_CMD: str = f"tox -e chart-render -- --kube-version {versions.K8S_VERSION}" +OLM_RENDER_CMD: str = f"tox -e olm-render -- -v v{versions.OLM_VERSION}" # For mypy, see `--no-implicit-reexport` documentation. __all__ = ["ROOT"] diff --git a/buildchain/buildchain/image.py b/buildchain/buildchain/image.py index 858e473e53..1874fc5a10 100644 --- a/buildchain/buildchain/image.py +++ b/buildchain/buildchain/image.py @@ -217,6 +217,11 @@ def _local_image(name: str, **kwargs: Any) -> targets.LocalImage: "cert-manager-cainjector", "cert-manager-acmesolver", ], + constants.OPERATOR_FRAMEWORK_REPOSITORYT: [ + "catalogd", + "operator-controller", + "opm", + ], } REMOTE_NAMES: Dict[str, str] = { @@ -323,6 +328,30 @@ def _local_image(name: str, **kwargs: Any) -> targets.LocalImage: "VERSION": versions.VERSION, }, ), + _local_image( + name="nginx-operator", + dockerfile=constants.NGINX_OPERATOR_ROOT / "Dockerfile", + build_context=constants.NGINX_OPERATOR_ROOT, + ), + _local_image( + name="nginx-operator-bundle", + dockerfile=constants.NGINX_OPERATOR_ROOT / "bundle.Dockerfile", + build_context=constants.NGINX_OPERATOR_ROOT, + build_args={ + "BUILDER_IMG": TO_PULL["alpine"].remote_fullname_digest, + "METALK8S_VERSION": versions.VERSION, + }, + ), + _local_image( + name="metalk8s-catalog-source", + dockerfile=constants.CATALOG_SOURCE_ROOT / "catalog.Dockerfile", + build_context=constants.CATALOG_SOURCE_ROOT, + build_args={ + "BASE_IMG": TO_PULL["opm"].remote_fullname_digest, + "SED_IMG": TO_PULL["alpine"].remote_fullname_digest, + "METALK8S_VERSION": versions.VERSION, + }, + ), ) # }}} diff --git a/buildchain/buildchain/salt_tree.py 
b/buildchain/buildchain/salt_tree.py index 0fdef93c6f..600e978d0c 100644 --- a/buildchain/buildchain/salt_tree.py +++ b/buildchain/buildchain/salt_tree.py @@ -344,6 +344,14 @@ def task(self) -> types.TaskDict: file_dep=[METALK8S_OPERATOR_MANIFESTS], ), Path("salt/metalk8s/addons/metalk8s-operator/deployed/init.sls"), + Path("salt/metalk8s/addons/nginx-operator/deployed/clusterextension.sls"), + Path("salt/metalk8s/addons/nginx-operator/deployed/init.sls"), + Path("salt/metalk8s/addons/nginx-operator/deployed/namespace.sls"), + Path("salt/metalk8s/addons/nginx-operator/deployed/rbac.sls"), + Path("salt/metalk8s/addons/olm/catalog/deployed/cluster-catalog.sls"), + Path("salt/metalk8s/addons/olm/catalog/deployed/init.sls"), + Path("salt/metalk8s/addons/olm/deployed/chart.sls"), + Path("salt/metalk8s/addons/olm/deployed/init.sls"), Path("salt/metalk8s/addons/prometheus-adapter/deployed/chart.sls"), Path("salt/metalk8s/addons/prometheus-adapter/deployed/init.sls"), Path("salt/metalk8s/addons/prometheus-operator/macros.j2"), @@ -680,6 +688,7 @@ def task(self) -> types.TaskDict: Path("salt/_modules/metalk8s_kubernetes_utils.py"), Path("salt/_modules/metalk8s_monitoring.py"), Path("salt/_modules/metalk8s_network.py"), + Path("salt/_modules/metalk8s_olm.py"), Path("salt/_modules/metalk8s_os.py"), Path("salt/_modules/metalk8s_package_manager_yum.py"), Path("salt/_modules/metalk8s_service_configuration.py"), diff --git a/buildchain/buildchain/versions.py b/buildchain/buildchain/versions.py index a1e95d00c9..1a1ab4e021 100644 --- a/buildchain/buildchain/versions.py +++ b/buildchain/buildchain/versions.py @@ -19,8 +19,8 @@ # Project-wide versions {{{ K8S_VERSION_MAJOR: str = "1" -K8S_VERSION_MINOR: str = "29" -K8S_VERSION_PATCH: str = "8" +K8S_VERSION_MINOR: str = "30" +K8S_VERSION_PATCH: str = "7" K8S_SHORT_VERSION: str = f"{K8S_VERSION_MAJOR}.{K8S_VERSION_MINOR}" K8S_VERSION: str = f"{K8S_SHORT_VERSION}.{K8S_VERSION_PATCH}" @@ -28,10 +28,13 @@ CALICO_VERSION: str = "3.29.0" 
SALT_VERSION: str = "3002.9" CONTAINERD_VERSION: str = "1.6.36" +NGINX_OPERATOR_VERSION: str = "4.11.3" CONTAINERD_RELEASE: str = "1" SOSREPORT_RELEASE: str = "2" +OLM_VERSION: str = "1.1.0" + def load_version_information() -> None: """Load version information from `VERSION`.""" @@ -80,7 +83,7 @@ def load_version_information() -> None: "2cb86b2d8326a987546dc7fb393f43d43d478fea12ce3ce4accbda571f47f86b" ) -ETCD_VERSION: str = "3.5.15" +ETCD_VERSION: str = "3.5.16" ETCD_IMAGE_VERSION: str = f"{ETCD_VERSION}-0" NGINX_IMAGE_VERSION: str = "1.27.2-alpine" NODEJS_IMAGE_VERSION: str = "20.11.1" @@ -141,7 +144,7 @@ def _version_prefix(version: str, prefix: str = "v") -> str: Image( name="etcd", version=ETCD_IMAGE_VERSION, - digest="sha256:a6dc63e6e8cfa0307d7851762fa6b629afb18f28d8aa3fab5a6e91b4af60026a", + digest="sha256:c6a9d11cc5c04b114ccdef39a9265eeef818e3d02f5359be035ae784097fdec5", ), Image( name="grafana", @@ -156,22 +159,22 @@ def _version_prefix(version: str, prefix: str = "v") -> str: Image( name="kube-apiserver", version=_version_prefix(K8S_VERSION), - digest="sha256:6f72fa926c9b05e10629fe1a092fd28dcd65b4fdfd0cc7bd55f85a57a6ba1fa5", + digest="sha256:13f4f0f5850b39742101c656b1bbd5090eacf27084ad89b489fc824ef482ed9c", ), Image( name="kube-controller-manager", version=_version_prefix(K8S_VERSION), - digest="sha256:6f27d63ded20614c68554b477cd7a78eda78a498a92bfe8935cf964ca5b74d0b", + digest="sha256:5b8dc26c05b273ce198fbaf4eb179f3b6d6919c6e2116d36dc7f82555374c687", ), Image( name="kube-proxy", version=_version_prefix(K8S_VERSION), - digest="sha256:559a093080f70ca863922f5e4bb90d6926d52653a91edb5b72c685ebb65f1858", + digest="sha256:c4b64b2ac9e3d26c43a81ad11226a127e278a04b6f33f8baa9f5daa162b71c26", ), Image( name="kube-scheduler", version=_version_prefix(K8S_VERSION), - digest="sha256:da74a66675d95e39ec25da5e70729da746d0fa0b15ee0da872ac980519bc28bd", + digest="sha256:00b16db991101b7361f2f18035e1c6526c0ce6c9a9568524824b4bccdf1afbd6", ), Image( name="kube-state-metrics", @@ 
-225,6 +228,16 @@ def _version_prefix(version: str, prefix: str = "v") -> str: version="v0.36.1", digest="sha256:e542959e1b36d5046083d1b64a7049c356b68a44a173c58b3ae7c0c9ada932d5", ), + Image( + name="catalogd", + version=_version_prefix(OLM_VERSION), + digest="sha256:95477a136772765fa2cfb02a6e5fb52bcc167ef7b5333f9e238b0b13b9e72f7b", + ), + Image( + name="operator-controller", + version=_version_prefix(OLM_VERSION), + digest="sha256:6272919257e695fcdadf0b57cc0a272084ebf2caca7571e2e5d79d6a56a788fa", + ), # Local images Image( name="metalk8s-alert-logger", @@ -266,6 +279,26 @@ def _version_prefix(version: str, prefix: str = "v") -> str: version=VERSION, digest=None, ), + Image( + name="nginx-operator", + version=_version_prefix(NGINX_OPERATOR_VERSION), + digest=None, + ), + Image( + name="nginx-operator-bundle", + version=_version_prefix(NGINX_OPERATOR_VERSION), + digest=None, + ), + Image( + name="opm", + version="v1.49.0", + digest="sha256:0bbe4054f2f88410ae364169379639c06b0e253d6e233bc24fcf2c0cd2d9803d", + ), + Image( + name="metalk8s-catalog-source", + version=VERSION, + digest=None, + ), Image( name="loki", version="3.2.0", diff --git a/catalog-source/.indexignore b/catalog-source/.indexignore new file mode 100644 index 0000000000..01ea697c74 --- /dev/null +++ b/catalog-source/.indexignore @@ -0,0 +1,2 @@ +README.md +*.Dockerfile diff --git a/catalog-source/README.md b/catalog-source/README.md new file mode 100644 index 0000000000..551d037cc9 --- /dev/null +++ b/catalog-source/README.md @@ -0,0 +1,5 @@ +# MetalK8s catalog source for OLMv1 + +contains the current operators: + + - MetalK8s [nginx-operator](../nginx-operator) diff --git a/catalog-source/catalog.Dockerfile b/catalog-source/catalog.Dockerfile new file mode 100644 index 0000000000..61d59c5d23 --- /dev/null +++ b/catalog-source/catalog.Dockerfile @@ -0,0 +1,31 @@ +ARG SED_IMG=alpine:latest +ARG BASE_IMG=quay.io/operator-framework/opm:latest + +# replace MK8S_VERSION_STUB with the actual version +FROM 
${SED_IMG} as sed_step +ARG METALK8S_VERSION +ADD catalog /catalog +RUN find /catalog -type f -exec sed -i "s/MK8S_VERSION_STUB/${METALK8S_VERSION}/g" {} \; + +# The builder image is expected to contain +# /bin/opm (with serve subcommand) +FROM ${BASE_IMG} as builder + +# Copy FBC root into image at /configs and pre-populate serve cache +COPY --from=sed_step /catalog /configs +RUN ["/bin/opm", "serve", "/configs", "--cache-dir=/tmp/cache", "--cache-only"] + +FROM ${BASE_IMG} +# The base image is expected to contain +# /bin/opm (with serve subcommand) and /bin/grpc_health_probe + +# Configure the entrypoint and command +ENTRYPOINT ["/bin/opm"] +CMD ["serve", "/configs", "--cache-dir=/tmp/cache"] + +COPY --from=builder /configs /configs +COPY --from=builder /tmp/cache /tmp/cache + +# Set FBC-specific label for the location of the FBC root directory +# in the image +LABEL operators.operatorframework.io.index.configs.v1=/configs diff --git a/catalog-source/catalog/.indexignore b/catalog-source/catalog/.indexignore new file mode 100644 index 0000000000..b43bf86b50 --- /dev/null +++ b/catalog-source/catalog/.indexignore @@ -0,0 +1 @@ +README.md diff --git a/catalog-source/catalog/nginx-operator/bundles/v4.11.3.yaml b/catalog-source/catalog/nginx-operator/bundles/v4.11.3.yaml new file mode 100644 index 0000000000..f31ed7d3e1 --- /dev/null +++ b/catalog-source/catalog/nginx-operator/bundles/v4.11.3.yaml @@ -0,0 +1,563 @@ +--- +image: registry.metalk8s.lan/metalk8s-MK8S_VERSION_STUB/nginx-operator-bundle:v4.11.3 +name: nginx-operator.v4.11.3 +package: nginx-operator +properties: +- type: olm.gvk + value: + group: metalk8s.scality.com + kind: IngressNginx + version: v1alpha1 +- type: olm.package + value: + packageName: nginx-operator + version: 4.11.3 +- type: olm.csv.metadata + value: + annotations: + alm-examples: |- + [ + { + "apiVersion": "metalk8s.scality.com/v1alpha1", + "kind": "IngressNginx", + "metadata": { + "name": "ingressnginx-sample" + }, + "spec": { + 
"commonLabels": {}, + "controller": { + "addHeaders": {}, + "admissionWebhooks": { + "annotations": {}, + "certManager": { + "admissionCert": { + "duration": "" + }, + "enabled": false, + "rootCert": { + "duration": "" + } + }, + "certificate": "/usr/local/certificates/cert", + "createSecretJob": { + "name": "create", + "resources": {}, + "securityContext": { + "allowPrivilegeEscalation": false, + "capabilities": { + "drop": [ + "ALL" + ] + }, + "readOnlyRootFilesystem": true, + "runAsNonRoot": true, + "runAsUser": 65532, + "seccompProfile": { + "type": "RuntimeDefault" + } + } + }, + "enabled": true, + "existingPsp": "", + "extraEnvs": [], + "failurePolicy": "Fail", + "key": "/usr/local/certificates/key", + "labels": {}, + "name": "admission", + "namespaceSelector": {}, + "objectSelector": {}, + "patch": { + "enabled": true, + "image": { + "digest": "sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f", + "image": "ingress-nginx/kube-webhook-certgen", + "pullPolicy": "IfNotPresent", + "registry": "registry.k8s.io", + "tag": "v1.4.4" + }, + "labels": {}, + "networkPolicy": { + "enabled": false + }, + "nodeSelector": { + "kubernetes.io/os": "linux" + }, + "podAnnotations": {}, + "priorityClassName": "", + "rbac": { + "create": true + }, + "securityContext": {}, + "serviceAccount": { + "automountServiceAccountToken": true, + "create": true, + "name": "" + }, + "tolerations": [] + }, + "patchWebhookJob": { + "name": "patch", + "resources": {}, + "securityContext": { + "allowPrivilegeEscalation": false, + "capabilities": { + "drop": [ + "ALL" + ] + }, + "readOnlyRootFilesystem": true, + "runAsNonRoot": true, + "runAsUser": 65532, + "seccompProfile": { + "type": "RuntimeDefault" + } + } + }, + "port": 8443, + "service": { + "annotations": {}, + "externalIPs": [], + "loadBalancerSourceRanges": [], + "servicePort": 443, + "type": "ClusterIP" + } + }, + "affinity": {}, + "allowSnippetAnnotations": false, + "annotations": {}, + "autoscaling": { + 
"annotations": {}, + "behavior": {}, + "enabled": false, + "maxReplicas": 11, + "minReplicas": 1, + "targetCPUUtilizationPercentage": 50, + "targetMemoryUtilizationPercentage": 50 + }, + "autoscalingTemplate": [], + "config": {}, + "configAnnotations": {}, + "configMapNamespace": "", + "containerName": "controller", + "containerPort": { + "http": 80, + "https": 443 + }, + "containerSecurityContext": {}, + "customTemplate": { + "configMapKey": "", + "configMapName": "" + }, + "disableLeaderElection": false, + "dnsConfig": {}, + "dnsPolicy": "ClusterFirst", + "electionID": "", + "electionTTL": "", + "enableAnnotationValidations": false, + "enableMimalloc": true, + "enableTopologyAwareRouting": false, + "existingPsp": "", + "extraArgs": {}, + "extraContainers": [], + "extraEnvs": [], + "extraInitContainers": [], + "extraModules": [], + "extraVolumeMounts": [], + "extraVolumes": [], + "healthCheckHost": "", + "healthCheckPath": "/healthz", + "hostAliases": [], + "hostNetwork": false, + "hostPort": { + "enabled": false, + "ports": { + "http": 80, + "https": 443 + } + }, + "hostname": {}, + "image": { + "allowPrivilegeEscalation": false, + "chroot": false, + "digest": "sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7", + "digestChroot": "sha256:22701f0fc0f2dd209ef782f4e281bfe2d8cccd50ededa00aec88e0cdbe7edd14", + "image": "ingress-nginx/controller", + "pullPolicy": "IfNotPresent", + "readOnlyRootFilesystem": false, + "registry": "registry.k8s.io", + "runAsNonRoot": true, + "runAsUser": 101, + "seccompProfile": { + "type": "RuntimeDefault" + }, + "tag": "v1.11.3" + }, + "ingressClass": "nginx", + "ingressClassByName": false, + "ingressClassResource": { + "aliases": [], + "annotations": {}, + "controllerValue": "k8s.io/ingress-nginx", + "default": false, + "enabled": true, + "name": "nginx", + "parameters": {} + }, + "keda": { + "apiVersion": "keda.sh/v1alpha1", + "behavior": {}, + "cooldownPeriod": 300, + "enabled": false, + "maxReplicas": 11, + 
"minReplicas": 1, + "pollingInterval": 30, + "restoreToOriginalReplicaCount": false, + "scaledObject": { + "annotations": {} + }, + "triggers": [] + }, + "kind": "Deployment", + "labels": {}, + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/wait-shutdown" + ] + } + } + }, + "livenessProbe": { + "failureThreshold": 5, + "httpGet": { + "path": "/healthz", + "port": 10254, + "scheme": "HTTP" + }, + "initialDelaySeconds": 10, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 1 + }, + "maxmindLicenseKey": "", + "metrics": { + "enabled": false, + "port": 10254, + "portName": "metrics", + "prometheusRule": { + "additionalLabels": {}, + "enabled": false, + "rules": [] + }, + "service": { + "annotations": {}, + "externalIPs": [], + "labels": {}, + "loadBalancerSourceRanges": [], + "servicePort": 10254, + "type": "ClusterIP" + }, + "serviceMonitor": { + "additionalLabels": {}, + "annotations": {}, + "enabled": false, + "metricRelabelings": [], + "namespace": "", + "namespaceSelector": {}, + "relabelings": [], + "scrapeInterval": "30s", + "targetLabels": [] + } + }, + "minAvailable": 1, + "minReadySeconds": 0, + "name": "controller", + "networkPolicy": { + "enabled": false + }, + "nodeSelector": { + "kubernetes.io/os": "linux" + }, + "opentelemetry": { + "containerSecurityContext": { + "allowPrivilegeEscalation": false, + "capabilities": { + "drop": [ + "ALL" + ] + }, + "readOnlyRootFilesystem": true, + "runAsNonRoot": true, + "runAsUser": 65532, + "seccompProfile": { + "type": "RuntimeDefault" + } + }, + "enabled": false, + "image": { + "digest": "sha256:f7604ac0547ed64d79b98d92133234e66c2c8aade3c1f4809fed5eec1fb7f922", + "distroless": true, + "image": "ingress-nginx/opentelemetry-1.25.3", + "registry": "registry.k8s.io", + "tag": "v20240813-b933310d" + }, + "name": "opentelemetry", + "resources": {} + }, + "podAnnotations": {}, + "podLabels": {}, + "podSecurityContext": {}, + "priorityClassName": "", + "proxySetHeaders": {}, + 
"publishService": { + "enabled": true, + "pathOverride": "" + }, + "readinessProbe": { + "failureThreshold": 3, + "httpGet": { + "path": "/healthz", + "port": 10254, + "scheme": "HTTP" + }, + "initialDelaySeconds": 10, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 1 + }, + "replicaCount": 1, + "reportNodeInternalIp": false, + "resources": { + "requests": { + "cpu": "100m", + "memory": "90Mi" + } + }, + "scope": { + "enabled": false, + "namespace": "", + "namespaceSelector": "" + }, + "service": { + "annotations": {}, + "appProtocol": true, + "clusterIP": "", + "enableHttp": true, + "enableHttps": true, + "enabled": true, + "external": { + "enabled": true + }, + "externalIPs": [], + "externalTrafficPolicy": "", + "internal": { + "annotations": {}, + "appProtocol": true, + "clusterIP": "", + "enabled": false, + "externalIPs": [], + "externalTrafficPolicy": "", + "ipFamilies": [ + "IPv4" + ], + "ipFamilyPolicy": "SingleStack", + "loadBalancerClass": "", + "loadBalancerIP": "", + "loadBalancerSourceRanges": [], + "nodePorts": { + "http": "", + "https": "", + "tcp": {}, + "udp": {} + }, + "ports": {}, + "sessionAffinity": "", + "targetPorts": {}, + "type": "" + }, + "ipFamilies": [ + "IPv4" + ], + "ipFamilyPolicy": "SingleStack", + "labels": {}, + "loadBalancerClass": "", + "loadBalancerIP": "", + "loadBalancerSourceRanges": [], + "nodePorts": { + "http": "", + "https": "", + "tcp": {}, + "udp": {} + }, + "ports": { + "http": 80, + "https": 443 + }, + "sessionAffinity": "", + "targetPorts": { + "http": "http", + "https": "https" + }, + "type": "LoadBalancer" + }, + "shareProcessNamespace": false, + "sysctls": {}, + "tcp": { + "annotations": {}, + "configMapNamespace": "" + }, + "terminationGracePeriodSeconds": 300, + "tolerations": [], + "topologySpreadConstraints": [], + "udp": { + "annotations": {}, + "configMapNamespace": "" + }, + "updateStrategy": {}, + "watchIngressWithoutClass": false + }, + "defaultBackend": { + "affinity": {}, + 
"autoscaling": { + "annotations": {}, + "enabled": false, + "maxReplicas": 2, + "minReplicas": 1, + "targetCPUUtilizationPercentage": 50, + "targetMemoryUtilizationPercentage": 50 + }, + "containerSecurityContext": {}, + "enabled": false, + "existingPsp": "", + "extraArgs": {}, + "extraConfigMaps": [], + "extraEnvs": [], + "extraVolumeMounts": [], + "extraVolumes": [], + "image": { + "allowPrivilegeEscalation": false, + "image": "defaultbackend-amd64", + "pullPolicy": "IfNotPresent", + "readOnlyRootFilesystem": true, + "registry": "registry.k8s.io", + "runAsNonRoot": true, + "runAsUser": 65534, + "seccompProfile": { + "type": "RuntimeDefault" + }, + "tag": "1.5" + }, + "labels": {}, + "livenessProbe": { + "failureThreshold": 3, + "initialDelaySeconds": 30, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 5 + }, + "minAvailable": 1, + "minReadySeconds": 0, + "name": "defaultbackend", + "networkPolicy": { + "enabled": false + }, + "nodeSelector": { + "kubernetes.io/os": "linux" + }, + "podAnnotations": {}, + "podLabels": {}, + "podSecurityContext": {}, + "port": 8080, + "priorityClassName": "", + "readinessProbe": { + "failureThreshold": 6, + "initialDelaySeconds": 0, + "periodSeconds": 5, + "successThreshold": 1, + "timeoutSeconds": 5 + }, + "replicaCount": 1, + "resources": {}, + "service": { + "annotations": {}, + "externalIPs": [], + "loadBalancerSourceRanges": [], + "servicePort": 80, + "type": "ClusterIP" + }, + "serviceAccount": { + "automountServiceAccountToken": true, + "create": true, + "name": "" + }, + "tolerations": [], + "topologySpreadConstraints": [], + "updateStrategy": {} + }, + "dhParam": "", + "imagePullSecrets": [], + "namespaceOverride": "", + "podSecurityPolicy": { + "enabled": false + }, + "portNamePrefix": "", + "rbac": { + "create": true, + "scope": false + }, + "revisionHistoryLimit": 10, + "serviceAccount": { + "annotations": {}, + "automountServiceAccountToken": true, + "create": true, + "name": "" + }, + "tcp": {}, + 
"udp": {} + } + } + ] + capabilities: Basic Install + createdAt: "2025-02-12T09:00:14Z" + operators.operatorframework.io/builder: operator-sdk-v1.39.1 + operators.operatorframework.io/project_layout: helm.sdk.operatorframework.io/v1 + apiServiceDefinitions: {} + crdDescriptions: + owned: + - kind: IngressNginx + name: ingressnginxes.metalk8s.scality.com + version: v1alpha1 + description: Operator Manages Kubernetes Nginx Controllers + displayName: nginx-operator + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - operator + - scality + - metalk8s + - nginx + - kubernetes + links: + - name: Nginx Operator + url: https://nginx-operator.domain + maintainers: + - email: ayoub.nasr@scality.com + name: Ayoub Nasr + maturity: alpha + provider: + name: scality + url: scality.com +relatedImages: +- image: registry.metalk8s.lan/metalk8s-MK8S_VERSION_STUB/nginx-operator-bundle:v4.11.3 + name: "" +- image: registry.metalk8s.lan/metalk8s-MK8S_VERSION_STUB/nginx-operator:v4.11.3 + name: "" +schema: olm.bundle diff --git a/catalog-source/catalog/nginx-operator/channels/stable.yaml b/catalog-source/catalog/nginx-operator/channels/stable.yaml new file mode 100644 index 0000000000..3fbc9d716e --- /dev/null +++ b/catalog-source/catalog/nginx-operator/channels/stable.yaml @@ -0,0 +1,6 @@ +--- +schema: olm.channel +package: nginx-operator +name: stable +entries: + - name: nginx-operator.v4.11.3 diff --git a/catalog-source/catalog/nginx-operator/package.yaml b/catalog-source/catalog/nginx-operator/package.yaml new file mode 100644 index 0000000000..702ca4a14f --- /dev/null +++ b/catalog-source/catalog/nginx-operator/package.yaml @@ -0,0 +1,8 @@ +--- +defaultChannel: stable +description: | + # NGINX-operator + + This operator manages IngressNginx CRs and uses them as a values file for the 
[ingress-nginx](https://github.com/kubernetes/ingress-nginx) helm chart. +name: nginx-operator +schema: olm.package diff --git a/charts/ingress-nginx-control-plane.yaml b/charts/ingress-nginx-control-plane.yaml deleted file mode 100644 index 42c22d889c..0000000000 --- a/charts/ingress-nginx-control-plane.yaml +++ /dev/null @@ -1,60 +0,0 @@ -controller: - allowSnippetAnnotations: true - - image: - digest: null - repository: __image__(nginx-ingress-controller) - - electionID: ingress-control-plane-controller-leader - - ingressClassResource: - name: nginx-control-plane - controllerValue: "k8s.io/ingress-nginx-control-plane" - - ingressClass: nginx-control-plane - - admissionWebhooks: - enabled: false - - kind: DaemonSet - - updateStrategy: - type: RollingUpdate - - tolerations: - - key: "node-role.kubernetes.io/bootstrap" - operator: "Exists" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/infra" - operator: "Exists" - effect: "NoSchedule" - - nodeSelector: - node-role.kubernetes.io/master: '' - - service: - type: ClusterIP - - externalIPs: '__var_tojson__(salt.metalk8s_network.get_control_plane_ingress_external_ips())' - - enableHttp: false - - ports: - https: 8443 - - extraArgs: - default-ssl-certificate: "metalk8s-ingress/ingress-control-plane-default-certificate" - metrics-per-host: false - - metrics: - enabled: true - serviceMonitor: - enabled: true - additionalLabels: - metalk8s.scality.com/monitor: '' - -defaultBackend: - enabled: false diff --git a/charts/ingress-nginx.yaml b/charts/ingress-nginx.yaml deleted file mode 100644 index 16faf24c84..0000000000 --- a/charts/ingress-nginx.yaml +++ /dev/null @@ -1,45 +0,0 @@ -controller: - allowSnippetAnnotations: true - - image: - digest: null - repository: __image__(nginx-ingress-controller) - - hostPort: - enabled: true - - ingressClassResource: - default: true - - watchIngressWithoutClass: true - - admissionWebhooks: - 
enabled: false - - kind: DaemonSet - - tolerations: - - key: "node-role.kubernetes.io/bootstrap" - operator: "Exists" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/infra" - operator: "Exists" - effect: "NoSchedule" - - service: - type: ClusterIP - - extraArgs: - default-backend-service: metalk8s-ui/metalk8s-ui - default-ssl-certificate: "metalk8s-ingress/ingress-workload-plane-default-certificate" - metrics-per-host: false - - metrics: - enabled: true - serviceMonitor: - enabled: true - additionalLabels: - metalk8s.scality.com/monitor: '' - -defaultBackend: - enabled: false diff --git a/nginx-operator/.gitignore b/nginx-operator/.gitignore new file mode 100644 index 0000000000..62fd3e3995 --- /dev/null +++ b/nginx-operator/.gitignore @@ -0,0 +1,14 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ diff --git a/nginx-operator/BUMPING.md b/nginx-operator/BUMPING.md new file mode 100644 index 0000000000..9a2deb218d --- /dev/null +++ b/nginx-operator/BUMPING.md @@ -0,0 +1,26 @@ +## instructions + +### Update helm chart + +from within this directory: + +``` +VERSION=<...> #SEMVER VERSION without the v +rm -rf helm-charts/ingress-nginx +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx +helm repo update +helm fetch -d helm-charts --untar https://kubernetes.github.io/ingress-nginx/ingress-nginx --version $VERSION +git apply remove_configmap.patch +sed -i "s/^VERSION.*/VERSION ?= $VERSION/" Makefile +make bundle +git apply bundle.Dockerfile.patch +``` + +also change `NGINX_OPERATOR_VERSION` in `buildchain/buildchain/versions.py`. + +### About the patch + +The ConfigMaps are managed by salt. Previously, they were automatically deleted from the manifest +after generating the chart. +If we keep them, they interfere with the salt-generated configmap. +We don't have a way to disable the configmap generation, so we patch it in the chart. 
diff --git a/nginx-operator/Dockerfile b/nginx-operator/Dockerfile new file mode 100644 index 0000000000..31b25dfe46 --- /dev/null +++ b/nginx-operator/Dockerfile @@ -0,0 +1,7 @@ +# Build the manager binary +FROM quay.io/operator-framework/helm-operator:v1.39.1 + +ENV HOME=/opt/helm +COPY watches.yaml ${HOME}/watches.yaml +COPY helm-charts ${HOME}/helm-charts +WORKDIR ${HOME} diff --git a/nginx-operator/Makefile b/nginx-operator/Makefile new file mode 100644 index 0000000000..413af37239 --- /dev/null +++ b/nginx-operator/Makefile @@ -0,0 +1,230 @@ +# VERSION defines the project version for the bundle. +# Update this value when you upgrade the version of your project. +# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) +# +# We keep this aligned with the chart version +# Will also be set in versions.py +VERSION ?= 4.11.3 + +# CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif + +# DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. 
(E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. +# This variable is used to construct full image tags for bundle and catalog images. +# +# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both +# scality.com/nginx-operator-bundle:$VERSION and scality.com/nginx-operator-catalog:$VERSION. +IMAGE_TAG_BASE ?= scality.com/nginx-operator + +# BUNDLE_IMG defines the image:tag used for the bundle. +# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) + +# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command +BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + +# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests +# You can enable this value if you would like to use SHA Based Digests +# To enable set flag to true +USE_IMAGE_DIGESTS ?= false +ifeq ($(USE_IMAGE_DIGESTS), true) + BUNDLE_GEN_FLAGS += --use-image-digests +endif + +# Set the Operator SDK version to use. By default, what is installed on the system is used. +# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. 
+OPERATOR_SDK_VERSION ?= v1.39.1 + +# Image URL to use all building/pushing image targets +IMG ?= registry.metalk8s.lan/metalk8s-MK8S_VERSION_STUB/nginx-operator:v$(VERSION) + +.PHONY: all +all: docker-build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Build + +.PHONY: run +run: helm-operator ## Run against the configured Kubernetes cluster in ~/.kube/config + $(HELM_OPERATOR) run + +.PHONY: docker-build +docker-build: ## Build docker image with the manager. + docker build -t ${IMG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the manager. + docker push ${IMG} + +# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - able to use docker buildx . More info: https://docs.docker.com/build/buildx/ +# - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image for your registry (i.e. 
if you do not inform a valid value via IMG=> than the export will fail) +# To properly provided solutions that supports more than one platform you should use this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the manager for cross-platform support + - docker buildx create --name project-v3-builder + docker buildx use project-v3-builder + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile . + - docker buildx rm project-v3-builder + +##@ Deployment + +.PHONY: install +install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +.PHONY: uninstall +uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | kubectl delete -f - + +.PHONY: deploy +deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | kubectl apply -f - + +.PHONY: undeploy +undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/default | kubectl delete -f - + +OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') +ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') + +.PHONY: kustomize +KUSTOMIZE = $(shell pwd)/bin/kustomize +kustomize: ## Download kustomize locally if necessary. 
+ifeq (,$(wildcard $(KUSTOMIZE))) +ifeq (,$(shell which kustomize 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(KUSTOMIZE)) ;\ + curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v5.4.3/kustomize_v5.4.3_$(OS)_$(ARCH).tar.gz | \ + tar xzf - -C bin/ ;\ + } +else +KUSTOMIZE = $(shell which kustomize) +endif +endif + +.PHONY: helm-operator +HELM_OPERATOR = $(shell pwd)/bin/helm-operator +helm-operator: ## Download helm-operator locally if necessary, preferring the $(pwd)/bin path over global if both exist. +ifeq (,$(wildcard $(HELM_OPERATOR))) +ifeq (,$(shell which helm-operator 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(HELM_OPERATOR)) ;\ + curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.39.1/helm-operator_$(OS)_$(ARCH) ;\ + chmod +x $(HELM_OPERATOR) ;\ + } +else +HELM_OPERATOR = $(shell which helm-operator) +endif +endif + +.PHONY: operator-sdk +OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk +operator-sdk: ## Download operator-sdk locally if necessary. +ifeq (,$(wildcard $(OPERATOR_SDK))) +ifeq (, $(shell which operator-sdk 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPERATOR_SDK)) ;\ + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$(OS)_$(ARCH) ;\ + chmod +x $(OPERATOR_SDK) ;\ + } +else +OPERATOR_SDK = $(shell which operator-sdk) +endif +endif + +.PHONY: bundle +bundle: kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. + $(OPERATOR_SDK) generate kustomize manifests -q + cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) + $(OPERATOR_SDK) bundle validate ./bundle + +.PHONY: bundle-build +bundle-build: ## Build the bundle image. + docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . 
+ +.PHONY: bundle-push +bundle-push: ## Push the bundle image. + $(MAKE) docker-push IMG=$(BUNDLE_IMG) + +.PHONY: opm +OPM = $(LOCALBIN)/opm +opm: ## Download opm locally if necessary. +ifeq (,$(wildcard $(OPM))) +ifeq (,$(shell which opm 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPM)) ;\ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$(OS)-$(ARCH)-opm ;\ + chmod +x $(OPM) ;\ + } +else +OPM = $(shell which opm) +endif +endif + +# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). +# These images MUST exist in a registry and be pull-able. +BUNDLE_IMGS ?= $(BUNDLE_IMG) + +# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). +CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) + +# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. +ifneq ($(origin CATALOG_BASE_IMG), undefined) +FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) +endif + +# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. +# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: +# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator +.PHONY: catalog-build +catalog-build: opm ## Build a catalog image. + $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) + +# Push the catalog image. +.PHONY: catalog-push +catalog-push: ## Push a catalog image. + $(MAKE) docker-push IMG=$(CATALOG_IMG) diff --git a/nginx-operator/PROJECT b/nginx-operator/PROJECT new file mode 100644 index 0000000000..3450d68d55 --- /dev/null +++ b/nginx-operator/PROJECT @@ -0,0 +1,20 @@ +# Code generated by tool. 
DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html +domain: scality.com +layout: +- helm.sdk.operatorframework.io/v1 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: nginx-operator +resources: +- api: + crdVersion: v1 + namespaced: true + domain: scality.com + group: metalk8s + kind: IngressNginx + version: v1alpha1 +version: "3" diff --git a/nginx-operator/README.md b/nginx-operator/README.md new file mode 100644 index 0000000000..70618ed1cd --- /dev/null +++ b/nginx-operator/README.md @@ -0,0 +1,3 @@ +# NGINX-operator + +This operator manages IngressNginx CRs and uses them as a values file for the [ingress-nginx](https://github.com/kubernetes/ingress-nginx) helm chart. diff --git a/nginx-operator/bundle.Dockerfile b/nginx-operator/bundle.Dockerfile new file mode 100644 index 0000000000..3faedffc23 --- /dev/null +++ b/nginx-operator/bundle.Dockerfile @@ -0,0 +1,32 @@ +ARG BUILDER_IMG=alpine:latest +FROM ${BUILDER_IMG} AS builder + +# Copy files to locations specified by labels. +COPY bundle/manifests /manifests/ +COPY bundle/metadata /metadata/ +COPY bundle/tests/scorecard /tests/scorecard/ + +# Replace MK8S_VERSION_STUB with the actual version. +ARG METALK8S_VERSION=MK8S_VERSION_STUB +RUN sed -i "s/MK8S_VERSION_STUB/${METALK8S_VERSION}/g" /manifests/nginx-operator.clusterserviceversion.yaml + +FROM scratch + +# Core bundle labels. 
+LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1=nginx-operator +LABEL operators.operatorframework.io.bundle.channels.v1=alpha +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.39.1 +LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 +LABEL operators.operatorframework.io.metrics.project_layout=helm.sdk.operatorframework.io/v1 + +# Labels for testing. +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ + +# Copy files from builder to locations specified by labels. +COPY --from=builder /manifests /manifests/ +COPY --from=builder /metadata /metadata/ +COPY --from=builder /tests/scorecard /tests/scorecard/ diff --git a/nginx-operator/bundle.Dockerfile.patch b/nginx-operator/bundle.Dockerfile.patch new file mode 100644 index 0000000000..d79bc5e7c5 --- /dev/null +++ b/nginx-operator/bundle.Dockerfile.patch @@ -0,0 +1,32 @@ +diff --git a/nginx-operator/bundle.Dockerfile b/nginx-operator/bundle.Dockerfile +index 132db509b..3faedffc2 100644 +--- a/nginx-operator/bundle.Dockerfile ++++ b/nginx-operator/bundle.Dockerfile +@@ -1,3 +1,15 @@ ++ARG BUILDER_IMG=alpine:latest ++FROM ${BUILDER_IMG} AS builder ++ ++# Copy files to locations specified by labels. ++COPY bundle/manifests /manifests/ ++COPY bundle/metadata /metadata/ ++COPY bundle/tests/scorecard /tests/scorecard/ ++ ++# Replace MK8S_VERSION_STUB with the actual version. ++ARG METALK8S_VERSION=MK8S_VERSION_STUB ++RUN sed -i "s/MK8S_VERSION_STUB/${METALK8S_VERSION}/g" /manifests/nginx-operator.clusterserviceversion.yaml ++ + FROM scratch + + # Core bundle labels. 
+@@ -14,7 +26,7 @@ LABEL operators.operatorframework.io.metrics.project_layout=helm.sdk.operatorfra + LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 + LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ + +-# Copy files to locations specified by labels. +-COPY bundle/manifests /manifests/ +-COPY bundle/metadata /metadata/ +-COPY bundle/tests/scorecard /tests/scorecard/ ++# Copy files from builder to locations specified by labels. ++COPY --from=builder /manifests /manifests/ ++COPY --from=builder /metadata /metadata/ ++COPY --from=builder /tests/scorecard /tests/scorecard/ diff --git a/nginx-operator/bundle/manifests/metalk8s.scality.com_ingressnginxes.yaml b/nginx-operator/bundle/manifests/metalk8s.scality.com_ingressnginxes.yaml new file mode 100644 index 0000000000..b39ea6560d --- /dev/null +++ b/nginx-operator/bundle/manifests/metalk8s.scality.com_ingressnginxes.yaml @@ -0,0 +1,50 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: ingressnginxes.metalk8s.scality.com +spec: + group: metalk8s.scality.com + names: + kind: IngressNginx + listKind: IngressNginxList + plural: ingressnginxes + singular: ingressnginx + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: IngressNginx is the Schema for the ingressnginxes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of IngressNginx + type: object + x-kubernetes-preserve-unknown-fields: true + status: + description: Status defines the observed state of IngressNginx + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/nginx-operator/bundle/manifests/nginx-operator-controller-manager-metrics-service_v1_service.yaml b/nginx-operator/bundle/manifests/nginx-operator-controller-manager-metrics-service_v1_service.yaml new file mode 100644 index 0000000000..e4d4306533 --- /dev/null +++ b/nginx-operator/bundle/manifests/nginx-operator-controller-manager-metrics-service_v1_service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: nginx-operator + control-plane: controller-manager + name: nginx-operator-controller-manager-metrics-service +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager +status: + loadBalancer: {} diff --git a/nginx-operator/bundle/manifests/nginx-operator-ingressnginx-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml b/nginx-operator/bundle/manifests/nginx-operator-ingressnginx-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 0000000000..7a04755d53 --- /dev/null +++ b/nginx-operator/bundle/manifests/nginx-operator-ingressnginx-editor-role_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,27 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/managed-by: kustomize + 
app.kubernetes.io/name: nginx-operator + name: nginx-operator-ingressnginx-editor-role +rules: +- apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes/status + verbs: + - get diff --git a/nginx-operator/bundle/manifests/nginx-operator-ingressnginx-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml b/nginx-operator/bundle/manifests/nginx-operator-ingressnginx-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 0000000000..17b8174c65 --- /dev/null +++ b/nginx-operator/bundle/manifests/nginx-operator-ingressnginx-viewer-role_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,23 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: nginx-operator + name: nginx-operator-ingressnginx-viewer-role +rules: +- apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes + verbs: + - get + - list + - watch +- apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes/status + verbs: + - get diff --git a/nginx-operator/bundle/manifests/nginx-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/nginx-operator/bundle/manifests/nginx-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml new file mode 100644 index 0000000000..ecd01b7be2 --- /dev/null +++ b/nginx-operator/bundle/manifests/nginx-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: nginx-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get diff --git a/nginx-operator/bundle/manifests/nginx-operator.clusterserviceversion.yaml 
b/nginx-operator/bundle/manifests/nginx-operator.clusterserviceversion.yaml new file mode 100644 index 0000000000..8db2842c38 --- /dev/null +++ b/nginx-operator/bundle/manifests/nginx-operator.clusterserviceversion.yaml @@ -0,0 +1,765 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "metalk8s.scality.com/v1alpha1", + "kind": "IngressNginx", + "metadata": { + "name": "ingressnginx-sample" + }, + "spec": { + "commonLabels": {}, + "controller": { + "addHeaders": {}, + "admissionWebhooks": { + "annotations": {}, + "certManager": { + "admissionCert": { + "duration": "" + }, + "enabled": false, + "rootCert": { + "duration": "" + } + }, + "certificate": "/usr/local/certificates/cert", + "createSecretJob": { + "name": "create", + "resources": {}, + "securityContext": { + "allowPrivilegeEscalation": false, + "capabilities": { + "drop": [ + "ALL" + ] + }, + "readOnlyRootFilesystem": true, + "runAsNonRoot": true, + "runAsUser": 65532, + "seccompProfile": { + "type": "RuntimeDefault" + } + } + }, + "enabled": true, + "existingPsp": "", + "extraEnvs": [], + "failurePolicy": "Fail", + "key": "/usr/local/certificates/key", + "labels": {}, + "name": "admission", + "namespaceSelector": {}, + "objectSelector": {}, + "patch": { + "enabled": true, + "image": { + "digest": "sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f", + "image": "ingress-nginx/kube-webhook-certgen", + "pullPolicy": "IfNotPresent", + "registry": "registry.k8s.io", + "tag": "v1.4.4" + }, + "labels": {}, + "networkPolicy": { + "enabled": false + }, + "nodeSelector": { + "kubernetes.io/os": "linux" + }, + "podAnnotations": {}, + "priorityClassName": "", + "rbac": { + "create": true + }, + "securityContext": {}, + "serviceAccount": { + "automountServiceAccountToken": true, + "create": true, + "name": "" + }, + "tolerations": [] + }, + "patchWebhookJob": { + "name": "patch", + "resources": {}, + 
"securityContext": { + "allowPrivilegeEscalation": false, + "capabilities": { + "drop": [ + "ALL" + ] + }, + "readOnlyRootFilesystem": true, + "runAsNonRoot": true, + "runAsUser": 65532, + "seccompProfile": { + "type": "RuntimeDefault" + } + } + }, + "port": 8443, + "service": { + "annotations": {}, + "externalIPs": [], + "loadBalancerSourceRanges": [], + "servicePort": 443, + "type": "ClusterIP" + } + }, + "affinity": {}, + "allowSnippetAnnotations": false, + "annotations": {}, + "autoscaling": { + "annotations": {}, + "behavior": {}, + "enabled": false, + "maxReplicas": 11, + "minReplicas": 1, + "targetCPUUtilizationPercentage": 50, + "targetMemoryUtilizationPercentage": 50 + }, + "autoscalingTemplate": [], + "config": {}, + "configAnnotations": {}, + "configMapNamespace": "", + "containerName": "controller", + "containerPort": { + "http": 80, + "https": 443 + }, + "containerSecurityContext": {}, + "customTemplate": { + "configMapKey": "", + "configMapName": "" + }, + "disableLeaderElection": false, + "dnsConfig": {}, + "dnsPolicy": "ClusterFirst", + "electionID": "", + "electionTTL": "", + "enableAnnotationValidations": false, + "enableMimalloc": true, + "enableTopologyAwareRouting": false, + "existingPsp": "", + "extraArgs": {}, + "extraContainers": [], + "extraEnvs": [], + "extraInitContainers": [], + "extraModules": [], + "extraVolumeMounts": [], + "extraVolumes": [], + "healthCheckHost": "", + "healthCheckPath": "/healthz", + "hostAliases": [], + "hostNetwork": false, + "hostPort": { + "enabled": false, + "ports": { + "http": 80, + "https": 443 + } + }, + "hostname": {}, + "image": { + "allowPrivilegeEscalation": false, + "chroot": false, + "digest": "sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7", + "digestChroot": "sha256:22701f0fc0f2dd209ef782f4e281bfe2d8cccd50ededa00aec88e0cdbe7edd14", + "image": "ingress-nginx/controller", + "pullPolicy": "IfNotPresent", + "readOnlyRootFilesystem": false, + "registry": "registry.k8s.io", + 
"runAsNonRoot": true, + "runAsUser": 101, + "seccompProfile": { + "type": "RuntimeDefault" + }, + "tag": "v1.11.3" + }, + "ingressClass": "nginx", + "ingressClassByName": false, + "ingressClassResource": { + "aliases": [], + "annotations": {}, + "controllerValue": "k8s.io/ingress-nginx", + "default": false, + "enabled": true, + "name": "nginx", + "parameters": {} + }, + "keda": { + "apiVersion": "keda.sh/v1alpha1", + "behavior": {}, + "cooldownPeriod": 300, + "enabled": false, + "maxReplicas": 11, + "minReplicas": 1, + "pollingInterval": 30, + "restoreToOriginalReplicaCount": false, + "scaledObject": { + "annotations": {} + }, + "triggers": [] + }, + "kind": "Deployment", + "labels": {}, + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "/wait-shutdown" + ] + } + } + }, + "livenessProbe": { + "failureThreshold": 5, + "httpGet": { + "path": "/healthz", + "port": 10254, + "scheme": "HTTP" + }, + "initialDelaySeconds": 10, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 1 + }, + "maxmindLicenseKey": "", + "metrics": { + "enabled": false, + "port": 10254, + "portName": "metrics", + "prometheusRule": { + "additionalLabels": {}, + "enabled": false, + "rules": [] + }, + "service": { + "annotations": {}, + "externalIPs": [], + "labels": {}, + "loadBalancerSourceRanges": [], + "servicePort": 10254, + "type": "ClusterIP" + }, + "serviceMonitor": { + "additionalLabels": {}, + "annotations": {}, + "enabled": false, + "metricRelabelings": [], + "namespace": "", + "namespaceSelector": {}, + "relabelings": [], + "scrapeInterval": "30s", + "targetLabels": [] + } + }, + "minAvailable": 1, + "minReadySeconds": 0, + "name": "controller", + "networkPolicy": { + "enabled": false + }, + "nodeSelector": { + "kubernetes.io/os": "linux" + }, + "opentelemetry": { + "containerSecurityContext": { + "allowPrivilegeEscalation": false, + "capabilities": { + "drop": [ + "ALL" + ] + }, + "readOnlyRootFilesystem": true, + "runAsNonRoot": true, + "runAsUser": 65532, + 
"seccompProfile": { + "type": "RuntimeDefault" + } + }, + "enabled": false, + "image": { + "digest": "sha256:f7604ac0547ed64d79b98d92133234e66c2c8aade3c1f4809fed5eec1fb7f922", + "distroless": true, + "image": "ingress-nginx/opentelemetry-1.25.3", + "registry": "registry.k8s.io", + "tag": "v20240813-b933310d" + }, + "name": "opentelemetry", + "resources": {} + }, + "podAnnotations": {}, + "podLabels": {}, + "podSecurityContext": {}, + "priorityClassName": "", + "proxySetHeaders": {}, + "publishService": { + "enabled": true, + "pathOverride": "" + }, + "readinessProbe": { + "failureThreshold": 3, + "httpGet": { + "path": "/healthz", + "port": 10254, + "scheme": "HTTP" + }, + "initialDelaySeconds": 10, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 1 + }, + "replicaCount": 1, + "reportNodeInternalIp": false, + "resources": { + "requests": { + "cpu": "100m", + "memory": "90Mi" + } + }, + "scope": { + "enabled": false, + "namespace": "", + "namespaceSelector": "" + }, + "service": { + "annotations": {}, + "appProtocol": true, + "clusterIP": "", + "enableHttp": true, + "enableHttps": true, + "enabled": true, + "external": { + "enabled": true + }, + "externalIPs": [], + "externalTrafficPolicy": "", + "internal": { + "annotations": {}, + "appProtocol": true, + "clusterIP": "", + "enabled": false, + "externalIPs": [], + "externalTrafficPolicy": "", + "ipFamilies": [ + "IPv4" + ], + "ipFamilyPolicy": "SingleStack", + "loadBalancerClass": "", + "loadBalancerIP": "", + "loadBalancerSourceRanges": [], + "nodePorts": { + "http": "", + "https": "", + "tcp": {}, + "udp": {} + }, + "ports": {}, + "sessionAffinity": "", + "targetPorts": {}, + "type": "" + }, + "ipFamilies": [ + "IPv4" + ], + "ipFamilyPolicy": "SingleStack", + "labels": {}, + "loadBalancerClass": "", + "loadBalancerIP": "", + "loadBalancerSourceRanges": [], + "nodePorts": { + "http": "", + "https": "", + "tcp": {}, + "udp": {} + }, + "ports": { + "http": 80, + "https": 443 + }, + 
"sessionAffinity": "", + "targetPorts": { + "http": "http", + "https": "https" + }, + "type": "LoadBalancer" + }, + "shareProcessNamespace": false, + "sysctls": {}, + "tcp": { + "annotations": {}, + "configMapNamespace": "" + }, + "terminationGracePeriodSeconds": 300, + "tolerations": [], + "topologySpreadConstraints": [], + "udp": { + "annotations": {}, + "configMapNamespace": "" + }, + "updateStrategy": {}, + "watchIngressWithoutClass": false + }, + "defaultBackend": { + "affinity": {}, + "autoscaling": { + "annotations": {}, + "enabled": false, + "maxReplicas": 2, + "minReplicas": 1, + "targetCPUUtilizationPercentage": 50, + "targetMemoryUtilizationPercentage": 50 + }, + "containerSecurityContext": {}, + "enabled": false, + "existingPsp": "", + "extraArgs": {}, + "extraConfigMaps": [], + "extraEnvs": [], + "extraVolumeMounts": [], + "extraVolumes": [], + "image": { + "allowPrivilegeEscalation": false, + "image": "defaultbackend-amd64", + "pullPolicy": "IfNotPresent", + "readOnlyRootFilesystem": true, + "registry": "registry.k8s.io", + "runAsNonRoot": true, + "runAsUser": 65534, + "seccompProfile": { + "type": "RuntimeDefault" + }, + "tag": "1.5" + }, + "labels": {}, + "livenessProbe": { + "failureThreshold": 3, + "initialDelaySeconds": 30, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 5 + }, + "minAvailable": 1, + "minReadySeconds": 0, + "name": "defaultbackend", + "networkPolicy": { + "enabled": false + }, + "nodeSelector": { + "kubernetes.io/os": "linux" + }, + "podAnnotations": {}, + "podLabels": {}, + "podSecurityContext": {}, + "port": 8080, + "priorityClassName": "", + "readinessProbe": { + "failureThreshold": 6, + "initialDelaySeconds": 0, + "periodSeconds": 5, + "successThreshold": 1, + "timeoutSeconds": 5 + }, + "replicaCount": 1, + "resources": {}, + "service": { + "annotations": {}, + "externalIPs": [], + "loadBalancerSourceRanges": [], + "servicePort": 80, + "type": "ClusterIP" + }, + "serviceAccount": { + 
"automountServiceAccountToken": true, + "create": true, + "name": "" + }, + "tolerations": [], + "topologySpreadConstraints": [], + "updateStrategy": {} + }, + "dhParam": "", + "imagePullSecrets": [], + "namespaceOverride": "", + "podSecurityPolicy": { + "enabled": false + }, + "portNamePrefix": "", + "rbac": { + "create": true, + "scope": false + }, + "revisionHistoryLimit": 10, + "serviceAccount": { + "annotations": {}, + "automountServiceAccountToken": true, + "create": true, + "name": "" + }, + "tcp": {}, + "udp": {} + } + } + ] + capabilities: Basic Install + createdAt: "2025-02-13T16:39:21Z" + operators.operatorframework.io/builder: operator-sdk-v1.39.1 + operators.operatorframework.io/project_layout: helm.sdk.operatorframework.io/v1 + name: nginx-operator.v4.11.3 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - kind: IngressNginx + name: ingressnginxes.metalk8s.scality.com + version: v1alpha1 + description: Operator Manages Kubernetes Nginx Controllers + displayName: nginx-operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + clusterPermissions: + - rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - secrets + verbs: + - '*' + - apiGroups: + - "" + resources: + - events + verbs: + - create + - apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes + - ingressnginxes/status + - ingressnginxes/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - pods + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - 
authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - '*' + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - '*' + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - '*' + - apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + - clusterroles + - clusterrolebindings + verbs: + - '*' + serviceAccountName: nginx-operator-controller-manager + deployments: + - label: + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: nginx-operator + control-plane: controller-manager + name: nginx-operator-controller-manager + spec: + replicas: 1 + selector: + matchLabels: + control-plane: controller-manager + strategy: {} + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + containers: + - args: + - --metrics-require-rbac + - --metrics-secure + - --metrics-bind-address=:8443 + - --leader-elect + - --leader-election-id=nginx-operator + - --health-probe-bind-address=:8081 + image: registry.metalk8s.lan/metalk8s-MK8S_VERSION_STUB/nginx-operator:v4.11.3 + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/infra: "" + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: nginx-operator-controller-manager + terminationGracePeriodSeconds: 10 + tolerations: + - 
effect: NoSchedule + key: node-role.kubernetes.io/bootstrap + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists + permissions: + - rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: nginx-operator-controller-manager + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - operator + - scality + - metalk8s + - nginx + - kubernetes + links: + - name: Nginx Operator + url: https://nginx-operator.domain + maintainers: + - email: ayoub.nasr@scality.com + name: Ayoub Nasr + maturity: alpha + provider: + name: scality + url: scality.com + version: 4.11.3 diff --git a/nginx-operator/bundle/metadata/annotations.yaml b/nginx-operator/bundle/metadata/annotations.yaml new file mode 100644 index 0000000000..e2b9d6b397 --- /dev/null +++ b/nginx-operator/bundle/metadata/annotations.yaml @@ -0,0 +1,14 @@ +annotations: + # Core bundle annotations. + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: nginx-operator + operators.operatorframework.io.bundle.channels.v1: alpha + operators.operatorframework.io.metrics.builder: operator-sdk-v1.39.1 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: helm.sdk.operatorframework.io/v1 + + # Annotations for testing. 
+ operators.operatorframework.io.test.mediatype.v1: scorecard+v1 + operators.operatorframework.io.test.config.v1: tests/scorecard/ diff --git a/nginx-operator/bundle/tests/scorecard/config.yaml b/nginx-operator/bundle/tests/scorecard/config.yaml new file mode 100644 index 0000000000..924ca62431 --- /dev/null +++ b/nginx-operator/bundle/tests/scorecard/config.yaml @@ -0,0 +1,70 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: basic + test: basic-check-spec-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-bundle-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-crds-have-validation-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-crds-have-resources-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-spec-descriptors-test + storage: + spec: + mountPath: {} + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-status-descriptors-test + storage: + spec: + mountPath: {} +storage: + spec: + mountPath: {} diff --git a/nginx-operator/config/crd/bases/metalk8s.scality.com_ingressnginxes.yaml b/nginx-operator/config/crd/bases/metalk8s.scality.com_ingressnginxes.yaml new 
file mode 100644 index 0000000000..23ede4b3ee --- /dev/null +++ b/nginx-operator/config/crd/bases/metalk8s.scality.com_ingressnginxes.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ingressnginxes.metalk8s.scality.com +spec: + group: metalk8s.scality.com + names: + kind: IngressNginx + listKind: IngressNginxList + plural: ingressnginxes + singular: ingressnginx + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: IngressNginx is the Schema for the ingressnginxes API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired state of IngressNginx + type: object + x-kubernetes-preserve-unknown-fields: true + status: + description: Status defines the observed state of IngressNginx + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: + status: {} diff --git a/nginx-operator/config/crd/kustomization.yaml b/nginx-operator/config/crd/kustomization.yaml new file mode 100644 index 0000000000..c4dccd218b --- /dev/null +++ b/nginx-operator/config/crd/kustomization.yaml @@ -0,0 +1,6 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/metalk8s.scality.com_ingressnginxes.yaml +# +kubebuilder:scaffold:crdkustomizeresource diff --git a/nginx-operator/config/default/kustomization.yaml b/nginx-operator/config/default/kustomization.yaml new file mode 100644 index 0000000000..87fe8bdcdb --- /dev/null +++ b/nginx-operator/config/default/kustomization.yaml @@ -0,0 +1,37 @@ +# Adds namespace to all resources. +namespace: nginx-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: nginx-operator- + +# Labels to add to all resources and selectors. +#labels: +#- includeSelectors: true +# pairs: +# someName: someValue + +resources: +- ../crd +- ../rbac +- ../manager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus +# [METRICS] Expose the controller manager metrics service. 
+- metrics_service.yaml +# [NETWORK POLICY] Protect the /metrics endpoint and Webhook Server with NetworkPolicy. +# Only Pod(s) running a namespace labeled with 'metrics: enabled' will be able to gather the metrics. +# Only CR(s) which requires webhooks and are applied on namespaces labeled with 'webhooks: enabled' will +# be able to communicate with the Webhook Server. +#- ../network-policy + +# Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager +patches: +# [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443. +# More info: https://book.kubebuilder.io/reference/metrics +- path: manager_metrics_patch.yaml + target: + kind: Deployment diff --git a/nginx-operator/config/default/manager_metrics_patch.yaml b/nginx-operator/config/default/manager_metrics_patch.yaml new file mode 100644 index 0000000000..a3cb2f1865 --- /dev/null +++ b/nginx-operator/config/default/manager_metrics_patch.yaml @@ -0,0 +1,12 @@ +# This patch adds the args to allow exposing the metrics endpoint using HTTPS +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-bind-address=:8443 +# This patch adds the args to allow securing the metrics endpoint +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-secure +# This patch adds the args to allow RBAC-based authn/authz the metrics endpoint +- op: add + path: /spec/template/spec/containers/0/args/0 + value: --metrics-require-rbac diff --git a/nginx-operator/config/default/metrics_service.yaml b/nginx-operator/config/default/metrics_service.yaml new file mode 100644 index 0000000000..7032beceed --- /dev/null +++ b/nginx-operator/config/default/metrics_service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + 
ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager diff --git a/nginx-operator/config/manager/kustomization.yaml b/nginx-operator/config/manager/kustomization.yaml new file mode 100644 index 0000000000..0e62316ad2 --- /dev/null +++ b/nginx-operator/config/manager/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- manager.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: registry.metalk8s.lan/metalk8s-MK8S_VERSION_STUB/nginx-operator + newTag: v4.11.3 diff --git a/nginx-operator/config/manager/manager.yaml b/nginx-operator/config/manager/manager.yaml new file mode 100644 index 0000000000..69fcba60d6 --- /dev/null +++ b/nginx-operator/config/manager/manager.yaml @@ -0,0 +1,104 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. 
+ # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). + seccompProfile: + type: RuntimeDefault + containers: + - args: + - --leader-elect + - --leader-election-id=nginx-operator + - --health-probe-bind-address=:8081 + image: controller:latest + name: manager + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. 
+ # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/infra: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/bootstrap + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists diff --git a/nginx-operator/config/manifests/bases/nginx-operator.clusterserviceversion.yaml b/nginx-operator/config/manifests/bases/nginx-operator.clusterserviceversion.yaml new file mode 100644 index 0000000000..1df205c97f --- /dev/null +++ b/nginx-operator/config/manifests/bases/nginx-operator.clusterserviceversion.yaml @@ -0,0 +1,46 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: '[]' + capabilities: Basic Install + name: nginx-operator.v0.0.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: {} + description: Operator Manages Kubernetes Nginx Controllers + displayName: nginx-operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + deployments: null + strategy: "" + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - operator + - scality + - metalk8s + - nginx + - kubernetes + links: + - name: Nginx Operator + url: https://nginx-operator.domain + maintainers: + - email: ayoub.nasr@scality.com + name: Ayoub Nasr + maturity: alpha + provider: + name: scality + url: scality.com + version: 0.0.0 diff --git a/nginx-operator/config/manifests/kustomization.yaml b/nginx-operator/config/manifests/kustomization.yaml new file mode 100644 index 0000000000..39cb5ff796 --- /dev/null +++ 
b/nginx-operator/config/manifests/kustomization.yaml @@ -0,0 +1,7 @@ +# These resources constitute the fully configured set of manifests +# used to generate the 'manifests/' directory in a bundle. +resources: +- bases/nginx-operator.clusterserviceversion.yaml +- ../default +- ../samples +- ../scorecard diff --git a/nginx-operator/config/network-policy/allow-metrics-traffic.yaml b/nginx-operator/config/network-policy/allow-metrics-traffic.yaml new file mode 100644 index 0000000000..323090e29d --- /dev/null +++ b/nginx-operator/config/network-policy/allow-metrics-traffic.yaml @@ -0,0 +1,26 @@ +# This NetworkPolicy allows ingress traffic +# with Pods running on namespaces labeled with 'metrics: enabled'. Only Pods on those +# namespaces are able to gathering data from the metrics endpoint. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + labels: + app.kubernetes.io/name: memcached-operator + app.kubernetes.io/managed-by: kustomize + name: allow-metrics-traffic + namespace: system +spec: + podSelector: + matchLabels: + control-plane: controller-manager + policyTypes: + - Ingress + ingress: + # This allows ingress traffic from any namespace with the label metrics: enabled + - from: + - namespaceSelector: + matchLabels: + metrics: enabled # Only from namespaces with this label + ports: + - port: 8443 + protocol: TCP diff --git a/nginx-operator/config/network-policy/kustomization.yaml b/nginx-operator/config/network-policy/kustomization.yaml new file mode 100644 index 0000000000..ec0fb5e57d --- /dev/null +++ b/nginx-operator/config/network-policy/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- allow-metrics-traffic.yaml diff --git a/nginx-operator/config/prometheus/kustomization.yaml b/nginx-operator/config/prometheus/kustomization.yaml new file mode 100644 index 0000000000..ed137168a1 --- /dev/null +++ b/nginx-operator/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git 
a/nginx-operator/config/prometheus/monitor.yaml b/nginx-operator/config/prometheus/monitor.yaml new file mode 100644 index 0000000000..cff501f974 --- /dev/null +++ b/nginx-operator/config/prometheus/monitor.yaml @@ -0,0 +1,30 @@ +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https # Ensure this is the name of the port that exposes HTTPS metrics + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables + # certificate verification. This poses a significant security risk by making the system vulnerable to + # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between + # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data, + # compromising the integrity and confidentiality of the information. + # Please use the following options for secure configurations: + # caFile: /etc/metrics-certs/ca.crt + # certFile: /etc/metrics-certs/tls.crt + # keyFile: /etc/metrics-certs/tls.key + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/nginx-operator/config/rbac/ingressnginx_editor_role.yaml b/nginx-operator/config/rbac/ingressnginx_editor_role.yaml new file mode 100644 index 0000000000..0b25086af6 --- /dev/null +++ b/nginx-operator/config/rbac/ingressnginx_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit ingressnginxes. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize + name: ingressnginx-editor-role +rules: +- apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes/status + verbs: + - get diff --git a/nginx-operator/config/rbac/ingressnginx_viewer_role.yaml b/nginx-operator/config/rbac/ingressnginx_viewer_role.yaml new file mode 100644 index 0000000000..524bd43ed1 --- /dev/null +++ b/nginx-operator/config/rbac/ingressnginx_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view ingressnginxes. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize + name: ingressnginx-viewer-role +rules: +- apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes + verbs: + - get + - list + - watch +- apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes/status + verbs: + - get diff --git a/nginx-operator/config/rbac/kustomization.yaml b/nginx-operator/config/rbac/kustomization.yaml new file mode 100644 index 0000000000..b0f024cc89 --- /dev/null +++ b/nginx-operator/config/rbac/kustomization.yaml @@ -0,0 +1,36 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# The following RBAC configurations are used to protect +# the metrics endpoint with authn/authz. 
These configurations +# ensure that only authorized users and service accounts +# can access the metrics endpoint. Comment the following +# permissions if you want to disable this protection. +# More info: https://book.kubebuilder.io/reference/metrics.html +- metrics_auth_role.yaml +- metrics_auth_role_binding.yaml +- metrics_reader_role.yaml +# For each CRD, "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the Project itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. +- ingressnginx_editor_role.yaml +- ingressnginx_viewer_role.yaml +# These roles and role bindings help the operator create +# serviceaccounts, roles, rolebindings, clusterroles, clusterrolebindings +- rbac_role.yaml +- rbac_role_binding.yaml +# IngressClass +- network_role.yaml +- network_role_binding.yaml +# ServiceMonitor +- monitoring_role.yaml +- monitoring_role_binding.yaml diff --git a/nginx-operator/config/rbac/leader_election_role.yaml b/nginx-operator/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000000..5f17aa89d4 --- /dev/null +++ b/nginx-operator/config/rbac/leader_election_role.yaml @@ -0,0 +1,40 @@ +# permissions to do leader election. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/nginx-operator/config/rbac/leader_election_role_binding.yaml b/nginx-operator/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000000..160b5cbf90 --- /dev/null +++ b/nginx-operator/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/nginx-operator/config/rbac/metrics_auth_role.yaml b/nginx-operator/config/rbac/metrics_auth_role.yaml new file mode 100644 index 0000000000..32d2e4ec6b --- /dev/null +++ b/nginx-operator/config/rbac/metrics_auth_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-auth-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/nginx-operator/config/rbac/metrics_auth_role_binding.yaml b/nginx-operator/config/rbac/metrics_auth_role_binding.yaml new file mode 100644 index 0000000000..e775d67ff0 --- /dev/null +++ 
b/nginx-operator/config/rbac/metrics_auth_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metrics-auth-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-auth-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/nginx-operator/config/rbac/metrics_reader_role.yaml b/nginx-operator/config/rbac/metrics_reader_role.yaml new file mode 100644 index 0000000000..51a75db47a --- /dev/null +++ b/nginx-operator/config/rbac/metrics_reader_role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/nginx-operator/config/rbac/monitoring_role.yaml b/nginx-operator/config/rbac/monitoring_role.yaml new file mode 100644 index 0000000000..99f03bdade --- /dev/null +++ b/nginx-operator/config/rbac/monitoring_role.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nginx-operator-monitoring-role +rules: +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - "*" + diff --git a/nginx-operator/config/rbac/monitoring_role_binding.yaml b/nginx-operator/config/rbac/monitoring_role_binding.yaml new file mode 100644 index 0000000000..ff01ae5d6d --- /dev/null +++ b/nginx-operator/config/rbac/monitoring_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: nginx-operator-monitoring-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-operator-monitoring-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/nginx-operator/config/rbac/network_role.yaml b/nginx-operator/config/rbac/network_role.yaml new file mode 100644 index 0000000000..248fbae1df --- /dev/null +++ 
b/nginx-operator/config/rbac/network_role.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nginx-operator-networking-role +rules: +- apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - "*" + diff --git a/nginx-operator/config/rbac/network_role_binding.yaml b/nginx-operator/config/rbac/network_role_binding.yaml new file mode 100644 index 0000000000..f9dda4ef74 --- /dev/null +++ b/nginx-operator/config/rbac/network_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: nginx-operator-networking-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-operator-networking-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/nginx-operator/config/rbac/rbac_role.yaml b/nginx-operator/config/rbac/rbac_role.yaml new file mode 100644 index 0000000000..d1b2a2a8bc --- /dev/null +++ b/nginx-operator/config/rbac/rbac_role.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nginx-operator-rbac-role +rules: +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - "*" +- apiGroups: + - rbac.authorization.k8s.io + resources: + - roles + - rolebindings + - clusterroles + - clusterrolebindings + verbs: + - "*" + diff --git a/nginx-operator/config/rbac/rbac_role_binding.yaml b/nginx-operator/config/rbac/rbac_role_binding.yaml new file mode 100644 index 0000000000..b1d78fe567 --- /dev/null +++ b/nginx-operator/config/rbac/rbac_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: nginx-operator-rbac-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-operator-rbac-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git 
a/nginx-operator/config/rbac/role.yaml b/nginx-operator/config/rbac/role.yaml new file mode 100644 index 0000000000..5782063557 --- /dev/null +++ b/nginx-operator/config/rbac/role.yaml @@ -0,0 +1,83 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +## +## Base operator rules +## +# We need to get namespaces so the operator can read namespaces to ensure they exist +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +# We need to manage Helm release secrets +- apiGroups: + - "" + resources: + - secrets + verbs: + - "*" +# We need to create events on CRs about things happening during reconciliation +- apiGroups: + - "" + resources: + - events + verbs: + - create + +## +## Rules for metalk8s.scality.com/v1alpha1, Kind: IngressNginx +## +- apiGroups: + - metalk8s.scality.com + resources: + - ingressnginxes + - ingressnginxes/status + - ingressnginxes/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + +# +kubebuilder:scaffold:rules diff --git a/nginx-operator/config/rbac/role_binding.yaml b/nginx-operator/config/rbac/role_binding.yaml new file mode 100644 index 0000000000..40002fd6f7 --- /dev/null +++ b/nginx-operator/config/rbac/role_binding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role 
+subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/nginx-operator/config/rbac/service_account.yaml b/nginx-operator/config/rbac/service_account.yaml new file mode 100644 index 0000000000..c6c8adb986 --- /dev/null +++ b/nginx-operator/config/rbac/service_account.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: nginx-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/nginx-operator/config/samples/kustomization.yaml b/nginx-operator/config/samples/kustomization.yaml new file mode 100644 index 0000000000..4079730025 --- /dev/null +++ b/nginx-operator/config/samples/kustomization.yaml @@ -0,0 +1,4 @@ +## Append samples of your project ## +resources: +- metalk8s_v1alpha1_ingressnginx.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/nginx-operator/config/samples/metalk8s_v1alpha1_ingressnginx.yaml b/nginx-operator/config/samples/metalk8s_v1alpha1_ingressnginx.yaml new file mode 100644 index 0000000000..a9b20b7e2a --- /dev/null +++ b/nginx-operator/config/samples/metalk8s_v1alpha1_ingressnginx.yaml @@ -0,0 +1,419 @@ +apiVersion: metalk8s.scality.com/v1alpha1 +kind: IngressNginx +metadata: + name: ingressnginx-sample +spec: + # Default values copied from /helm-charts/ingress-nginx/values.yaml + commonLabels: {} + controller: + addHeaders: {} + admissionWebhooks: + annotations: {} + certManager: + admissionCert: + duration: "" + enabled: false + rootCert: + duration: "" + certificate: /usr/local/certificates/cert + createSecretJob: + name: create + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65532 + seccompProfile: + type: RuntimeDefault + enabled: true + existingPsp: "" + extraEnvs: [] + failurePolicy: Fail + key: /usr/local/certificates/key + labels: {} + name: admission + 
namespaceSelector: {} + objectSelector: {} + patch: + enabled: true + image: + digest: sha256:a9f03b34a3cbfbb26d103a14046ab2c5130a80c3d69d526ff8063d2b37b9fd3f + image: ingress-nginx/kube-webhook-certgen + pullPolicy: IfNotPresent + registry: registry.k8s.io + tag: v1.4.4 + labels: {} + networkPolicy: + enabled: false + nodeSelector: + kubernetes.io/os: linux + podAnnotations: {} + priorityClassName: "" + rbac: + create: true + securityContext: {} + serviceAccount: + automountServiceAccountToken: true + create: true + name: "" + tolerations: [] + patchWebhookJob: + name: patch + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65532 + seccompProfile: + type: RuntimeDefault + port: 8443 + service: + annotations: {} + externalIPs: [] + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + affinity: {} + allowSnippetAnnotations: false + annotations: {} + autoscaling: + annotations: {} + behavior: {} + enabled: false + maxReplicas: 11 + minReplicas: 1 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + autoscalingTemplate: [] + config: {} + configAnnotations: {} + configMapNamespace: "" + containerName: controller + containerPort: + http: 80 + https: 443 + containerSecurityContext: {} + customTemplate: + configMapKey: "" + configMapName: "" + disableLeaderElection: false + dnsConfig: {} + dnsPolicy: ClusterFirst + electionID: "" + electionTTL: "" + enableAnnotationValidations: false + enableMimalloc: true + enableTopologyAwareRouting: false + existingPsp: "" + extraArgs: {} + extraContainers: [] + extraEnvs: [] + extraInitContainers: [] + extraModules: [] + extraVolumeMounts: [] + extraVolumes: [] + healthCheckHost: "" + healthCheckPath: /healthz + hostAliases: [] + hostNetwork: false + hostPort: + enabled: false + ports: + http: 80 + https: 443 + hostname: {} + image: + allowPrivilegeEscalation: false + chroot: 
false + digest: sha256:d56f135b6462cfc476447cfe564b83a45e8bb7da2774963b00d12161112270b7 + digestChroot: sha256:22701f0fc0f2dd209ef782f4e281bfe2d8cccd50ededa00aec88e0cdbe7edd14 + image: ingress-nginx/controller + pullPolicy: IfNotPresent + readOnlyRootFilesystem: false + registry: registry.k8s.io + runAsNonRoot: true + runAsUser: 101 + seccompProfile: + type: RuntimeDefault + tag: v1.11.3 + ingressClass: nginx + ingressClassByName: false + ingressClassResource: + aliases: [] + annotations: {} + controllerValue: k8s.io/ingress-nginx + default: false + enabled: true + name: nginx + parameters: {} + keda: + apiVersion: keda.sh/v1alpha1 + behavior: {} + cooldownPeriod: 300 + enabled: false + maxReplicas: 11 + minReplicas: 1 + pollingInterval: 30 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + triggers: [] + kind: Deployment + labels: {} + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + maxmindLicenseKey: "" + metrics: + enabled: false + port: 10254 + portName: metrics + prometheusRule: + additionalLabels: {} + enabled: false + rules: [] + service: + annotations: {} + externalIPs: [] + labels: {} + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + serviceMonitor: + additionalLabels: {} + annotations: {} + enabled: false + metricRelabelings: [] + namespace: "" + namespaceSelector: {} + relabelings: [] + scrapeInterval: 30s + targetLabels: [] + minAvailable: 1 + minReadySeconds: 0 + name: controller + networkPolicy: + enabled: false + nodeSelector: + kubernetes.io/os: linux + opentelemetry: + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65532 + seccompProfile: + type: RuntimeDefault + enabled: false + image: + digest: 
sha256:f7604ac0547ed64d79b98d92133234e66c2c8aade3c1f4809fed5eec1fb7f922 + distroless: true + image: ingress-nginx/opentelemetry-1.25.3 + registry: registry.k8s.io + tag: v20240813-b933310d + name: opentelemetry + resources: {} + podAnnotations: {} + podLabels: {} + podSecurityContext: {} + priorityClassName: "" + proxySetHeaders: {} + publishService: + enabled: true + pathOverride: "" + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + replicaCount: 1 + reportNodeInternalIp: false + resources: + requests: + cpu: 100m + memory: 90Mi + scope: + enabled: false + namespace: "" + namespaceSelector: "" + service: + annotations: {} + appProtocol: true + clusterIP: "" + enableHttp: true + enableHttps: true + enabled: true + external: + enabled: true + externalIPs: [] + externalTrafficPolicy: "" + internal: + annotations: {} + appProtocol: true + clusterIP: "" + enabled: false + externalIPs: [] + externalTrafficPolicy: "" + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + loadBalancerClass: "" + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + ports: {} + sessionAffinity: "" + targetPorts: {} + type: "" + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + labels: {} + loadBalancerClass: "" + loadBalancerIP: "" + loadBalancerSourceRanges: [] + nodePorts: + http: "" + https: "" + tcp: {} + udp: {} + ports: + http: 80 + https: 443 + sessionAffinity: "" + targetPorts: + http: http + https: https + type: LoadBalancer + shareProcessNamespace: false + sysctls: {} + tcp: + annotations: {} + configMapNamespace: "" + terminationGracePeriodSeconds: 300 + tolerations: [] + topologySpreadConstraints: [] + udp: + annotations: {} + configMapNamespace: "" + updateStrategy: {} + watchIngressWithoutClass: false + defaultBackend: + affinity: {} + autoscaling: + annotations: {} + enabled: false + 
maxReplicas: 2 + minReplicas: 1 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + containerSecurityContext: {} + enabled: false + existingPsp: "" + extraArgs: {} + extraConfigMaps: [] + extraEnvs: [] + extraVolumeMounts: [] + extraVolumes: [] + image: + allowPrivilegeEscalation: false + image: defaultbackend-amd64 + pullPolicy: IfNotPresent + readOnlyRootFilesystem: true + registry: registry.k8s.io + runAsNonRoot: true + runAsUser: 65534 + seccompProfile: + type: RuntimeDefault + tag: "1.5" + labels: {} + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + minAvailable: 1 + minReadySeconds: 0 + name: defaultbackend + networkPolicy: + enabled: false + nodeSelector: + kubernetes.io/os: linux + podAnnotations: {} + podLabels: {} + podSecurityContext: {} + port: 8080 + priorityClassName: "" + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + replicaCount: 1 + resources: {} + service: + annotations: {} + externalIPs: [] + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + serviceAccount: + automountServiceAccountToken: true + create: true + name: "" + tolerations: [] + topologySpreadConstraints: [] + updateStrategy: {} + dhParam: "" + imagePullSecrets: [] + namespaceOverride: "" + podSecurityPolicy: + enabled: false + portNamePrefix: "" + rbac: + create: true + scope: false + revisionHistoryLimit: 10 + serviceAccount: + annotations: {} + automountServiceAccountToken: true + create: true + name: "" + tcp: {} + udp: {} + + diff --git a/nginx-operator/config/scorecard/bases/config.yaml b/nginx-operator/config/scorecard/bases/config.yaml new file mode 100644 index 0000000000..c77047841e --- /dev/null +++ b/nginx-operator/config/scorecard/bases/config.yaml @@ -0,0 +1,7 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- 
parallel: true + tests: [] diff --git a/nginx-operator/config/scorecard/kustomization.yaml b/nginx-operator/config/scorecard/kustomization.yaml new file mode 100644 index 0000000000..54e8aa5075 --- /dev/null +++ b/nginx-operator/config/scorecard/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +- bases/config.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +patches: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + kind: Configuration + name: config + version: v1alpha3 +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + kind: Configuration + name: config + version: v1alpha3 +# +kubebuilder:scaffold:patches diff --git a/nginx-operator/config/scorecard/patches/basic.config.yaml b/nginx-operator/config/scorecard/patches/basic.config.yaml new file mode 100644 index 0000000000..b9ec7c6c82 --- /dev/null +++ b/nginx-operator/config/scorecard/patches/basic.config.yaml @@ -0,0 +1,10 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: basic + test: basic-check-spec-test diff --git a/nginx-operator/config/scorecard/patches/olm.config.yaml b/nginx-operator/config/scorecard/patches/olm.config.yaml new file mode 100644 index 0000000000..25d83f98f2 --- /dev/null +++ b/nginx-operator/config/scorecard/patches/olm.config.yaml @@ -0,0 +1,50 @@ +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - 
olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.39.1 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/charts/ingress-nginx/.helmignore b/nginx-operator/helm-charts/ingress-nginx/.helmignore similarity index 100% rename from charts/ingress-nginx/.helmignore rename to nginx-operator/helm-charts/ingress-nginx/.helmignore diff --git a/charts/ingress-nginx/Chart.yaml b/nginx-operator/helm-charts/ingress-nginx/Chart.yaml similarity index 100% rename from charts/ingress-nginx/Chart.yaml rename to nginx-operator/helm-charts/ingress-nginx/Chart.yaml diff --git a/charts/ingress-nginx/OWNERS b/nginx-operator/helm-charts/ingress-nginx/OWNERS similarity index 100% rename from charts/ingress-nginx/OWNERS rename to nginx-operator/helm-charts/ingress-nginx/OWNERS diff --git a/charts/ingress-nginx/README.md b/nginx-operator/helm-charts/ingress-nginx/README.md similarity index 100% rename from charts/ingress-nginx/README.md rename to nginx-operator/helm-charts/ingress-nginx/README.md diff --git a/charts/ingress-nginx/README.md.gotmpl b/nginx-operator/helm-charts/ingress-nginx/README.md.gotmpl similarity index 100% rename from charts/ingress-nginx/README.md.gotmpl rename to nginx-operator/helm-charts/ingress-nginx/README.md.gotmpl diff --git a/charts/ingress-nginx/changelog/helm-chart-2.10.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.10.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.10.0.md rename to 
nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.10.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.11.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.11.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.11.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.11.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.11.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.11.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.11.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.11.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.11.2.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.11.2.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.11.2.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.11.2.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.11.3.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.11.3.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.11.3.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.11.3.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.12.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.12.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.12.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.12.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.12.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.12.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.12.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.12.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.13.0.md 
b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.13.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.13.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.13.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.14.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.14.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.14.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.14.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.15.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.15.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.15.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.15.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.16.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.16.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.16.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.16.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.9.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.9.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.9.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.9.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-2.9.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.9.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-2.9.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-2.9.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.0.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.0.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.0.0.md rename 
to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.0.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.10.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.10.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.10.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.10.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.10.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.10.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.10.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.10.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.11.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.11.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.11.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.11.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.11.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.11.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.11.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.11.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.12.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.12.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.12.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.12.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.13.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.13.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.13.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.13.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.14.0.md 
b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.14.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.14.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.14.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.15.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.15.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.15.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.15.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.15.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.15.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.15.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.15.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.16.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.16.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.16.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.16.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.16.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.16.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.16.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.16.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.17.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.17.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.17.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.17.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.18.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.18.0.md similarity index 100% rename from 
charts/ingress-nginx/changelog/helm-chart-3.18.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.18.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.19.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.19.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.19.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.19.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.20.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.20.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.20.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.20.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.20.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.20.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.20.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.20.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.21.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.21.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.21.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.21.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.22.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.22.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.22.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.22.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.23.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.23.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.23.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.23.0.md diff --git 
a/charts/ingress-nginx/changelog/helm-chart-3.24.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.24.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.24.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.24.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.25.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.25.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.25.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.25.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.26.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.26.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.26.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.26.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.27.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.27.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.27.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.27.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.28.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.28.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.28.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.28.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.29.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.29.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.29.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.29.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.3.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.3.0.md similarity index 100% rename 
from charts/ingress-nginx/changelog/helm-chart-3.3.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.3.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.3.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.3.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.3.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.3.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.30.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.30.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.30.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.30.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.31.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.31.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.31.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.31.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.32.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.32.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.32.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.32.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.33.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.33.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.33.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.33.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.34.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.34.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.34.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.34.0.md diff --git 
a/charts/ingress-nginx/changelog/helm-chart-3.4.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.4.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.4.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.4.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.5.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.5.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.5.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.5.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.5.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.5.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.5.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.5.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.6.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.6.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.6.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.6.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.7.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.7.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.7.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.7.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.7.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.7.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.7.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.7.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.8.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.8.0.md similarity index 100% rename from 
charts/ingress-nginx/changelog/helm-chart-3.8.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.8.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-3.9.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.9.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-3.9.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-3.9.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.10.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.10.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.10.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.10.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.11.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.11.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.11.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.11.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.12.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.12.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.12.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.12.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.13.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.13.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.13.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.13.md diff --git 
a/charts/ingress-nginx/changelog/helm-chart-4.0.14.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.14.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.14.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.14.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.15.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.15.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.15.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.15.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.18.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.18.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.18.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.18.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.2.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.2.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.2.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.2.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.3.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.3.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.3.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.3.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.5.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.5.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.5.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.5.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.6.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.6.md similarity index 100% rename from 
charts/ingress-nginx/changelog/helm-chart-4.0.6.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.6.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.7.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.7.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.7.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.7.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.0.9.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.9.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.0.9.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.0.9.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.1.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.1.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.1.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.1.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.1.2.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.1.2.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.1.2.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.1.2.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.10.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.10.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.10.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.10.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.10.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.10.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.10.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.10.1.md diff --git 
a/charts/ingress-nginx/changelog/helm-chart-4.10.2.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.10.2.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.10.2.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.10.2.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.11.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.11.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.11.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.11.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.11.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.11.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.11.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.11.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.11.2.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.11.2.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.11.2.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.11.2.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.11.3.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.11.3.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.11.3.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.11.3.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.2.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.2.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.2.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.2.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.2.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.2.1.md similarity index 100% rename from 
charts/ingress-nginx/changelog/helm-chart-4.2.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.2.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.3.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.3.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.3.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.3.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.4.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.4.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.4.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.4.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.5.2.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.5.2.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.5.2.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.5.2.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.6.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.6.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.6.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.6.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.6.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.6.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.6.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.6.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.7.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.7.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.7.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.7.0.md diff --git 
a/charts/ingress-nginx/changelog/helm-chart-4.7.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.7.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.7.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.7.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.7.2.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.7.2.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.7.2.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.7.2.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.8.0-beta.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.0-beta.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.8.0-beta.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.0-beta.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.8.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.8.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.8.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.8.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.8.2.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.2.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.8.2.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.2.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.8.3.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.3.md similarity index 100% 
rename from charts/ingress-nginx/changelog/helm-chart-4.8.3.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.8.3.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.9.0.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.9.0.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.9.0.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.9.0.md diff --git a/charts/ingress-nginx/changelog/helm-chart-4.9.1.md b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.9.1.md similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart-4.9.1.md rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart-4.9.1.md diff --git a/charts/ingress-nginx/changelog/helm-chart.md.gotmpl b/nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart.md.gotmpl similarity index 100% rename from charts/ingress-nginx/changelog/helm-chart.md.gotmpl rename to nginx-operator/helm-charts/ingress-nginx/changelog/helm-chart.md.gotmpl diff --git a/charts/ingress-nginx/ci/admission-webhooks-cert-manager-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/admission-webhooks-cert-manager-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/admission-webhooks-cert-manager-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/admission-webhooks-cert-manager-values.yaml diff --git a/charts/ingress-nginx/ci/controller-configmap-addheaders-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-configmap-addheaders-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-configmap-addheaders-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-configmap-addheaders-values.yaml diff --git a/charts/ingress-nginx/ci/controller-configmap-proxyheaders-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-configmap-proxyheaders-values.yaml similarity index 100% 
rename from charts/ingress-nginx/ci/controller-configmap-proxyheaders-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-configmap-proxyheaders-values.yaml diff --git a/charts/ingress-nginx/ci/controller-configmap-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-configmap-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-configmap-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-configmap-values.yaml diff --git a/charts/ingress-nginx/ci/controller-daemonset-extra-modules-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-extra-modules-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-daemonset-extra-modules-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-extra-modules-values.yaml diff --git a/charts/ingress-nginx/ci/controller-daemonset-metrics-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-metrics-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-daemonset-metrics-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-metrics-values.yaml diff --git a/charts/ingress-nginx/ci/controller-daemonset-opentelemetry-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-opentelemetry-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-daemonset-opentelemetry-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-opentelemetry-values.yaml diff --git a/charts/ingress-nginx/ci/controller-daemonset-podannotations-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-podannotations-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-daemonset-podannotations-values.yaml rename to 
nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-podannotations-values.yaml diff --git a/charts/ingress-nginx/ci/controller-daemonset-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-daemonset-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-daemonset-values.yaml diff --git a/charts/ingress-nginx/ci/controller-deployment-extra-modules-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-extra-modules-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-deployment-extra-modules-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-extra-modules-values.yaml diff --git a/charts/ingress-nginx/ci/controller-deployment-metrics-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-metrics-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-deployment-metrics-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-metrics-values.yaml diff --git a/charts/ingress-nginx/ci/controller-deployment-opentelemetry-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-opentelemetry-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-deployment-opentelemetry-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-opentelemetry-values.yaml diff --git a/charts/ingress-nginx/ci/controller-deployment-podannotations-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-podannotations-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-deployment-podannotations-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-podannotations-values.yaml diff --git 
a/charts/ingress-nginx/ci/controller-deployment-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-deployment-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-deployment-values.yaml diff --git a/charts/ingress-nginx/ci/controller-hpa-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-hpa-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-hpa-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-hpa-values.yaml diff --git a/charts/ingress-nginx/ci/controller-ingressclass-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-ingressclass-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-ingressclass-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-ingressclass-values.yaml diff --git a/charts/ingress-nginx/ci/controller-service-internal-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-service-internal-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-service-internal-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-service-internal-values.yaml diff --git a/charts/ingress-nginx/ci/controller-service-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/controller-service-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/controller-service-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/controller-service-values.yaml diff --git a/charts/ingress-nginx/ci/deamonset-psp-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/deamonset-psp-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/deamonset-psp-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/deamonset-psp-values.yaml diff --git 
a/charts/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/deamonset-webhook-and-psp-values.yaml diff --git a/charts/ingress-nginx/ci/deployment-psp-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/deployment-psp-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/deployment-psp-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/deployment-psp-values.yaml diff --git a/charts/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml b/nginx-operator/helm-charts/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml similarity index 100% rename from charts/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml rename to nginx-operator/helm-charts/ingress-nginx/ci/deployment-webhook-and-psp-values.yaml diff --git a/charts/ingress-nginx/templates/NOTES.txt b/nginx-operator/helm-charts/ingress-nginx/templates/NOTES.txt similarity index 100% rename from charts/ingress-nginx/templates/NOTES.txt rename to nginx-operator/helm-charts/ingress-nginx/templates/NOTES.txt diff --git a/charts/ingress-nginx/templates/_helpers.tpl b/nginx-operator/helm-charts/ingress-nginx/templates/_helpers.tpl similarity index 100% rename from charts/ingress-nginx/templates/_helpers.tpl rename to nginx-operator/helm-charts/ingress-nginx/templates/_helpers.tpl diff --git a/charts/ingress-nginx/templates/_params.tpl b/nginx-operator/helm-charts/ingress-nginx/templates/_params.tpl similarity index 100% rename from charts/ingress-nginx/templates/_params.tpl rename to nginx-operator/helm-charts/ingress-nginx/templates/_params.tpl diff --git a/charts/ingress-nginx/templates/admission-webhooks/cert-manager.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/cert-manager.yaml similarity index 100% rename 
from charts/ingress-nginx/templates/admission-webhooks/cert-manager.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/cert-manager.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/networkpolicy.yaml 
b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/networkpolicy.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/job-patch/networkpolicy.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/networkpolicy.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/psp.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/role.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml diff --git a/charts/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml 
b/nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml similarity index 100% rename from charts/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/admission-webhooks/validating-webhook.yaml diff --git a/charts/ingress-nginx/templates/clusterrole.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/clusterrole.yaml similarity index 100% rename from charts/ingress-nginx/templates/clusterrole.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/clusterrole.yaml diff --git a/charts/ingress-nginx/templates/clusterrolebinding.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/clusterrolebinding.yaml similarity index 100% rename from charts/ingress-nginx/templates/clusterrolebinding.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/clusterrolebinding.yaml diff --git a/charts/ingress-nginx/templates/controller-configmap-addheaders.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap-addheaders.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-configmap-addheaders.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap-addheaders.yaml diff --git a/charts/ingress-nginx/templates/controller-configmap-proxyheaders.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap-proxyheaders.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-configmap-proxyheaders.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap-proxyheaders.yaml diff --git a/charts/ingress-nginx/templates/controller-configmap-tcp.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap-tcp.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-configmap-tcp.yaml rename to 
nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap-tcp.yaml diff --git a/charts/ingress-nginx/templates/controller-configmap-udp.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap-udp.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-configmap-udp.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap-udp.yaml diff --git a/charts/ingress-nginx/templates/controller-configmap.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap.yaml similarity index 95% rename from charts/ingress-nginx/templates/controller-configmap.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap.yaml index 22080d115f..e24a967426 100644 --- a/charts/ingress-nginx/templates/controller-configmap.yaml +++ b/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap.yaml @@ -1,3 +1,4 @@ +{{- if .Values.controller.generateConfigMap -}} apiVersion: v1 kind: ConfigMap metadata: @@ -26,3 +27,4 @@ data: {{- range $key, $value := .Values.controller.config }} {{- $key | nindent 2 }}: {{ tpl (toString $value) $ | quote }} {{- end }} +{{- end }} diff --git a/charts/ingress-nginx/templates/controller-daemonset.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-daemonset.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-daemonset.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-daemonset.yaml diff --git a/charts/ingress-nginx/templates/controller-deployment.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-deployment.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-deployment.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-deployment.yaml diff --git a/charts/ingress-nginx/templates/controller-hpa.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-hpa.yaml 
similarity index 100% rename from charts/ingress-nginx/templates/controller-hpa.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-hpa.yaml diff --git a/charts/ingress-nginx/templates/controller-ingressclass-aliases.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-ingressclass-aliases.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-ingressclass-aliases.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-ingressclass-aliases.yaml diff --git a/charts/ingress-nginx/templates/controller-ingressclass.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-ingressclass.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-ingressclass.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-ingressclass.yaml diff --git a/charts/ingress-nginx/templates/controller-keda.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-keda.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-keda.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-keda.yaml diff --git a/charts/ingress-nginx/templates/controller-networkpolicy.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-networkpolicy.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-networkpolicy.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-networkpolicy.yaml diff --git a/charts/ingress-nginx/templates/controller-poddisruptionbudget.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-poddisruptionbudget.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-poddisruptionbudget.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-poddisruptionbudget.yaml diff --git a/charts/ingress-nginx/templates/controller-prometheusrule.yaml 
b/nginx-operator/helm-charts/ingress-nginx/templates/controller-prometheusrule.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-prometheusrule.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-prometheusrule.yaml diff --git a/charts/ingress-nginx/templates/controller-psp.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-psp.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-psp.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-psp.yaml diff --git a/charts/ingress-nginx/templates/controller-role.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-role.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-role.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-role.yaml diff --git a/charts/ingress-nginx/templates/controller-rolebinding.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-rolebinding.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-rolebinding.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-rolebinding.yaml diff --git a/charts/ingress-nginx/templates/controller-secret.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-secret.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-secret.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-secret.yaml diff --git a/charts/ingress-nginx/templates/controller-service-internal.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-service-internal.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-service-internal.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-service-internal.yaml diff --git a/charts/ingress-nginx/templates/controller-service-metrics.yaml 
b/nginx-operator/helm-charts/ingress-nginx/templates/controller-service-metrics.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-service-metrics.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-service-metrics.yaml diff --git a/charts/ingress-nginx/templates/controller-service-webhook.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-service-webhook.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-service-webhook.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-service-webhook.yaml diff --git a/charts/ingress-nginx/templates/controller-service.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-service.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-service.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-service.yaml diff --git a/charts/ingress-nginx/templates/controller-serviceaccount.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-serviceaccount.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-serviceaccount.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-serviceaccount.yaml diff --git a/charts/ingress-nginx/templates/controller-servicemonitor.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-servicemonitor.yaml similarity index 100% rename from charts/ingress-nginx/templates/controller-servicemonitor.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/controller-servicemonitor.yaml diff --git a/charts/ingress-nginx/templates/default-backend-deployment.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-deployment.yaml similarity index 100% rename from charts/ingress-nginx/templates/default-backend-deployment.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-deployment.yaml diff 
--git a/charts/ingress-nginx/templates/default-backend-extra-configmaps.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-extra-configmaps.yaml similarity index 100% rename from charts/ingress-nginx/templates/default-backend-extra-configmaps.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-extra-configmaps.yaml diff --git a/charts/ingress-nginx/templates/default-backend-hpa.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-hpa.yaml similarity index 100% rename from charts/ingress-nginx/templates/default-backend-hpa.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-hpa.yaml diff --git a/charts/ingress-nginx/templates/default-backend-networkpolicy.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-networkpolicy.yaml similarity index 100% rename from charts/ingress-nginx/templates/default-backend-networkpolicy.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-networkpolicy.yaml diff --git a/charts/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml similarity index 100% rename from charts/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-poddisruptionbudget.yaml diff --git a/charts/ingress-nginx/templates/default-backend-psp.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-psp.yaml similarity index 100% rename from charts/ingress-nginx/templates/default-backend-psp.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-psp.yaml diff --git a/charts/ingress-nginx/templates/default-backend-role.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-role.yaml similarity index 100% rename from 
charts/ingress-nginx/templates/default-backend-role.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-role.yaml diff --git a/charts/ingress-nginx/templates/default-backend-rolebinding.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-rolebinding.yaml similarity index 100% rename from charts/ingress-nginx/templates/default-backend-rolebinding.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-rolebinding.yaml diff --git a/charts/ingress-nginx/templates/default-backend-service.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-service.yaml similarity index 100% rename from charts/ingress-nginx/templates/default-backend-service.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-service.yaml diff --git a/charts/ingress-nginx/templates/default-backend-serviceaccount.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/default-backend-serviceaccount.yaml similarity index 100% rename from charts/ingress-nginx/templates/default-backend-serviceaccount.yaml rename to nginx-operator/helm-charts/ingress-nginx/templates/default-backend-serviceaccount.yaml diff --git a/charts/ingress-nginx/tests/admission-webhooks/job-patch/clusterrole_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/clusterrole_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/admission-webhooks/job-patch/clusterrole_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/clusterrole_test.yaml diff --git a/charts/ingress-nginx/tests/admission-webhooks/job-patch/clusterrolebinding_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/clusterrolebinding_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/admission-webhooks/job-patch/clusterrolebinding_test.yaml rename to 
nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/clusterrolebinding_test.yaml diff --git a/charts/ingress-nginx/tests/admission-webhooks/job-patch/role_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/role_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/admission-webhooks/job-patch/role_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/role_test.yaml diff --git a/charts/ingress-nginx/tests/admission-webhooks/job-patch/rolebinding_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/rolebinding_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/admission-webhooks/job-patch/rolebinding_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/rolebinding_test.yaml diff --git a/charts/ingress-nginx/tests/admission-webhooks/job-patch/serviceaccount_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/serviceaccount_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/admission-webhooks/job-patch/serviceaccount_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/job-patch/serviceaccount_test.yaml diff --git a/charts/ingress-nginx/tests/admission-webhooks/validating-webhook_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/validating-webhook_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/admission-webhooks/validating-webhook_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/admission-webhooks/validating-webhook_test.yaml diff --git a/charts/ingress-nginx/tests/controller-configmap-addheaders_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-configmap-addheaders_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-configmap-addheaders_test.yaml 
rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-configmap-addheaders_test.yaml diff --git a/charts/ingress-nginx/tests/controller-configmap-proxyheaders_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-configmap-proxyheaders_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-configmap-proxyheaders_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-configmap-proxyheaders_test.yaml diff --git a/charts/ingress-nginx/tests/controller-configmap_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-configmap_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-configmap_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-configmap_test.yaml diff --git a/charts/ingress-nginx/tests/controller-daemonset_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-daemonset_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-daemonset_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-daemonset_test.yaml diff --git a/charts/ingress-nginx/tests/controller-deployment_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-deployment_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-deployment_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-deployment_test.yaml diff --git a/charts/ingress-nginx/tests/controller-hpa_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-hpa_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-hpa_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-hpa_test.yaml diff --git a/charts/ingress-nginx/tests/controller-ingressclass-aliases_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-ingressclass-aliases_test.yaml similarity index 100% rename from 
charts/ingress-nginx/tests/controller-ingressclass-aliases_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-ingressclass-aliases_test.yaml diff --git a/charts/ingress-nginx/tests/controller-ingressclass_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-ingressclass_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-ingressclass_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-ingressclass_test.yaml diff --git a/charts/ingress-nginx/tests/controller-keda_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-keda_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-keda_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-keda_test.yaml diff --git a/charts/ingress-nginx/tests/controller-networkpolicy_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-networkpolicy_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-networkpolicy_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-networkpolicy_test.yaml diff --git a/charts/ingress-nginx/tests/controller-poddisruptionbudget_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-poddisruptionbudget_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-poddisruptionbudget_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-poddisruptionbudget_test.yaml diff --git a/charts/ingress-nginx/tests/controller-prometheusrule_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-prometheusrule_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-prometheusrule_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-prometheusrule_test.yaml diff --git a/charts/ingress-nginx/tests/controller-service-internal_test.yaml 
b/nginx-operator/helm-charts/ingress-nginx/tests/controller-service-internal_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-service-internal_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-service-internal_test.yaml diff --git a/charts/ingress-nginx/tests/controller-service-metrics_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-service-metrics_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-service-metrics_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-service-metrics_test.yaml diff --git a/charts/ingress-nginx/tests/controller-service-webhook_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-service-webhook_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-service-webhook_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-service-webhook_test.yaml diff --git a/charts/ingress-nginx/tests/controller-service_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-service_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-service_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-service_test.yaml diff --git a/charts/ingress-nginx/tests/controller-servicemonitor_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/controller-servicemonitor_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/controller-servicemonitor_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/controller-servicemonitor_test.yaml diff --git a/charts/ingress-nginx/tests/default-backend-deployment_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/default-backend-deployment_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/default-backend-deployment_test.yaml rename to 
nginx-operator/helm-charts/ingress-nginx/tests/default-backend-deployment_test.yaml diff --git a/charts/ingress-nginx/tests/default-backend-extra-configmaps_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/default-backend-extra-configmaps_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/default-backend-extra-configmaps_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/default-backend-extra-configmaps_test.yaml diff --git a/charts/ingress-nginx/tests/default-backend-poddisruptionbudget_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/default-backend-poddisruptionbudget_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/default-backend-poddisruptionbudget_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/default-backend-poddisruptionbudget_test.yaml diff --git a/charts/ingress-nginx/tests/default-backend-service_test.yaml b/nginx-operator/helm-charts/ingress-nginx/tests/default-backend-service_test.yaml similarity index 100% rename from charts/ingress-nginx/tests/default-backend-service_test.yaml rename to nginx-operator/helm-charts/ingress-nginx/tests/default-backend-service_test.yaml diff --git a/charts/ingress-nginx/values.yaml b/nginx-operator/helm-charts/ingress-nginx/values.yaml similarity index 99% rename from charts/ingress-nginx/values.yaml rename to nginx-operator/helm-charts/ingress-nginx/values.yaml index f42a6821dc..6c44a3a921 100644 --- a/charts/ingress-nginx/values.yaml +++ b/nginx-operator/helm-charts/ingress-nginx/values.yaml @@ -48,6 +48,8 @@ controller: # -- Global configuration passed to the ConfigMap consumed by the controller. Values may contain Helm templates. # Ref.: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ config: {} + # -- whether to generate the configmap object or not + generateConfigMap: false # -- Annotations to be added to the controller config configuration configmap. 
configAnnotations: {} # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers diff --git a/nginx-operator/remove_configmap.patch b/nginx-operator/remove_configmap.patch new file mode 100644 index 0000000000..eadba8db08 --- /dev/null +++ b/nginx-operator/remove_configmap.patch @@ -0,0 +1,27 @@ +diff --git a/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap.yaml b/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap.yaml +index 22080d115..e24a96742 100644 +--- a/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap.yaml ++++ b/nginx-operator/helm-charts/ingress-nginx/templates/controller-configmap.yaml +@@ -1,3 +1,4 @@ ++{{- if .Values.controller.generateConfigMap -}} + apiVersion: v1 + kind: ConfigMap + metadata: +@@ -26,3 +27,4 @@ data: + {{- range $key, $value := .Values.controller.config }} + {{- $key | nindent 2 }}: {{ tpl (toString $value) $ | quote }} + {{- end }} ++{{- end }} +diff --git a/nginx-operator/helm-charts/ingress-nginx/values.yaml b/nginx-operator/helm-charts/ingress-nginx/values.yaml +index f42a6821d..6c44a3a92 100644 +--- a/nginx-operator/helm-charts/ingress-nginx/values.yaml ++++ b/nginx-operator/helm-charts/ingress-nginx/values.yaml +@@ -48,6 +48,8 @@ controller: + # -- Global configuration passed to the ConfigMap consumed by the controller. Values may contain Helm templates. + # Ref.: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} ++ # -- whether to generate the configmap object or not ++ generateConfigMap: false + # -- Annotations to be added to the controller config configuration configmap. 
+ configAnnotations: {} + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers diff --git a/nginx-operator/watches.yaml b/nginx-operator/watches.yaml new file mode 100644 index 0000000000..ccc890f1d3 --- /dev/null +++ b/nginx-operator/watches.yaml @@ -0,0 +1,6 @@ +# Use the 'create api' subcommand to add watches to this file. +- group: metalk8s.scality.com + version: v1alpha1 + kind: IngressNginx + chart: helm-charts/ingress-nginx +# +kubebuilder:scaffold:watch diff --git a/olm/.gitignore b/olm/.gitignore new file mode 100644 index 0000000000..1e82fc7deb --- /dev/null +++ b/olm/.gitignore @@ -0,0 +1 @@ +*.yaml diff --git a/olm/render.py b/olm/render.py new file mode 100755 index 0000000000..5365445158 --- /dev/null +++ b/olm/render.py @@ -0,0 +1,281 @@ +#!/usr/bin/env python + +""" +This script downloads operator controller and catalogd manifests and `renders` them into a useable +salt chart. 
+ +To do so, it: + - downloads manifests from github releases (optional) + - merges duplicate objects + - adds appropriate labels + - changes names and namespaces of cert-manager resources + - swaps image values +""" + +import argparse +import io +import pathlib +import re +import requests +import sys +import yaml + +CONTROLLER_MANIFEST_URL = "https://github.com/operator-framework/operator-controller/releases/download/{version}/operator-controller.yaml" + +CATALOGD_MANIFEST_URL = "https://github.com/operator-framework/catalogd/releases/download/{version}/catalogd.yaml" + +REGISTRIES_CONF = """ +[[registry]] +prefix = "{% endraw -%}{{ repo.registry_endpoint }}{%- raw %}" +insecure = true +location = "{% endraw -%}{{ repo.registry_endpoint }}{%- raw %}:80" +[[registry]] +prefix = "registry.metalk8s.lan" +insecure = true +location = "{% endraw -%}{{ repo.registry_endpoint }}{%- raw %}:80" +""" + + +START_BLOCK = """ +#!jinja | metalk8s_kubernetes + +{%- from "metalk8s/map.jinja" import repo with context %} +{%- from "metalk8s/repo/macro.sls" import build_image_name with context %} + +{% raw %} +""" + +END_BLOCK = """ +{% endraw %} +""" + + +class DownloadError(Exception): + pass + + +def semver_regex_type(value): + pat = re.compile(r"^v\d+\.\d+\.\d+$") + if not pat.match(value): + raise argparse.ArgumentTypeError("invalid value, must be ~v1.1.2") + return value + + +def download_source_manifest(version): + controller_response = requests.get(CONTROLLER_MANIFEST_URL.format(version=version)) + catalogd_response = requests.get(CATALOGD_MANIFEST_URL.format(version=version)) + if controller_response.status_code == 200 and catalogd_response.status_code == 200: + return controller_response.content, catalogd_response.content + raise DownloadError("Problem fetching catalogd or operator controller manifests") + + +""" +Merge source manifests on top of destination +""" + + +def merge(source, destination): + def _dict_merge(src, dst): + for k, v in src.items(): + if k in dst and 
isinstance(dst[k], dict) and isinstance(v, dict): + _dict_merge(v, dst[k]) + else: + dst[k] = v + + to_add = [] + # check for each doc in source + for sdoc in source: + # does it exist in the destination docs ? + found = False + for ddoc in destination: + if ( + ddoc["kind"] == sdoc["kind"] + and ddoc["metadata"]["name"] == sdoc["metadata"]["name"] + ): + found = True + _dict_merge(sdoc, ddoc) + if not found: + to_add.append(sdoc) + destination.extend(to_add) + return destination + + +def add_labels(manifest, version): + for doc in manifest: + doc["metadata"].setdefault("labels", {}).update( + { + "app.kubernetes.io/instance": "olm", + "app.kubernetes.io/managed-by": "salt", + "app.kubernetes.io/part-of": "metalk8s", + "app.kubernetes.io/version": str(version), + "heritage": "metalk8s", + } + ) + return manifest + + +def add_tolerations(manifest): + for doc in manifest: + if doc["kind"] == "Deployment": + doc["spec"]["template"]["spec"].setdefault("tolerations", []).extend( + [ + { + "key": "node-role.kubernetes.io/bootstrap", + "operator": "Exists", + "effect": "NoSchedule", + }, + { + "key": "node-role.kubernetes.io/infra", + "operator": "Exists", + "effect": "NoSchedule", + }, + ] + ) + return manifest + + +def add_registries_conf(manifest): + manifest.append( + { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": "registries-conf", + "namespace": "olmv1-system", + }, + "data": { + "registries.conf": REGISTRIES_CONF, + }, + } + ) + for doc in manifest: + if doc["kind"] == "Deployment": + doc["spec"]["template"]["spec"]["volumes"].append( + { + "name": "registries-conf", + "configMap": { + "name": "registries-conf", + }, + } + ) + doc["spec"]["template"]["spec"]["containers"][0]["volumeMounts"].append( + { + "name": "registries-conf", + "mountPath": "/etc/containers/", + } + ) + return manifest + + +def add_node_selector(manifest): + for doc in manifest: + if doc["kind"] == "Deployment": + 
doc["spec"]["template"]["spec"].setdefault("nodeSelector", {}).update( + { + "kubernetes.io/os": "linux", + "node-role.kubernetes.io/infra": "", + } + ) + return manifest + + +def fixup_certmanager(manifest): + for doc in manifest: + if ( + doc["apiVersion"] == "cert-manager.io/v1" + and doc["metadata"].get("namespace") == "cert-manager" + ): + doc["metadata"]["namespace"] = "metalk8s-certs" + if doc["kind"] == "MutatingWebhookConfiguration": + doc["metadata"]["annotations"][ + "cert-manager.io/inject-ca-from-secret" + ] = "metalk8s-certs/olmv1-ca" + return manifest + + +class multiline_string(str): + pass + + +def represent_multiline_string(dumper, data): + scalar = yaml.SafeDumper.represent_str(dumper, data) + scalar.style = "|" + return scalar + + +yaml.SafeDumper.add_representer(multiline_string, represent_multiline_string) + + +def render(manifest): + def _fix_strings(obj): + if isinstance(obj, dict): + return dict((k, _fix_strings(v)) for (k, v) in obj.items()) + elif isinstance(obj, list): + return [_fix_strings(elem) for elem in obj] + elif isinstance(obj, str): + if "\n" in obj: + value = "\n".join( + line for line in obj.splitlines() if not re.match(r"^\s*$", line) + ) + return multiline_string(value) + return obj + else: + return obj + + manifest = _fix_strings(manifest) + out = START_BLOCK.lstrip() + stream = io.StringIO() + yaml.safe_dump_all( + manifest, + stream, + default_flow_style=False, + ) + stream.seek(0) + out += re.sub( + r"image: quay.io/operator-framework/(?P<image>.*):(?P<tag>.*)", + r'image: {% endraw -%}{{ build_image_name("\g<image>", False) }}{%- raw %}:\g<tag>', + stream.read(), + ) + out += END_BLOCK + return out + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("-v", "--version", type=semver_regex_type) + parser.add_argument("-o", "--output", type=pathlib.Path) + args = parser.parse_args() + + # 1 - download the manifests + controller_manifest, catalogd_manifest = download_source_manifest( + version=args.version + ) + + # 1.5 -
interpret yaml + controller = list(yaml.safe_load_all(controller_manifest)) + catalogd = list(yaml.safe_load_all(catalogd_manifest)) + + # 2 - merge manifests + manifest = merge(catalogd, controller) + + # 3- add labels, tolerations, nodeSelector + manifest = add_labels(manifest, args.version) + manifest = add_tolerations(manifest) + manifest = add_node_selector(manifest) + manifest = add_registries_conf(manifest) + + # 4- Fix cert-manager objects + manifest = fixup_certmanager(manifest) + + # 5- render yaml with new images + rendered = render(manifest) + + if args.output: + with open(args.output, "w") as fd: + fd.write(rendered) + else: + sys.stdout.write(rendered) + + +if __name__ == "__main__": + main() diff --git a/salt/_modules/metalk8s_olm.py b/salt/_modules/metalk8s_olm.py new file mode 100644 index 0000000000..d58c2a573a --- /dev/null +++ b/salt/_modules/metalk8s_olm.py @@ -0,0 +1,25 @@ +"""Interacts with OLMv1 CRs""" + +__virtualname__ = "metalk8s_olm" + + +def __virtual__(): + return __virtualname__ + + +def check_condition_status(kind: str, name: str, condition: str, status: str): + obj = __salt__["metalk8s_kubernetes.get_object"]( + kind=kind, apiVersion="olm.operatorframework.io/v1", name=name + ) + for cond in obj["status"]["conditions"]: + if cond["type"] == condition: + return cond["status"] == status + return False + + +def check_clustercatalog_serving(name: str): + return check_condition_status("ClusterCatalog", name, "Serving", "True") + + +def check_clusterextension_installed(name: str): + return check_condition_status("ClusterExtension", name, "Installed", "True") diff --git a/salt/metalk8s/addons/cert-manager/deployed/init.sls b/salt/metalk8s/addons/cert-manager/deployed/init.sls index 9510d7dddb..bf5ba53776 100644 --- a/salt/metalk8s/addons/cert-manager/deployed/init.sls +++ b/salt/metalk8s/addons/cert-manager/deployed/init.sls @@ -1,3 +1,39 @@ include: - .namespace - .chart + +Wait for cert-manager deployment to be Ready: + 
test.configurable_test_state: + - changes: False + - result: __slot__:salt:metalk8s_kubernetes.check_object_ready( + apiVersion=apps/v1, kind=Deployment, + name=cert-manager, namespace=metalk8s-certs) + - comment: wait for cert-manager + - retry: + attempts: 30 + - require: + - sls: metalk8s.addons.cert-manager.deployed.chart + +Wait for cert-manager webhook to be Ready: + test.configurable_test_state: + - changes: False + - result: __slot__:salt:metalk8s_kubernetes.check_object_ready( + apiVersion=apps/v1, kind=Deployment, + name=cert-manager-webhook, namespace=metalk8s-certs) + - comment: wait for cert-manager-webhook + - retry: + attempts: 30 + - require: + - sls: metalk8s.addons.cert-manager.deployed.chart + +Wait for cert-manager cainjector to be Ready: + test.configurable_test_state: + - changes: False + - result: __slot__:salt:metalk8s_kubernetes.check_object_ready( + apiVersion=apps/v1, kind=Deployment, + name=cert-manager-cainjector, namespace=metalk8s-certs) + - comment: wait for cert-manager-cainjector + - retry: + attempts: 30 + - require: + - sls: metalk8s.addons.cert-manager.deployed.chart diff --git a/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart.sls b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart.sls index f61f7a059d..b53360f91d 100644 --- a/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart.sls +++ b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/chart.sls @@ -3,478 +3,53 @@ {%- from "metalk8s/map.jinja" import repo with context %} {%- from "metalk8s/repo/macro.sls" import build_image_name with context %} - - -{% raw %} - -apiVersion: v1 -automountServiceAccountToken: true -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: 
ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-control-plane - namespace: metalk8s-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-control-plane - namespace: metalk8s-ingress -rules: -- apiGroups: - - '' - resources: - - configmaps - - endpoints - - nodes - - pods - - secrets - - namespaces - verbs: - - list - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - '' - resources: - - nodes - verbs: - - get -- apiGroups: - - '' - resources: - - services - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - '' - resources: - - events - verbs: - - create - - patch -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-control-plane - namespace: metalk8s-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: ingress-nginx-control-plane -subjects: -- kind: ServiceAccount - name: ingress-nginx-control-plane - namespace: 
metalk8s-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-control-plane - namespace: metalk8s-ingress -rules: -- apiGroups: - - '' - resources: - - namespaces - verbs: - - get -- apiGroups: - - '' - resources: - - configmaps - - pods - - secrets - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - '' - resources: - - services - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get - - list - - watch -- apiGroups: - - coordination.k8s.io - resourceNames: - - ingress-control-plane-controller-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- apiGroups: - - '' - resources: - - events - verbs: - - create - - patch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding +apiVersion: metalk8s.scality.com/v1alpha1 +kind: IngressNginx metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s name: ingress-nginx-control-plane namespace: metalk8s-ingress -roleRef: - apiGroup: 
rbac.authorization.k8s.io - kind: Role - name: ingress-nginx-control-plane -subjects: -- kind: ServiceAccount - name: ingress-nginx-control-plane - namespace: metalk8s-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-control-plane-controller-metrics - namespace: metalk8s-ingress -spec: - ports: - - name: metrics - port: 10254 - protocol: TCP - targetPort: metrics - selector: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/name: ingress-nginx - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: null - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-control-plane-controller - namespace: metalk8s-ingress -spec: - externalIPs: {% endraw -%}{{ salt.metalk8s_network.get_control_plane_ingress_external_ips() | tojson }}{%- raw %} - ipFamilies: - - IPv4 - ipFamilyPolicy: SingleStack - ports: - - appProtocol: https - name: https - port: 8443 - protocol: TCP - targetPort: https - selector: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/name: ingress-nginx - type: ClusterIP ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - 
app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-control-plane-controller - namespace: metalk8s-ingress -spec: - minReadySeconds: 0 - revisionHistoryLimit: 10 - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/name: ingress-nginx - template: - metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - spec: - containers: - - args: - - /nginx-ingress-controller - - --publish-service=$(POD_NAMESPACE)/ingress-nginx-control-plane-controller - - --election-id=ingress-control-plane-controller-leader - - --controller-class=k8s.io/ingress-nginx-control-plane - - --ingress-class=nginx-control-plane - - --configmap=$(POD_NAMESPACE)/ingress-nginx-control-plane-controller - - --default-ssl-certificate=metalk8s-ingress/ingress-control-plane-default-certificate - - --metrics-per-host=false - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LD_PRELOAD - value: /usr/local/lib/libmimalloc.so - image: {% endraw -%}{{ build_image_name("nginx-ingress-controller", False) }}{%- raw %}:v1.11.3 - imagePullPolicy: IfNotPresent - lifecycle: - preStop: - exec: - command: - - /wait-shutdown - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - name: controller - ports: - - containerPort: 80 - name: http - protocol: TCP - - containerPort: 443 - 
name: https - protocol: TCP - - containerPort: 10254 - name: metrics - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - requests: - cpu: 100m - memory: 90Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - dnsPolicy: ClusterFirst - nodeSelector: - kubernetes.io/os: linux - node-role.kubernetes.io/master: '' - serviceAccountName: ingress-nginx-control-plane - terminationGracePeriodSeconds: 300 - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/bootstrap - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/infra - operator: Exists - updateStrategy: - type: RollingUpdate ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: nginx-control-plane - namespace: metalk8s-ingress spec: - controller: k8s.io/ingress-nginx-control-plane ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - metalk8s.scality.com/monitor: '' - name: 
ingress-nginx-control-plane-controller - namespace: metalk8s-ingress -spec: - endpoints: - - interval: 30s - port: metrics - namespaceSelector: - matchNames: - - metalk8s-ingress - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx-control-plane - app.kubernetes.io/name: ingress-nginx - -{% endraw %} + controller: + allowSnippetAnnotations: true + image: + digest: null + repository: {{ build_image_name("nginx-ingress-controller", False) }} + electionID: ingress-control-plane-controller-leader + ingressClassResource: + name: nginx-control-plane + controllerValue: "k8s.io/ingress-nginx-control-plane" + ingressClass: nginx-control-plane + admissionWebhooks: + enabled: false + kind: DaemonSet + updateStrategy: + type: RollingUpdate + tolerations: + - key: "node-role.kubernetes.io/bootstrap" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/infra" + operator: "Exists" + effect: "NoSchedule" + nodeSelector: + node-role.kubernetes.io/master: '' + service: + type: ClusterIP + externalIPs: {{ salt.metalk8s_network.get_control_plane_ingress_external_ips() | tojson }} + enableHttp: false + ports: + https: 8443 + extraArgs: + default-ssl-certificate: "metalk8s-ingress/ingress-control-plane-default-certificate" + metrics-per-host: false + metrics: + enabled: true + serviceMonitor: + enabled: true + additionalLabels: + metalk8s.scality.com/monitor: '' + defaultBackend: + enabled: false diff --git a/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/init.sls b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/init.sls index b202872be4..ed7e50324e 100644 --- a/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/init.sls +++ b/salt/metalk8s/addons/nginx-ingress-control-plane/deployed/init.sls @@ -1,4 +1,5 @@ include: + - metalk8s.addons.nginx-operator.deployed - 
metalk8s.addons.nginx-ingress.deployed.namespace - .tls-secret - .chart diff --git a/salt/metalk8s/addons/nginx-ingress/deployed/chart.sls b/salt/metalk8s/addons/nginx-ingress/deployed/chart.sls index 9586e867f8..18ad8fae77 100644 --- a/salt/metalk8s/addons/nginx-ingress/deployed/chart.sls +++ b/salt/metalk8s/addons/nginx-ingress/deployed/chart.sls @@ -3,482 +3,43 @@ {%- from "metalk8s/map.jinja" import repo with context %} {%- from "metalk8s/repo/macro.sls" import build_image_name with context %} - - -{% raw %} - -apiVersion: v1 -automountServiceAccountToken: true -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx - namespace: metalk8s-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx - namespace: metalk8s-ingress -rules: -- apiGroups: - - '' - resources: - - configmaps - - endpoints - - nodes - - pods - - secrets - - namespaces - verbs: - - list - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - list - - watch -- apiGroups: - - '' - resources: - - nodes - verbs: - - get -- apiGroups: - - '' - resources: - - services - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - '' - resources: - - events - verbs: - - create - - patch -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update 
-- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get - - list - - watch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx - namespace: metalk8s-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: ingress-nginx -subjects: -- kind: ServiceAccount - name: ingress-nginx - namespace: metalk8s-ingress ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx - namespace: metalk8s-ingress -rules: -- apiGroups: - - '' - resources: - - namespaces - verbs: - - get -- apiGroups: - - '' - resources: - - configmaps - - pods - - secrets - - endpoints - verbs: - - get - - list - - watch -- apiGroups: - - '' - resources: - - services - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses - verbs: - - get - - list - - watch -- apiGroups: - - networking.k8s.io - resources: - - ingresses/status - verbs: - - update -- apiGroups: - - networking.k8s.io - resources: - - ingressclasses - verbs: - - get - - list - - watch -- apiGroups: - - coordination.k8s.io - resourceNames: - - ingress-nginx-leader - resources: - - leases - verbs: - - get - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create -- 
apiGroups: - - '' - resources: - - events - verbs: - - create - - patch -- apiGroups: - - discovery.k8s.io - resources: - - endpointslices - verbs: - - list - - watch - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding +apiVersion: metalk8s.scality.com/v1alpha1 +kind: IngressNginx metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s name: ingress-nginx namespace: metalk8s-ingress -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: ingress-nginx -subjects: -- kind: ServiceAccount - name: ingress-nginx - namespace: metalk8s-ingress ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-controller-metrics - namespace: metalk8s-ingress -spec: - ports: - - name: metrics - port: 10254 - protocol: TCP - targetPort: metrics - selector: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/name: ingress-nginx - type: ClusterIP ---- -apiVersion: v1 -kind: Service -metadata: - annotations: null - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-controller - namespace: metalk8s-ingress -spec: - ipFamilies: - - IPv4 - ipFamilyPolicy: SingleStack 
- ports: - - appProtocol: http - name: http - port: 80 - protocol: TCP - targetPort: http - - appProtocol: https - name: https - port: 443 - protocol: TCP - targetPort: https - selector: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/name: ingress-nginx - type: ClusterIP ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: ingress-nginx-controller - namespace: metalk8s-ingress -spec: - minReadySeconds: 0 - revisionHistoryLimit: 10 - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/name: ingress-nginx - template: - metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - spec: - containers: - - args: - - /nginx-ingress-controller - - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller - - --election-id=ingress-nginx-leader - - --controller-class=k8s.io/ingress-nginx - - --ingress-class=nginx - - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller - - --watch-ingress-without-class=true - - --default-backend-service=metalk8s-ui/metalk8s-ui - - --default-ssl-certificate=metalk8s-ingress/ingress-workload-plane-default-certificate - - --metrics-per-host=false - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LD_PRELOAD - value: 
/usr/local/lib/libmimalloc.so - image: {% endraw -%}{{ build_image_name("nginx-ingress-controller", False) }}{%- raw %}:v1.11.3 - imagePullPolicy: IfNotPresent - lifecycle: - preStop: - exec: - command: - - /wait-shutdown - livenessProbe: - failureThreshold: 5 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - name: controller - ports: - - containerPort: 80 - hostPort: 80 - name: http - protocol: TCP - - containerPort: 443 - hostPort: 443 - name: https - protocol: TCP - - containerPort: 10254 - name: metrics - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - resources: - requests: - cpu: 100m - memory: 90Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - ALL - readOnlyRootFilesystem: false - runAsNonRoot: true - runAsUser: 101 - seccompProfile: - type: RuntimeDefault - dnsPolicy: ClusterFirst - nodeSelector: - kubernetes.io/os: linux - serviceAccountName: ingress-nginx - terminationGracePeriodSeconds: 300 - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/bootstrap - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/infra - operator: Exists ---- -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - annotations: - ingressclass.kubernetes.io/is-default-class: 'true' - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - name: nginx - namespace: metalk8s-ingress spec: - controller: k8s.io/ingress-nginx ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor 
-metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/managed-by: salt - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: metalk8s - app.kubernetes.io/version: 1.11.3 - helm.sh/chart: ingress-nginx-4.11.3 - heritage: metalk8s - metalk8s.scality.com/monitor: '' - name: ingress-nginx-controller - namespace: metalk8s-ingress -spec: - endpoints: - - interval: 30s - port: metrics - namespaceSelector: - matchNames: - - metalk8s-ingress - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: ingress-nginx - app.kubernetes.io/name: ingress-nginx - -{% endraw %} + controller: + allowSnippetAnnotations: true + image: + digest: null + repository: {{ build_image_name("nginx-ingress-controller", False) }} + hostPort: + enabled: true + ingressClassResource: + default: true + watchIngressWithoutClass: true + admissionWebhooks: + enabled: false + kind: DaemonSet + tolerations: + - key: "node-role.kubernetes.io/bootstrap" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/infra" + operator: "Exists" + effect: "NoSchedule" + service: + type: ClusterIP + extraArgs: + default-backend-service: metalk8s-ui/metalk8s-ui + default-ssl-certificate: "metalk8s-ingress/ingress-workload-plane-default-certificate" + metrics-per-host: false + metrics: + enabled: true + serviceMonitor: + enabled: true + additionalLabels: + metalk8s.scality.com/monitor: '' + defaultBackend: + enabled: false diff --git a/salt/metalk8s/addons/nginx-ingress/deployed/files/ingress-nginx-performance.json b/salt/metalk8s/addons/nginx-ingress/deployed/files/ingress-nginx-performance.json index 61db983a65..cde796384c 100644 --- a/salt/metalk8s/addons/nginx-ingress/deployed/files/ingress-nginx-performance.json +++ b/salt/metalk8s/addons/nginx-ingress/deployed/files/ingress-nginx-performance.json @@ -893,104 +893,6 @@ ], "title": "Average Response Size by Method and 
Path", "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "links": [], - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 32 - }, - "id": 96, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "10.4.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "sum (\n rate(\n nginx_ingress_controller_ingress_upstream_latency_seconds_sum {\n ingress =~ \"$ingress\",\n }[5m]\n)) / sum (\n rate(\n nginx_ingress_controller_ingress_upstream_latency_seconds_count {\n ingress =~ \"$ingress\",\n }[5m]\n )\n)\n", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "average", - "refId": "B" - } - ], - "title": "Upstream Service Latency", - "type": "timeseries" } ], "refresh": "30s", diff --git a/salt/metalk8s/addons/nginx-ingress/deployed/init.sls b/salt/metalk8s/addons/nginx-ingress/deployed/init.sls index 9a57e0c9ce..13de239a43 
100644 --- a/salt/metalk8s/addons/nginx-ingress/deployed/init.sls +++ b/salt/metalk8s/addons/nginx-ingress/deployed/init.sls @@ -1,4 +1,5 @@ include: + - metalk8s.addons.nginx-operator.deployed - .namespace - .tls-secret - .chart diff --git a/salt/metalk8s/addons/nginx-operator/deployed/clusterextension.sls b/salt/metalk8s/addons/nginx-operator/deployed/clusterextension.sls new file mode 100644 index 0000000000..f684d5d882 --- /dev/null +++ b/salt/metalk8s/addons/nginx-operator/deployed/clusterextension.sls @@ -0,0 +1,16 @@ +#!jinja | metalk8s_kubernetes + +--- +apiVersion: olm.operatorframework.io/v1 +kind: ClusterExtension +metadata: + name: nginx-install +spec: + namespace: nginx-operator + serviceAccount: + name: nginx-operator-installer + source: + sourceType: Catalog + catalog: + packageName: nginx-operator + version: "v4.11.3" diff --git a/salt/metalk8s/addons/nginx-operator/deployed/init.sls b/salt/metalk8s/addons/nginx-operator/deployed/init.sls new file mode 100644 index 0000000000..2221c3a522 --- /dev/null +++ b/salt/metalk8s/addons/nginx-operator/deployed/init.sls @@ -0,0 +1,20 @@ +include: + - metalk8s.addons.olm.deployed + - metalk8s.addons.olm.catalog.deployed + - .namespace + - .rbac + - .clusterextension + +Wait for the Nginx Operator Cluster Extension to be Installed: + test.configurable_test_state: + - changes: False + - result: __slot__:salt:metalk8s_olm.check_clusterextension_installed("nginx-install") + - comment: Wait for the Nginx Operator Cluster Extension to be Installed + - retry: + attempts: 30 + - require: + - test: Wait for the Operator Controller Controller Manager Deployment to be Ready + - test: Wait for Cluster Catalog to be Serving + - sls: metalk8s.addons.nginx-operator.deployed.namespace + - sls: metalk8s.addons.nginx-operator.deployed.rbac + - sls: metalk8s.addons.nginx-operator.deployed.clusterextension diff --git a/salt/metalk8s/addons/nginx-operator/deployed/namespace.sls 
b/salt/metalk8s/addons/nginx-operator/deployed/namespace.sls new file mode 100644 index 0000000000..8c3803f1b5 --- /dev/null +++ b/salt/metalk8s/addons/nginx-operator/deployed/namespace.sls @@ -0,0 +1,7 @@ +#!jinja | metalk8s_kubernetes + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: nginx-operator diff --git a/salt/metalk8s/addons/nginx-operator/deployed/rbac.sls b/salt/metalk8s/addons/nginx-operator/deployed/rbac.sls new file mode 100644 index 0000000000..0944a1e5a2 --- /dev/null +++ b/salt/metalk8s/addons/nginx-operator/deployed/rbac.sls @@ -0,0 +1,488 @@ +#!jinja | metalk8s_kubernetes + +# TODO: cluster role and cluster role binding +# names must bne injected here to make sure operator-installer can only +# manage the operator's resources + +# generated on running platform using +# https://github.com/operator-framework/operator-controller/tree/main/hack/tools/catalogs + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nginx-operator-installer + namespace: nginx-operator +--- +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nginx-operator-installer-cluster-role +rules: [ + { + "apiGroups": [ + "olm.operatorframework.io" + ], + "resources": [ + "clusterextensions/finalizers" + ], + "verbs": [ + "update" + ], + "resourceNames": [ + "nginx-operator" + ] + }, + { + "apiGroups": [ + "apiextensions.k8s.io" + ], + "resources": [ + "customresourcedefinitions" + ], + "verbs": [ + "create", + "list", + "watch" + ] + }, + { + "apiGroups": [ + "apiextensions.k8s.io" + ], + "resources": [ + "customresourcedefinitions" + ], + "verbs": [ + "get", + "update", + "patch", + "delete" + ], + "resourceNames": [ + "ingressnginxes.metalk8s.scality.com" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "resources": [ + "clusterroles" + ], + "verbs": [ + "create", + "list", + "watch" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "resources": [ + "clusterroles" + ], + "verbs": [ + "get", + 
"update", + "patch", + "delete" + ] + }, + { + "apiGroups": [ + "metalk8s.scality.com" + ], + "resources": [ + "ingressnginxes" + ], + "verbs": [ + "create", + "delete", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + { + "apiGroups": [ + "metalk8s.scality.com" + ], + "resources": [ + "ingressnginxes/status" + ], + "verbs": [ + "get" + ] + }, + { + "apiGroups": [ + "metalk8s.scality.com" + ], + "resources": [ + "ingressnginxes" + ], + "verbs": [ + "get", + "list", + "watch" + ] + }, + { + "apiGroups": [ + "metalk8s.scality.com" + ], + "resources": [ + "ingressnginxes/status" + ], + "verbs": [ + "get" + ] + }, + { + "nonResourceURLs": [ + "/metrics" + ], + "verbs": [ + "get" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "namespaces" + ], + "verbs": [ + "get" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "secrets" + ], + "verbs": [ + "*" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "events" + ], + "verbs": [ + "create" + ] + }, + { + "apiGroups": [ + "metalk8s.scality.com" + ], + "resources": [ + "ingressnginxes", + "ingressnginxes/status", + "ingressnginxes/finalizers" + ], + "verbs": [ + "create", + "delete", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "pods", + "services", + "services/finalizers", + "endpoints", + "persistentvolumeclaims", + "events", + "configmaps", + "secrets" + ], + "verbs": [ + "create", + "delete", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + { + "apiGroups": [ + "apps" + ], + "resources": [ + "deployments", + "daemonsets", + "replicasets", + "statefulsets" + ], + "verbs": [ + "create", + "delete", + "get", + "list", + "patch", + "update", + "watch" + ] + }, + { + "apiGroups": [ + "authentication.k8s.io" + ], + "resources": [ + "tokenreviews" + ], + "verbs": [ + "create" + ] + }, + { + "apiGroups": [ + "authorization.k8s.io" + ], + "resources": [ + "subjectaccessreviews" + ], + "verbs": [ + "create" + ] + 
}, + { + "apiGroups": [ + "monitoring.coreos.com" + ], + "resources": [ + "servicemonitors" + ], + "verbs": [ + "*" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "resources": [ + "ingressclasses" + ], + "verbs": [ + "*" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "serviceaccounts" + ], + "verbs": [ + "*" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "resources": [ + "roles", + "rolebindings", + "clusterroles", + "clusterrolebindings" + ], + "verbs": [ + "*" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "resources": [ + "clusterrolebindings" + ], + "verbs": [ + "create", + "list", + "watch" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "resources": [ + "clusterrolebindings" + ], + "verbs": [ + "get", + "update", + "patch", + "delete" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "configmaps" + ], + "verbs": [ + "get", + "list", + "watch", + "create", + "update", + "patch", + "delete" + ] + }, + { + "apiGroups": [ + "coordination.k8s.io" + ], + "resources": [ + "leases" + ], + "verbs": [ + "get", + "list", + "watch", + "create", + "update", + "patch", + "delete" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "events" + ], + "verbs": [ + "create", + "patch" + ] + } +] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: nginx-operator-installer-cluster-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-operator-installer-cluster-role +subjects: + - kind: ServiceAccount + name: nginx-operator-installer + namespace: nginx-operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: nginx-operator-installer-installer-role + namespace: nginx-operator +rules: [ + { + "apiGroups": [ + "apps" + ], + "resources": [ + "deployments" + ], + "verbs": [ + "create", + "list", + "watch" + ] + }, + { + "apiGroups": [ + "apps" + ], + "resources": [ + "deployments" 
+ ], + "verbs": [ + "get", + "update", + "patch", + "delete" + ], + "resourceNames": [ + "nginx-operator-controller-manager" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "serviceaccounts" + ], + "verbs": [ + "create", + "list", + "watch" + ] + }, + { + "apiGroups": [ + "" + ], + "resources": [ + "serviceaccounts" + ], + "verbs": [ + "get", + "update", + "patch", + "delete" + ], + "resourceNames": [ + "nginx-operator-controller-manager" + ] + } +] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: nginx-operator-installer-installer-role-binding + namespace: nginx-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-operator-installer-installer-role +subjects: + - kind: ServiceAccount + name: nginx-operator-installer + namespace: nginx-operator + diff --git a/salt/metalk8s/addons/olm/catalog/deployed/cluster-catalog.sls b/salt/metalk8s/addons/olm/catalog/deployed/cluster-catalog.sls new file mode 100644 index 0000000000..87d1d42680 --- /dev/null +++ b/salt/metalk8s/addons/olm/catalog/deployed/cluster-catalog.sls @@ -0,0 +1,13 @@ +#!jinja | metalk8s_kubernetes +{%- from "metalk8s/repo/macro.sls" import build_image_name with context %} + +--- +apiVersion: olm.operatorframework.io/v1 +kind: ClusterCatalog +metadata: + name: metalk8s-catalog-source +spec: + source: + type: Image + image: + ref: {{ build_image_name("metalk8s-catalog-source") }} diff --git a/salt/metalk8s/addons/olm/catalog/deployed/init.sls b/salt/metalk8s/addons/olm/catalog/deployed/init.sls new file mode 100644 index 0000000000..5af66c2e72 --- /dev/null +++ b/salt/metalk8s/addons/olm/catalog/deployed/init.sls @@ -0,0 +1,14 @@ +include: + - metalk8s.addons.olm.deployed + - .cluster-catalog + +Wait for Cluster Catalog to be Serving: + test.configurable_test_state: + - changes: False + - result: __slot__:salt:metalk8s_olm.check_clustercatalog_serving("metalk8s-catalog-source") + - comment: Wait for ClusterCatalog to be Ready + - 
retry: + attempts: 30 + - require: + - test: Wait for the Operator Controller Controller Manager Deployment to be Ready + - sls: metalk8s.addons.olm.catalog.deployed.cluster-catalog diff --git a/salt/metalk8s/addons/olm/deployed/chart.sls b/salt/metalk8s/addons/olm/deployed/chart.sls new file mode 100644 index 0000000000..9f0a3b9a6a --- /dev/null +++ b/salt/metalk8s/addons/olm/deployed/chart.sls @@ -0,0 +1,1971 @@ +#!jinja | metalk8s_kubernetes + +{%- from "metalk8s/map.jinja" import repo with context %} +{%- from "metalk8s/repo/macro.sls" import build_image_name with context %} + +{% raw %} +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + pod-security.kubernetes.io/enforce: baseline + pod-security.kubernetes.io/enforce-version: latest + name: olmv1-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: clusterextensions.olm.operatorframework.io +spec: + group: olm.operatorframework.io + names: + kind: ClusterExtension + listKind: ClusterExtensionList + plural: clusterextensions + singular: clusterextension + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.install.bundle.name + name: Installed Bundle + type: string + - jsonPath: .status.install.bundle.version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=='Installed')].status + name: Installed + type: string + - jsonPath: .status.conditions[?(@.type=='Progressing')].status + name: Progressing + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + 
openAPIV3Schema: + description: ClusterExtension is the Schema for the clusterextensions API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec is an optional field that defines the desired state + of the ClusterExtension. + properties: + install: + description: |- + install is an optional field used to configure the installation options + for the ClusterExtension such as the pre-flight check configuration. + properties: + preflight: + description: |- + preflight is an optional field that can be used to configure the checks that are + run before installation or upgrade of the content for the package specified in the packageName field. + When specified, it replaces the default preflight configuration for install/upgrade actions. + When not specified, the default configuration will be used. + properties: + crdUpgradeSafety: + description: |- + crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight + checks that run prior to upgrades of installed content. + The CRD Upgrade Safety pre-flight check safeguards from unintended + consequences of upgrading a CRD, such as data loss. + properties: + enforcement: + description: |- + enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. 
+ Allowed values are "None" or "Strict". The default value is "Strict". + When set to "None", the CRD Upgrade Safety pre-flight check will be skipped + when performing an upgrade operation. This should be used with caution as + unintended consequences such as data loss can occur. + When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when + performing an upgrade operation. + enum: + - None + - Strict + type: string + required: + - enforcement + type: object + required: + - crdUpgradeSafety + type: object + x-kubernetes-validations: + - message: at least one of [crdUpgradeSafety] are required when + preflight is specified + rule: has(self.crdUpgradeSafety) + type: object + x-kubernetes-validations: + - message: at least one of [preflight] are required when install is + specified + rule: has(self.preflight) + namespace: + description: |- + namespace is a reference to a Kubernetes namespace. + This is the namespace in which the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources + for the extension are applied to the cluster. + Some extensions may contain namespace-scoped resources to be applied in other namespaces. + This namespace must exist. + namespace is required, immutable, and follows the DNS label standard + as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), + start and end with an alphanumeric character, and be no longer than 63 characters + [RFC 1123]: https://tools.ietf.org/html/rfc1123 + maxLength: 63 + type: string + x-kubernetes-validations: + - message: namespace is immutable + rule: self == oldSelf + - message: namespace must be a valid DNS1123 label + rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") + serviceAccount: + description: |- + serviceAccount is a reference to a ServiceAccount used to perform all interactions + with the cluster that are required to manage the extension. 
+ The ServiceAccount must be configured with the necessary permissions to perform these interactions. + The ServiceAccount must exist in the namespace referenced in the spec. + serviceAccount is required. + properties: + name: + description: |- + name is a required, immutable reference to the name of the ServiceAccount + to be used for installation and management of the content for the package + specified in the packageName field. + This ServiceAccount must exist in the installNamespace. + name follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, + hyphens (-) or periods (.), start and end with an alphanumeric character, + and be no longer than 253 characters. + Some examples of valid values are: + - some-serviceaccount + - 123-serviceaccount + - 1-serviceaccount-2 + - someserviceaccount + - some.serviceaccount + Some examples of invalid values are: + - -some-serviceaccount + - some-serviceaccount- + [RFC 1123]: https://tools.ietf.org/html/rfc1123 + maxLength: 253 + type: string + x-kubernetes-validations: + - message: name is immutable + rule: self == oldSelf + - message: name must be a valid DNS1123 subdomain. It must contain + only lowercase alphanumeric characters, hyphens (-) or periods + (.), start and end with an alphanumeric character, and be + no longer than 253 characters + rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") + required: + - name + type: object + source: + description: |- + source is a required field which selects the installation source of content + for this ClusterExtension. Selection is performed by setting the sourceType. + Catalog is currently the only implemented sourceType, and setting the + sourcetype to "Catalog" requires the catalog field to also be defined. 
+ Below is a minimal example of a source definition (in yaml): + source: + sourceType: Catalog + catalog: + packageName: example-package + properties: + catalog: + description: |- + catalog is used to configure how information is sourced from a catalog. + This field is required when sourceType is "Catalog", and forbidden otherwise. + properties: + channels: + description: |- + channels is an optional reference to a set of channels belonging to + the package specified in the packageName field. + A "channel" is a package-author-defined stream of updates for an extension. + Each channel in the list must follow the DNS subdomain standard + as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, + hyphens (-) or periods (.), start and end with an alphanumeric character, + and be no longer than 253 characters. No more than 256 channels can be specified. + When specified, it is used to constrain the set of installable bundles and + the automated upgrade path. This constraint is an AND operation with the + version field. For example: + - Given channel is set to "foo" + - Given version is set to ">=1.0.0, <1.5.0" + - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable + - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. 
+ Some examples of valid values are: + - 1.1.x + - alpha + - stable + - stable-v1 + - v1-stable + - dev-preview + - preview + - community + Some examples of invalid values are: + - -some-channel + - some-channel- + - thisisareallylongchannelnamethatisgreaterthanthemaximumlength + - original_40 + - --default-channel + [RFC 1123]: https://tools.ietf.org/html/rfc1123 + items: + maxLength: 253 + type: string + x-kubernetes-validations: + - message: channels entries must be valid DNS1123 subdomains + rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") + maxItems: 256 + type: array + packageName: + description: |- + packageName is a reference to the name of the package to be installed + and is used to filter the content from catalogs. + packageName is required, immutable, and follows the DNS subdomain standard + as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, + hyphens (-) or periods (.), start and end with an alphanumeric character, + and be no longer than 253 characters. + Some examples of valid values are: + - some-package + - 123-package + - 1-package-2 + - somepackage + Some examples of invalid values are: + - -some-package + - some-package- + - thisisareallylongpackagenamethatisgreaterthanthemaximumlength + - some.package + [RFC 1123]: https://tools.ietf.org/html/rfc1123 + maxLength: 253 + type: string + x-kubernetes-validations: + - message: packageName is immutable + rule: self == oldSelf + - message: packageName must be a valid DNS1123 subdomain. + It must contain only lowercase alphanumeric characters, + hyphens (-) or periods (.), start and end with an alphanumeric + character, and be no longer than 253 characters + rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") + selector: + description: |- + selector is an optional field that can be used + to filter the set of ClusterCatalogs used in the bundle + selection process. 
+ When unspecified, all ClusterCatalogs will be used in + the bundle selection process. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + upgradeConstraintPolicy: + default: CatalogProvided + description: |- + upgradeConstraintPolicy is an optional field that controls whether + the upgrade path(s) defined in the catalog are enforced for the package + referenced in the packageName field. + Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + When this field is set to "CatalogProvided", automatic upgrades will only occur + when upgrade constraints specified by the package author are met. 
+ When this field is set to "SelfCertified", the upgrade constraints specified by + the package author are ignored. This allows for upgrades and downgrades to + any version of the package. This is considered a dangerous operation as it + can lead to unknown and potentially disastrous outcomes, such as data + loss. It is assumed that users have independently verified changes when + using this option. + When this field is omitted, the default value is "CatalogProvided". + enum: + - CatalogProvided + - SelfCertified + type: string + version: + description: |- + version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + Acceptable version ranges are no longer than 64 characters. + Version ranges are composed of comma- or space-delimited values and one or + more comparison operators, known as comparison strings. Additional + comparison strings can be added using the OR operator (||). + # Range Comparisons + To specify a version range, you can use a comparison string like ">=3.0, + <3.6". When specifying a range, automatic updates will occur within that + range. The example comparison string means "install any version greater than + or equal to 3.0.0 but less than 3.6.0.". It also states intent that if any + upgrades are available within the version range after initial installation, + those upgrades should be automatically performed. + # Pinned Versions + To specify an exact version to install you can use a version range that + "pins" to a specific version. When pinning to a specific version, no + automatic updates will occur. An example of a pinned version range is + "0.6.0", which means "only install version 0.6.0 and never + upgrade from this version". 
+ # Basic Comparison Operators + The basic comparison operators and their meanings are: + - "=", equal (not aliased to an operator) + - "!=", not equal + - "<", less than + - ">", greater than + - ">=", greater than OR equal to + - "<=", less than OR equal to + # Wildcard Comparisons + You can use the "x", "X", and "*" characters as wildcard characters in all + comparison operations. Some examples of using the wildcard characters: + - "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0" + - ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0" + - "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3" + - "x", "X", and "*" is equivalent to ">= 0.0.0" + # Patch Release Comparisons + When you want to specify a minor version up to the next major version you + can use the "~" character to perform patch comparisons. Some examples: + - "~1.2.3" is equivalent to ">=1.2.3, <1.3.0" + - "~1" and "~1.x" is equivalent to ">=1, <2" + - "~2.3" is equivalent to ">=2.3, <2.4" + - "~1.2.x" is equivalent to ">=1.2.0, <1.3.0" + # Major Release Comparisons + You can use the "^" character to make major release comparisons after a + stable 1.0.0 version is published. If there is no stable version published, minor versions define the stability level. Some examples: + - "^1.2.3" is equivalent to ">=1.2.3, <2.0.0" + - "^1.2.x" is equivalent to ">=1.2.0, <2.0.0" + - "^2.3" is equivalent to ">=2.3, <3" + - "^2.x" is equivalent to ">=2.0.0, <3" + - "^0.2.3" is equivalent to ">=0.2.3, <0.3.0" + - "^0.2" is equivalent to ">=0.2.0, <0.3.0" + - "^0.0.3" is equivalent to ">=0.0.3, <0.0.4" + - "^0.0" is equivalent to ">=0.0.0, <0.1.0" + - "^0" is equivalent to ">=0.0.0, <1.0.0" + # OR Comparisons + You can use the "||" character to represent an OR operation in the version + range. 
Some examples: + - ">=1.2.3, <2.0.0 || >3.0.0" + - "^0 || ^3 || ^5" + For more information on semver, please see https://semver.org/ + maxLength: 64 + type: string + x-kubernetes-validations: + - message: invalid version expression + rule: self.matches("^(\\s*(=||!=|>|<|>=|=>|<=|=<|~|~>|\\^)\\s*(v?(0|[1-9]\\d*|[x|X|\\*])(\\.(0|[1-9]\\d*|x|X|\\*]))?(\\.(0|[1-9]\\d*|x|X|\\*))?(-([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?(\\+([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?)\\s*)((?:\\s+|,\\s*|\\s*\\|\\|\\s*)(=||!=|>|<|>=|=>|<=|=<|~|~>|\\^)\\s*(v?(0|[1-9]\\d*|x|X|\\*])(\\.(0|[1-9]\\d*|x|X|\\*))?(\\.(0|[1-9]\\d*|x|X|\\*]))?(-([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?(\\+([0-9A-Za-z\\-]+(\\.[0-9A-Za-z\\-]+)*))?)\\s*)*$") + required: + - packageName + type: object + sourceType: + description: |- + sourceType is a required reference to the type of install source. + Allowed values are "Catalog" + When this field is set to "Catalog", information for determining the + appropriate bundle of content to install will be fetched from + ClusterCatalog resources existing on the cluster. + When using the Catalog sourceType, the catalog field must also be set. + enum: + - Catalog + type: string + required: + - sourceType + type: object + x-kubernetes-validations: + - message: catalog is required when sourceType is Catalog, and forbidden + otherwise + rule: 'has(self.sourceType) && self.sourceType == ''Catalog'' ? + has(self.catalog) : !has(self.catalog)' + required: + - namespace + - serviceAccount + - source + type: object + status: + description: status is an optional field that defines the observed state + of the ClusterExtension. + properties: + conditions: + description: |- + The set of condition types which apply to all spec.source variations are Installed and Progressing. + The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. + When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. 
+ When Installed is False and the Reason is Failed, the bundle has failed to install. + The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. + When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. + When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. + When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle. + BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + PackageDeprecated is set if the requested package is marked deprecated in the catalog. + Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + install: + description: install is a representation of the current installation + status for this ClusterExtension. + properties: + bundle: + description: |- + bundle is a required field which represents the identifying attributes of a bundle. + A "bundle" is a versioned set of content that represents the resources that + need to be applied to a cluster to install a package. + properties: + name: + description: |- + name is required and follows the DNS subdomain standard + as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, + hyphens (-) or periods (.), start and end with an alphanumeric character, + and be no longer than 253 characters. 
+ type: string + x-kubernetes-validations: + - message: packageName must be a valid DNS1123 subdomain. + It must contain only lowercase alphanumeric characters, + hyphens (-) or periods (.), start and end with an alphanumeric + character, and be no longer than 253 characters + rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") + version: + description: |- + version is a required field and is a reference to the version that this bundle represents + version follows the semantic versioning standard as defined in https://semver.org/. + type: string + x-kubernetes-validations: + - message: version must be well-formed semver + rule: self.matches("^([0-9]+)(\\.[0-9]+)?(\\.[0-9]+)?(-([-0-9A-Za-z]+(\\.[-0-9A-Za-z]+)*))?(\\+([-0-9A-Za-z]+(-\\.[-0-9A-Za-z]+)*))?") + required: + - name + - version + type: object + required: + - bundle + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-controller-manager + namespace: olmv1-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-leader-election-role + namespace: olmv1-system +rules: +- apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-manager-role + namespace: olmv1-system +rules: +- apiGroups: + - '' + resources: + - secrets + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-clusterextension-editor-role +rules: +- apiGroups: + - olm.operatorframework.io + resources: + - clusterextensions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-clusterextension-viewer-role +rules: +- apiGroups: + - olm.operatorframework.io + resources: + - clusterextensions + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-extension-editor-role +rules: +- apiGroups: + - olm.operatorframework.io + resources: + - extensions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + 
app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-extension-viewer-role +rules: +- apiGroups: + - olm.operatorframework.io + resources: + - extensions + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-manager-role +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get +- apiGroups: + - '' + resources: + - serviceaccounts/token + verbs: + - create +- apiGroups: + - olm.operatorframework.io + resources: + - clustercatalogs + verbs: + - get + - list + - watch +- apiGroups: + - olm.operatorframework.io + resources: + - clusterextensions + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - olm.operatorframework.io + resources: + - clusterextensions/finalizers + verbs: + - update +- apiGroups: + - olm.operatorframework.io + resources: + - clusterextensions/status + verbs: + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + 
resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-leader-election-rolebinding + namespace: olmv1-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: operator-controller-leader-election-role +subjects: +- kind: ServiceAccount + name: operator-controller-controller-manager + namespace: olmv1-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-manager-rolebinding + namespace: olmv1-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: operator-controller-manager-role +subjects: +- kind: ServiceAccount + name: operator-controller-controller-manager + namespace: olmv1-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-controller-manager-role +subjects: +- kind: ServiceAccount + name: operator-controller-controller-manager + namespace: olmv1-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: operator-controller-proxy-rolebinding +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: operator-controller-proxy-role +subjects: +- kind: ServiceAccount + name: operator-controller-controller-manager + namespace: olmv1-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + control-plane: operator-controller-controller-manager + heritage: metalk8s + name: operator-controller-service + namespace: olmv1-system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: operator-controller-controller-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kubectl.kubernetes.io/default-logs-container: manager + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + control-plane: operator-controller-controller-manager + heritage: metalk8s + name: operator-controller-controller-manager + namespace: olmv1-system +spec: + replicas: 1 + selector: + matchLabels: + control-plane: operator-controller-controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: operator-controller-controller-manager + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8443 + - --leader-elect + - --ca-certs-dir=/var/certs + - --tls-cert=/var/certs/tls.cert + - --tls-key=/var/certs/tls.key + command: + - /manager + image: {% endraw -%}{{ build_image_name("operator-controller", False) }}{%- raw %}:v1.1.0 + 
imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + requests: + cpu: 10m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/cache + name: cache + - mountPath: /var/certs/ + name: olmv1-certificate + readOnly: true + - mountPath: /etc/containers/ + name: registries-conf + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/infra: '' + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: operator-controller-controller-manager + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/bootstrap + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists + volumes: + - emptyDir: {} + name: cache + - name: olmv1-certificate + secret: + items: + - key: ca.crt + path: olm-ca.crt + - key: tls.crt + path: tls.cert + - key: tls.key + path: tls.key + optional: false + secretName: olmv1-cert + - configMap: + name: registries-conf + name: registries-conf +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: olmv1-ca + namespace: metalk8s-certs +spec: + commonName: olmv1-ca + isCA: true + issuerRef: + group: cert-manager.io + kind: Issuer + name: self-sign-issuer + privateKey: + algorithm: ECDSA + size: 256 + secretName: olmv1-ca + secretTemplate: + annotations: + cert-manager.io/allow-direct-injection: 'true' +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + 
app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: olmv1-cert + namespace: olmv1-system +spec: + dnsNames: + - operator-controller-service.olmv1-system.svc + - operator-controller-service.olmv1-system.svc.cluster.local + issuerRef: + group: cert-manager.io + kind: ClusterIssuer + name: olmv1-ca + privateKey: + algorithm: ECDSA + size: 256 + secretName: olmv1-cert +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: olmv1-ca +spec: + ca: + secretName: olmv1-ca +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: self-sign-issuer + namespace: metalk8s-certs +spec: + selfSigned: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: clustercatalogs.olm.operatorframework.io +spec: + group: olm.operatorframework.io + names: + kind: ClusterCatalog + listKind: ClusterCatalogList + plural: clustercatalogs + singular: clustercatalog + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.lastUnpacked + name: LastUnpacked + type: date + - jsonPath: .status.conditions[?(@.type=="Serving")].status + name: Serving + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + ClusterCatalog 
enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + spec is the desired state of the ClusterCatalog. + spec is required. + The controller will work to ensure that the desired + catalog is unpacked and served over the catalog content HTTP server. + properties: + availabilityMode: + default: Available + description: |- + availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. + availabilityMode is optional. + Allowed values are "Available" and "Unavailable" and omitted. + When omitted, the default value is "Available". + When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. + Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog + and its contents as usable. + When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. + When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. 
+ Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want + to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + enum: + - Unavailable + - Available + type: string + priority: + default: 0 + description: |- + priority allows the user to define a priority for a ClusterCatalog. + priority is optional. + A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. + A higher number means higher priority. + It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + When omitted, the default priority is 0 because that is the zero value of integers. + Negative numbers can be used to specify a priority lower than the default. + Positive numbers can be used to specify a priority higher than the default. + The lowest possible value is -2147483648. + The highest possible value is 2147483647. + format: int32 + type: integer + source: + description: |- + source allows a user to define the source of a catalog. + A "catalog" contains information on content that can be installed on a cluster. + Providing a catalog source makes the contents of the catalog discoverable and usable by + other on-cluster components. + These on-cluster components may do a variety of things with this information, such as + presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. + For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. + source is a required field. 
+ Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: + source: + type: Image + image: + ref: quay.io/operatorhubio/catalog:latest + properties: + image: + description: |- + image is used to configure how catalog contents are sourced from an OCI image. + This field is required when type is Image, and forbidden otherwise. + properties: + pollIntervalMinutes: + description: |- + pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. + pollIntervalMinutes is optional. + pollIntervalMinutes can not be specified when ref is a digest-based reference. + When omitted, the image will not be polled for new content. + minimum: 1 + type: integer + ref: + description: |- + ref allows users to define the reference to a container image containing Catalog contents. + ref is required. + ref can not be more than 1000 characters. + A reference can be broken down into 3 parts - the domain, name, and identifier. + The domain is typically the registry where an image is located. + It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. + Hyphenation is allowed, but the domain must start and end with alphanumeric characters. + Specifying a port to use is also allowed by adding the ":" character followed by numeric values. + The port must be the last value in the domain. + Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080". + The name is typically the repository in the registry where an image is located. + It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters. + Multiple names can be concatenated with the "/" character. + The domain and name are combined using the "/" character. + Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod". 
+ An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog". + The identifier is typically the tag or digest for an image reference and is present at the end of the reference. + It starts with a separator character used to distinguish the end of the name and beginning of the identifier. + For a digest-based reference, the "@" character is the separator. + For a tag-based reference, the ":" character is the separator. + An identifier is required in the reference. + Digest-based references must contain an algorithm reference immediately after the "@" separator. + The algorithm reference must be followed by the ":" character and an encoded string. + The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters. + Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58". + The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters. + Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters. + The tag must not be longer than 127 characters. + An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05" + An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest" + maxLength: 1000 + type: string + x-kubernetes-validations: + - message: must start with a valid domain. valid domains must + be alphanumeric characters (lowercase and uppercase) separated + by the "." character. + rule: self.matches('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])((\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(:[0-9]+)?\\b') + - message: a valid name is required. 
valid names must contain + lowercase alphanumeric characters separated only by the + ".", "_", "__", "-" characters. + rule: self.find('(\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?((\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?)+)?)') + != "" + - message: must end with a digest or a tag + rule: self.find('(@.*:)') != "" || self.find(':.*$') != + "" + - message: tag is invalid. the tag must not be more than 127 + characters + rule: 'self.find(''(@.*:)'') == "" ? (self.find('':.*$'') + != "" ? self.find('':.*$'').substring(1).size() <= 127 + : true) : true' + - message: tag is invalid. valid tags must begin with a word + character (alphanumeric + "_") followed by word characters + or ".", and "-" characters + rule: 'self.find(''(@.*:)'') == "" ? (self.find('':.*$'') + != "" ? self.find('':.*$'').matches('':[\\w][\\w.-]*$'') + : true) : true' + - message: digest algorithm is not valid. valid algorithms + must start with an uppercase or lowercase alpha character + followed by alphanumeric characters and may contain the + "-", "_", "+", and "." characters. + rule: 'self.find(''(@.*:)'') != "" ? self.find(''(@.*:)'').matches(''(@[A-Za-z][A-Za-z0-9]*([-_+.][A-Za-z][A-Za-z0-9]*)*[:])'') + : true' + - message: digest is not valid. the encoded string must be + at least 32 characters + rule: 'self.find(''(@.*:)'') != "" ? self.find('':.*$'').substring(1).size() + >= 32 : true' + - message: digest is not valid. the encoded string must only + contain hex characters (A-F, a-f, 0-9) + rule: 'self.find(''(@.*:)'') != "" ? self.find('':.*$'').matches('':[0-9A-Fa-f]*$'') + : true' + required: + - ref + type: object + x-kubernetes-validations: + - message: cannot specify pollIntervalMinutes while using digest-based + image + rule: 'self.ref.find(''(@.*:)'') != "" ? !has(self.pollIntervalMinutes) + : true' + type: + description: |- + type is a reference to the type of source the catalog is sourced from. + type is required. + The only allowed value is "Image". 
+ When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When using an image source, the image field must be set and must be the only field defined for this type. + enum: + - Image + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: image is required when source type is Image, and forbidden + otherwise + rule: 'has(self.type) && self.type == ''Image'' ? has(self.image) + : !has(self.image)' + required: + - source + type: object + status: + description: |- + status contains information about the state of the ClusterCatalog such as: + - Whether or not the catalog contents are being served via the catalog content HTTP server + - Whether or not the ClusterCatalog is progressing to a new state + - A reference to the source from which the catalog contents were retrieved + properties: + conditions: + description: |- + conditions is a representation of the current state for this ClusterCatalog. + The current condition types are Serving and Progressing. + The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. + When it has a status of True and a reason of Available, the contents of the catalog are being served. + When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. + When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. + When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. 
+ When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched + catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog + contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes + to the contents we identify that there are updates to the contents. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - 'True' + - 'False' + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + lastUnpacked: + description: |- + lastUnpacked represents the last time the contents of the + catalog were extracted from their source format. As an example, + when using an Image source, the OCI image will be pulled and the + image layers written to a file-system backed cache. We refer to the + act of this extraction from the source format as "unpacking". + format: date-time + type: string + resolvedSource: + description: resolvedSource contains information about the resolved + source based on the source type. + properties: + image: + description: |- + image is a field containing resolution information for a catalog sourced from an image. + This field must be set when type is Image, and forbidden otherwise. + properties: + ref: + description: |- + ref contains the resolved image digest-based reference. + The digest format is used so users can use other tooling to fetch the exact + OCI manifests that were used to extract the catalog contents. + maxLength: 1000 + type: string + x-kubernetes-validations: + - message: must start with a valid domain. 
valid domains must + be alphanumeric characters (lowercase and uppercase) separated + by the "." character. + rule: self.matches('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])((\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(:[0-9]+)?\\b') + - message: a valid name is required. valid names must contain + lowercase alphanumeric characters separated only by the + ".", "_", "__", "-" characters. + rule: self.find('(\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?((\\/[a-z0-9]+((([._]|__|[-]*)[a-z0-9]+)+)?)+)?)') + != "" + - message: must end with a digest + rule: self.find('(@.*:)') != "" + - message: digest algorithm is not valid. valid algorithms + must start with an uppercase or lowercase alpha character + followed by alphanumeric characters and may contain the + "-", "_", "+", and "." characters. + rule: 'self.find(''(@.*:)'') != "" ? self.find(''(@.*:)'').matches(''(@[A-Za-z][A-Za-z0-9]*([-_+.][A-Za-z][A-Za-z0-9]*)*[:])'') + : true' + - message: digest is not valid. the encoded string must be + at least 32 characters + rule: 'self.find(''(@.*:)'') != "" ? self.find('':.*$'').substring(1).size() + >= 32 : true' + - message: digest is not valid. the encoded string must only + contain hex characters (A-F, a-f, 0-9) + rule: 'self.find(''(@.*:)'') != "" ? self.find('':.*$'').matches('':[0-9A-Fa-f]*$'') + : true' + required: + - ref + type: object + type: + description: |- + type is a reference to the type of source the catalog is sourced from. + type is required. + The only allowed value is "Image". + When set to "Image", information about the resolved image source will be set in the 'image' field. + enum: + - Image + type: string + required: + - image + - type + type: object + x-kubernetes-validations: + - message: image is required when source type is Image, and forbidden + otherwise + rule: 'has(self.type) && self.type == ''Image'' ? has(self.image) + : !has(self.image)' + urls: + description: urls contains the URLs that can be used to access the + catalog. 
+ properties: + base: + description: |- + base is a cluster-internal URL that provides endpoints for + accessing the content of the catalog. + It is expected that clients append the path for the endpoint they wish + to access. + Currently, only a single endpoint is served and is accessible at the path + /api/v1. + The endpoints served for the v1 API are: + - /all - this endpoint returns the entirety of the catalog contents in the FBC format + As the needs of users and clients of the evolve, new endpoints may be added. + maxLength: 525 + type: string + x-kubernetes-validations: + - message: must be a valid URL + rule: isURL(self) + - message: scheme must be either http or https + rule: 'isURL(self) ? (url(self).getScheme() == "http" || url(self).getScheme() + == "https") : true' + required: + - base + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-controller-manager + namespace: olmv1-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-leader-election-role + namespace: olmv1-system +rules: +- apiGroups: + - '' + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-manager-role +rules: +- apiGroups: + - olm.operatorframework.io + resources: + - clustercatalogs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - olm.operatorframework.io + resources: + - clustercatalogs/finalizers + verbs: + - update +- apiGroups: + - olm.operatorframework.io + resources: + - clustercatalogs/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-leader-election-rolebinding + namespace: olmv1-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: 
catalogd-leader-election-role +subjects: +- kind: ServiceAccount + name: catalogd-controller-manager + namespace: olmv1-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: catalogd-manager-role +subjects: +- kind: ServiceAccount + name: catalogd-controller-manager + namespace: olmv1-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: catalogd-proxy-role +subjects: +- kind: ServiceAccount + name: catalogd-controller-manager + namespace: olmv1-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/name: catalogd + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-service + namespace: olmv1-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: 8443 + - name: webhook + port: 9443 + protocol: TCP + targetPort: 9443 + - name: metrics + port: 7443 + protocol: TCP + targetPort: 7443 + selector: + control-plane: catalogd-controller-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + kubectl.kubernetes.io/default-logs-container: manager + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s 
+ app.kubernetes.io/version: v1.1.0 + control-plane: catalogd-controller-manager + heritage: metalk8s + name: catalogd-controller-manager + namespace: olmv1-system +spec: + minReadySeconds: 5 + replicas: 1 + selector: + matchLabels: + control-plane: catalogd-controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: catalogd-controller-manager + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - --leader-elect + - --metrics-bind-address=:7443 + - --external-address=catalogd-service.olmv1-system.svc + - --tls-cert=/var/certs/tls.crt + - --tls-key=/var/certs/tls.key + - --ca-certs-dir=/var/ca-certs + command: + - ./manager + image: {% endraw -%}{{ build_image_name("catalogd", False) }}{%- raw %}:v1.1.0 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/cache/ + name: cache + - mountPath: /var/certs + name: catalogserver-certs + - mountPath: /var/ca-certs/ + name: olmv1-certificate + readOnly: true + - mountPath: /etc/containers/ + name: registries-conf + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/infra: '' + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: catalogd-controller-manager + terminationGracePeriodSeconds: 10 + tolerations: + - 
effect: NoSchedule + key: node-role.kubernetes.io/bootstrap + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists + volumes: + - emptyDir: {} + name: cache + - name: catalogserver-certs + secret: + secretName: catalogd-service-cert-v1.1.0 + - name: olmv1-certificate + secret: + items: + - key: ca.crt + path: olm-ca.crt + optional: false + secretName: catalogd-service-cert-v1.1.0 + - configMap: + name: registries-conf + name: registries-conf +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-service-cert + namespace: olmv1-system +spec: + dnsNames: + - localhost + - catalogd-service.olmv1-system.svc + - catalogd-service.olmv1-system.svc.cluster.local + issuerRef: + group: cert-manager.io + kind: ClusterIssuer + name: olmv1-ca + privateKey: + algorithm: ECDSA + size: 256 + secretName: catalogd-service-cert-v1.1.0 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + annotations: + cert-manager.io/inject-ca-from-secret: metalk8s-certs/olmv1-ca + labels: + app.kubernetes.io/instance: olm + app.kubernetes.io/managed-by: salt + app.kubernetes.io/part-of: metalk8s + app.kubernetes.io/version: v1.1.0 + heritage: metalk8s + name: catalogd-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: catalogd-service + namespace: olmv1-system + path: /mutate-olm-operatorframework-io-v1-clustercatalog + port: 9443 + failurePolicy: Fail + matchConditions: + - expression: '''name'' in object.metadata && (!has(object.metadata.labels) || !(''olm.operatorframework.io/metadata.name'' + in object.metadata.labels) || object.metadata.labels[''olm.operatorframework.io/metadata.name''] + != object.metadata.name)' + name: 
MissingOrIncorrectMetadataNameLabel + name: inject-metadata-name.olm.operatorframework.io + rules: + - apiGroups: + - olm.operatorframework.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - clustercatalogs + sideEffects: None + timeoutSeconds: 10 +--- +apiVersion: v1 +data: + registries.conf: |- + [[registry]] + prefix = "{% endraw -%}{{ repo.registry_endpoint }}{%- raw %}" + insecure = true + location = "{% endraw -%}{{ repo.registry_endpoint }}{%- raw %}:80" + [[registry]] + prefix = "registry.metalk8s.lan" + insecure = true + location = "{% endraw -%}{{ repo.registry_endpoint }}{%- raw %}:80" +kind: ConfigMap +metadata: + name: registries-conf + namespace: olmv1-system + +{% endraw %} diff --git a/salt/metalk8s/addons/olm/deployed/init.sls b/salt/metalk8s/addons/olm/deployed/init.sls new file mode 100644 index 0000000000..01ba011ee4 --- /dev/null +++ b/salt/metalk8s/addons/olm/deployed/init.sls @@ -0,0 +1,31 @@ +include: + - metalk8s.addons.cert-manager.deployed + - .chart + +Wait for the Catalogd Controller Manager deployment to be Ready: + test.configurable_test_state: + - changes: False + - result: __slot__:salt:metalk8s_kubernetes.check_object_ready( + apiVersion=apps/v1, kind=Deployment, + name=catalogd-controller-manager, namespace=olmv1-system) + - comment: Wait for the Catalog Operator to be Ready + - retry: + attempts: 30 + - require: + - test: Wait for cert-manager deployment to be Ready + - test: Wait for cert-manager webhook to be Ready + - test: Wait for cert-manager cainjector to be Ready + - sls: metalk8s.addons.olm.deployed.chart + +Wait for the Operator Controller Controller Manager Deployment to be Ready: + test.configurable_test_state: + - changes: False + - result: __slot__:salt:metalk8s_kubernetes.check_object_ready( + apiVersion=apps/v1, kind=Deployment, + name=operator-controller-controller-manager, namespace=olmv1-system) + - comment: Wait for the Operator Controller to be Ready + - retry: + attempts: 30 + - 
require: + - test: Wait for the Catalogd Controller Manager deployment to be Ready + diff --git a/salt/metalk8s/container-engine/containerd/installed.sls b/salt/metalk8s/container-engine/containerd/installed.sls index 136bb5ca73..3e78f23cf8 100644 --- a/salt/metalk8s/container-engine/containerd/installed.sls +++ b/salt/metalk8s/container-engine/containerd/installed.sls @@ -112,6 +112,9 @@ Configure registry IP in containerd conf: [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ repo.registry_endpoint }}"] endpoint = [{{ registry_eps | join(",") }}] + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.metalk8s.lan"] + endpoint = [{{ registry_eps | join(",") }}] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] runtime_type = "io.containerd.runc.v2" [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] diff --git a/salt/metalk8s/deployed/init.sls b/salt/metalk8s/deployed/init.sls index dec83efe11..93c0433521 100644 --- a/salt/metalk8s/deployed/init.sls +++ b/salt/metalk8s/deployed/init.sls @@ -3,6 +3,9 @@ include: - metalk8s.addons.alert-logger.deployed - metalk8s.addons.prometheus-operator.deployed - metalk8s.addons.cert-manager.deployed + - metalk8s.addons.olm.deployed + - metalk8s.addons.olm.catalog.deployed + - metalk8s.addons.nginx-operator.deployed - metalk8s.addons.nginx-ingress.deployed - metalk8s.addons.nginx-ingress-control-plane.deployed - metalk8s.addons.volumes.deployed diff --git a/salt/metalk8s/kubernetes/kube-proxy/deployed.sls b/salt/metalk8s/kubernetes/kube-proxy/deployed.sls index 2514dba8a7..edfa24328f 100644 --- a/salt/metalk8s/kubernetes/kube-proxy/deployed.sls +++ b/salt/metalk8s/kubernetes/kube-proxy/deployed.sls @@ -92,6 +92,8 @@ Deploy kube-proxy (ConfigMap): options: json: infoBufferSize: "0" + text: + infoBufferSize: "0" verbosity: 0 metricsBindAddress: @HOST_IP@:10249 mode: "" diff --git a/salt/metalk8s/kubernetes/kubelet/standalone.sls b/salt/metalk8s/kubernetes/kubelet/standalone.sls 
index 178829679d..e2dcd15b31 100644 --- a/salt/metalk8s/kubernetes/kubelet/standalone.sls +++ b/salt/metalk8s/kubernetes/kubelet/standalone.sls @@ -84,6 +84,8 @@ Create kubelet config file: options: json: infoBufferSize: "0" + text: + infoBufferSize: "0" verbosity: 0 memorySwap: {} nodeStatusReportFrequency: 0s diff --git a/salt/tests/unit/modules/files/test_metalk8s_olm.yaml b/salt/tests/unit/modules/files/test_metalk8s_olm.yaml new file mode 100644 index 0000000000..81414cd43e --- /dev/null +++ b/salt/tests/unit/modules/files/test_metalk8s_olm.yaml @@ -0,0 +1,48 @@ +clustercatalog_serving: + - name: serving-catalog + manifest: + status: + conditions: + - type: "Serving" + status: "True" + reason: "Serving" + message: "Serving" + expected_result: True + - name: failed-catalog + manifest: + status: + conditions: + - type: "Serving" + status: "False" + reason: "Failed" + message: "Failed" + expected_result: False + - name: unknown-state + manifest: + status: + conditions: [] + expected_result: False +clusterextension_installed: + - name: extension-installed + manifest: + status: + conditions: + - type: "Installed" + status: "True" + reason: "Installed" + message: "Installed" + expected_result: True + - name: failed-install + manifest: + status: + conditions: + - type: "Installed" + status: "False" + reason: "Failed" + message: "Failed" + expected_result: False + - name: unknown-install-state + manifest: + status: + conditions: [] + expected_result: False diff --git a/salt/tests/unit/modules/test_metalk8s_olm.py b/salt/tests/unit/modules/test_metalk8s_olm.py new file mode 100644 index 0000000000..6d9455ced7 --- /dev/null +++ b/salt/tests/unit/modules/test_metalk8s_olm.py @@ -0,0 +1,54 @@ +import os.path +from unittest import TestCase +from unittest.mock import MagicMock, patch +import yaml + +from _modules import metalk8s_olm + +from tests.unit import mixins +from tests.unit import utils + +YAML_TESTS_FILE = os.path.join( + os.path.dirname(os.path.abspath(__file__)), 
"files", "test_metalk8s_olm.yaml" +) + +with open(YAML_TESTS_FILE) as fd: + YAML_TESTS_CASES = yaml.safe_load(fd) + + +class Metalk8sOLMTestCase(TestCase, mixins.LoaderModuleMockMixin): + """Test case for the olm module""" + + loader_module = metalk8s_olm + + def test_virtual(self): + """ + Tests the return of `__virtual__` function + """ + self.assertEqual(metalk8s_olm.__virtual__(), "metalk8s_olm") + + @utils.parameterized_from_cases(YAML_TESTS_CASES["clustercatalog_serving"]) + def test_check_clustercatalog_serving(self, name, manifest, expected_result): + """ + Tests the `check_clustercatalog_serving` function + """ + get_object_mock = MagicMock(return_value=manifest) + with patch.dict( + metalk8s_olm.__salt__, {"metalk8s_kubernetes.get_object": get_object_mock} + ): + self.assertEqual( + metalk8s_olm.check_clustercatalog_serving(name), expected_result + ) + + @utils.parameterized_from_cases(YAML_TESTS_CASES["clusterextension_installed"]) + def test_check_clusterextension_installed(self, name, manifest, expected_result): + """ + Tests the `check_clusterextension_installed` function + """ + get_object_mock = MagicMock(return_value=manifest) + with patch.dict( + metalk8s_olm.__salt__, {"metalk8s_kubernetes.get_object": get_object_mock} + ): + self.assertEqual( + metalk8s_olm.check_clusterextension_installed(name), expected_result + ) diff --git a/tests/post/features/sanity.feature b/tests/post/features/sanity.feature index 0f8bf17226..91393ea13a 100644 --- a/tests/post/features/sanity.feature +++ b/tests/post/features/sanity.feature @@ -45,6 +45,9 @@ Feature: Cluster Sanity Checks | metalk8s-certs | cert-manager | | metalk8s-certs | cert-manager-cainjector | | metalk8s-certs | cert-manager-webhook | + | olmv1-system | catalogd-controller-manager | + | olmv1-system | operator-controller-controller-manager | + | nginx-operator | nginx-operator-controller-manager | Scenario Outline: DaemonSet has desired Pods ready Then the DaemonSet in the namespace has all desired 
Pods ready diff --git a/tox.ini b/tox.ini index 9d0879097d..8c7d6bb1ed 100644 --- a/tox.ini +++ b/tox.ini @@ -143,6 +143,17 @@ deps = commands = {toxinidir}/charts/render.py {posargs} +[testenv:olm-render] +description = + Run the olm render script +allowlist_externals = + {toxinidir}/olm/render.py +deps = + pyyaml + requests +commands = + {toxinidir}/olm/render.py {posargs} + [testenv:tests] description = Run tests suite remotely (uses local Vagrant configuration by default). diff --git a/ui/public/brand/email.html b/ui/public/brand/email.html index 92e7d64126..b95e09cbba 100644 --- a/ui/public/brand/email.html +++ b/ui/public/brand/email.html @@ -612,9 +612,9 @@ {{ if eq .Labels.severity "critical" }} - + (critical) {{ else if eq .Labels.severity "warning" }} - + (warning) {{ else }} {{ .Labels.severity }} {{ end }} {{ .Labels.alertname }}