KubeArchive: install on production #6407

Open · wants to merge 1 commit into base: main
@@ -13,23 +13,40 @@ spec:
           values:
             sourceRoot: components/kubearchive
             environment: staging
-            clusterDir: ""
+            clusterDir: base
       - list:
-          elements: []
+          elements:
+            - nameNormalized: stone-stage-p01
+              values.clusterDir: stone-stage-p01
+            # Private
+            - nameNormalized: kflux-ocp-p01
+              values.clusterDir: kflux-ocp-p01
+            - nameNormalized: stone-prod-p01
+              values.clusterDir: stone-prod-p01
+            - nameNormalized: stone-prod-p02
+              values.clusterDir: stone-prod-p02
+            # Public
+            - nameNormalized: stone-prd-rh01
+              values.clusterDir: stone-prd-rh01
+            - nameNormalized: kflux-prd-rh02
+              values.clusterDir: kflux-prd-rh02
+            # database is not created here yet
+            # - nameNormalized: kflux-prd-rh03
+            #   values.clusterDir: kflux-prd-rh03
   template:
     metadata:
       name: kubearchive-{{nameNormalized}}
     spec:
       project: default
       source:
-        path: '{{values.sourceRoot}}/{{values.environment}}/{{values.clusterDir}}'
+        path: "{{values.sourceRoot}}/{{values.environment}}/{{values.clusterDir}}"
         repoURL: https://github.com/redhat-appstudio/infra-deployments.git
         targetRevision: main
       destination:
         # This is the default namespace if resources or kustomization.yaml do
         # not specify a namespace
         namespace: product-kubearchive
-        server: '{{server}}'
+        server: "{{server}}"
       syncPolicy:
         automated:
           prune: true
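Note for reviewers (illustration, not part of the diff): each list element above becomes one Argo CD Application via the template. A minimal sketch of what the template renders for the first element, assuming the staging defaults visible in this hunk and a hypothetical {{server}} value, since the rest of the generator is collapsed:

    apiVersion: argoproj.io/v1alpha1
    kind: Application
    metadata:
      name: kubearchive-stone-stage-p01      # kubearchive-{{nameNormalized}}
    spec:
      project: default
      source:
        # {{values.sourceRoot}}/{{values.environment}}/{{values.clusterDir}}
        path: components/kubearchive/staging/stone-stage-p01
        repoURL: https://github.com/redhat-appstudio/infra-deployments.git
        targetRevision: main
      destination:
        namespace: product-kubearchive
        server: https://api.stone-stage-p01.example:6443   # hypothetical {{server}}
      syncPolicy:
        automated:
          prune: true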
@@ -6,13 +6,6 @@ metadata:
   name: tempo
   $patch: delete
 ---
-# KubeArchive not yet ready to go to production
-apiVersion: argoproj.io/v1alpha1
-kind: ApplicationSet
-metadata:
-  name: kubearchive
-  $patch: delete
----
 apiVersion: argoproj.io/v1alpha1
 kind: ApplicationSet
 metadata:
@@ -231,3 +231,8 @@ patches:
       kind: ApplicationSet
       version: v1alpha1
       name: etcd-shield
+  - path: production-overlay-patch.yaml
+    target:
+      kind: ApplicationSet
+      version: v1alpha1
+      name: kubearchive
@@ -30,13 +30,6 @@ metadata:
   name: quality-dashboard
   $patch: delete
 ---
-# KubeArchive not yet ready to go to production
-apiVersion: argoproj.io/v1alpha1
-kind: ApplicationSet
-metadata:
-  name: kubearchive
-  $patch: delete
----
 apiVersion: argoproj.io/v1alpha1
 kind: ApplicationSet
 metadata:
@@ -241,3 +241,8 @@ patches:
       kind: ApplicationSet
       version: v1alpha1
       name: etcd-shield
+  - path: production-overlay-patch.yaml
+    target:
+      kind: ApplicationSet
+      version: v1alpha1
+      name: kubearchive
@@ -30,13 +30,6 @@ metadata:
   name: quality-dashboard
   $patch: delete
 ---
-# There is not RDS database provisioned yet for internal staging, starting with external staging only
-apiVersion: argoproj.io/v1alpha1
-kind: ApplicationSet
-metadata:
-  name: kubearchive
-  $patch: delete
----
 apiVersion: argoproj.io/v1alpha1
 kind: ApplicationSet
 metadata:
components/konflux-ui/production/base/proxy/nginx.conf (9 additions, 0 deletions)
@@ -158,6 +158,15 @@ http {
         include /mnt/nginx-generated-config/auth.conf;
     }
 
+    location /api/k8s/plugins/kubearchive/ {
+        auth_request /oauth2/auth;
+
+        rewrite /api/k8s/plugins/kubearchive/(.+) /$1 break;
+        proxy_read_timeout 30m;
+        include /mnt/nginx-generated-config/kubearchive.conf;
+        include /mnt/nginx-generated-config/auth.conf;
+    }
+
     # GET requests to /api/k8s/api/v1/namespaces and /api/k8s/api/v1/namespaces/
     # are handled from the namespace-lister.
     # Requests with other methods are handled by the Kube-API
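Note for reviewers: the rewrite strips the UI proxy prefix before the request reaches the upstream defined in kubearchive.conf, so a hypothetical call such as

    GET /api/k8s/plugins/kubearchive/apis/kubearchive.org/v1/resources

is forwarded to the KubeArchive API server as

    GET /apis/kubearchive.org/v1/resources

(path shown for illustration only). auth_request keeps the same OAuth2 gate the other proxied backends use, and proxy_read_timeout 30m is presumably there to accommodate long-running archive queries.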
components/konflux-ui/production/base/proxy/proxy.yaml (4 additions, 0 deletions)
@@ -80,6 +80,10 @@ spec:
               echo \
                 "proxy_pass ${TEKTON_RESULTS_URL:?tekton results url must be provided};" \
                 > /mnt/nginx-generated-config/tekton-results.conf
 
+              echo \
+                "proxy_pass ${KUBEARCHIVE_URL:?kubearchive url must be provided};" \
+                > /mnt/nginx-generated-config/kubearchive.conf
+
       volumeMounts:
         - name: nginx-generated-config
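Note for reviewers: paired with the KUBEARCHIVE_URL literal added in the overlays below, this writes the one-line upstream config that the nginx location above includes. With the value used in this PR, the generated /mnt/nginx-generated-config/kubearchive.conf would read:

    proxy_pass https://kubearchive-api-server.product-kubearchive.svc.cluster.local:8081;

and the ${VAR:?message} expansion makes the init step fail fast with that message if KUBEARCHIVE_URL is unset.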
@@ -13,6 +13,7 @@ configMapGenerator:
   literals:
     - IMPERSONATE=true
     - TEKTON_RESULTS_URL=https://tekton-results-api-service.tekton-results.svc.cluster.local:8080
+    - KUBEARCHIVE_URL=https://kubearchive-api-server.product-kubearchive.svc.cluster.local:8081
 
 patches:
   - path: add-service-certs-patch.yaml
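Note for reviewers: the same literal is repeated once per cluster overlay in the hunks below. kustomize's configMapGenerator folds these literals into the proxy ConfigMap, roughly as follows (generator name and hash suffix are assumptions, since they sit outside this hunk):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: proxy-config-<hash>   # assumed generated name
    data:
      IMPERSONATE: "true"
      TEKTON_RESULTS_URL: https://tekton-results-api-service.tekton-results.svc.cluster.local:8080
      KUBEARCHIVE_URL: https://kubearchive-api-server.product-kubearchive.svc.cluster.local:8081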
@@ -13,6 +13,7 @@ configMapGenerator:
   literals:
     - IMPERSONATE=true
    - TEKTON_RESULTS_URL=https://tekton-results-api-service.tekton-results.svc.cluster.local:8080
+    - KUBEARCHIVE_URL=https://kubearchive-api-server.product-kubearchive.svc.cluster.local:8081
 
 patches:
   - path: add-service-certs-patch.yaml
@@ -12,6 +12,7 @@ configMapGenerator:
   literals:
     - IMPERSONATE=true
     - TEKTON_RESULTS_URL=https://tekton-results-api-service.tekton-results.svc.cluster.local:8080
+    - KUBEARCHIVE_URL=https://kubearchive-api-server.product-kubearchive.svc.cluster.local:8081
 
 patches:
   - path: add-service-certs-patch.yaml
@@ -13,6 +13,7 @@ configMapGenerator:
   literals:
     - IMPERSONATE=true
     - TEKTON_RESULTS_URL=https://tekton-results-api-service.tekton-results.svc.cluster.local:8080
+    - KUBEARCHIVE_URL=https://kubearchive-api-server.product-kubearchive.svc.cluster.local:8081
 
 patches:
   - path: add-service-certs-patch.yaml
@@ -13,6 +13,7 @@ configMapGenerator:
   literals:
     - IMPERSONATE=true
     - TEKTON_RESULTS_URL=https://tekton-results-api-service.tekton-results.svc.cluster.local:8080
+    - KUBEARCHIVE_URL=https://kubearchive-api-server.product-kubearchive.svc.cluster.local:8081
 
 patches:
   - path: add-service-certs-patch.yaml
@@ -13,6 +13,7 @@ configMapGenerator:
   literals:
     - IMPERSONATE=true
     - TEKTON_RESULTS_URL=https://tekton-results-api-service.tekton-results.svc.cluster.local:8080
+    - KUBEARCHIVE_URL=https://kubearchive-api-server.product-kubearchive.svc.cluster.local:8081
 
 patches:
   - path: add-service-certs-patch.yaml
components/kubearchive/base/kustomization.yaml (6 additions, 171 deletions)
@@ -2,12 +2,12 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
+  - https://github.com/kubearchive/kubearchive/releases/download/v1.0.1/kubearchive.yaml?timeout=90
-  - rbac.yaml
-  - kubearchive-config.yaml
-  - kubearchive-maintainer.yaml
-  - monitoring-otel-collector.yaml
-  - monitoring-servicemonitor.yaml
+  - rbac.yaml
+  - kubearchive-config.yaml
+  - kubearchive-maintainer.yaml
+  - monitoring-otel-collector.yaml
+  - monitoring-servicemonitor.yaml
-  - ../policies
 
 # ROSA does not support namespaces starting with `kube`
 namespace: product-kubearchive
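Note for reviewers: the base now consumes the upstream v1.0.1 release manifest directly from GitHub instead of carrying patched copies (the ?timeout=90 suffix is presumably kustomize's fetch-timeout option for remote resources; an assumption, not stated in this PR). The local resources are re-listed unchanged, ../policies drops out of base, and the namespace field re-homes everything into product-kubearchive because ROSA rejects namespaces starting with kube.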
@@ -17,168 +17,3 @@ configMapGenerator:
     namespace: product-kubearchive
     files:
       - otel-collector-config.yaml
-
-patches:
-  # These patches add an annotation so an OpenShift service
-  # creates the TLS secrets instead of Cert Manager
-  - patch: |-
-      apiVersion: v1
-      kind: Service
-      metadata:
-        name: kubearchive-api-server
-        namespace: kubearchive
-        annotations:
-          service.beta.openshift.io/serving-cert-secret-name: kubearchive-api-server-tls
-  - patch: |-
-      apiVersion: v1
-      kind: Service
-      metadata:
-        name: kubearchive-operator-webhooks
-        namespace: kubearchive
-        annotations:
-          service.beta.openshift.io/serving-cert-secret-name: kubearchive-operator-tls
-  - patch: |-
-      apiVersion: admissionregistration.k8s.io/v1
-      kind: MutatingWebhookConfiguration
-      metadata:
-        name: kubearchive-mutating-webhook-configuration
-        annotations:
-          service.beta.openshift.io/inject-cabundle: "true"
-  - patch: |-
-      apiVersion: admissionregistration.k8s.io/v1
-      kind: ValidatingWebhookConfiguration
-      metadata:
-        name: kubearchive-validating-webhook-configuration
-        annotations:
-          service.beta.openshift.io/inject-cabundle: "true"
-
-  # These patches solve Kube Linter problems
-  # We don't need this CronJob as it is suspended, we can enable it later
-  - patch: |-
-      $patch: delete
-      apiVersion: batch/v1
-      kind: CronJob
-      metadata:
-        name: cluster-vacuum
-        namespace: kubearchive
-
-  - patch: |-
-      apiVersion: apps/v1
-      kind: Deployment
-      metadata:
-        name: kubearchive-api-server
-        namespace: kubearchive
-        annotations:
-          ignore-check.kube-linter.io/readiness-port: >
-            "The port is working properly, just not exposed. Will be fixed in newer versions"
-          ignore-check.kube-linter.io/liveness-port: >
-            "The port is working properly, just not exposed. Will be fixed in newer versions"
-      spec:
-        template:
-          spec:
-            containers:
-              - name: kubearchive-api-server
-                env:
-                  - name: KUBEARCHIVE_OTEL_MODE
-                    value: enabled
-                  - name: OTEL_EXPORTER_OTLP_ENDPOINT
-                    value: http://otel-collector:4318
-                securityContext:
-                  readOnlyRootFilesystem: true
-                  runAsNonRoot: true
-  - patch: |-
-      apiVersion: apps/v1
-      kind: Deployment
-      metadata:
-        name: kubearchive-operator
-        namespace: kubearchive
-        annotations:
-          ignore-check.kube-linter.io/unset-memory-requirements: >
-            "This is temporary to troubleshoot an issue"
-
-      spec:
-        template:
-          spec:
-            containers:
-              - name: manager
-                env:
-                  - name: KUBEARCHIVE_OTEL_MODE
-                    value: enabled
-                  - name: OTEL_EXPORTER_OTLP_ENDPOINT
-                    value: http://otel-collector:4318
-                securityContext:
-                  readOnlyRootFilesystem: true
-                  runAsNonRoot: true
-                ports:
-                  - containerPort: 8081
-                resources:
-                  $patch: replace
-                  limits:
-                    cpu: 500m
-                  requests:
-                    cpu: 10m
-                    memory: 256Mi
-
-  - patch: |-
-      apiVersion: apps/v1
-      kind: Deployment
-      metadata:
-        name: kubearchive-sink
-        namespace: kubearchive
-      spec:
-        template:
-          spec:
-            containers:
-              - name: kubearchive-sink
-                env:
-                  - name: KUBEARCHIVE_OTEL_MODE
-                    value: enabled
-                  - name: OTEL_EXPORTER_OTLP_ENDPOINT
-                    value: http://otel-collector:4318
-                securityContext:
-                  readOnlyRootFilesystem: true
-                  runAsNonRoot: true
-                resources:
-                  limits:
-                    cpu: 200m
-                    memory: 1Gi
-                  requests:
-                    cpu: 200m
-                    memory: 900Mi
-
-  # These patches remove Certificates and Issuer from Cert-Manager
-  - patch: |-
-      $patch: delete
-      apiVersion: cert-manager.io/v1
-      kind: Certificate
-      metadata:
-        name: "kubearchive-api-server-certificate"
-        namespace: kubearchive
-  - patch: |-
-      $patch: delete
-      apiVersion: cert-manager.io/v1
-      kind: Certificate
-      metadata:
-        name: "kubearchive-ca"
-        namespace: kubearchive
-  - patch: |-
-      $patch: delete
-      apiVersion: cert-manager.io/v1
-      kind: Issuer
-      metadata:
-        name: "kubearchive-ca"
-        namespace: kubearchive
-  - patch: |-
-      $patch: delete
-      apiVersion: cert-manager.io/v1
-      kind: Issuer
-      metadata:
-        name: "kubearchive"
-        namespace: kubearchive
-  - patch: |-
-      $patch: delete
-      apiVersion: cert-manager.io/v1
-      kind: Certificate
-      metadata:
-        name: "kubearchive-operator-certificate"
-        namespace: kubearchive