Skip to content

Commit 71c0d40

Browse files
authored
Merge pull request #143 from zakkg3/fix-e2e-updateall
Fix e2e & updateall
2 parents aa9435f + 92dcb84 commit 71c0d40

14 files changed

+473
-44
lines changed

.github/install_latest_podman.sh

100644100755
+1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
#!/bin/bash
12
sudo apt-get update
23
sudo apt-get -y upgrade
34
sudo apt-get -y install podman

.github/workflows/e2e-testing.yaml

+5-5
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,11 @@ jobs:
88
strategy:
99
matrix:
1010
kind-node-images:
11-
- kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245
12-
- kindest/node:v1.28.7@sha256:9bc6c451a289cf96ad0bbaf33d416901de6fd632415b076ab05f5fa7e4f65c58
13-
- kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72
14-
- kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb
15-
- kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8
11+
- kindest/node:v1.31.0@sha256:53df588e04085fd41ae12de0c3fe4c72f7013bba32a20e7325357a1ac94ba865
12+
- kindest/node:v1.30.4@sha256:976ea815844d5fa93be213437e3ff5754cd599b040946b5cca43ca45c2047114
13+
- kindest/node:v1.29.8@sha256:d46b7aa29567e93b27f7531d258c372e829d7224b25e3fc6ffdefed12476d3aa
14+
- kindest/node:v1.28.13@sha256:45d319897776e11167e4698f6b14938eb4d52eb381d9e3d7a9086c16c69a8110
15+
1616

1717
steps:
1818
- name: Checkout

.gitignore

+4
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,6 @@
11
src/__pycache__/
2+
src/tests/__pycache__/
23
yaml/Object_example/debug-*
4+
.vscode
5+
.coverage
6+
lcov.info

Makefile

+6-2
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
IMG_NAMESPACE = flag5
22
IMG_NAME = clustersecret
33
IMG_FQNAME = $(IMG_NAMESPACE)/$(IMG_NAME)
4-
IMG_VERSION = 0.0.10
4+
IMG_VERSION = 0.0.11
55

66
.PHONY: container push clean
77
all: container
@@ -69,5 +69,9 @@ chart-update:
6969
helm package charts/cluster-secret/ -d docs/
7070
helm repo index ./docs
7171

72-
dev-run:
72+
dev-prepare:
73+
kubectl apply -f ./yaml/00_rbac.yaml
74+
kubectl apply -f ./yaml/01_crd.yaml
75+
76+
dev-run: dev-prepare
7377
kopf run ./src/handlers.py --verbose -A

README.md

-2
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,6 @@
55
## Kubernetes ClusterSecret
66
[*clustersecret.com*](https://clustersecret.com/)
77

8-
# note clustersecret.io domain is deprecated. use clustersecret.com from now on.
9-
108
Cluster wide secrets
119

1210
ClusterSecret operator makes sure all the matching namespaces have the secret available and up to date.

charts/cluster-secret/Chart.yaml

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,13 @@
11
apiVersion: v2
22
name: cluster-secret
33
description: ClusterSecret Operator
4-
kubeVersion: '>= 1.16.0-0'
4+
kubeVersion: '>= 1.25.0-0'
55
type: application
6-
version: 0.4.2
6+
version: 0.4.3
77
icon: https://clustersecret.com/assets/csninjasmall.png
88
sources:
99
- https://github.com/zakkg3/ClusterSecret
10-
appVersion: "0.0.10"
10+
appVersion: "0.0.11"
1111
maintainers:
1212
1313
name: zakkg3

charts/cluster-secret/README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ Clustersecrets automates this. It keeps track of any modification in your secret
5757

5858
## Requirements
5959

60-
Current is 0.0.10 tested on > 1.27.1
60+
Current is 0.0.11 tested on > 1.27.1
6161
Version 0.0.9 is tested for Kubernetes >= 1.19 up to 1.27.1
6262

6363
For older kubernetes (<1.19) use the image tag "0.0.6" in yaml/02_deployment.yaml

charts/cluster-secret/templates/deployment.yaml

+5-1
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,10 @@ spec:
2222
labels:
2323
app: clustersecret
2424
{{- include "cluster-secret.selectorLabels" . | nindent 8 }}
25+
annotations:
26+
{{- range $key, $value := .Values.podAnnotations }}
27+
{{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 8 }}
28+
{{- end }}
2529
spec:
2630
securityContext:
2731
runAsUser: 100 # 100 is set by the container and can NOT be changed here - this would result in a getpwuid() error
@@ -59,4 +63,4 @@ spec:
5963
{{- with .Values.tolerations }}
6064
tolerations:
6165
{{- toYaml . | nindent 8 }}
62-
{{- end }}
66+
{{- end }}

charts/cluster-secret/values.yaml

+4-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
imagePullSecrets: []
22
image:
33
repository: quay.io/clustersecret/clustersecret
4-
tag: 0.0.10
4+
tag: 0.0.11
55
# use tag-alt for ARM and other alternative builds - read the readme for more information
66
# If Clustersecret is about to create a secret and then it finds it exists:
77
# Default is to ignore it. (to not lose any unintentional data)
@@ -15,3 +15,6 @@ nodeSelector: {}
1515
tolerations: []
1616

1717
affinity: {}
18+
19+
# Additional Pod annotations
20+
podAnnotations: {}

src/handlers.py

+65-25
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import logging
2+
import sys
23
from typing import Any, Dict, List, Optional
34

45
import kopf
@@ -14,15 +15,14 @@
1415

1516
from os_utils import in_cluster
1617

17-
csecs: Dict[str, Any] = {}
18-
19-
# Loading kubeconfig
20-
if in_cluster():
18+
if "unittest" not in sys.modules:
2119
# Loading kubeconfig
22-
config.load_incluster_config()
23-
else:
24-
# Loading using the local kubevonfig.
25-
config.load_kube_config()
20+
if in_cluster():
21+
# Loading kubeconfig
22+
config.load_incluster_config()
23+
else:
24+
# Loading using the local kubeconfig.
25+
config.load_kube_config()
2626

2727
v1 = client.CoreV1Api()
2828
custom_objects_api = client.CustomObjectsApi()
@@ -92,7 +92,7 @@ def on_field_match_namespace(
9292
uid=uid,
9393
name=name,
9494
namespace=namespace,
95-
data=body.get('data'),
95+
body=body,
9696
synced_namespace=updated_matched,
9797
))
9898

@@ -113,6 +113,8 @@ def on_field_data(
113113
body: Dict[str, Any],
114114
meta: kopf.Meta,
115115
name: str,
116+
namespace: Optional[str],
117+
uid: str,
116118
logger: logging.Logger,
117119
**_,
118120
):
@@ -126,9 +128,14 @@ def on_field_data(
126128

127129
secret_type = body.get('type', 'Opaque')
128130

131+
cached_cluster_secret = csecs_cache.get_cluster_secret(uid)
132+
if cached_cluster_secret is None:
133+
logger.error('Received an event for an unknown ClusterSecret.')
134+
135+
updated_syncedns = syncedns.copy()
129136
for ns in syncedns:
130137
logger.info(f'Re Syncing secret {name} in ns {ns}')
131-
body = client.V1Secret(
138+
ns_sec_body = client.V1Secret(
132139
api_version='v1',
133140
data={str(key): str(value) for key, value in new.items()},
134141
kind='Secret',
@@ -140,14 +147,42 @@ def on_field_data(
140147
),
141148
type=secret_type,
142149
)
143-
logger.debug(f'body: {body}')
150+
logger.debug(f'body: {ns_sec_body}')
144151
# Ensuring the secret still exist.
145152
if secret_exists(logger=logger, name=name, namespace=ns, v1=v1):
146-
response = v1.replace_namespaced_secret(name=name, namespace=ns, body=body)
153+
response = v1.replace_namespaced_secret(name=name, namespace=ns, body=ns_sec_body)
147154
else:
148-
response = v1.create_namespaced_secret(namespace=ns, body=body)
155+
try:
156+
v1.read_namespace(name=ns)
157+
except client.exceptions.ApiException as e:
158+
if e.status != 404:
159+
raise
160+
response = f'Namespace {ns} not found'
161+
updated_syncedns.remove(ns)
162+
logger.info(f'Namespace {ns} not found while Syncing secret {name}')
163+
else:
164+
response = v1.create_namespaced_secret(namespace=ns, body=ns_sec_body)
149165
logger.debug(response)
150166

167+
if updated_syncedns != syncedns:
168+
# Patch synced_ns field
169+
logger.debug(f'Patching clustersecret {name} in namespace {namespace}')
170+
body = patch_clustersecret_status(
171+
logger=logger,
172+
name=name,
173+
new_status={'create_fn': {'syncedns': updated_syncedns}},
174+
custom_objects_api=custom_objects_api,
175+
)
176+
177+
# Updating the cache
178+
csecs_cache.set_cluster_secret(BaseClusterSecret(
179+
uid=uid,
180+
name=name,
181+
namespace=namespace or "",
182+
body=body,
183+
synced_namespace=updated_syncedns,
184+
))
185+
151186

152187
@kopf.on.resume('clustersecret.io', 'v1', 'clustersecrets')
153188
@kopf.on.create('clustersecret.io', 'v1', 'clustersecrets')
@@ -164,8 +199,8 @@ async def create_fn(
164199

165200
# sync in all matched NS
166201
logger.info(f'Syncing on Namespaces: {matchedns}')
167-
for namespace in matchedns:
168-
sync_secret(logger, namespace, body, v1)
202+
for ns in matchedns:
203+
sync_secret(logger, ns, body, v1)
169204

170205
# store status in memory
171206
cached_cluster_secret = csecs_cache.get_cluster_secret(uid)
@@ -176,8 +211,8 @@ async def create_fn(
176211
csecs_cache.set_cluster_secret(BaseClusterSecret(
177212
uid=uid,
178213
name=name,
179-
namespace=namespace,
180-
data=body.get('data'),
214+
namespace=namespace or "",
215+
body=body,
181216
synced_namespace=matchedns,
182217
))
183218

@@ -193,10 +228,10 @@ async def namespace_watcher(logger: logging.Logger, meta: kopf.Meta, **_):
193228
logger.debug(f'New namespace created: {new_ns} re-syncing')
194229
ns_new_list = []
195230
for cluster_secret in csecs_cache.all_cluster_secret():
196-
obj_body = cluster_secret['body']
197-
name = obj_body['metadata']['name']
231+
obj_body = cluster_secret.body
232+
name = cluster_secret.name
198233

199-
matcheddns = cluster_secret['syncedns']
234+
matcheddns = cluster_secret.synced_namespace
200235

201236
logger.debug(f'Old matched namespace: {matcheddns} - name: {name}')
202237
ns_new_list = get_ns_list(logger, obj_body, v1)
@@ -211,11 +246,16 @@ async def namespace_watcher(logger: logging.Logger, meta: kopf.Meta, **_):
211246
)
212247

213248
# if there is a new matching ns, refresh cache
214-
cluster_secret.namespace = ns_new_list
249+
cluster_secret.synced_namespace = ns_new_list
215250
csecs_cache.set_cluster_secret(cluster_secret)
216251

217-
# update ns_new_list on the object so then we also delete from there
218-
return {'syncedns': ns_new_list}
252+
# update ns_new_list on the object so then we also delete from there
253+
patch_clustersecret_status(
254+
logger=logger,
255+
name=cluster_secret.name,
256+
new_status={'create_fn': {'syncedns': ns_new_list}},
257+
custom_objects_api=custom_objects_api,
258+
)
219259

220260

221261
@kopf.on.startup()
@@ -243,8 +283,8 @@ async def startup_fn(logger: logging.Logger, **_):
243283
BaseClusterSecret(
244284
uid=metadata.get('uid'),
245285
name=metadata.get('name'),
246-
namespace=metadata.get('namespace'),
247-
data=item.get('data'),
286+
namespace=metadata.get('namespace', ''),
287+
body=item,
248288
synced_namespace=item.get('status', {}).get('create_fn', {}).get('syncedns', []),
249289
)
250290
)

src/kubernetes_utils.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def patch_clustersecret_status(
3636
logger.debug(f'Updated clustersecret manifest: {clustersecret}')
3737

3838
# Perform a patch operation to update the custom resource
39-
custom_objects_api.patch_cluster_custom_object(
39+
return custom_objects_api.patch_cluster_custom_object(
4040
group=group,
4141
version=version,
4242
plural=plural,

src/models.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,5 +7,5 @@ class BaseClusterSecret(BaseModel):
77
uid: str
88
name: str
99
namespace: str
10-
data: Dict[str, Any]
10+
body: Dict[str, Any]
1111
synced_namespace: List[str]

src/requirements.txt

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
kopf===1.35.3
1+
kopf===1.37.2
22
kubernetes===19.15.0
33
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
4-
pydantic==2.3.0
4+
pydantic==2.4.0

0 commit comments

Comments
 (0)