
Commit 7cdf0d0

waiter.py Add ClusterOperator Test (#879)
SUMMARY

Fixes #869

During an OpenShift installation, one of the checks used to confirm that the cluster is ready to proceed with configuration is to verify that the Cluster Operators are in an Available: True, Degraded: False, Progressing: False state. While you can currently use the k8s_info module to get a JSON response, the resulting JSON needs to be iterated over several times to extract the appropriate status. This PR adds functionality to waiter.py that loops over all resource instances of the cluster operators. If any of them is not ready, the waiter returns False and the task fails. If the task returns successfully, you can assume that all the cluster operators are healthy.

ISSUE TYPE

Feature Pull Request

COMPONENT NAME

waiter.py

ADDITIONAL INFORMATION

A simple playbook will trigger waiter.py to watch the ClusterOperator objects:

```yaml
---
- name: get operators
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Get cluster operators
      kubernetes.core.k8s_info:
        api_version: v1
        kind: ClusterOperator
        kubeconfig: "/home/ocp/one/auth/kubeconfig"
        wait: true
        wait_timeout: 30
      register: cluster_operators
```

This produces a simple response if everything is functioning properly:

```
PLAY [get operators] *************************************************************************************************

TASK [Get cluster operators] *****************************************************************************************
ok: [localhost]

PLAY RECAP ***********************************************************************************************************
localhost                  : ok=1    changed=0    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0
```

If the timeout is reached:

```
PLAY [get operators] *************************************************************************************************

TASK [Get cluster operators] *****************************************************************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: ansible_collections.kubernetes.core.plugins.module_utils.k8s.exceptions.CoreException: Failed to gather information about ClusterOperator(s) even after waiting for 30 seconds
fatal: [localhost]: FAILED! => {"changed": false, "msg": "Failed to gather information about ClusterOperator(s) even after waiting for 30 seconds"}

PLAY RECAP ***********************************************************************************************************
localhost                  : ok=0    changed=0    unreachable=0    failed=1    skipped=0    rescued=0    ignored=0
```

UNSOLVED: How to know which Operators are failing

Reviewed-by: Mandar Kulkarni <[email protected]>
Reviewed-by: Bikouo Aubin
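On the UNSOLVED point above, one possible workaround (not part of this commit) is to register the k8s_info result without wait and filter the conditions yourself. The sketch below is hypothetical: the helper name failing_operators is an assumption, and it only relies on the resources list returned by kubernetes.core.k8s_info.

```python
# Hypothetical helper (not part of this commit): given the `resources` list
# registered from kubernetes.core.k8s_info, return the names of ClusterOperators
# that are not Available=True, Degraded=False, Progressing=False.
def failing_operators(cluster_operators):
    failing = []
    for operator in cluster_operators:
        conditions = operator.get("status", {}).get("conditions", [])
        status = {c.get("type", ""): c.get("status") for c in conditions}
        healthy = (
            status.get("Available") == "True"
            and status.get("Degraded") == "False"
            and status.get("Progressing") == "False"
        )
        if not healthy:
            failing.append(operator.get("metadata", {}).get("name", "<unknown>"))
    return failing


# Example with a registered result shaped like {"resources": [...]}:
# print(failing_operators(cluster_operators_result["resources"]))
```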
1 parent 91df2f1 commit 7cdf0d0

File tree

10 files changed, +142 -0 lines changed

@@ -0,0 +1,5 @@
+minor_changes:
+  - >-
+    waiter.py - add ClusterOperator support. The module can now check OpenShift cluster health
+    by verifying ClusterOperator status requiring 'Available: True', 'Degraded: False', and
+    'Progressing: False' for success. (https://github.com/ansible-collections/kubernetes.core/issues/869)

plugins/module_utils/k8s/waiter.py

+23
@@ -117,11 +117,34 @@ def exists(resource: Optional[ResourceInstance]) -> bool:
     return bool(resource) and not empty_list(resource)
 
 
+def cluster_operator_ready(resource: ResourceInstance) -> bool:
+    """
+    Predicate to check if a single ClusterOperator is healthy.
+    Returns True if:
+    - "Available" is True
+    - "Degraded" is False
+    - "Progressing" is False
+    """
+    if not resource:
+        return False
+
+    # Extract conditions from the resource's status
+    conditions = resource.get("status", {}).get("conditions", [])
+
+    status = {x.get("type", ""): x.get("status") for x in conditions}
+    return (
+        (status.get("Degraded") == "False")
+        and (status.get("Progressing") == "False")
+        and (status.get("Available") == "True")
+    )
+
+
 RESOURCE_PREDICATES = {
     "DaemonSet": daemonset_ready,
     "Deployment": deployment_ready,
     "Pod": pod_ready,
     "StatefulSet": statefulset_ready,
+    "ClusterOperator": cluster_operator_ready,
 }
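A minimal usage sketch of the predicate above, assuming the collection is importable under the path implied by plugins/module_utils/k8s/waiter.py. In the waiter the argument is a ResourceInstance from the dynamic client, but since the function only uses .get(), plain dicts of the same shape behave the same way here.

```python
# Minimal sketch: evaluate cluster_operator_ready against dicts that mimic
# the ClusterOperator shape (the real waiter passes ResourceInstance objects,
# which expose the same .get() access).
from ansible_collections.kubernetes.core.plugins.module_utils.k8s.waiter import (
    cluster_operator_ready,
)

healthy = {
    "status": {
        "conditions": [
            {"type": "Available", "status": "True"},
            {"type": "Degraded", "status": "False"},
            {"type": "Progressing", "status": "False"},
        ]
    }
}
progressing = {
    "status": {
        "conditions": [
            {"type": "Available", "status": "True"},
            {"type": "Degraded", "status": "False"},
            {"type": "Progressing", "status": "True"},
        ]
    }
}

assert cluster_operator_ready(healthy)          # all three conditions satisfied
assert not cluster_operator_ready(progressing)  # Progressing must be "False"
assert not cluster_operator_ready({})           # empty/missing resource is not ready
```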
tests/sanity/ignore-2.14.txt

+1
@@ -10,6 +10,7 @@ plugins/module_utils/k8sdynamicclient.py import-3.11!skip
 plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
+tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
 tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
 tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
 tests/unit/module_utils/fixtures/pods.yml yamllint!skip

tests/sanity/ignore-2.15.txt

+1
@@ -11,6 +11,7 @@ plugins/module_utils/version.py pylint!skip
 plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
+tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
 tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
 tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
 tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip

tests/sanity/ignore-2.16.txt

+1
@@ -14,6 +14,7 @@ plugins/module_utils/version.py pylint!skip
 plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
+tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
 tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
 tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
 tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip

tests/sanity/ignore-2.17.txt

+1
@@ -14,6 +14,7 @@ plugins/module_utils/version.py pylint!skip
 plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
+tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
 tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
 tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
 tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip

tests/sanity/ignore-2.18.txt

+1
@@ -11,6 +11,7 @@ plugins/module_utils/version.py pylint!skip
 plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
+tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
 tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
 tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
 tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip

tests/sanity/ignore-2.19.txt

+1
@@ -11,6 +11,7 @@ plugins/module_utils/version.py pylint!skip
 plugins/modules/k8s.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_scale.py validate-modules:parameter-type-not-in-doc
 plugins/modules/k8s_service.py validate-modules:parameter-type-not-in-doc
+tests/unit/module_utils/fixtures/clusteroperator.yml yamllint!skip
 tests/unit/module_utils/fixtures/definitions.yml yamllint!skip
 tests/unit/module_utils/fixtures/deployments.yml yamllint!skip
 tests/integration/targets/k8s_delete/files/deployments.yaml yamllint!skip
tests/unit/module_utils/fixtures/clusteroperator.yml

+99
@@ -0,0 +1,99 @@
+---
+apiVersion: config.openshift.io/v1
+kind: ClusterOperator
+metadata:
+  name: authentication
+spec: {}
+status:
+  conditions:
+  - message: All is well
+    reason: AsExpected
+    status: 'False'
+    type: Degraded
+  - message: 'AuthenticatorCertKeyProgressing: All is well'
+    reason: AsExpected
+    status: 'False'
+    type: Progressing
+  - message: All is well
+    reason: AsExpected
+    status: 'True'
+    type: Available
+  - message: All is well
+    reason: AsExpected
+    status: 'True'
+    type: Upgradeable
+  - reason: NoData
+    status: Unknown
+    type: EvaluationConditionsDetected
+---
+apiVersion: config.openshift.io/v1
+kind: ClusterOperator
+metadata:
+  name: dns
+spec: {}
+status:
+  conditions:
+  - message: DNS "default" is available.
+    reason: AsExpected
+    status: 'True'
+    type: Available
+  - message: 'DNS "default" reports Progressing=True: "Have 2 available node-resolver
+      pods, want 3."'
+    reason: DNSReportsProgressingIsTrue
+    status: 'True'
+    type: Progressing
+  - reason: DNSNotDegraded
+    status: 'False'
+    type: Degraded
+  - message: 'DNS default is upgradeable: DNS Operator can be upgraded'
+    reason: DNSUpgradeable
+    status: 'True'
+    type: Upgradeable
+---
+apiVersion: config.openshift.io/v1
+kind: ClusterOperator
+metadata:
+  name: dns
+spec: {}
+status:
+  conditions:
+  - message: DNS "default" is available.
+    reason: AsExpected
+    status: 'True'
+    type: Available
+  - message: 'DNS "default" reports Progressing=True: "Have 2 available node-resolver
+      pods, want 3."'
+    reason: DNSReportsProgressingIsTrue
+    status: 'False'
+    type: Progressing
+  - reason: DNSNotDegraded
+    status: 'True'
+    type: Degraded
+  - message: 'DNS default is upgradeable: DNS Operator can be upgraded'
+    reason: DNSUpgradeable
+    status: 'False'
+    type: Upgradeable
+---
+apiVersion: config.openshift.io/v1
+kind: ClusterOperator
+metadata:
+  name: dns
+spec: {}
+status:
+  conditions:
+  - message: DNS "default" is available.
+    reason: AsExpected
+    status: 'False'
+    type: Available
+  - message: 'DNS "default" reports Progressing=True: "Have 2 available node-resolver
+      pods, want 3."'
+    reason: DNSReportsProgressingIsTrue
+    status: 'True'
+    type: Progressing
+  - reason: DNSNotDegraded
+    status: 'True'
+    type: Degraded
+  - message: 'DNS default is upgradeable: DNS Operator can be upgraded'
+    reason: DNSUpgradeable
+    status: 'False'
+    type: Upgradeable
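The four documents in this fixture line up with the expected values in the new unit test: the authentication operator is healthy, while the three dns documents are, in order, still progressing, degraded, and unavailable. Below is a small standalone sketch of the same check outside pytest; the PyYAML usage and the import path are assumptions based on the file layout in this commit.

```python
# Sketch: run the new predicate over every document in the fixture.
# Assumes PyYAML is installed and the script is run from the collection root.
import yaml

from ansible_collections.kubernetes.core.plugins.module_utils.k8s.waiter import (
    cluster_operator_ready,
)

with open("tests/unit/module_utils/fixtures/clusteroperator.yml") as f:
    documents = list(yaml.safe_load_all(f))

for doc in documents:
    print(doc["metadata"]["name"], cluster_operator_ready(doc))
# Expected, matching the test's [True, False, False, False]:
#   authentication True, then dns False three times.
```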

tests/unit/module_utils/test_waiter.py

+9
@@ -9,6 +9,7 @@
     DummyWaiter,
     Waiter,
     clock,
+    cluster_operator_ready,
     custom_condition,
     deployment_ready,
     exists,
@@ -29,6 +30,7 @@ def resources(filepath):
 RESOURCES = resources("fixtures/definitions.yml")
 PODS = resources("fixtures/pods.yml")
 DEPLOYMENTS = resources("fixtures/deployments.yml")
+CLUSTER_OPERATOR = resources("fixtures/clusteroperator.yml")
 
 
 def test_clock_times_out():
@@ -119,3 +121,10 @@ def test_get_waiter_returns_correct_waiter():
         ).predicate.func
         == custom_condition
     )
+
+
+@pytest.mark.parametrize(
+    "clusteroperator,expected", zip(CLUSTER_OPERATOR, [True, False, False, False])
+)
+def test_cluster_operator(clusteroperator, expected):
+    assert cluster_operator_ready(clusteroperator) is expected
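One detail of the parametrization above worth noting: zip() pairs fixture documents with expected values positionally and stops at the shorter iterable, so the expected list must cover every document in clusteroperator.yml. A tiny self-contained illustration of that behavior:

```python
# zip() stops at the shorter iterable, so an expected list that is one entry
# short would silently drop the last fixture document from the test.
documents = ["authentication", "dns-progressing", "dns-degraded", "dns-unavailable"]
expected = [True, False, False]  # deliberately one entry short
print(list(zip(documents, expected)))  # only three pairs are produced
```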
