6 | 6 | # -> nothing depending on facts or similar cluster state |
7 | 7 | # Checks depending on current state (of the nodes or the cluster) |
8 | 8 | # should be in roles/kubernetes/preinstall/tasks/0040-verify-settings.yml |
   | 9  | +
   | 10 | +# TODO: remove variables after release 2.31
9  | 11 | - name: Fail if removed variables are used
10 | 12 |   vars:
11 |    | -    removed_vars: []
   | 13 | +    removed_vars:
   | 14 | +      - loadbalancer_apiserver
   | 15 | +      - apiserver_loadbalancer_domain_name
   | 16 | +      - kube_apiserver_global_endpoint
   | 17 | +      - kubeadm_config_api_fqdn
12 |    | -    removed_vars_found: "{{ query('varnames', '^' + (removed_vars | join('|')) + '$') }}"
   | 18 | +    removed_vars_found: "{{ query('varnames', '^(' + (removed_vars | join('|')) + ')$') }}"
13 | 19 |   assert:
14 | 20 |     that: removed_vars_found | length == 0
15 |    | -    fail_msg: "Removed variables present: {{ removed_vars_found | join(', ') }}"
   | 21 | +    fail_msg: |-
   | 22 | +      Removed variables present: {{ removed_vars_found | join(', ') }}
   | 23 | +
   | 24 | +      API server loadbalancer variables have been deprecated.
   | 25 | +      Please update your inventory to use:
   | 26 | +      - kube_apiserver_endpoint
   | 27 | +      - kube_apiserver_cluster_internal_endpoint
   | 28 | +
   | 29 | +      And optionally:
   | 30 | +      - loadbalancer_apiserver_localhost (true/false)
   | 31 | +      - loadbalancer_apiserver_port
16 | 32 |   run_once: true
17 | 33 |
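The varnames lookup used above returns the names of all defined variables that match a regex, so the assert only trips when one of the removed variables is actually set somewhere. A minimal runnable sketch of the pattern, using made-up variable names:

# check-removed-vars.yml: standalone sketch; old_setting / other_old_setting are hypothetical
- hosts: localhost
  gather_facts: false
  vars:
    old_setting: true  # pretend a removed variable is still set in the inventory
    removed_vars:
      - old_setting
      - other_old_setting
    # query() returns the list of defined variable names matching the anchored regex
    removed_vars_found: "{{ query('varnames', '^(' + (removed_vars | join('|')) + ')$') }}"
  tasks:
    - name: Fail if removed variables are used
      assert:
        that: removed_vars_found | length == 0
        fail_msg: "Removed variables present: {{ removed_vars_found | join(', ') }}"

This run fails with "Removed variables present: old_setting"; drop the old_setting definition and it passes. The grouping parentheses matter: varnames matches with search semantics, and without the group the ^ and $ anchors would bind only to the first and last alternatives, so still-valid names such as loadbalancer_apiserver_localhost would be caught by the bare loadbalancer_apiserver prefix.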
18 | 34 | - name: Stop if kube_control_plane group is empty
|
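The body of this check falls outside the excerpt; judging from the conventions of the surrounding tasks it is presumably a plain assert on the inventory group, roughly like this sketch (not the role's actual code):

- name: Stop if kube_control_plane group is empty
  assert:
    that: groups['kube_control_plane'] | default([]) | length > 0
    fail_msg: "Add at least one host to the kube_control_plane inventory group"
  run_once: true
  when: not ignore_assert_errors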
61 | 77 |   when:
62 | 78 |     - not ignore_assert_errors
63 | 79 |
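Checks guarded by this condition can be skipped wholesale: ignore_assert_errors is an ordinary variable, so a truthy extra-var disables them for a run (the inventory path here is a placeholder):

ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml -e ignore_assert_errors=true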
64 |    | -# TODO: remove after release 2.31
65 |    | -- name: Stop if legacy apiserver LB variables are used
66 |    | -  assert:
67 |    | -    that:
68 |    | -      - loadbalancer_apiserver is not defined
69 |    | -      - apiserver_loadbalancer_domain_name is not defined
70 |    | -      - kube_apiserver_global_endpoint is not defined
71 |    | -      - kubeadm_config_api_fqdn is not defined
72 |    | -    fail_msg: |-
73 |    | -      API server loadbalancer variables have been deprecated.
74 |    | -
75 |    | -      Please update your inventory to use:
76 |    | -      - kube_apiserver_endpoint
77 |    | -      - kube_apiserver_cluster_internal_endpoint
78 |    | -
79 |    | -      And optionally:
80 |    | -      - loadbalancer_apiserver_localhost (true/false)
81 |    | -      - loadbalancer_apiserver_port
82 |    | -  run_once: true
83 |    | -  when: not ignore_assert_errors
84 |    | -
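For an inventory that still carries the deleted variables, migration means replacing them in group_vars. A sketch with placeholder values; the exact value format the two new endpoint variables expect (full URL versus bare host and port) is an assumption to check against the role defaults:

# group_vars/all/all.yml: hypothetical migration, all values illustrative
# Before (these now trip the removed-variables assert):
# apiserver_loadbalancer_domain_name: lb.example.local
# loadbalancer_apiserver:
#   address: 192.0.2.10
#   port: 6443

# After (value format assumed):
kube_apiserver_endpoint: https://lb.example.local:6443
kube_apiserver_cluster_internal_endpoint: https://lb.internal.example.local:6443

# Optional, still supported:
loadbalancer_apiserver_localhost: true
loadbalancer_apiserver_port: 6443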
85 | 80 | # This assertion will fail on the safe side: One can indeed schedule more pods
86 | 81 | # on a node than the CIDR-range has space for when additional pods use the host
87 | 82 | # network namespace. It is impossible to ascertain the number of such pods at