From a94e797ff99201ca379dc6786a108922345fbf43 Mon Sep 17 00:00:00 2001 From: Alexander Olofsson Date: Wed, 5 Mar 2025 16:32:02 +0100 Subject: [PATCH] Resync bundled resources to modern versions --- REFERENCE.md | 6 +- manifests/init.pp | 4 +- manifests/install/cni_plugins.pp | 2 +- manifests/server/resources.pp | 19 +- manifests/server/resources/coredns.pp | 48 ++-- manifests/server/resources/flannel.pp | 255 ++++++++++++------ manifests/server/resources/kube_proxy.pp | 118 +++----- spec/classes/server/resources/flannel_spec.rb | 21 +- .../server/resources/kube_proxy_spec.rb | 25 +- .../files/resources/kube-proxy-older.yaml | 88 ------ spec/fixtures/files/resources/kube-proxy.yaml | 44 ++- 11 files changed, 293 insertions(+), 337 deletions(-) delete mode 100644 spec/fixtures/files/resources/kube-proxy-older.yaml diff --git a/REFERENCE.md b/REFERENCE.md index db8e633..d608968 100644 --- a/REFERENCE.md +++ b/REFERENCE.md @@ -256,7 +256,7 @@ Data type: `String[1]` version of etcd to install -Default value: `'3.5.16'` +Default value: `'3.5.18'` ##### `firewall_type` @@ -496,7 +496,7 @@ Data type: `String[1]` version of kubernetes to install -Default value: `'1.28.14'` +Default value: `'1.31.6'` ### `k8s::install::cni_plugins` @@ -534,7 +534,7 @@ Data type: `String[1]` The version of CNI plugins to install - if applicable -Default value: `'v1.2.0'` +Default value: `'v1.6.2'` ##### `download_url_template` diff --git a/manifests/init.pp b/manifests/init.pp index 0a47466..3bdd446 100644 --- a/manifests/init.pp +++ b/manifests/init.pp @@ -50,8 +50,8 @@ K8s::Ensure $ensure = 'present', Enum['container', 'native'] $packaging = 'native', K8s::Native_packaging $native_packaging = 'loose', - String[1] $version = '1.28.14', - String[1] $etcd_version = '3.5.16', + String[1] $version = '1.31.6', + String[1] $etcd_version = '3.5.18', String[1] $container_registry = 'registry.k8s.io', Optional[String[1]] $container_image_tag = undef, diff --git a/manifests/install/cni_plugins.pp 
b/manifests/install/cni_plugins.pp index 16efff8..bde0bef 100644 --- a/manifests/install/cni_plugins.pp +++ b/manifests/install/cni_plugins.pp @@ -8,7 +8,7 @@ # class k8s::install::cni_plugins ( K8s::Ensure $ensure = $k8s::ensure, - String[1] $version = 'v1.2.0', + String[1] $version = 'v1.6.2', String[1] $method = $k8s::native_packaging, String[1] $download_url_template = 'https://github.com/containernetworking/plugins/releases/download/%{version}/cni-plugins-linux-%{arch}-%{version}.tgz', Optional[String[1]] $package_name = undef, diff --git a/manifests/server/resources.pp b/manifests/server/resources.pp index 36e84b1..84fc1da 100644 --- a/manifests/server/resources.pp +++ b/manifests/server/resources.pp @@ -18,6 +18,7 @@ # @param flannel_image the image to use for the Flannel # @param flannel_registry the registry to use for the Flannel image # @param flannel_tag the tag to use for the Flannel image +# @param flannel_netpol if flannel should act on network policies # @param image_pull_secrets the secrets to pull from private registries # @param kube_proxy_daemonset_config the configuration to use for the kube-proxy DaemonSet # @param kube_proxy_image the image to use for the kube-proxy @@ -50,14 +51,18 @@ Hash[String,Data] $extra_kube_proxy_args = {}, String[1] $coredns_registry = 'docker.io', String[1] $coredns_image = 'coredns/coredns', - String[1] $coredns_tag = '1.8.7', + String[1] $coredns_tag = '1.12.0', Hash[String,Data] $coredns_deployment_config = {}, - String[1] $flannel_cni_registry = 'docker.io', - String[1] $flannel_cni_image = 'rancher/mirrored-flannelcni-flannel-cni-plugin', - String[1] $flannel_cni_tag = 'v1.0.0', - String[1] $flannel_registry = 'docker.io', - String[1] $flannel_image = 'rancher/mirrored-flannelcni-flannel', - String[1] $flannel_tag = 'v0.16.1', + String[1] $flannel_cni_registry = 'ghcr.io', + String[1] $flannel_cni_image = 'flannel-io/flannel-cni-plugin', + String[1] $flannel_cni_tag = 'v1.6.2-flannel1', + String[1] 
$flannel_netpol_registry = 'registry.k8s.io', + String[1] $flannel_netpol_image = 'networking/kube-network-policies', + String[1] $flannel_netpol_tag = 'v0.7.0', + String[1] $flannel_registry = 'ghcr.io', + String[1] $flannel_image = 'flannel-io/flannel', + String[1] $flannel_tag = 'v0.26.4', + Boolean $flannel_netpol = false, Hash[String,Data] $flannel_daemonset_config = {}, Optional[Array] $image_pull_secrets = undef, ) { diff --git a/manifests/server/resources/coredns.pp b/manifests/server/resources/coredns.pp index f0b8120..00363ab 100644 --- a/manifests/server/resources/coredns.pp +++ b/manifests/server/resources/coredns.pp @@ -144,8 +144,9 @@ content => { metadata => { labels => { - 'k8s-app' => 'coredns', + 'k8s-app' => 'kube-dns', 'kubernetes.io/name' => 'CoreDNS', + 'app.kubernetes.io/name' => 'coredns', 'kubernetes.io/managed-by' => 'puppet', }, }, @@ -158,35 +159,33 @@ }, selector => { matchLabels => { - 'k8s-app' => 'coredns', - 'kubernetes.io/managed-by' => 'puppet', + 'k8s-app' => 'kube-dns', + 'app.kubernetes.io/name' => 'coredns', }, }, template => { metadata => { labels => { - 'k8s-app' => 'coredns', + 'k8s-app' => 'kube-dns', + 'app.kubernetes.io/name' => 'coredns', 'kubernetes.io/managed-by' => 'puppet', }, }, spec => { affinity => { podAntiAffinity => { - preferredDuringSchedulingIgnoredDuringExecution => [ + requiredDuringSchedulingIgnoredDuringExecution => [ { - weight => 100, - podAffinityTerm => { - labelSelector => { - matchExpressions => [ - { - key => 'k8s-app', - operator => 'In', - values => ['coredns'], - }, - ], - }, - topologyKey => 'kubernetes.io/hostname', + labelSelector => { + matchExpressions => [ + { + key => 'k8s-app', + operator => 'In', + values => ['kube-dns'], + }, + ], }, + topologyKey => 'kubernetes.io/hostname', }, ], }, @@ -198,10 +197,6 @@ key => 'CriticalAddonsOnly', operator => 'Exists', }, - { - key => 'node-role.kubernetes.io/control-plane', - effect => 'NoSchedule', - }, ], nodeSelector => { 'kubernetes.io/os' => 
'linux', @@ -311,15 +306,17 @@ 'prometheus.io/scrape' => 'true', }, labels => { - 'k8s-app' => 'coredns', + 'k8s-app' => 'kube-dns', 'kubernetes.io/cluster-service' => 'true', 'kubernetes.io/name' => 'CoreDNS', + 'app.kubernetes.io/name' => 'coredns', 'kubernetes.io/managed-by' => 'puppet', }, }, spec => $_addn_coredns_svc_hash + { selector => { - 'k8s-app' => 'coredns', + 'k8s-app' => 'kube-dns', + 'app.kubernetes.io/name' => 'coredns', }, ports => [ { @@ -332,6 +329,11 @@ port => 53, protocol => 'TCP', }, + { + name => 'metrics', + port => 9153, + protocol => 'TCP', + }, ], }, }; diff --git a/manifests/server/resources/flannel.pp b/manifests/server/resources/flannel.pp index 22da3a9..9fc7cef 100644 --- a/manifests/server/resources/flannel.pp +++ b/manifests/server/resources/flannel.pp @@ -21,9 +21,13 @@ String[1] $cni_registry = $k8s::server::resources::flannel_cni_registry, String[1] $cni_image = $k8s::server::resources::flannel_cni_image, String[1] $cni_image_tag = $k8s::server::resources::flannel_cni_tag, + String[1] $netpol_registry = $k8s::server::resources::flannel_netpol_registry, + String[1] $netpol_image = $k8s::server::resources::flannel_netpol_image, + String[1] $netpol_image_tag = $k8s::server::resources::flannel_netpol_tag, String[1] $registry = $k8s::server::resources::flannel_registry, String[1] $image = $k8s::server::resources::flannel_image, String[1] $image_tag = $k8s::server::resources::flannel_tag, + Boolean $netpol = $k8s::server::resources::flannel_netpol, Hash[String,Data] $daemonset_config = $k8s::server::resources::flannel_daemonset_config, Optional[Array] $image_pull_secrets = $k8s::server::resources::image_pull_secrets, Hash[String,Data] $net_config = {}, @@ -33,6 +37,25 @@ $_cluster_cidr_v4 = flatten($cluster_cidr).filter |$cidr| { $cidr =~ Stdlib::IP::Address::V4::CIDR } $_cluster_cidr_v6 = flatten($cluster_cidr).filter |$cidr| { $cidr =~ Stdlib::IP::Address::V6::CIDR } + $_cni_conf = { + name => 'cbr0', + cniVersion => '0.3.1', + 
plugins => [ + { + type => 'flannel', + delegate => { + hairpinMode => true, + isDefaultGateway => true, + }, + }, + { + type => 'portmap', + capabilities => { + portMappings => true, + }, + }, + ], + } $_net_conf = delete_undef_values( { 'Network' => $_cluster_cidr_v4[0], @@ -44,6 +67,72 @@ }, } + $net_config ) + if $netpol { + $_netpol_rules = [ + { + apiGroups => ['networking.k8s.io'], + resources => ['networkpolicies'], + verbs => ['list', 'watch'], + }, + { + apiGroups => ['policy.networking.k8s.io'], + resources => ['adminnetworkpolicies', 'baselineadminnetworkpolicies'], + verbs => ['list', 'watch'], + }, + ] + $_netpol_containers = [ + { + name => 'kube-network-policies', + image => "${netpol_registry}/${netpol_image}:${netpol_image_tag}", + command => ['/bin/netpol', '--hostname-override=$(MY_NODE_NAME)', '--v=2'], + env => [ + { + name => 'MY_NODE_NAME', + valueFrom => { + fieldRef => { + fieldPath => 'spec.nodeName', + }, + }, + }, + ], + volumeMounts => [ + { + name => 'lib-modules', + mountPath => '/lib/modules', + readOnly => true, + }, + ], + resources => { + requests => { + cpu => '100m', + memory => '50Mi', + }, + limits => { + cpu => '100m', + memory => '50Mi', + }, + }, + securityContext => { + privileged => true, + capabilities => { + add => ['NET_ADMIN'], + }, + }, + }, + ] + $_netpol_volumes = [ + { + name => 'lib-modules', + hostPath => { + path => '/lib/modules', + }, + }, + ] + } else { + $_netpol_rules = [] + $_netpol_containers = [] + $_netpol_volumes = [] + } kubectl_apply { default: @@ -63,20 +152,9 @@ }, }, rules => [ - { - apiGroups => ['extensions'], - resources => ['podsecuritypolicies'], - verbs => ['use'], - resourceNames => ['psp.flannel.unprivileged'], - }, - { - apiGroups => [''], - resources => ['pods'], - verbs => ['get'], - }, { apiGroups => [''], - resources => ['nodes'], + resources => ['pods','nodes','namespaces'], verbs => ['list','watch'], }, { @@ -84,7 +162,13 @@ resources => ['nodes/status'], verbs => ['patch'], }, - ], + { + 
apiGroups => ['extensions'], + resources => ['podsecuritypolicies'], + verbs => ['use'], + resourceNames => ['psp.flannel.unprivileged'], + }, + ] + $_netpol_rules, }; 'flannel ClusterRoleBinding': @@ -133,25 +217,7 @@ }, }, data => { - 'cni-conf.json' => to_json({ - name => 'cbr0', - cniVersion => '0.3.1', - plugins => [ - { - type => 'flannel', - delegate => { - hairpinMode => true, - isDefaultGateway => true, - }, - }, - { - type => 'portmap', - capabilities => { - portMappings => true, - }, - }, - ], - }), + 'cni-conf.json' => $_cni_conf.to_json(), 'net-conf.json' => $_net_conf.to_json(), }, }; @@ -171,9 +237,7 @@ spec => { selector => { matchLabels => { - 'tier' => 'node', - 'k8s-app' => 'flannel', - 'kubernetes.io/managed-by' => 'puppet', + 'k8s-app' => 'flannel', }, }, template => { @@ -185,9 +249,25 @@ }, }, spec => { + affinity => { + nodeAffinity => { + requiredDuringSchedulingIgnoredDuringExecution => { + nodeSelectorTerms => [ + { + matchExpressions => [ + { + key => 'kubernetes.io/os', + operator => 'In', + values => ['linux'], + }, + ], + }, + ], + }, + }, + }, hostNetwork => true, priorityClassName => 'system-node-critical', - serviceAccountName => 'flannel', tolerations => [ { effect => 'NoSchedule', @@ -198,12 +278,48 @@ operator => 'Exists', }, ], - nodeSelector => { - 'kubernetes.io/os' => 'linux', - }, + serviceAccountName => 'flannel', + initContainers => [ + { + name => 'install-cni-plugin', + image => "${cni_registry}/${cni_image}:${cni_image_tag}", + command => ['cp'], + args => [ + '-f', + '/flannel', + '/opt/cni/bin/flannel', + ], + volumeMounts => [ + { + name => 'cni-plugin', + mountPath => '/opt/cni/bin', + }, + ], + }, + { + name => 'install-cni', + image => "${cni_registry}/${cni_image}:${cni_image_tag}", + command => ['cp'], + args => [ + '-f', + '/etc/kube-flannel/cni-conf.json', + '/etc/cni/net.d/10-flannel.conflist', + ], + volumeMounts => [ + { + name => 'cni', + mountPath => '/etc/cni/net.d', + }, + { + name => 'flannel-cfg', + 
mountPath => '/etc/kube-flannel', + }, + ], + }, + ], containers => [ { - name => 'flannel', + name => 'kube-flannel', image => "${registry}/${image}:${image_tag}", command => ['/opt/bin/flanneld'], args => ['--ip-masq', '--kube-subnet-mgr'], @@ -240,6 +356,10 @@ }, }, }, + { + name => 'EVENT_QUEUE_DEPTH', + value => '5000', + }, ], volumeMounts => [ { @@ -250,48 +370,14 @@ name => 'flannel-cfg', mountPath => '/etc/kube-flannel/', }, - ], - }, - ], - imagePullSecrets => $image_pull_secrets, - initContainers => [ - { - name => 'install-cni-plugin', - image => "${cni_registry}/${cni_image}:${cni_image_tag}", - command => ['cp'], - args => [ - '-f', - '/flannel', - '/opt/cni/bin/flannel', - ], - volumeMounts => [ - { - name => 'host-cni-bin', - mountPath => '/opt/cni/bin', - }, - ], - }, - { - name => 'install-cni', - image => "${cni_registry}/${cni_image}:${cni_image_tag}", - command => ['cp'], - args => [ - '-f', - '/etc/kube-flannel/cni-conf.json', - '/etc/cni/net.d/10-flannel.conflist', - ], - volumeMounts => [ { - name => 'cni', - mountPath => '/etc/cni/net.d', - }, - { - name => 'flannel-cfg', - mountPath => '/etc/kube-flannel', + name => 'xtables-lock', + mountPath => '/run/xtables.lock', }, ], }, - ], + ] + $_netpol_containers, + imagePullSecrets => $image_pull_secrets, volumes => [ { name => 'run', @@ -300,6 +386,12 @@ type => 'DirectoryOrCreate', }, }, + { + name => 'cni-plugin', + hostPath => { + path => '/opt/cni/bin', + }, + }, { name => 'cni', hostPath => { @@ -313,12 +405,13 @@ }, }, { - name => 'host-cni-bin', + name => 'xtables-lock', hostPath => { - path => '/opt/cni/bin', + path => '/run/xtables.lock', + type => 'FileOrCreate', }, }, - ], + ] + $_netpol_volumes, }, }, updateStrategy => { diff --git a/manifests/server/resources/kube_proxy.pp b/manifests/server/resources/kube_proxy.pp index a161e65..25be7ac 100644 --- a/manifests/server/resources/kube_proxy.pp +++ b/manifests/server/resources/kube_proxy.pp @@ -26,20 +26,6 @@ ) { assert_private() - if 
versioncmp($k8s::version, '1.23.0') >= 0 { - $_container_command = '/go-runner' - $_container_preargs = k8s::format_arguments({ - log_file => '/var/log/kube-proxy.log', - also_stdout => true, - }) + ['--', '/usr/local/bin/kube-proxy'] - } else { - $_container_command = '/usr/local/bin/kube-proxy' - $_container_preargs = k8s::format_arguments({ - alsologtostderr => true, - log_file => '/var/log/kube-proxy.log', - }) - } - $_cluster_cidr = flatten($cluster_cidr).join(',') kubectl_apply { @@ -62,9 +48,10 @@ }; 'kube-proxy ClusterRoleBinding': - api_version => 'rbac.authorization.k8s.io/v1', - kind => 'ClusterRoleBinding', - content => { + api_version => 'rbac.authorization.k8s.io/v1', + kind => 'ClusterRoleBinding', + resource_name => 'system:kube-proxy', + content => { metadata => { labels => { 'kubernetes.io/managed-by' => 'puppet', @@ -157,10 +144,10 @@ 'kube-proxy DaemonSet': api_version => 'apps/v1', kind => 'DaemonSet', + recreate => true, content => { metadata => { labels => { - tier => 'node', 'k8s-app' => 'kube-proxy', 'kubernetes.io/managed-by' => 'puppet', }, @@ -168,52 +155,41 @@ spec => { selector => { matchLabels => { - tier => 'node', 'k8s-app' => 'kube-proxy', - 'kubernetes.io/managed-by' => 'puppet', }, }, + updateStrategy => { + type => 'RollingUpdate', + }, template => { metadata => { labels => { - tier => 'node', 'k8s-app' => 'kube-proxy', 'kubernetes.io/managed-by' => 'puppet', }, }, spec => { + priorityClassName => 'system-node-critical', containers => [ { name => 'kube-proxy', image => "${registry}/${image}:${image_tag}", imagePullPolicy => 'IfNotPresent', command => [ - $_container_command, - ], - args => delete_undef_values( - $_container_preargs + + '/go-runner', + k8s::format_arguments({ + log_file => '/var/log/kube-proxy.log', + also_stdout => true, + }), + '--', + '/usr/local/bin/kube-proxy', k8s::format_arguments( { hostname_override => '$(NODE_NAME)', config => '/var/lib/kube-proxy/kube-proxy.conf', } + $extra_args - ) - ), - env => [ - 
{ - name => 'NODE_NAME', - valueFrom => { - fieldRef => { - fieldPath => 'spec.nodeName', - }, - }, - }, - ], - resources => { - requests => { - cpu => '100m', - }, - }, + ), + ].flatten, securityContext => { privileged => true, }, @@ -230,41 +206,32 @@ subPath => 'kubeconfig', readOnly => true, }, + { + mountPath => '/run/xtables.lock', + name => 'iptables-lock', + readOnly => false, + }, { mountPath => '/lib/modules', name => 'lib-modules', readOnly => true, }, - # { - # mountPath => '/etc/ssl/certs', - # name => 'ca-certs-host', - # readOnly => true, - # }, + ], + env => [ { - mountPath => '/run/xtables.lock', - name => 'iptables-lock', + name => 'NODE_NAME', + valueFrom => { + fieldRef => { + fieldPath => 'spec.nodeName', + }, + }, }, ], - } + }, ], imagePullSecrets => $image_pull_secrets, hostNetwork => true, - priorityClassName => 'system-node-critical', serviceAccountName => 'kube-proxy', - tolerations => [ - { - key => 'CriticalAddonsOnly', - operator => 'Exists', - }, - { - effect => 'NoSchedule', - operator => 'Exists', - }, - { - effect => 'NoExecute', - operator => 'Exists', - }, - ], volumes => [ { name => 'logfile', @@ -287,13 +254,6 @@ type => 'FileOrCreate', }, }, - # { - # name => 'ca-certs-host', - # hostPath => { - # path => '/usr/share/ca-certificates', - # type => 'Directory', - # }, - # }, { name => 'kube-proxy', configMap => { @@ -307,14 +267,16 @@ }, }, ], + tolerations => [ + { + operator => 'Exists', + }, + ], + nodeSelector => { + 'kubernetes.io/os' => 'linux', + }, }, }, - updateStrategy => { - rollingUpdate => { - maxUnavailable => 1, - }, - type => 'RollingUpdate', - }, }, } + $daemonset_config; } diff --git a/spec/classes/server/resources/flannel_spec.rb b/spec/classes/server/resources/flannel_spec.rb index 71a478f..e62b88f 100644 --- a/spec/classes/server/resources/flannel_spec.rb +++ b/spec/classes/server/resources/flannel_spec.rb @@ -57,6 +57,7 @@ class { 'k8s::server::resources': } }.to_json end + let(:daemonset) { 
subject.call.resource('kubectl_apply', 'flannel DaemonSet') } let(:content) do { @@ -68,8 +69,8 @@ class { 'k8s::server::resources': } }, 'data' => { - 'cni-conf.json' => '{"name":"cbr0","cniVersion":"0.3.1","plugins":[{"type":"flannel","delegate":{"hairpinMode":true,"isDefaultGateway":true}},{"type":"portmap","capabilities":{"portMappings":true}}]}', - 'net-conf.json' => net_conf + 'cni-conf.json' => cni_conf, + 'net-conf.json' => net_conf, } } end @@ -129,6 +130,22 @@ class { 'k8s::server::resources': with_content(content) end end + + describe 'without network policy support' do + let(:params) { { netpol: false } } + + it 'does not contain kube-network-policies container' do + expect(daemonset[:content].dig('spec', 'template', 'spec', 'containers').last['name']).not_to eq('kube-network-policies') + end + end + + describe 'with network policy support' do + let(:params) { { netpol: true } } + + it 'contains kube-network-policies container' do + expect(daemonset[:content].dig('spec', 'template', 'spec', 'containers').last['name']).to eq('kube-network-policies') + end + end end end end diff --git a/spec/classes/server/resources/kube_proxy_spec.rb b/spec/classes/server/resources/kube_proxy_spec.rb index 89d0027..080b781 100644 --- a/spec/classes/server/resources/kube_proxy_spec.rb +++ b/spec/classes/server/resources/kube_proxy_spec.rb @@ -8,7 +8,7 @@ function assert_private() {} class { '::k8s': - version => '1.26.1', + version => '1.23.4', } class { '::k8s::server': manage_etcd => true, @@ -34,29 +34,6 @@ class { '::k8s::server': it { is_expected.to contain_kubectl_apply('kube-proxy ConfigMap') } it { is_expected.to contain_kubectl_apply('kube-proxy DaemonSet').with_content(content) } - - describe 'with k8s < 1.23.0' do - let(:pre_condition) do - <<~PUPPET - function assert_private() {} - - class { '::k8s': - version => '1.22.10', - } - class { '::k8s::server': - manage_etcd => true, - manage_certs => true, - manage_components => false, - manage_resources => false, - 
node_on_server => false, - } - include ::k8s::server::resources - PUPPET - end - let(:content) { Psych.load(File.read('spec/fixtures/files/resources/kube-proxy-older.yaml')) } - - it { is_expected.to contain_kubectl_apply('kube-proxy DaemonSet').with_content(content) } - end end end end diff --git a/spec/fixtures/files/resources/kube-proxy-older.yaml b/spec/fixtures/files/resources/kube-proxy-older.yaml deleted file mode 100644 index a4d4652..0000000 --- a/spec/fixtures/files/resources/kube-proxy-older.yaml +++ /dev/null @@ -1,88 +0,0 @@ ---- -metadata: - labels: - tier: node - k8s-app: kube-proxy - kubernetes.io/managed-by: puppet -spec: - selector: - matchLabels: - tier: node - k8s-app: kube-proxy - kubernetes.io/managed-by: puppet - template: - metadata: - labels: - tier: node - k8s-app: kube-proxy - kubernetes.io/managed-by: puppet - spec: - containers: - - name: kube-proxy - image: registry.k8s.io/kube-proxy:v1.22.10 - imagePullPolicy: IfNotPresent - command: - - "/usr/local/bin/kube-proxy" - args: - - "--alsologtostderr=true" - - "--log-file=/var/log/kube-proxy.log" - - "--hostname-override=$(NODE_NAME)" - - "--config=/var/lib/kube-proxy/kube-proxy.conf" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - resources: - requests: - cpu: 100m - securityContext: - privileged: true - volumeMounts: - - mountPath: "/var/lib/kube-proxy/kube-proxy.conf" - name: kube-proxy - subPath: kube-proxy.conf - readOnly: true - - mountPath: "/var/lib/kube-proxy/kubeconfig" - name: kubeconfig - subPath: kubeconfig - readOnly: true - - mountPath: "/lib/modules" - name: lib-modules - readOnly: true - - mountPath: "/run/xtables.lock" - name: iptables-lock - imagePullSecrets: null - hostNetwork: true - priorityClassName: system-node-critical - serviceAccountName: kube-proxy - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: logfile - 
hostPath: - path: "/var/log/kube-proxy.log" - type: FileOrCreate - - name: lib-modules - hostPath: - path: "/lib/modules" - type: Directory - - name: iptables-lock - hostPath: - path: "/run/xtables.lock" - type: FileOrCreate - - name: kube-proxy - configMap: - name: kube-proxy - - name: kubeconfig - configMap: - name: kubeconfig-in-cluster - updateStrategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate diff --git a/spec/fixtures/files/resources/kube-proxy.yaml b/spec/fixtures/files/resources/kube-proxy.yaml index c3fbb2b..ac2ccd5 100644 --- a/spec/fixtures/files/resources/kube-proxy.yaml +++ b/spec/fixtures/files/resources/kube-proxy.yaml @@ -1,43 +1,33 @@ --- metadata: labels: - tier: node k8s-app: kube-proxy kubernetes.io/managed-by: puppet spec: selector: matchLabels: - tier: node k8s-app: kube-proxy - kubernetes.io/managed-by: puppet + updateStrategy: + type: RollingUpdate template: metadata: labels: - tier: node k8s-app: kube-proxy kubernetes.io/managed-by: puppet spec: + priorityClassName: system-node-critical containers: - name: kube-proxy - image: registry.k8s.io/kube-proxy:v1.26.1 + image: registry.k8s.io/kube-proxy:v1.23.4 imagePullPolicy: IfNotPresent command: - "/go-runner" - args: - "--log-file=/var/log/kube-proxy.log" - "--also-stdout=true" - "--" - "/usr/local/bin/kube-proxy" - "--hostname-override=$(NODE_NAME)" - "--config=/var/lib/kube-proxy/kube-proxy.conf" - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - resources: - requests: - cpu: 100m securityContext: privileged: true volumeMounts: @@ -49,22 +39,20 @@ spec: name: kubeconfig subPath: kubeconfig readOnly: true + - mountPath: "/run/xtables.lock" + name: iptables-lock + readOnly: false - mountPath: "/lib/modules" name: lib-modules readOnly: true - - mountPath: "/run/xtables.lock" - name: iptables-lock + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName imagePullSecrets: null hostNetwork: true - priorityClassName: 
system-node-critical serviceAccountName: kube-proxy - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists volumes: - name: logfile hostPath: @@ -84,7 +72,7 @@ spec: - name: kubeconfig configMap: name: kubeconfig-in-cluster - updateStrategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate + tolerations: + - operator: Exists + nodeSelector: + kubernetes.io/os: linux