diff --git a/data/data.json b/data/data.json index 728e6dbb8..89e41154d 100644 --- a/data/data.json +++ b/data/data.json @@ -5160,15 +5160,15 @@ "calico-v1.17": "\n{{if eq .RBACConfig \"rbac\"}}\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the kube-controllers component,\n# and bind it to the calico-kube-controllers serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-kube-controllers\nrules:\n # Nodes are watched to monitor for deletions.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - watch\n - list\n - get\n # Pods are queried to check for existence.\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n # IPAM resources are manipulated when nodes are deleted.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n verbs:\n - list\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n # Needs access to update clusterinformations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - clusterinformations\n verbs:\n - get\n - create\n - update\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-kube-controllers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n name: calico-kube-controllers\n namespace: kube-system\n---\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used 
to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n # These permissions are required for Calico CNI to perform IPAM allocations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ipamconfigs\n verbs:\n - get\n # Block 
affinities must also be watchable by confd for route aggregation.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n verbs:\n - watch\n # The Calico IPAM migration needs to get daemonsets. These permissions can be\n # removed if not upgrading from an installation using host-local IPAM.\n - apiGroups: [\"apps\"]\n resources:\n - daemonsets\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # Configure the backend to use.\n calico_backend: \"bird\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1440\"\n{{- end}}\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"info\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"calico-ipam\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n---\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n 
names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: 
crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n---\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the calico-node container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: calico-node\n namespace: kube-system\n labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: calico-node\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n hostNetwork: true\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n 
{{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-node\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container performs upgrade from host-local IPAM to calico-ipam.\n # It can be deleted if this is a fresh installation, or if you have already\n # upgraded to use calico-ipam.\n - name: upgrade-ipam\n image: {{.CNIImage}}\n command: [\"/opt/cni/bin/calico-ipam\", \"-upgrade\"]\n env:\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n volumeMounts:\n - mountPath: /var/lib/cni/networks\n name: host-local-net-dir\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the 
Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n containers:\n # Runs calico-node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Choose the backend to use.\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,bgp\"\n # Auto-detect the BGP IP address.\n - name: IP\n value: \"autodetect\"\n # Enable IPIP\n - name: CALICO_IPV4POOL_IPIP\n value: \"Always\"\n # Set MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"{{.ClusterCIDR}}\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"info\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n exec:\n command:\n - /bin/calico-node\n - -bird-ready\n - -felix-ready\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n volumes:\n # Used by calico-node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Mount in the directory for host-local IPAM allocations. 
This is\n # used when upgrading from host-local to calico-ipam, and can be removed\n # if not using the upgrade-ipam init container.\n - name: host-local-net-dir\n hostPath:\n path: /var/lib/cni/networks\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n---\n# Source: calico/templates/calico-kube-controllers.yaml\n\n# See https://github.com/projectcalico/kube-controllers\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\nspec:\n # The controllers can only have a single active instance.\n replicas: 1\n selector:\n matchLabels:\n k8s-app: calico-kube-controllers\n strategy:\n type: Recreate\n template:\n metadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n beta.kubernetes.io/os: linux\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-kube-controllers\n{{end}}\n priorityClassName: system-cluster-critical\n containers:\n - name: calico-kube-controllers\n image: {{.ControllersImage}}\n env:\n # Choose which 
controllers to run.\n - name: ENABLED_CONTROLLERS\n value: node\n - name: DATASTORE_TYPE\n value: kubernetes\n readinessProbe:\n exec:\n command:\n - /usr/bin/check-status\n - -r\n", "calico-v1.17-privileged": "\n# CalicoTemplateV117Privileged\n{{if eq .RBACConfig \"rbac\"}}\n# Source: calico/templates/rbac.yaml\n# Include a clusterrole for the kube-controllers component,\n# and bind it to the calico-kube-controllers serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-kube-controllers\nrules:\n # Nodes are watched to monitor for deletions.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - watch\n - list\n - get\n # Pods are queried to check for existence.\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n # IPAM resources are manipulated when nodes are deleted.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n verbs:\n - list\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n # Needs access to update clusterinformations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - clusterinformations\n verbs:\n - get\n - create\n - update\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-kube-controllers\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-kube-controllers\nsubjects:\n- kind: ServiceAccount\n name: calico-kube-controllers\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - 
get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n # Pod CIDR auto-detection on kubeadm needs access to config maps.\n - apiGroups: [\"\"]\n resources:\n - configmaps\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n # These permissions are required for Calico CNI to 
perform IPAM allocations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n - ipamblocks\n - ipamhandles\n verbs:\n - get\n - list\n - create\n - update\n - delete\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ipamconfigs\n verbs:\n - get\n # Block affinities must also be watchable by confd for route aggregation.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - blockaffinities\n verbs:\n - watch\n # The Calico IPAM migration needs to get daemonsets. These permissions can be\n # removed if not upgrading from an installation using host-local IPAM.\n - apiGroups: [\"apps\"]\n resources:\n - daemonsets\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Calico installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # Configure the backend to use.\n calico_backend: \"bird\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1440\"\n{{- end}}\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"info\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"calico-ipam\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": {\"bandwidth\": true}\n }\n ]\n }\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n 
scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n 
scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the calico-node container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: calico-node\n namespace: kube-system\n labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: calico-node\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n hostNetwork: true\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: 
Exists\n{{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-node\n{{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container performs upgrade from host-local IPAM to calico-ipam.\n # It can be deleted if this is a fresh installation, or if you have already\n # upgraded to use calico-ipam.\n - name: upgrade-ipam\n image: {{.CNIImage}}\n command: [\"/opt/cni/bin/calico-ipam\", \"-upgrade\"]\n env:\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n volumeMounts:\n - mountPath: /var/lib/cni/networks\n name: host-local-net-dir\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n securityContext:\n privileged: true\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n securityContext:\n privileged: true\n # Adds a Flex Volume Driver that creates a 
per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n securityContext:\n privileged: true\n containers:\n # Runs calico-node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Choose the backend to use.\n - name: CALICO_NETWORKING_BACKEND\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: calico_backend\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,bgp\"\n # Auto-detect the BGP IP address.\n - name: IP\n value: \"autodetect\"\n # Enable IPIP\n - name: CALICO_IPV4POOL_IPIP\n value: \"Always\"\n # Set MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: veth_mtu\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"{{.ClusterCIDR}}\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"info\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n - -bird-live\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-ready\n - -bird-ready\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n volumes:\n # Used by calico-node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Mount in the directory for host-local IPAM allocations. 
This is\n # used when upgrading from host-local to calico-ipam, and can be removed\n # if not using the upgrade-ipam init container.\n - name: host-local-net-dir\n hostPath:\n path: /var/lib/cni/networks\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n---\n# Source: calico/templates/calico-kube-controllers.yaml\n# See https://github.com/projectcalico/kube-controllers\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\nspec:\n # The controllers can only have a single active instance.\n replicas: 1\n selector:\n matchLabels:\n k8s-app: calico-kube-controllers\n strategy:\n type: Recreate\n template:\n metadata:\n name: calico-kube-controllers\n namespace: kube-system\n labels:\n k8s-app: calico-kube-controllers\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n tolerations:\n # Make sure calico-node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n{{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: calico-kube-controllers\n{{end}}\n priorityClassName: system-cluster-critical\n containers:\n - name: calico-kube-controllers\n image: {{.ControllersImage}}\n env:\n # Choose which controllers 
to run.\n - name: ENABLED_CONTROLLERS\n value: node\n - name: DATASTORE_TYPE\n value: kubernetes\n readinessProbe:\n exec:\n command:\n - /usr/bin/check-status\n - -r\n", "calico-v1.8": "\n{{if eq .RBACConfig \"rbac\"}}\n## start rbac here\n\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico-node\nrules:\n - apiGroups: [\"\"]\n resources:\n - namespaces\n verbs:\n - get\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - update\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - list\n - watch\n - patch\n - apiGroups: [\"\"]\n resources:\n - services\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - update\n - watch\n - apiGroups: [\"extensions\"]\n resources:\n - networkpolicies\n verbs:\n - get\n - list\n - watch\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - clusterinformations\n - hostendpoints\n verbs:\n - create\n - get\n - list\n - update\n - watch\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n## end rbac here\n\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-config\n namespace: kube-system\ndata:\n # To enable Typha, set this to \"calico-typha\" *and* set a non-zero value for Typha replicas\n # below. 
We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is\n # essential.\n typha_service_name: \"none\"\n # The CNI network configuration to install on each node.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": 1500,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"k8s_api_root\": \"https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__\",\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: calico-node\n namespace: kube-system\n labels:\n k8s-app: calico-node\nspec:\n selector:\n matchLabels:\n k8s-app: calico-node\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: calico-node\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n 
{{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure calico/node gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n serviceAccountName: calico-node\n terminationGracePeriodSeconds: 0\n containers:\n # Runs calico/node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,bgp\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPV6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Set MTU for tunnel device used if ipip is enabled\n - name: FELIX_IPINIPMTU\n value: \"1440\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"{{.ClusterCIDR}}\"\n # Enable IPIP\n - name: CALICO_IPV4POOL_IPIP\n value: \"Always\"\n # Enable IP-in-IP within Felix.\n - name: FELIX_IPINIPENABLED\n value: \"true\"\n # Typha support: controlled by the ConfigMap.\n - name: FELIX_TYPHAK8SSERVICENAME\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: typha_service_name\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Auto-detect the BGP IP address.\n - name: IP\n value: \"autodetect\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: calico-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: 
/var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: 
clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: calico-node\n namespace: kube-system\n\n\n{{if ne .CloudProvider \"none\"}}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: {{.CloudProvider}}-ippool\n namespace: kube-system\ndata:\n {{.CloudProvider}}-ippool: |-\n apiVersion: projectcalico.org/v3\n kind: IPPool\n metadata:\n name: ippool-ipip-1\n spec:\n cidr: {{.ClusterCIDR}}\n ipipMode: Always\n natOutgoing: true\n---\napiVersion: v1\nkind: Pod\nmetadata:\n name: calicoctl\n namespace: kube-system\nspec:\n hostNetwork: true\n restartPolicy: OnFailure\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: calicoctl\n image: {{.Calicoctl}}\n command: [\"/bin/sh\", \"-c\", \"calicoctl apply -f {{.CloudProvider}}-ippool.yaml\"]\n env:\n - name: DATASTORE_TYPE\n value: kubernetes\n volumeMounts:\n - name: ippool-config\n mountPath: /root/\n volumes:\n - name: ippool-config\n configMap:\n name: {{.CloudProvider}}-ippool\n items:\n - 
key: {{.CloudProvider}}-ippool\n path: {{.CloudProvider}}-ippool.yaml\n # Mount in the etcd TLS secrets.\n{{end}}\n", - "canal-v1.13": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n 
verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\n# Bind the Calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# calico/cni:v3.1.1\n# 
coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. 
Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n serviceAccountName: canal\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n 
command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico/node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: 
xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n", - "canal-v1.15": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes 
NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n 
- get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\n# Bind the Calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# calico/cni:v3.1.1\n# coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := 
.NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico/node container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: 
xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: 
hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", - "canal-v1.15-privileged": "\n# CanalTemplateV115Privileged\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n---\n# Source: calico/templates/rbac.yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - 
apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n 
name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1450\"\n{{- end}}\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": {\"bandwidth\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets 
scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n securityContext:\n privileged: true\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n securityContext:\n privileged: true\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n # - name: CALICO_IPV4POOL_CIDR\n # value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n 
volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: 
apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", - "canal-v1.15-privileged-calico3134": "\n# CanalTemplateV115PrivilegedCalico3134\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n---\n# Source: calico/templates/rbac.yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n # Pod CIDR auto-detection on kubeadm needs access to config maps.\n - apiGroups: [\"\"]\n resources:\n - configmaps\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # 
Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind 
the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1450\"\n{{- end}}\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": {\"bandwidth\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets 
scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n securityContext:\n privileged: true\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n securityContext:\n privileged: true\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n # - name: CALICO_IPV4POOL_CIDR\n # value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n 
volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: 
ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n---\n", - "canal-v1.15.12": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: 
[\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n 
resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\n# Bind the Calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# calico/cni:v3.1.1\n# coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := 
.NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico/node container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: 
xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: 
hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", - "canal-v1.16": "\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: 
\"{{.CanalInterface}}\"\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n 
kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n{{if eq .RBACConfig \"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the 
calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - 
bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are 
reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Tolerate this effect so the pods will be schedulable at all times\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n 
name: cni-net-dir\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n containers:\n # Runs canal container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n 
initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- 
if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n", - "canal-v1.17": "\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. 
Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: 
globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n{{if eq .RBACConfig \"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - 
list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\napiVersion: 
rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Tolerate this effect so the pods will be schedulable at all times\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n {{if eq 
.RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: 
false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n", - "canal-v1.17-privileged": "\n# CanalTemplateV117Privileged\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: 
v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n---\n# Source: calico/templates/rbac.yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be 
removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1450\"\n{{- 
end}}\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": {\"bandwidth\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v 
}}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n securityContext:\n privileged: true\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n securityContext:\n privileged: true\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n # - name: CALICO_IPV4POOL_CIDR\n # value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n 
volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: 
apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", - "canal-v1.17-privileged-calico3134": "\n# CanalTemplateV117PrivilegedCalico3134\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n---\n# Source: calico/templates/rbac.yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n # Pod CIDR auto-detection on kubeadm needs access to config maps.\n - apiGroups: [\"\"]\n resources:\n - configmaps\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # 
Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only required for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind 
the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1450\"\n{{- end}}\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": {\"bandwidth\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\"\n }\n }\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets 
scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n securityContext:\n privileged: true\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n securityContext:\n privileged: true\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n # - name: CALICO_IPV4POOL_CIDR\n # value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Set Felix logging to \"Warning\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n 
volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: 
ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", + "canal-v1.13": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n 
resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only required for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - 
apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\n# Bind the Calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# calico/cni:v3.1.1\n# coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n 
hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n serviceAccountName: canal\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico/node container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: 
lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: 
felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n 
version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n", + "canal-v1.15": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - 
nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\n# Bind the Calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# 
calico/cni:v3.1.1\n# coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. 
Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container installs 
the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico/node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: 
xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: 
hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", + "canal-v1.15-privileged": "\n# CanalTemplateV115Privileged\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n---\n# Source: calico/templates/rbac.yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - 
apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n 
name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1450\"\n{{- end}}\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": {\"bandwidth\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ 
$v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n securityContext:\n privileged: true\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n securityContext:\n privileged: true\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n # - name: CALICO_IPV4POOL_CIDR\n # value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n 
volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: 
apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", + "canal-v1.15-privileged-calico3134": "\n# CanalTemplateV115PrivilegedCalico3134\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n---\n# Source: calico/templates/rbac.yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n # Pod CIDR auto-detection on kubeadm needs access to config maps.\n - apiGroups: [\"\"]\n resources:\n - configmaps\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # 
Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind 
the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1450\"\n{{- end}}\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": {\"bandwidth\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ 
$v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n securityContext:\n privileged: true\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n securityContext:\n privileged: true\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n # - name: CALICO_IPV4POOL_CIDR\n # value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n 
volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: 
ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n---\n", + "canal-v1.15.12": "\n{{if eq .RBACConfig \"rbac\"}}\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: 
[\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: calico-node\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n 
resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\n# Bind the Calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# calico/cni:v3.1.1\n# coreos/flannel:v0.9.1\n\n---\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n 
hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n initContainers:\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n containers:\n # Runs calico/node container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: 
xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n\n---\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy and networking mode.\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: 
hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", + "canal-v1.16": "\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: 
\"{{.CanalInterface}}\"\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n 
scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n{{if eq .RBACConfig \"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n\n# Include 
a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh 
installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical 
add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Tolerate this effect so the pods will be schedulable at all times\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: 
/host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n containers:\n # Runs canal container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: 
/liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: 
flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n", + "canal-v1.17": "\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n \"mtu\": {{.MTU}},\n{{- end}}\n{{- end}}\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. 
Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n 
plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n{{if eq .RBACConfig \"rbac\"}}\n---\n# Source: calico/templates/rbac.yaml\n\n# Include a clusterrole for the calico-node DaemonSet,\n# and bind it to the calico-node serviceaccount.\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico-node\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - 
namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: 
system:nodes\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: calico-node\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico-node\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Tolerate this effect so the pods will be schedulable at all times\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n 
effect: \"NoExecute\"\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n - name: CALICO_IPV4POOL_CIDR\n value: \"192.168.0.0/16\"\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n host: localhost\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: 
false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n", + "canal-v1.17-privileged": "\n# CanalTemplateV117Privileged\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: 
v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n---\n# Source: calico/templates/rbac.yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be 
removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1450\"\n{{- 
end}}\n\n # The CNI network configuration to install on each node. The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": {\"bandwidth\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n hostNetwork: true\n{{if .NodeSelector}}\n 
nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n securityContext:\n privileged: true\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n securityContext:\n privileged: true\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n # - name: CALICO_IPV4POOL_CIDR\n # value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n 
volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: 
apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", + "canal-v1.17-privileged-calico3134": "\n# CanalTemplateV117PrivilegedCalico3134\n{{if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n---\n# Source: calico/templates/rbac.yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: calico\nrules:\n # The CNI plugin needs to get pods, nodes, and namespaces.\n - apiGroups: [\"\"]\n resources:\n - pods\n - nodes\n - namespaces\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n - services\n verbs:\n # Used to discover service IPs for advertisement.\n - watch\n - list\n # Used to discover Typhas.\n - get\n # Pod CIDR auto-detection on kubeadm needs access to config maps.\n - apiGroups: [\"\"]\n resources:\n - configmaps\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n # 
Needed for clearing NodeNetworkUnavailable flag.\n - patch\n # Calico stores some configuration information in node annotations.\n - update\n # Watch for changes to Kubernetes NetworkPolicies.\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - watch\n - list\n # Used by Calico for policy information.\n - apiGroups: [\"\"]\n resources:\n - pods\n - namespaces\n - serviceaccounts\n verbs:\n - list\n - watch\n # The CNI plugin patches pods/status.\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - patch\n # Calico monitors various CRDs for config.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - bgpconfigurations\n - ippools\n - ipamblocks\n - globalnetworkpolicies\n - globalnetworksets\n - networkpolicies\n - networksets\n - clusterinformations\n - hostendpoints\n - blockaffinities\n verbs:\n - get\n - list\n - watch\n # Calico must create and update some CRDs on startup.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - ippools\n - felixconfigurations\n - clusterinformations\n verbs:\n - create\n - update\n # Calico stores some configuration information on the node.\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - watch\n # These permissions are only requried for upgrade from v2.6, and can\n # be removed after upgrade or on fresh installations.\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - bgpconfigurations\n - bgppeers\n verbs:\n - create\n - update\n---\n# Flannel ClusterRole\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: flannel\nrules:\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - nodes/status\n verbs:\n - patch\n---\n# Bind 
the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n---\n# Source: calico/templates/calico-config.yaml\n# This ConfigMap is used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # Typha is disabled.\n typha_service_name: \"none\"\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # Configure the MTU to use\n{{- if .MTU }}\n{{- if ne .MTU 0 }}\n veth_mtu: \"{{.MTU}}\"\n{{- end}}\n{{- else }}\n veth_mtu: \"1450\"\n{{- end}}\n\n # The CNI network configuration to install on each node. 
The special\n # values in this config will be automatically populated.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n },\n {\n \"type\": \"bandwidth\",\n \"capabilities\": {\"bandwidth\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n---\n# Source: calico/templates/calico-node.yaml\n# This manifest installs the canal container, as well\n# as the CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: apps/v1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n # This, along with the CriticalAddonsOnly toleration below,\n # marks the pod as a critical add-on, ensuring it gets\n # priority scheduling and that its resources are reserved\n # if it ever gets evicted.\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n nodeSelector:\n kubernetes.io/os: linux\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ 
$v }}\"\n {{ end }}\n{{end}}\n tolerations:\n # Make sure canal gets scheduled on all nodes.\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n {{if eq .RBACConfig \"rbac\"}}\n serviceAccountName: canal\n {{end}}\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n priorityClassName: system-node-critical\n initContainers:\n # This container installs the CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n # Name of the CNI config file to create.\n - name: CNI_CONF_NAME\n value: \"10-canal.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n # Set the hostname based on the k8s node name.\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # CNI MTU Config variable\n - name: CNI_MTU\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: veth_mtu\n # Prevents the container from sleeping forever.\n - name: SLEEP\n value: \"false\"\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n securityContext:\n privileged: true\n # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes\n # to communicate with Felix over the Policy Sync API.\n - name: flexvol-driver\n image: {{.FlexVolImg}}\n volumeMounts:\n - name: flexvol-driver-host\n mountPath: /host/driver\n securityContext:\n privileged: true\n containers:\n # Runs canal container on each Kubernetes node. 
This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Configure route aggregation based on pod CIDR.\n - name: USE_POD_CIDR\n value: \"true\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # Set based on the k8s node name.\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # No IP address needed.\n - name: IP\n value: \"\"\n # The default IPv4 pool to create on startup if none exists. Pod IPs will be\n # chosen from this range. Changing this value after installation will have\n # no effect. 
This should fall within --cluster-cidr.\n # - name: CALICO_IPV4POOL_CIDR\n # value: \"192.168.0.0/16\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n # Disable IPv6 on Kubernetes.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Set Felix logging to \"info\"\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n exec:\n command:\n - /bin/calico-node\n - -felix-live\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n host: localhost\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n - name: policysync\n mountPath: /var/run/nodeagent\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n 
volumeMounts:\n - mountPath: /run/xtables.lock\n name: xtables-lock\n readOnly: false\n - name: flannel-cfg\n mountPath: /etc/kube-flannel/\n volumes:\n # Used by canal.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n # Used by flannel.\n - name: flannel-cfg\n configMap:\n name: canal-config\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used to create per-pod Unix Domain Sockets\n - name: policysync\n hostPath:\n type: DirectoryOrCreate\n path: /var/run/nodeagent\n # Used to install Flex Volume Driver\n - name: flexvol-driver-host\n hostPath:\n type: DirectoryOrCreate\n{{- if .FlexVolPluginDir }}\n path: {{.FlexVolPluginDir}}\n{{- else }}\n path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds\n{{- end }}\n---\n# Source: calico/templates/kdd-crds.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgppeers.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPPeer\n plural: bgppeers\n singular: bgppeer\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: blockaffinities.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BlockAffinity\n plural: blockaffinities\n singular: blockaffinity\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamblocks.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMBlock\n plural: ipamblocks\n singular: ipamblock\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamconfigs.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMConfig\n plural: ipamconfigs\n singular: 
ipamconfig\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ipamhandles.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPAMHandle\n plural: ipamhandles\n singular: ipamhandle\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networksets.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkSet\n plural: networksets\n singular: networkset\n", "canal-v1.8": "\n{{if eq .RBACConfig \"rbac\"}}\n# Calico Roles\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: calico\nrules:\n - apiGroups: [\"\"]\n resources:\n - namespaces\n verbs:\n - get\n - list\n - watch\n - apiGroups: [\"\"]\n resources:\n - pods/status\n verbs:\n - update\n - apiGroups: [\"\"]\n resources:\n - pods\n verbs:\n - get\n - list\n - watch\n - patch\n - apiGroups: [\"\"]\n resources:\n - services\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - endpoints\n verbs:\n - get\n - apiGroups: [\"\"]\n resources:\n - nodes\n verbs:\n - get\n - list\n - update\n - watch\n - apiGroups: [\"networking.k8s.io\"]\n resources:\n - networkpolicies\n verbs:\n - get\n - list\n - watch\n - apiGroups: [\"crd.projectcalico.org\"]\n resources:\n - globalfelixconfigs\n - felixconfigurations\n - bgppeers\n - globalbgpconfigs\n - 
bgpconfigurations\n - ippools\n - globalnetworkpolicies\n - networkpolicies\n - clusterinformations\n - hostendpoints\n - globalnetworksets\n verbs:\n - create\n - get\n - list\n - update\n - watch\n\n---\n\n# Flannel roles\n# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: flannel\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n---\n\n# Bind the flannel ClusterRole to the canal ServiceAccount.\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: canal-flannel\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: flannel\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n\n---\n\n# Bind the calico ClusterRole to the canal ServiceAccount.\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: canal-calico\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: calico\nsubjects:\n- kind: ServiceAccount\n name: canal\n namespace: kube-system\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:nodes\n{{end}}\n\n# Canal Version v3.1.1\n# https://docs.projectcalico.org/v3.1/releases#v3.1.1\n# This manifest includes the following component versions:\n# calico/node:v3.1.1\n# calico/cni:v3.1.1\n# coreos/flannel:v0.9.1\n\n---\n# This ConfigMap can be used to configure a self-hosted Canal installation.\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: canal-config\n namespace: kube-system\ndata:\n # The interface used by canal for host \u003c-\u003e host communication.\n # If left blank, then the interface is chosen using the node's\n # default route.\n canal_iface: \"{{.CanalInterface}}\"\n\n # Whether or not to 
masquerade traffic to destinations not within\n # the pod network.\n masquerade: \"true\"\n\n # The CNI network configuration to install on each node.\n cni_network_config: |-\n {\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.0\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"WARNING\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"ipam\": {\n \"type\": \"host-local\",\n \"subnet\": \"usePodCidr\"\n },\n \"policy\": {\n \"type\": \"k8s\",\n \"k8s_auth_token\": \"__SERVICEACCOUNT_TOKEN__\"\n },\n \"kubernetes\": {\n \"k8s_api_root\": \"https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__\",\n \"kubeconfig\": \"{{.KubeCfg}}\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n }\n\n # Flannel network configuration. Mounted into the flannel container.\n net-conf.json: |\n {\n \"Network\": \"{{.ClusterCIDR}}\",\n \"Backend\": {\n \"Type\": \"{{.FlannelBackend.Type}}\",\n \"VNI\": {{.FlannelBackend.VNI}},\n \"Port\": {{.FlannelBackend.Port}}\n }\n }\n\n---\n\n# This manifest installs the calico/node container, as well\n# as the Calico CNI plugins and network config on\n# each master and worker node in a Kubernetes cluster.\nkind: DaemonSet\napiVersion: extensions/v1beta1\nmetadata:\n name: canal\n namespace: kube-system\n labels:\n k8s-app: canal\nspec:\n selector:\n matchLabels:\n k8s-app: canal\n updateStrategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n template:\n metadata:\n labels:\n k8s-app: canal\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: beta.kubernetes.io/os\n operator: NotIn\n values:\n - windows\n hostNetwork: true\n{{if .NodeSelector}}\n nodeSelector:\n {{ range 
$k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n{{end}}\n serviceAccountName: canal\n tolerations:\n # Tolerate this effect so the pods will be schedulable at all times\n - effect: NoSchedule\n operator: Exists\n # Mark the pod as a critical add-on for rescheduling.\n - key: CriticalAddonsOnly\n operator: Exists\n - effect: NoExecute\n operator: Exists\n - key: \"node-role.kubernetes.io/controlplane\"\n operator: \"Exists\"\n effect: \"NoSchedule\"\n - key: \"node-role.kubernetes.io/etcd\"\n operator: \"Exists\"\n effect: \"NoExecute\"\n # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a \"force\n # deletion\": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.\n terminationGracePeriodSeconds: 0\n containers:\n # Runs calico/node container on each Kubernetes node. This\n # container programs network policy and routes on each\n # host.\n - name: calico-node\n image: {{.NodeImage}}\n env:\n # Use Kubernetes API as the backing datastore.\n - name: DATASTORE_TYPE\n value: \"kubernetes\"\n # Disable felix logging to file\n - name: FELIX_LOGFILEPATH\n value: \"none\"\n # Disable felix logging for syslog\n - name: FELIX_LOGSEVERITYSYS\n value: \"\"\n # Enable felix logging to stdout\n - name: FELIX_LOGSEVERITYSCREEN\n value: \"Warning\"\n # Don't enable BGP.\n - name: CALICO_NETWORKING_BACKEND\n value: \"none\"\n # Cluster type to identify the deployment type\n - name: CLUSTER_TYPE\n value: \"k8s,canal\"\n # Disable file logging so kubectl logs works.\n - name: CALICO_DISABLE_FILE_LOGGING\n value: \"true\"\n # Period, in seconds, at which felix re-applies all iptables state\n - name: FELIX_IPTABLESREFRESHINTERVAL\n value: \"60\"\n # Disable IPV6 support in Felix.\n - name: FELIX_IPV6SUPPORT\n value: \"false\"\n # Wait for the datastore.\n - name: WAIT_FOR_DATASTORE\n value: \"true\"\n # No IP address needed.\n - name: IP\n value: \"\"\n - name: NODENAME\n valueFrom:\n fieldRef:\n fieldPath: 
spec.nodeName\n # Set Felix endpoint to host default action to ACCEPT.\n - name: FELIX_DEFAULTENDPOINTTOHOSTACTION\n value: \"ACCEPT\"\n - name: FELIX_HEALTHENABLED\n value: \"true\"\n securityContext:\n privileged: true\n resources:\n requests:\n cpu: 250m\n livenessProbe:\n httpGet:\n path: /liveness\n port: 9099\n periodSeconds: 10\n initialDelaySeconds: 10\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /readiness\n port: 9099\n periodSeconds: 10\n volumeMounts:\n - mountPath: /lib/modules\n name: lib-modules\n readOnly: true\n - mountPath: /var/run/calico\n name: var-run-calico\n readOnly: false\n - mountPath: /var/lib/calico\n name: var-lib-calico\n readOnly: false\n # This container installs the Calico CNI binaries\n # and CNI network config file on each node.\n - name: install-cni\n image: {{.CNIImage}}\n command: [\"/install-cni.sh\"]\n env:\n - name: CNI_CONF_NAME\n value: \"10-calico.conflist\"\n # The CNI network config to install on each node.\n - name: CNI_NETWORK_CONFIG\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: cni_network_config\n - name: KUBERNETES_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n volumeMounts:\n - mountPath: /host/opt/cni/bin\n name: cni-bin-dir\n - mountPath: /host/etc/cni/net.d\n name: cni-net-dir\n # This container runs flannel using the kube-subnet-mgr backend\n # for allocating subnets.\n - name: kube-flannel\n image: {{.CanalFlannelImg}}\n command: [ \"/opt/bin/flanneld\", \"--ip-masq\", \"--kube-subnet-mgr\" ]\n securityContext:\n privileged: true\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: FLANNELD_IFACE\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: canal_iface\n - name: FLANNELD_IP_MASQ\n valueFrom:\n configMapKeyRef:\n name: canal-config\n key: masquerade\n volumeMounts:\n - name: run\n mountPath: /run\n - name: flannel-cfg\n mountPath: 
/etc/kube-flannel/\n - name: xtables-lock\n mountPath: /run/xtables.lock\n readOnly: false\n volumes:\n # Used by calico/node.\n - name: lib-modules\n hostPath:\n path: /lib/modules\n - name: var-run-calico\n hostPath:\n path: /var/run/calico\n - name: var-lib-calico\n hostPath:\n path: /var/lib/calico\n # Used to install CNI.\n - name: cni-bin-dir\n hostPath:\n path: /opt/cni/bin\n - name: cni-net-dir\n hostPath:\n path: /etc/cni/net.d\n # Used by flannel.\n - name: run\n hostPath:\n path: /run\n - name: flannel-cfg\n configMap:\n name: canal-config\n - name: xtables-lock\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n\n# Create all the CustomResourceDefinitions needed for\n# Calico policy-only mode.\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: felixconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: FelixConfiguration\n plural: felixconfigurations\n singular: felixconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: bgpconfigurations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: BGPConfiguration\n plural: bgpconfigurations\n singular: bgpconfiguration\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: ippools.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: IPPool\n plural: ippools\n singular: ippool\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: clusterinformations.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: ClusterInformation\n plural: clusterinformations\n singular: clusterinformation\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: 
CustomResourceDefinition\nmetadata:\n name: globalnetworkpolicies.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkPolicy\n plural: globalnetworkpolicies\n singular: globalnetworkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: networkpolicies.crd.projectcalico.org\nspec:\n scope: Namespaced\n group: crd.projectcalico.org\n version: v1\n names:\n kind: NetworkPolicy\n plural: networkpolicies\n singular: networkpolicy\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: globalnetworksets.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: GlobalNetworkSet\n plural: globalnetworksets\n singular: globalnetworkset\n\n---\n\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: hostendpoints.crd.projectcalico.org\nspec:\n scope: Cluster\n group: crd.projectcalico.org\n version: v1\n names:\n kind: HostEndpoint\n plural: hostendpoints\n singular: hostendpoint\n\n---\n\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: canal\n namespace: kube-system\n", "coredns-v1.16": "\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - namespaces\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nroleRef:\n apiGroup: 
rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:coredns\nsubjects:\n- kind: ServiceAccount\n name: coredns\n namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns\n namespace: kube-system\ndata:\n Corefile: |\n .:53 {\n errors\n health\n ready\n kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n pods insecure\n fallthrough in-addr.arpa ip6.arpa\n }\n prometheus :9153\n\t{{- if .UpstreamNameservers }}\n forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n forward . \"/etc/resolv.conf\"\n\t{{- end }}\n cache 30\n loop\n reload\n loadbalance\n }\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/name: \"CoreDNS\"\nspec:\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n spec:\n priorityClassName: system-cluster-critical\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns\n{{- end }}\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n containers:\n - name: coredns\n image: {{.CoreDNSImage}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n args: [ \"-conf\", 
\"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n readOnly: true\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n - containerPort: 9153\n name: metrics\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /ready\n port: 8181\n scheme: HTTP\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n add:\n - NET_BIND_SERVICE\n drop:\n - all\n readOnlyRootFilesystem: true\n dnsPolicy: Default\n volumes:\n - name: config-volume\n configMap:\n name: coredns\n items:\n - key: Corefile\n path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n annotations:\n prometheus.io/port: \"9153\"\n prometheus.io/scrape: \"true\"\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n - name: metrics\n port: 9153\n protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: coredns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: coredns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: coredns-autoscaler\n spec:\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns-autoscaler\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: 
autoscaler\n image: {{.CoreDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=coredns-autoscaler\n - --target=Deployment/coredns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if .LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n - --logtostderr=true\n - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\",\"apps\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: coredns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:coredns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}", "coredns-v1.17": "\n---\n{{- if eq .RBACConfig \"rbac\"}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns\n namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nrules:\n- apiGroups:\n - \"\"\n 
resources:\n - endpoints\n - services\n - pods\n - namespaces\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n labels:\n kubernetes.io/bootstrapping: rbac-defaults\n name: system:coredns\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:coredns\nsubjects:\n- kind: ServiceAccount\n name: coredns\n namespace: kube-system\n{{- end }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns\n namespace: kube-system\ndata:\n Corefile: |\n .:53 {\n errors\n health {\n lameduck 5s\n }\n ready\n kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ \"in-addr.arpa ip6.arpa\" }}{{ end }} {\n pods insecure\n fallthrough in-addr.arpa ip6.arpa\n }\n prometheus :9153\n\t{{- if .UpstreamNameservers }}\n forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}\n\t{{- else }}\n forward . 
\"/etc/resolv.conf\"\n\t{{- end }}\n cache 30\n loop\n reload\n loadbalance\n }\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/name: \"CoreDNS\"\nspec:\n replicas: 1\n strategy:\n{{if .UpdateStrategy}}\n{{ toYaml .UpdateStrategy | indent 4}}\n{{else}}\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n{{end}}\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n seccomp.security.alpha.kubernetes.io/pod: 'docker/default'\n spec:\n priorityClassName: system-cluster-critical\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns\n{{- end }}\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n nodeSelector:\n beta.kubernetes.io/os: linux\n {{ range $k, $v := .NodeSelector }}\n {{ $k }}: \"{{ $v }}\"\n {{ end }}\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchExpressions:\n - key: k8s-app\n operator: In\n values: [\"kube-dns\"]\n topologyKey: kubernetes.io/hostname\n containers:\n - name: coredns\n image: {{.CoreDNSImage}}\n imagePullPolicy: IfNotPresent\n resources:\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n readOnly: true\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n - containerPort: 9153\n name: metrics\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n 
failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /ready\n port: 8181\n scheme: HTTP\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n add:\n - NET_BIND_SERVICE\n drop:\n - all\n readOnlyRootFilesystem: true\n dnsPolicy: Default\n volumes:\n - name: config-volume\n configMap:\n name: coredns\n items:\n - key: Corefile\n path: Corefile\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n annotations:\n prometheus.io/port: \"9153\"\n prometheus.io/scrape: \"true\"\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: {{.ClusterDNSServer}}\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n - name: metrics\n port: 9153\n protocol: TCP\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n labels:\n k8s-app: coredns-autoscaler\nspec:\n selector:\n matchLabels:\n k8s-app: coredns-autoscaler\n template:\n metadata:\n labels:\n k8s-app: coredns-autoscaler\n spec:\n{{- if eq .RBACConfig \"rbac\"}}\n serviceAccountName: coredns-autoscaler\n{{- end }}\n nodeSelector:\n beta.kubernetes.io/os: linux\n affinity:\n nodeAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n nodeSelectorTerms:\n - matchExpressions:\n - key: node-role.kubernetes.io/worker\n operator: Exists\n tolerations:\n - effect: NoExecute\n operator: Exists\n - effect: NoSchedule\n operator: Exists\n containers:\n - name: autoscaler\n image: {{.CoreDNSAutoScalerImage}}\n resources:\n requests:\n cpu: \"20m\"\n memory: \"10Mi\"\n command:\n - /cluster-proportional-autoscaler\n - --namespace=kube-system\n - --configmap=coredns-autoscaler\n - --target=Deployment/coredns\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n{{if 
.LinearAutoscalerParams}}\n - --default-params={\"linear\":{{.LinearAutoscalerParams}}}\n{{else}}\n - --default-params={\"linear\":{\"coresPerReplica\":128,\"nodesPerReplica\":4,\"min\":1,\"preventSinglePointFailure\":true}}\n{{end}}\n - --nodelabels=node-role.kubernetes.io/worker=true,beta.kubernetes.io/os=linux\n - --logtostderr=true\n - --v=2\n{{- if eq .RBACConfig \"rbac\"}}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: coredns-autoscaler\n namespace: kube-system\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\",\"apps\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:coredns-autoscaler\nsubjects:\n - kind: ServiceAccount\n name: coredns-autoscaler\n namespace: kube-system\nroleRef:\n kind: ClusterRole\n name: system:coredns-autoscaler\n apiGroup: rbac.authorization.k8s.io\n{{- end }}", diff --git a/rke/templates/canal.go b/rke/templates/canal.go index 85615d611..4b7409db1 100644 --- a/rke/templates/canal.go +++ b/rke/templates/canal.go @@ -806,7 +806,9 @@ data: { "Network": "{{.ClusterCIDR}}", "Backend": { - "Type": "{{.FlannelBackend.Type}}" + "Type": "{{.FlannelBackend.Type}}", + "VNI": {{.FlannelBackend.VNI}}, + "Port": {{.FlannelBackend.Port}} } } --- @@ -1422,7 +1424,9 @@ data: { "Network": "{{.ClusterCIDR}}", "Backend": { - "Type": "{{.FlannelBackend.Type}}" + "Type": "{{.FlannelBackend.Type}}", + "VNI": {{.FlannelBackend.VNI}}, + "Port": {{.FlannelBackend.Port}} } } --- @@ -2052,7 +2056,9 @@ data: { "Network": "{{.ClusterCIDR}}", "Backend": { 
- "Type": "{{.FlannelBackend.Type}}" + "Type": "{{.FlannelBackend.Type}}", + "VNI": {{.FlannelBackend.VNI}}, + "Port": {{.FlannelBackend.Port}} } } --- @@ -2752,7 +2758,9 @@ data: { "Network": "{{.ClusterCIDR}}", "Backend": { - "Type": "{{.FlannelBackend.Type}}" + "Type": "{{.FlannelBackend.Type}}", + "VNI": {{.FlannelBackend.VNI}}, + "Port": {{.FlannelBackend.Port}} } } --- @@ -3273,7 +3281,9 @@ data: { "Network": "{{.ClusterCIDR}}", "Backend": { - "Type": "{{.FlannelBackend.Type}}" + "Type": "{{.FlannelBackend.Type}}", + "VNI": {{.FlannelBackend.VNI}}, + "Port": {{.FlannelBackend.Port}} } } @@ -3913,7 +3923,9 @@ data: { "Network": "{{.ClusterCIDR}}", "Backend": { - "Type": "{{.FlannelBackend.Type}}" + "Type": "{{.FlannelBackend.Type}}", + "VNI": {{.FlannelBackend.VNI}}, + "Port": {{.FlannelBackend.Port}} } } @@ -4728,7 +4740,9 @@ data: { "Network": "{{.ClusterCIDR}}", "Backend": { - "Type": "{{.FlannelBackend.Type}}" + "Type": "{{.FlannelBackend.Type}}", + "VNI": {{.FlannelBackend.VNI}}, + "Port": {{.FlannelBackend.Port}} } } --- @@ -5428,7 +5442,9 @@ data: { "Network": "{{.ClusterCIDR}}", "Backend": { - "Type": "{{.FlannelBackend.Type}}" + "Type": "{{.FlannelBackend.Type}}", + "VNI": {{.FlannelBackend.VNI}}, + "Port": {{.FlannelBackend.Port}} } } ---