diff --git a/.gitignore b/.gitignore index 25317a4..54b068e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,11 @@ +# General +.vagrant/ +.DS_Store + +# Log files (if you are creating logs in debug mode, uncomment this) +# *.logs + +*.retry credentials/ -cluster.retry roles/debug debug.yml -.DS_Store -.vagrant -debug.retry diff --git a/upgrade-to-1.9.9.yml b/1.8.5-upgrade-to-1.9.9.yml similarity index 63% rename from upgrade-to-1.9.9.yml rename to 1.8.5-upgrade-to-1.9.9.yml index 4de277e..73b8f66 100644 --- a/upgrade-to-1.9.9.yml +++ b/1.8.5-upgrade-to-1.9.9.yml @@ -1,5 +1,5 @@ # 集群更新存在一定风险,请谨慎操作 -# 使用命令:ansible-playbook -i inventory/hosts upgrade-to-1.9.9.yml +# 使用命令:ansible-playbook -i inventory/hosts 1.8.5-upgrade-to-1.9.9.yml - hosts: all vars_prompt: @@ -36,44 +36,6 @@ --key /etc/ssl/etcd/ssl/client-key.pem \ snapshot save {{ etcd_back_path.stdout }} -# 升级etcd -- hosts: - - etcd - tasks: - - name: Upgrade etcdctl - shell: >- - docker run --rm --entrypoint cat registry.cn-hangzhou.aliyuncs.com/choerodon-tools/etcd:v3.3.6 \ - /usr/local/bin/etcdctl > /usr/local/bin/etcdctl && \ - chmod +x /usr/local/bin/etcdctl - register: etcd_task_result - until: etcd_task_result.rc == 0 - retries: 3 - delay: 300 - changed_when: false - - name: Edit etcd configfile - shell: sed -i 's/v3.2.4/v3.3.6/g' /usr/local/bin/etcd - - name: reload systemd - shell: systemctl daemon-reload - - name: Ensure etcd service is started and enabled - service: - name: etcd - enabled: yes - state: restarted - - name: Check if cluster is healthy - shell: >- - /usr/local/bin/etcdctl \ - --ca-file /etc/ssl/etcd/ssl/ca.pem \ - --cert-file /etc/ssl/etcd/ssl/client.pem \ - --key-file /etc/ssl/etcd/ssl/client-key.pem \ - --peers https://127.0.0.1:2379 cluster-health | grep -q 'cluster is healthy' - register: etcd_cluster_is_healthy - ignore_errors: true - changed_when: false - check_mode: no - until: etcd_cluster_is_healthy.rc == 0 - retries: 10 - delay: 5 - # 备份各节点配置文件 - hosts: - kube-master @@ -105,7 +67,8 @@ - name: Edit master Kubernetes configfile shell: >- sed -i 's/kubernetesVersion.*$/kubernetesVersion\:\ v1.9.9/g' /etc/kubernetes/kubeadm-config.yaml \ - && sed -i 's/GenericAdmissionWebhook/ValidatingAdmissionWebhook/g' /etc/kubernetes/kubeadm-config.yaml + && sed -i 's/GenericAdmissionWebhook/ValidatingAdmissionWebhook/g' /etc/kubernetes/kubeadm-config.yaml \ + && sed -i 's/imageRepository.*$/imageRepository\:\ registry.cn-hangzhou.aliyuncs.com\/google_containers/g' /etc/kubernetes/kubeadm-config.yaml # 更新yum包 - hosts: @@ -139,31 +102,11 @@ - name: Upgrade Kubernetes shell: 'kubeadm upgrade apply v1.9.9 --config=/etc/kubernetes/kubeadm-config.yaml -f' -# 更新docker配置 -- hosts: - - kube-master - tasks: - - name: Upgrade docker configfile - ignore_errors: yes - shell: >- - jq '.["registry-mirrors"]|= .+["https://registry.docker-cn.com"]' /etc/docker/daemon.json > /tmp/docker-daemon.json && \ - cp -f /tmp/docker-daemon.json /etc/docker/daemon.json - - name: remove old etcd_back - ignore_errors: yes - file: - path: /tmp/docker-daemon.json - state: absent - # 重启docker kubelet - hosts: all tasks: - name: reload systemd shell: systemctl daemon-reload - - name: restart docker - service: - name: docker - state: restarted - enabled: yes - name: restart kubelet service: name: kubelet diff --git a/1.9.9-upgrade-to-1.10.12.yml b/1.9.9-upgrade-to-1.10.12.yml new file mode 100644 index 0000000..3521d19 --- /dev/null +++ b/1.9.9-upgrade-to-1.10.12.yml @@ -0,0 +1,117 @@ +# 集群更新存在一定风险,请谨慎操作 +# 使用命令:ansible-playbook -i inventory/hosts 
1.9.9-upgrade-to-1.10.12.yml
+
+- hosts: all
+  vars_prompt:
+    name: "upgrade_confirmation"
+    prompt: "Are you sure you want to upgrade cluster state? Type 'yes' to upgrade your cluster."
+    default: "no"
+    private: no
+  pre_tasks:
+    - name: upgrade confirmation
+      fail:
+        msg: "upgrade confirmation failed"
+      when: upgrade_confirmation != "yes"
+
+# 备份etcd数据
+- hosts:
+  - etcd
+  tasks:
+    - name: Ensure etcd backup directory
+      become: yes
+      file:
+        path: "{{item}}"
+        state: directory
+        mode: 0700
+      with_items:
+        - /etc/kubernetes/etcd_back
+    - name: Generate etcd backup file name
+      shell: date "+/etc/kubernetes/etcd_back/etcd-%s.db"
+      register: etcd_back_path
+    - name: Snapshotting the etcd keyspace
+      shell: >-
+        ETCDCTL_API=3 /usr/local/bin/etcdctl \
+        --cacert /etc/ssl/etcd/ssl/ca.pem \
+        --cert /etc/ssl/etcd/ssl/client.pem \
+        --key /etc/ssl/etcd/ssl/client-key.pem \
+        snapshot save {{ etcd_back_path.stdout }}
+
+# 备份各节点配置文件
+- hosts:
+  - kube-master
+  - kube-node
+  tasks:
+    - name: Generate kubernetes backup configfile path
+      shell: date "+/etc/kubernetes_back/kubernetes-%s"
+      register: kubernetes_config_back_path
+    - name: Ensure backup directory
+      become: yes
+      file:
+        path: "{{item}}"
+        state: directory
+        mode: 0700
+      with_items:
+        - /etc/kubernetes_back
+        - "{{ kubernetes_config_back_path.stdout }}"
+    - name: Backup Kubernetes configfile
+      shell: cp -r /etc/kubernetes/* {{ kubernetes_config_back_path.stdout }}
+    - name: Remove old etcd_back
+      file:
+        path: /etc/kubernetes/etcd_back
+        state: absent
+
+# 修改master节点配置文件
+- hosts:
+  - kube-master
+  tasks:
+    - name: Edit master Kubernetes configfile
+      shell: >-
+        sed -i 's/kubernetesVersion.*$/kubernetesVersion\:\ v1.10.12/g' /etc/kubernetes/kubeadm-config.yaml \
+        && sed -i 's/GenericAdmissionWebhook/ValidatingAdmissionWebhook/g' /etc/kubernetes/kubeadm-config.yaml \
+        && sed -i 's/imageRepository.*$/imageRepository\:\ registry.cn-hangzhou.aliyuncs.com\/google_containers/g' /etc/kubernetes/kubeadm-config.yaml
+
+# 更新yum包
+- hosts:
+  - kube-master
+  - kube-node
+  tasks:
+    - name: Ensure yum repository
+      become: yes
+      yum_repository:
+        name: kubernetes
+        description: kubernetes Repository
+        baseurl: https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
+        enabled: no
+        gpgcheck: no
+        state: present
+    - name: Ensure Kubernetes Yum repository
+      become: yes
+      yum:
+        enablerepo: kubernetes
+        name: "{{ item }}"
+        state: present
+      with_items:
+        - kubeadm-1.10.12-0.x86_64
+        - kubectl-1.10.12-0.x86_64
+        - kubelet-1.10.12-0.x86_64
+
+# 正式升级
+- hosts:
+  - kube-master
+  tasks:
+    - name: Upgrade Kubernetes
+      shell: 'kubeadm upgrade apply v1.10.12 --config=/etc/kubernetes/kubeadm-config.yaml -f'
+
+# 重启docker kubelet
+- hosts: all
+  tasks:
+    - name: reload systemd
+      shell: systemctl daemon-reload
+    - name: restart kubelet
+      service:
+        name: kubelet
+        state: restarted
+        enabled: yes
+    # Issue: https://github.com/kubernetes/kubernetes/issues/21613
+    - name: Ensure bridge-nf-call-iptables is enabled (KubeDNS workaround)
+      shell: echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables
\ No newline at end of file
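The upgrade playbook above snapshots etcd before touching the cluster, but restoring that snapshot is a manual step. A minimal sketch of a restore on an etcd node, assuming etcd v3 and the data layout used by this repo (the snapshot file name, the restore data directory, and the member flags are illustrative and must be checked against your etcd unit file):

```
# Hypothetical restore flow for a snapshot taken by the play above.
systemctl stop etcd
# <timestamp> stands for the epoch suffix generated by the backup task.
ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot restore /etc/kubernetes/etcd_back/etcd-<timestamp>.db \
  --data-dir /var/lib/etcd-restored
# Repoint the etcd service at the restored data dir, then:
systemctl start etcd
```

diff --git a/README.md b/README.md
index 978c22d..bb016ad 100644
--- a/README.md
+++ b/README.md
@@ -9,23 +9,10 @@ Kubeadmin ansible is a toolkit for simple and quick installing k8s cluster.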
 Install the ansible run environment on the machine where the ansible script is to be executed:
 
 ```
-sudo yum install -y epel-release
-
-sudo yum install -y \
-    ansible \
-    git \
-    httpd-tools \
-    pyOpenSSL \
-    python-cryptography \
-    python-lxml \
-    python-netaddr \
-    python-passlib \
-    python-pip
-
-```
-View the version of ansible (version>=2.4.0.0)
-```
-ansible --version
+sudo yum install epel-release -y
+sudo yum install git python36 sshpass -y
+sudo python3.6 -m ensurepip
+sudo /usr/local/bin/pip3 install --no-cache-dir ansible==2.7.5 netaddr
 ```
 
 Clone project:
@@ -447,4 +434,25 @@ Pull requests are welcome! Follow [this link](https://github.com/choerodon/choer
 
 ## 8. Upgrading the cluster
 
+> Due to a kubeadm limitation, upgrades cannot skip a minor version: to reach 1.10.12, first upgrade the cluster to 1.9.9.
+
 - **There are certain risks in cluster update. Please be cautious.**
-- Use command:`ansible-playbook -i inventory/hosts upgrade-to-1.9.9.yml`
\ No newline at end of file
+- To upgrade to 1.9.9: `ansible-playbook -i inventory/hosts 1.8.5-upgrade-to-1.9.9.yml`
+- To upgrade to 1.10.12: `ansible-playbook -i inventory/hosts 1.9.9-upgrade-to-1.10.12.yml`
+
+## 9. Refresh cluster certificate
+
+> Refreshing the certificates requires the CA root certificate to be present. After the refresh, the kubelet on each master node is restarted to pick up the new certificates; the cluster API may be unavailable for 1-2 minutes, but running applications are not affected.
+
+```
+ansible-playbook -i inventory/hosts -e @inventory/vars renew-certs.yml
+```
+
+## 10. Load Choerodon images
+
+If you want to import the Choerodon platform images separately to speed up installation, run the following on the machine where Ansible is installed:
+
+```
+wget -O ~/c7n.tar http://oss.saas.hand-china.com/c7n.tar
+
+export ANSIBLE_HOST_KEY_CHECKING=False
+
+ansible-playbook -i inventory/hosts load-choerodon-images.yml
+```
\ No newline at end of file
diff --git a/README_zh-CN.md b/README_zh-CN.md
index c80b2d0..2ea6583 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -5,21 +5,10 @@
 在要执行ansible脚本的机器上安装ansible运行需要的环境:
 
 ```
-sudo yum install -y epel-release
-
-sudo yum install -y \
-    ansible \
-    git \
-    httpd-tools \
-    pyOpenSSL \
-    python-cryptography \
-    python-lxml \
-    python-netaddr \
-    python-passlib \
-    python-pip
-
-# 查看ansible版本(version>=2.4.0.0)
-ansible --version
+sudo yum install epel-release -y
+sudo yum install git python36 sshpass -y
+sudo python3.6 -m ensurepip
+sudo /usr/local/bin/pip3 install --no-cache-dir ansible==2.7.5 netaddr -i https://mirrors.aliyun.com/pypi/simple/
 ```
 
 克隆项目:
@@ -433,5 +422,28 @@ spec:
 
 ## 8. 升级集群
 
+> 由于使用kubeadm限制,不能跨次版本号进行升级,故需升级至1.10.12版本Kubernetes时,请先升级集群至1.9.9版本。
+
 - **集群更新存在一定风险,请谨慎操作**
-- 使用命令:`ansible-playbook -i inventory/hosts upgrade-to-1.9.9.yml`
\ No newline at end of file
+- 升级至1.9.9版本:`ansible-playbook -i inventory/hosts 1.8.5-upgrade-to-1.9.9.yml`
+- 升级至1.10.12版本:`ansible-playbook -i inventory/hosts 1.9.9-upgrade-to-1.10.12.yml`
+
+## 9. 刷新集群证书
+
+> 刷新证书的前提需要保证CA根证书存在,证书刷新后会重启master节点 kubelet 以应用新的证书,届时可能导致1-2分钟无法操作集群,但业务应用是不受影响的。
+
+```
+ansible-playbook -i inventory/hosts -e @inventory/vars renew-certs.yml
+```
+
+## 10. 导入猪齿鱼平台镜像
+
+如果需要单独导入猪齿鱼平台的镜像以加快安装速度,请在安装ansible的机器上执行:
+
+```
+wget -O ~/c7n.tar http://oss.saas.hand-china.com/c7n.tar
+
+export ANSIBLE_HOST_KEY_CHECKING=False
+
+ansible-playbook -i inventory/hosts load-choerodon-images.yml
+```
\ No newline at end of file
diff --git a/Vagrantfile b/Vagrantfile
index e556acf..6a4ad09 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -11,7 +11,8 @@ Vagrant.configure(2) do |config|
       n = 10 + i
       s.vm.network "private_network", ip: "192.168.56.#{n}"
       s.vm.provider "virtualbox" do |v|
-        v.memory = 2048
+        v.cpus = 2
+        v.memory = 4096
       end
     end
   end
@@ -20,4 +21,4 @@ if Vagrant.has_plugin?("vagrant-cachier")
     config.cache.scope = :box
   end
 
-end
\ No newline at end of file
+end
diff --git a/inventory/hosts b/inventory/hosts
index 1c67254..3bcbe10 100644
--- a/inventory/hosts
+++ b/inventory/hosts
@@ -1,24 +1,19 @@
 [all]
-node1 ansible_host=192.168.56.11 ansible_user=root ansible_ssh_pass=vagrant ansible_become=true
-node2 ansible_host=192.168.56.12 ansible_user=root ansible_ssh_pass=vagrant ansible_become=true
-node3 ansible_host=192.168.56.13 ansible_user=root ansible_ssh_pass=vagrant ansible_become=true
+node1 ansible_host=192.168.56.11 ip=192.168.56.11 ansible_user=root ansible_ssh_pass=vagrant ansible_become=true
+node2 ansible_host=192.168.56.12 ip=192.168.56.12 ansible_user=root ansible_ssh_pass=vagrant ansible_become=true
+node3 ansible_host=192.168.56.13 ip=192.168.56.13 ansible_user=root ansible_ssh_pass=vagrant ansible_become=true
 
 [kube-master]
 node1
 node2
 node3
 
-
 [etcd]
 node1
 node2
 node3
 
-
 [kube-node]
 node1
 node2
 node3
-
-
-
diff --git a/inventory/vars b/inventory/vars
index e0a81ae..9be7c8c 100644
--- a/inventory/vars
+++ b/inventory/vars
@@ -3,14 +3,17 @@
 # 暂时只支持v1.7.0、v1.8.1、v1.8.5
 kube_version: "v1.8.5"
 
-# 请指定iPv4网卡名称
-k8s_interface: "enp0s8"
-
 # pod的ip范围
 kube_pods_subnet: 10.233.64.0/18
 
 # service的ip范围
 kube_service_addresses: 10.233.0.0/18
 
+# kubelet根目录
+kube_storage_dir: "/var/lib/kubelet"
+
+# docker根目录
+docker_storage_dir: "/var/lib/docker"
+
 # kube-lego组件邮箱
-kube_lego_email: example@choerodon.io
\ No newline at end of file
+kube_lego_email: example@choerodon.io
diff --git a/load-choerodon-images.yml b/load-choerodon-images.yml
new file mode 100644
index 0000000..ee8f073
--- /dev/null
+++ b/load-choerodon-images.yml
@@ -0,0 +1,23 @@
+---
+- hosts: all
+  tasks:
+    - name: Create tmp_c7n folder
+      file:
+        path: ~/tmp_c7n
+        state: directory
+        mode: 0755
+    - name: Copy images file
+      copy:
+        src: ~/c7n.tar
+        dest: ~/tmp_c7n
+        owner: root
+        group: root
+        mode: 0644
+      async: 0
+      poll: 10
+    - name: Loading images
+      shell: docker load -i c7n.tar
+      args:
+        chdir: ~/tmp_c7n
+    - name: Remove tmp_c7n folder
+      file: path=~/tmp_c7n state=absent
diff --git a/renew-certs.yml b/renew-certs.yml
new file mode 100644
index 0000000..4f9add6
--- /dev/null
+++ b/renew-certs.yml
@@ -0,0 +1,6 @@
+- hosts:
+  - kube-master
+  roles:
+    - base/variables
+    - base/cert
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
\ No newline at end of file
diff --git a/roles/addons/flannel/defaults/main.yml b/roles/addons/flannel/defaults/main.yml
index 8228ed0..c037087 100644
--- a/roles/addons/flannel/defaults/main.yml
+++ b/roles/addons/flannel/defaults/main.yml
@@ -7,4 +7,4 @@ flannel_cpu_limit: 300m
 flannel_memory_requests: 64M
 flannel_cpu_requests: 150m
 
-#flannel_interface: "enp0s8"
\ No newline at end of file
+flannel_iface: true
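The `flannel_iface` default above gates the `--iface=$(POD_IP)` flag in the flannel DaemonSet template further down in this diff. Similarly, the new `kube_storage_dir`/`docker_storage_dir` variables added to inventory/vars feed the kubelet `--root-dir` flag and the docker `data-root` setting; a minimal override sketch, assuming a dedicated data disk mounted at `/data` (the paths are illustrative, not repo defaults):

```
# inventory/vars, hypothetical override relocating kubelet and docker state
kube_storage_dir: "/data/kubelet"
docker_storage_dir: "/data/docker"
```

diff --git a/roles/addons/flannel/tasks/main.yml b/roles/addons/flannel/tasks/main.yml
index 8eaafe9..5d38b95 100644
---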
a/roles/addons/flannel/tasks/main.yml +++ b/roles/addons/flannel/tasks/main.yml @@ -32,6 +32,4 @@ - name: "Flannel | Start Resources" command: "kubectl apply -f /etc/kubernetes/addons/flannel/cni-flannel.yml -n kube-system" - when: inventory_hostname == groups['kube-master'][0] and flannel_rbac_manifest.changed and flannel_enable | default(true) - - + when: inventory_hostname == groups['kube-master'][0] and flannel_manifest.changed and flannel_enable | default(true) \ No newline at end of file diff --git a/roles/addons/flannel/templates/cni-flannel.yml.j2 b/roles/addons/flannel/templates/cni-flannel.yml.j2 index b22f380..4a21cd3 100644 --- a/roles/addons/flannel/templates/cni-flannel.yml.j2 +++ b/roles/addons/flannel/templates/cni-flannel.yml.j2 @@ -10,20 +10,19 @@ metadata: data: cni-conf.json: | { - "name":"cni0", - "cniVersion":"0.3.1", - "plugins":[ + "name": "cni0", + "plugins": [ { - "type":"flannel", - "delegate":{ - "forceAddress":true, - "isDefaultGateway":true + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true } }, { - "type":"portmap", - "capabilities":{ - "portMappings":true + "type": "portmap", + "capabilities": { + "portMappings": true } } ] @@ -51,19 +50,46 @@ spec: tier: node k8s-app: flannel spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule serviceAccountName: flannel + initContainers: + - name: install-cni + image: {{ flannel_image_repo }}:{{ flannel_image_tag }} + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: {{ flannel_image_repo }}:{{ flannel_image_tag }} - imagePullPolicy: IfNotPresent + command: + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr +{% if flannel_iface %} + - --iface=$(POD_IP) +{% endif %} resources: - limits: - cpu: {{ flannel_cpu_limit }} - memory: {{ flannel_memory_limit }} requests: cpu: {{ flannel_cpu_requests }} memory: {{ flannel_memory_requests }} - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr", "--iface=$(POD_IP)" ] + limits: + cpu: {{ flannel_cpu_limit }} + memory: {{ flannel_memory_limit }} securityContext: privileged: true env: @@ -75,39 +101,17 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace +{% if flannel_iface %} - name: POD_IP valueFrom: fieldRef: fieldPath: status.podIP +{% endif %} volumeMounts: - name: run mountPath: /run - - name: cni - mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ - - name: install-cni - image: {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }} - command: ["/install-cni.sh"] - env: - # The CNI network config to install on each node. 
- - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: kube-flannel-cfg - key: cni-conf.json - - name: CNI_CONF_NAME - value: "10-flannel.conflist" - volumeMounts: - - name: cni - mountPath: /host/etc/cni/net.d - - name: host-cni-bin - mountPath: /host/opt/cni/bin/ - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule volumes: - name: run hostPath: @@ -117,11 +121,4 @@ spec: path: /etc/cni/net.d - name: flannel-cfg configMap: - name: kube-flannel-cfg - - name: host-cni-bin - hostPath: - path: /opt/cni/bin - updateStrategy: - rollingUpdate: - maxUnavailable: {{ serial | default('20%') }} - type: RollingUpdate + name: kube-flannel-cfg \ No newline at end of file diff --git a/roles/base/cert/tasks/configfile.yml b/roles/base/cert/tasks/configfile.yml new file mode 100644 index 0000000..29d2ee1 --- /dev/null +++ b/roles/base/cert/tasks/configfile.yml @@ -0,0 +1,79 @@ +- name: create admin.conf + shell: > + cd /etc/kubernetes/pki/ && + kubectl config set-cluster kubernetes + --certificate-authority=ca.crt + --embed-certs=true + --server=https://{{ ansible_host | trim }}:{{ kube_apiserver_port | trim }} + --kubeconfig=/etc/kubernetes/admin.conf && + kubectl config set-credentials kubernetes-admin + --client-certificate=admin.crt + --client-key=admin.key + --embed-certs=true + --kubeconfig=/etc/kubernetes/admin.conf && + kubectl config set-context kubernetes-admin@kubernetes + --cluster=kubernetes + --user=kubernetes-admin + --kubeconfig=/etc/kubernetes/admin.conf && + kubectl config use-context + kubernetes-admin@kubernetes + --kubeconfig=/etc/kubernetes/admin.conf + +- name: create controller-manager.conf + shell: > + cd /etc/kubernetes/pki/ && + kubectl config set-cluster kubernetes + --certificate-authority=ca.crt + --embed-certs=true + --server=https://{{ ansible_host | trim }}:{{ kube_apiserver_port | trim }} + --kubeconfig=/etc/kubernetes/controller-manager.conf && + kubectl config set-credentials system:kube-controller-manager + --client-certificate=kube-controller-manager.crt + --client-key=sa.key + --embed-certs=true + --kubeconfig=/etc/kubernetes/controller-manager.conf && + kubectl config set-context system:kube-controller-manager@kubernetes + --cluster=kubernetes + --user=system:kube-controller-manager + --kubeconfig=/etc/kubernetes/controller-manager.conf && + kubectl config use-context system:kube-controller-manager@kubernetes + --kubeconfig=/etc/kubernetes/controller-manager.conf + +- name: create scheduler.conf + shell: > + cd /etc/kubernetes/pki/ && + kubectl config set-cluster kubernetes + --certificate-authority=ca.crt + --embed-certs=true + --server=https://{{ ansible_host | trim }}:{{ kube_apiserver_port | trim }} + --kubeconfig=/etc/kubernetes/scheduler.conf && + kubectl config set-credentials system:kube-scheduler + --client-certificate=kube-scheduler.crt + --client-key=kube-scheduler.key + --embed-certs=true + --kubeconfig=/etc/kubernetes/scheduler.conf && + kubectl config set-context system:kube-scheduler@kubernetes + --cluster=kubernetes + --user=system:kube-scheduler + --kubeconfig=/etc/kubernetes/scheduler.conf && + kubectl config use-context system:kube-scheduler@kubernetes + --kubeconfig=/etc/kubernetes/scheduler.conf + +- name: create kubelet.conf + shell: > + cd /etc/kubernetes/pki/ && + kubectl config set-cluster kubernetes + --certificate-authority=ca.crt + --embed-certs=true + --server=https://{{ ansible_host | trim }}:{{ kube_apiserver_port | trim }} + 
--kubeconfig=/etc/kubernetes/kubelet.conf && + kubectl config set-credentials system:node:{{ inventory_hostname }} + --client-certificate=apiserver-kubelet-client.crt + --client-key=apiserver-kubelet-client.key + --embed-certs=true + --kubeconfig=/etc/kubernetes/kubelet.conf && + kubectl config set-context system:node:{{ inventory_hostname }}@kubernetes + --cluster=kubernetes --user=system:node:{{ inventory_hostname }} + --kubeconfig=/etc/kubernetes/kubelet.conf && + kubectl config use-context system:node:{{ inventory_hostname }}@kubernetes + --kubeconfig=/etc/kubernetes/kubelet.conf \ No newline at end of file diff --git a/roles/base/cert/tasks/gen-master-certs.yml b/roles/base/cert/tasks/gen-master-certs.yml new file mode 100644 index 0000000..7f5bf0d --- /dev/null +++ b/roles/base/cert/tasks/gen-master-certs.yml @@ -0,0 +1,230 @@ +# 根据stat信息判断是否已经生成过kubernetes证书,如果没有,退出操作 + +# 在第一台master节点上创建所需要的证书 +- block: + - name: 读取 kubernetes-ca 根证书私钥 stat 信息 + stat: + path: /etc/kubernetes/pki/ca.key + register: ca_key_stat + + - name: 读取 kubernetes-ca 根证书 stat 信息 + stat: + path: /etc/kubernetes/pki/ca.crt + register: ca_crt_stat + + - name: 校验根证书信息 + fail: + msg: "在 /etc/kubernetes/pki/ 目录中未找到根证书或秘钥,请确认后重试" + when: (ca_key_stat.stat.isreg is not defined) or (ca_crt_stat.stat.isreg is not defined) + + - name: 创建 kubernetes 的证书请求配置 + template: + src: kube-openssl.cnf.j2 + dest: /etc/kubernetes/pki/kube-openssl.cnf + owner: root + mode: 0644 + + - name: 创建 kube-apiserver 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out apiserver.key 2048 + + - name: 创建 kube-apiserver 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key apiserver.key + -subj "/CN=kube-apiserver" + -out apiserver.csr + + - name: 创建 kube-apiserver 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in apiserver.csr + -CAcreateserial + -extensions v3_req_peer + -extfile kube-openssl.cnf + -out apiserver.crt + + - name: 创建 apiserver-kubelet-client 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out apiserver-kubelet-client.key 2048 + + - name: 创建 apiserver-kubelet-client 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key apiserver-kubelet-client.key + -subj "/CN=kube-apiserver-kubelet-client/O=system:masters" + -out apiserver-kubelet-client.csr + + - name: 创建 apiserver-kubelet-client 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in apiserver-kubelet-client.csr + -CAcreateserial + -extensions v3_req_client + -extfile kube-openssl.cnf + -out apiserver-kubelet-client.crt + + - name: 创建 sa 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out sa.key 2048 + + - name: 根据 sa 私钥创建公钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl rsa -in sa.key -pubout -out sa.pub + + - name: 软链 sa 证书私钥为 kube-controller-manager 证书私钥 + file: + src: /etc/kubernetes/pki/sa.key + dest: /etc/kubernetes/pki/kube-controller-manager.key + state: link + run_once: true + delegate_to: "{{ groups['kube-master']|first }}" + + - name: 创建 kube-controller-manager 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key sa.key + -subj "/CN=system:kube-controller-manager" + -out kube-controller-manager.csr + + - name: 创建 kube-controller-manager 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in kube-controller-manager.csr + -CAcreateserial + -extensions v3_req_client + 
-extfile kube-openssl.cnf + -out kube-controller-manager.crt + + - name: 创建 kube-scheduler 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out kube-scheduler.key 2048 + + - name: 创建 kube-scheduler 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key kube-scheduler.key + -subj "/CN=system:kube-scheduler" + -out kube-scheduler.csr + + - name: 创建 kube-scheduler 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in kube-scheduler.csr + -CAcreateserial + -extensions v3_req_client + -extfile kube-openssl.cnf + -out kube-scheduler.crt + + - name: 创建 front-proxy-ca 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out front-proxy-ca.key 2048 + + - name: 创建 front-proxy-ca 根证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -x509 -new -nodes + -days {{ kube_certs_time }} + -key front-proxy-ca.key + -config kube-openssl.cnf + -subj "/CN=front-proxy-ca" + -extensions v3_ca + -out front-proxy-ca.crt + + - name: 创建 front-proxy-client 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out front-proxy-client.key 2048 + + - name: 创建 front-proxy-client 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key front-proxy-client.key + -subj "/CN=front-proxy-client" + -out front-proxy-client.csr + + - name: 创建 front-proxy-client 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA front-proxy-ca.crt -CAkey front-proxy-ca.key + -days {{ kube_certs_time }} + -in front-proxy-client.csr + -CAcreateserial + -extensions v3_req_client + -extfile kube-openssl.cnf + -out front-proxy-client.crt + + - name: 创建 kubernetes cluster admin 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out admin.key 2048 + + - name: 创建 kubernetes cluster admin 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key admin.key + -subj "/CN=kubernetes-admin/O=system:masters" + -out admin.csr + + - name: 创建 kubernetes cluster admin 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in admin.csr + -CAcreateserial + -extensions v3_req_client + -extfile kube-openssl.cnf + -out admin.crt + + when: inventory_hostname == groups['kube-master']|first + +- name: 获取 kubernetes 相关证书 + slurp: + src: /etc/kubernetes/pki/{{ item }} + with_items: + - ca.crt + - ca.key + - apiserver.crt + - apiserver.key + - apiserver-kubelet-client.crt + - apiserver-kubelet-client.key + - sa.key + - sa.pub + - kube-controller-manager.crt + - kube-scheduler.crt + - kube-scheduler.key + - front-proxy-ca.crt + - front-proxy-ca.key + - front-proxy-client.crt + - front-proxy-client.key + - admin.crt + - admin.key + register: kubernetes_certs + delegate_to: "{{ groups['kube-master']|first }}" + run_once: true + +- name: 分发 kubernetes 相关证书到 master 节点 + copy: + dest: "{{ item.source }}" + content: "{{ item.content | b64decode }}" + owner: root + group: root + mode: 0700 + no_log: true + with_items: "{{ kubernetes_certs.results }}" + when: inventory_hostname != groups['kube-master']|first \ No newline at end of file diff --git a/roles/base/cert/tasks/main.yml b/roles/base/cert/tasks/main.yml new file mode 100644 index 0000000..6cb75f7 --- /dev/null +++ b/roles/base/cert/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: generate k8s certs + include: gen-master-certs.yml + +- name: Include config certs + include: configfile.yml + +- name: reload kubelet + service: + name: kubelet + state: restarted + +- name: Create kube config dir + file: 
+ path: "/root/.kube" + mode: "0700" + state: directory + +- name: Copy admin kubeconfig to root user home + copy: + src: "/etc/kubernetes/admin.conf" + dest: "/root/.kube/config" + remote_src: yes + mode: "0700" + backup: yes \ No newline at end of file diff --git a/roles/base/cert/templates/kube-openssl.cnf.j2 b/roles/base/cert/templates/kube-openssl.cnf.j2 new file mode 100644 index 0000000..4c93e03 --- /dev/null +++ b/roles/base/cert/templates/kube-openssl.cnf.j2 @@ -0,0 +1,35 @@ +[ req ] +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_ca ] +basicConstraints = critical, CA:TRUE +keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign +[ v3_req_server ] +basicConstraints = CA:FALSE +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth +subjectAltName = @alt_names_cluster +[ v3_req_client ] +basicConstraints = CA:FALSE +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth +[ v3_req_peer ] +basicConstraints = CA:FALSE +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth, clientAuth +subjectAltName = @alt_names_cluster +[ alt_names_cluster ] +DNS.1 = kubernetes +DNS.2 = kubernetes.default +DNS.3 = kubernetes.default.svc +DNS.4 = kubernetes.default.svc.{{dns_domain}} +DNS.5 = localhost +{% for host in groups['kube-master'] %} +DNS.{{ 5 + loop.index }} = {{ host }} +{% endfor %} +{% for host in groups['kube-master'] %} +IP.{{ loop.index }} = {% if k8s_interface is defined %}{{ hostvars[host]['ansible_'+k8s_interface].ipv4.address }}{% else %}{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{% endif %} +{% endfor %} +{% set idx = groups['kube-master'] | length | int * 1 + 1 %} +IP.{{ idx }} = {{ kube_apiserver_ip }} +IP.{{ idx + 1 }} = 127.0.0.1 \ No newline at end of file diff --git a/roles/base/docker/tasks/main.yml b/roles/base/docker/tasks/main.yml index c66f1bc..379d1cc 100644 --- a/roles/base/docker/tasks/main.yml +++ b/roles/base/docker/tasks/main.yml @@ -45,7 +45,7 @@ command: systemctl daemon-reload when: docker_proxy_enable and proxy_result.changed | default(false) - + - name: restart docker service: name: docker diff --git a/roles/base/docker/templates/docker-daemon.json.j2 b/roles/base/docker/templates/docker-daemon.json.j2 index 3964e40..29360e3 100644 --- a/roles/base/docker/templates/docker-daemon.json.j2 +++ b/roles/base/docker/templates/docker-daemon.json.j2 @@ -1,6 +1,5 @@ { "exec-opts": ["native.cgroupdriver=systemd"], - "registry-mirrors": ["https://registry.docker-cn.com"], "max-concurrent-downloads": 10, "storage-driver": "overlay", {% if insecure_registries is defined %} @@ -8,6 +7,7 @@ {% endif %} "log-level": "warn", "log-driver": "json-file", + "data-root": "{{ docker_storage_dir }}", "log-opts": { "max-size": "10m", "max-file": "3" diff --git a/roles/base/docker/templates/docker-proxy.conf b/roles/base/docker/templates/docker-proxy.conf index fabcc98..3ebca29 100644 --- a/roles/base/docker/templates/docker-proxy.conf +++ b/roles/base/docker/templates/docker-proxy.conf @@ -1,4 +1,4 @@ [Service] Environment="HTTP_PROXY={{http_proxy}}" Environment="HTTPS_PROXY={{https_proxy}}" -Environment="NO_PROXY={{no_proxy}}" \ No newline at end of file +Environment="NO_PROXY={{no_proxy | default ('localhost,127.0.0.0/8')}}" \ No newline at end of file diff --git a/roles/base/install/tasks/main.yml b/roles/base/install/tasks/main.yml index 712e632..c986160 100644 --- a/roles/base/install/tasks/main.yml +++ 
b/roles/base/install/tasks/main.yml
@@ -49,13 +49,14 @@
     - kubelet
 
-- name: Ensure jq package is installed
+- name: Ensure jq and nfs-utils packages are installed
   become: yes
   yum:
     name: "{{ item }}"
     state: latest
   with_items:
     - jq
+    - nfs-utils
     - bash-completion
   environment:
     http_proxy: "{{ http_proxy| default ('') }}"
diff --git a/roles/base/install/templates/20-kubelet-override-v1.7.0.conf b/roles/base/install/templates/20-kubelet-override-v1.7.0.conf
index 7a0ac86..0b6bd13 100644
--- a/roles/base/install/templates/20-kubelet-override-v1.7.0.conf
+++ b/roles/base/install/templates/20-kubelet-override-v1.7.0.conf
@@ -1,4 +1,4 @@
 [Service]
-Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image={{pause_amd_image_repo}}:{{pause_amd_image_tag}}"
+Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image={{pause_amd_image_repo}}:{{pause_amd_image_tag}} --root-dir={{ kube_storage_dir }}"
 Environment="KUBELET_DNS_ARGS=--cluster-dns={{kube_dns_server}} --cluster-domain={{dns_domain}}"
 Environment="KUBELET_CADVISOR_ARGS=--cadvisor-port={{kube_cadvisor_port}}"
\ No newline at end of file
diff --git a/roles/base/install/templates/20-kubelet-override-v1.8.1.conf b/roles/base/install/templates/20-kubelet-override-v1.8.1.conf
index a2118a2..066b822 100644
--- a/roles/base/install/templates/20-kubelet-override-v1.8.1.conf
+++ b/roles/base/install/templates/20-kubelet-override-v1.8.1.conf
@@ -4,6 +4,7 @@ Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image={{pause_amd_image_re
     --eviction-hard=memory.available<{{eviction.hard.memory.available}},nodefs.available<{{eviction.hard.nodefs.available}},imagefs.available<{{eviction.hard.imagefs.available}} \
     --eviction-minimum-reclaim=memory.available={{eviction.minimum_reclaim.memory.available}},nodefs.available={{eviction.minimum_reclaim.nodefs.available}},imagefs.available={{eviction.minimum_reclaim.imagefs.available}} \
     --eviction-pressure-transition-period={{eviction.pressure.transition.period}} \
-    --system-reserved=cpu={{system.reserved.cpu}},memory={{system.reserved.memory}}"
+    --system-reserved=cpu={{system.reserved.cpu}},memory={{system.reserved.memory}} \
+    --root-dir={{ kube_storage_dir }}"
 Environment="KUBELET_DNS_ARGS=--cluster-dns={{kube_dns_server}} --cluster-domain={{dns_domain}}"
 Environment="KUBELET_CADVISOR_ARGS=--cadvisor-port={{kube_cadvisor_port}}"
\ No newline at end of file
diff --git a/roles/base/install/templates/20-kubelet-override-v1.8.5.conf b/roles/base/install/templates/20-kubelet-override-v1.8.5.conf
index a2118a2..066b822 100644
--- a/roles/base/install/templates/20-kubelet-override-v1.8.5.conf
+++ b/roles/base/install/templates/20-kubelet-override-v1.8.5.conf
@@ -4,6 +4,7 @@ Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image={{pause_amd_image_re
     --eviction-hard=memory.available<{{eviction.hard.memory.available}},nodefs.available<{{eviction.hard.nodefs.available}},imagefs.available<{{eviction.hard.imagefs.available}} \
     --eviction-minimum-reclaim=memory.available={{eviction.minimum_reclaim.memory.available}},nodefs.available={{eviction.minimum_reclaim.nodefs.available}},imagefs.available={{eviction.minimum_reclaim.imagefs.available}} \
    --eviction-pressure-transition-period={{eviction.pressure.transition.period}} \
-    --system-reserved=cpu={{system.reserved.cpu}},memory={{system.reserved.memory}}"
+    --system-reserved=cpu={{system.reserved.cpu}},memory={{system.reserved.memory}} \
+    --root-dir={{ kube_storage_dir }}"
 Environment="KUBELET_DNS_ARGS=--cluster-dns={{kube_dns_server}} --cluster-domain={{dns_domain}}"
Environment="KUBELET_CADVISOR_ARGS=--cadvisor-port={{kube_cadvisor_port}}" \ No newline at end of file diff --git a/roles/base/prepare/tasks/centos.yml b/roles/base/prepare/tasks/centos.yml index 6650eae..9dd71c7 100644 --- a/roles/base/prepare/tasks/centos.yml +++ b/roles/base/prepare/tasks/centos.yml @@ -13,8 +13,26 @@ - k8s - preinstall -- selinux: - state: disabled +- name: Temporarily closed selinux + shell: "setenforce 0" + failed_when: false + +- name: Permanent closure selinux + lineinfile: + dest: /etc/selinux/config + regexp: "^SELINUX=" + line: "SELINUX=disabled" + +- name: Disable swap + shell: "swapoff -a && sysctl -w vm.swappiness=0" + ignore_errors: true + +- name: Delete fstab swap config + lineinfile: + path: /etc/fstab + regexp: 'swap' + state: absent + backup: 'yes' - name: Check presence of fastestmirror.conf stat: diff --git a/roles/base/prepare/tasks/etchosts.yml b/roles/base/prepare/tasks/etchosts.yml index 9108545..df20757 100644 --- a/roles/base/prepare/tasks/etchosts.yml +++ b/roles/base/prepare/tasks/etchosts.yml @@ -4,7 +4,7 @@ - name: Hosts | populate inventory into hosts file blockinfile: - dest: /etc/hosts + path: /etc/hosts block: |- {% for item in (groups['kube-master'] + groups['kube-node'] +groups['etcd']|default([]))|unique -%}{% if k8s_interface is defined %}{{hostvars[item]['ansible_'+k8s_interface].ipv4.address}}{% else %}{{hostvars[item]['ip']|default(hostvars[item]['ansible_default_ipv4']['address'])}}{% endif %} {{ item }} {{ item }}.{{ dns_domain }} {% endfor %} diff --git a/roles/base/prepare/tasks/main.yml b/roles/base/prepare/tasks/main.yml index b9e443c..1f73d5d 100644 --- a/roles/base/prepare/tasks/main.yml +++ b/roles/base/prepare/tasks/main.yml @@ -28,7 +28,6 @@ - "/etc/kubernetes/manifests" - "/etc/kubernetes/ssl/etcd" - - name: Create cni directories file: path: "{{ item }}" @@ -42,6 +41,13 @@ tags: - network +# Download portmap because kubernetes-cni does not include but flannel needs +- name: download portmap + get_url: + url: "{{k8s_yum_repo}}/tools/portmap-1.9.1" + dest: /opt/cni/bin/portmap + mode: 0755 + - name: Ensure Yum repository become: yes yum_repository: @@ -58,13 +64,21 @@ dest: /usr/local/bin/cfssl mode: 0755 when: inventory_hostname in (groups['kube-master'] + groups['etcd']|default([]))|unique - + environment: + http_proxy: "{{ http_proxy| default ('') }}" + https_proxy: "{{ https_proxy| default ('') }}" + no_proxy: "{{ no_proxy| default ('') }}" + - name: Download cfssljson get_url: url: "{{k8s_yum_repo}}/cfssl/cfssljson_linux-amd64" dest: /usr/local/bin/cfssljson mode: 0755 when: inventory_hostname in (groups['kube-master'] + groups['etcd']|default([]))|unique + environment: + http_proxy: "{{ http_proxy| default ('') }}" + https_proxy: "{{ https_proxy| default ('') }}" + no_proxy: "{{ no_proxy| default ('') }}" - name: Download cfssl-certinfo get_url: @@ -72,5 +86,8 @@ dest: /usr/local/bin/cfssl-certinfo mode: 0755 when: inventory_hostname in (groups['kube-master'] + groups['etcd']|default([]))|unique - + environment: + http_proxy: "{{ http_proxy| default ('') }}" + https_proxy: "{{ https_proxy| default ('') }}" + no_proxy: "{{ no_proxy| default ('') }}" diff --git a/roles/base/reset/tasks/main.yml b/roles/base/reset/tasks/main.yml index 7d2a0e4..ceb84cd 100644 --- a/roles/base/reset/tasks/main.yml +++ b/roles/base/reset/tasks/main.yml @@ -1,31 +1,41 @@ --- - name: reset | kubeadm reset command: kubeadm reset - failed_when: false - tags: - - kubeadm + ignore_errors: true -- name: reset | stop services +- name: reset | 
stop and disable services service: name: "{{ item }}" state: stopped + enabled: no with_items: - kubelet - etcd - failed_when: false - tags: - - services + - docker + ignore_errors: true -- name: reset | disable services - service: - name: "{{ item }}" - enabled: no +- name: reset | remove services + file: + path: "/etc/systemd/system/{{ item }}.service" + state: absent with_items: - - kubelet - etcd - failed_when: false - tags: - - services + +# - name: reset | gather mounted kubelet dirs +# shell: mount | grep /var/lib/kubelet | awk '{print $3}' +# check_mode: no +# register: mounted_dirs +# tags: +# - mounts + +# - name: reset | unmount kubelet dirs +# command: umount {{item}} +# with_items: '{{ mounted_dirs.stdout_lines }}' +# tags: +# - mounts + +- name: unmount kubelet filesystem + mount: path=/var/run/kubelet state=unmounted - name: reset | uninstall package yum: @@ -34,18 +44,9 @@ with_items: - kubeadm - kubectl - - kubelt - - kubernetes-cni - -- name: reset | remove services - file: - path: "/etc/systemd/system/{{ item }}.service" - state: absent - with_items: - kubelet - - etcd - tags: - - services + - kubernetes-cni + - docker-engine - name: reset | systemctl daemon-reload command: systemctl daemon-reload @@ -53,40 +54,14 @@ - name: reset | systemctl reset-failed command: systemctl reset-failed -- name: reset | remove all containers - shell: "docker ps -aq | xargs -r docker rm -fv" - register: remove_all_containers - retries: 4 - until: remove_all_containers.rc == 0 - delay: 5 - tags: - - docker - -- name: reset | restart docker - service: - name: docker - state: restarted - tags: - - docker - -- name: reset | gather mounted kubelet dirs - shell: mount | grep /var/lib/kubelet | awk '{print $3}' | tac - check_mode: no - register: mounted_dirs - tags: - - mounts - -- name: reset | unmount kubelet dirs - command: umount {{item}} - with_items: '{{ mounted_dirs.stdout_lines }}' - tags: - - mounts - - name: flush iptables iptables: flush: yes - tags: - - iptables + ignore_errors: true + +- name: cleanup networks + shell: "ip link del flannel.1; ip link del cni0;" + ignore_errors: true - name: reset | delete some files and directories file: @@ -95,8 +70,11 @@ with_items: - /etc/kubernetes - /etc/systemd/system/kubelet.service.d + - /etc/systemd/system/docker.service.d + - /etc/docker - /etc/yum.repos.d/k8s.repo - /var/lib/kubelet + - /var/lib/docker - /root/.kube - /var/lib/etcd - /etc/ssl/etcd @@ -105,29 +83,29 @@ - /etc/nginx - /opt/cni - /var/log/pods - - "/usr/local/bin/etcd" - - "/usr/local/bin/etcdctl" + - /usr/local/bin/etcd + - /usr/local/bin/etcdctl - /usr/local/bin/cfssl - /usr/local/bin/cfssljson - /usr/local/bin/cfssl-certinfo - /run/xtables.lock - /run/flannel - /run/kubernetes - tags: - - files + +- name: reset | Restart network + service: + name: "{{item}}" + state: restarted + with_items: + - network + - networking + args: + warn: false + ignore_errors: true - name: reset | remove host entries from /etc/hosts blockinfile: - dest: "/etc/hosts" + path: "/etc/hosts" state: absent follow: yes - marker: "# Ansible inventory hosts {mark}" - tags: - - dns - -- name: reset | Restart network - service: - name: network - state: restarted - tags: - - network \ No newline at end of file + marker: "# Ansible inventory hosts {mark}" \ No newline at end of file diff --git a/roles/base/variables/defaults/main.yml b/roles/base/variables/defaults/main.yml index fece5d1..3fe8efe 100644 --- a/roles/base/variables/defaults/main.yml +++ b/roles/base/variables/defaults/main.yml @@ -17,6 +17,8 
@@ kube_apiserver_port: 6443 kube_cadvisor_port: 4194 +kube_storage_dir: "/var/lib/kubelet" + # kube-dns service ip kube_dns_server: "{{ kube_service_addresses|ipaddr(10)|ipaddr('address') }}" @@ -72,10 +74,7 @@ dashboard_image_repo: "{{kube_image_repo}}/kubernetes-dashboard-amd64" dashboard_image_tag: "v1.7.1" flannel_image_repo: "{{image_repo}}/flannel" -flannel_image_tag: "v0.9.0" - -flannel_cni_image_repo: "{{image_repo}}/flannel-cni" -flannel_cni_image_tag: "v0.3.0" +flannel_image_tag: "v0.10.0-amd64" nginx_ingress_controller_image_repo: "{{image_repo}}/nginx-ingress-controller" nginx_ingress_controller_image_tag: "0.9.0-beta.17" @@ -94,7 +93,7 @@ nginx_image_tag: "1.11.4-alpine" # ================================= yum 源 ================================= -k8s_yum_repo: "http://file.choerodon.com.cn/kubernetes/" +k8s_yum_repo: "https://file.choerodon.com.cn/kubernetes" docker_version_name: "docker-engine-17.05.0.ce-1.el7.centos" @@ -121,4 +120,5 @@ kube_lego_enable: true # ================================= docker参数 ================================= insecure_registries: - - "{{ kube_service_addresses }}" \ No newline at end of file + - "{{ kube_service_addresses }}" +docker_storage_dir: "/var/lib/docker" \ No newline at end of file diff --git a/roles/master/tasks/configfile.yml b/roles/master/tasks/configfile.yml new file mode 100644 index 0000000..08e40b5 --- /dev/null +++ b/roles/master/tasks/configfile.yml @@ -0,0 +1,83 @@ +- name: create admin.conf + shell: > + cd /etc/kubernetes/pki/ && + kubectl config set-cluster kubernetes + --certificate-authority=ca.crt + --embed-certs=true + --server=https://{{ ansible_host | trim }}:{{ kube_apiserver_port | trim }} + --kubeconfig=/etc/kubernetes/admin.conf && + kubectl config set-credentials kubernetes-admin + --client-certificate=admin.crt + --client-key=admin.key + --embed-certs=true + --kubeconfig=/etc/kubernetes/admin.conf && + kubectl config set-context kubernetes-admin@kubernetes + --cluster=kubernetes + --user=kubernetes-admin + --kubeconfig=/etc/kubernetes/admin.conf && + kubectl config use-context + kubernetes-admin@kubernetes + --kubeconfig=/etc/kubernetes/admin.conf + +- name: create controller-manager.conf + shell: > + cd /etc/kubernetes/pki/ && + kubectl config set-cluster kubernetes + --certificate-authority=ca.crt + --embed-certs=true + --server=https://{{ ansible_host | trim }}:{{ kube_apiserver_port | trim }} + --kubeconfig=/etc/kubernetes/controller-manager.conf && + kubectl config set-credentials system:kube-controller-manager + --client-certificate=kube-controller-manager.crt + --client-key=sa.key + --embed-certs=true + --kubeconfig=/etc/kubernetes/controller-manager.conf && + kubectl config set-context system:kube-controller-manager@kubernetes + --cluster=kubernetes + --user=system:kube-controller-manager + --kubeconfig=/etc/kubernetes/controller-manager.conf && + kubectl config use-context system:kube-controller-manager@kubernetes + --kubeconfig=/etc/kubernetes/controller-manager.conf + +- name: create scheduler.conf + shell: > + cd /etc/kubernetes/pki/ && + kubectl config set-cluster kubernetes + --certificate-authority=ca.crt + --embed-certs=true + --server=https://{{ ansible_host | trim }}:{{ kube_apiserver_port | trim }} + --kubeconfig=/etc/kubernetes/scheduler.conf && + kubectl config set-credentials system:kube-scheduler + --client-certificate=kube-scheduler.crt + --client-key=kube-scheduler.key + --embed-certs=true + --kubeconfig=/etc/kubernetes/scheduler.conf && + kubectl config set-context 
system:kube-scheduler@kubernetes + --cluster=kubernetes + --user=system:kube-scheduler + --kubeconfig=/etc/kubernetes/scheduler.conf && + kubectl config use-context system:kube-scheduler@kubernetes + --kubeconfig=/etc/kubernetes/scheduler.conf + +- name: create kubelet.conf + shell: > + cd /etc/kubernetes/pki/ && + kubectl config set-cluster kubernetes + --certificate-authority=ca.crt + --embed-certs=true + --server=https://{{ ansible_host | trim }}:{{ kube_apiserver_port | trim }} + --kubeconfig=/etc/kubernetes/kubelet.conf && + kubectl config set-credentials system:node:{{ inventory_hostname }} + --client-certificate=apiserver-kubelet-client.crt + --client-key=apiserver-kubelet-client.key + --embed-certs=true + --kubeconfig=/etc/kubernetes/kubelet.conf && + kubectl config set-context system:node:{{ inventory_hostname }}@kubernetes + --cluster=kubernetes --user=system:node:{{ inventory_hostname }} + --kubeconfig=/etc/kubernetes/kubelet.conf && + kubectl config use-context system:node:{{ inventory_hostname }}@kubernetes + --kubeconfig=/etc/kubernetes/kubelet.conf + +- name: restart kubelet + command: /bin/true + notify: Master | restart kubelet \ No newline at end of file diff --git a/roles/master/tasks/gen-master-certs.yml b/roles/master/tasks/gen-master-certs.yml index 28c3eea..99dc968 100644 --- a/roles/master/tasks/gen-master-certs.yml +++ b/roles/master/tasks/gen-master-certs.yml @@ -1,42 +1,247 @@ ---- -- name: Create master certs dir - file: - path: "/etc/kubernetes/openssl-config" - state: directory - owner: root - mode: 0700 - recurse: yes - run_once: yes - delegate_to: "{{groups['kube-master'][0]}}" - -- name: Copy apiserver openssl cnf file - template: - src: "openssl-server.cnf.j2" - dest: "/etc/kubernetes/openssl-config/openssl-server.cnf" - mode: 0700 - run_once: yes - delegate_to: "{{groups['kube-master'][0]}}" +# 根据stat信息判断是否已经生成过kubernetes证书,如果没有,下一步生成证书 +# 如果已经有kubernetes证书,为了保证整个安装的幂等性,跳过证书生成的步骤 +- name: 读取 kubernetes-ca 根证书私钥 stat 信息 + stat: + path: /etc/kubernetes/pki/ca.key + register: ca_key_stat +- name: 读取 kubernetes-ca 根证书 stat 信息 + stat: + path: /etc/kubernetes/pki/ca.crt + register: ca_crt_stat +- name: 读取 kubernetes-admin 证书 stat 信息 + stat: + path: /etc/kubernetes/pki/admin.crt + register: admin_crt_stat -- name: Copy kubelet client openssl cnf file - template: - src: "openssl-client.cnf.j2" - dest: "/etc/kubernetes/openssl-config/openssl-client.cnf" - mode: 0700 - run_once: yes - delegate_to: "{{groups['kube-master'][0]}}" +# 在第一台master节点上创建所需要的证书 +- block: + - name: 创建 kubernetes 的证书请求配置 + template: + src: kube-openssl.cnf.j2 + dest: /etc/kubernetes/pki/kube-openssl.cnf + owner: root + mode: 0644 -- name: "Copy gen certs script" - template: - src: "gen_certs.sh.j2" - dest: "/etc/kubernetes/openssl-config/gen_certs.sh" - mode: 0700 - run_once: yes - delegate_to: "{{groups['kube-master'][0]}}" + - name: 创建 kubernetes-ca 根证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out ca.key 2048 + when: ca_key_stat.stat.isreg is not defined + + - name: 创建 kubernetes-ca 根证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -x509 -new -nodes + -days {{ kube_certs_time }} + -key ca.key + -config kube-openssl.cnf + -subj "/CN=kubernetes" + -extensions v3_ca + -out ca.crt + when: ca_crt_stat.stat.isreg is not defined + + - name: 创建 kube-apiserver 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out apiserver.key 2048 + + - name: 创建 kube-apiserver 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key apiserver.key + -subj 
"/CN=kube-apiserver" + -out apiserver.csr + + - name: 创建 kube-apiserver 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in apiserver.csr + -CAcreateserial + -extensions v3_req_peer + -extfile kube-openssl.cnf + -out apiserver.crt + + - name: 创建 apiserver-kubelet-client 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out apiserver-kubelet-client.key 2048 + + - name: 创建 apiserver-kubelet-client 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key apiserver-kubelet-client.key + -subj "/CN=kube-apiserver-kubelet-client/O=system:masters" + -out apiserver-kubelet-client.csr + + - name: 创建 apiserver-kubelet-client 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in apiserver-kubelet-client.csr + -CAcreateserial + -extensions v3_req_client + -extfile kube-openssl.cnf + -out apiserver-kubelet-client.crt + + - name: 创建 sa 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out sa.key 2048 + + - name: 根据 sa 私钥创建公钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl rsa -in sa.key -pubout -out sa.pub + + - name: 软链 sa 证书私钥为 kube-controller-manager 证书私钥 + file: + src: /etc/kubernetes/pki/sa.key + dest: /etc/kubernetes/pki/kube-controller-manager.key + state: link + run_once: true + delegate_to: "{{ groups['kube-master']|first }}" -- name: "Generate master certs" - shell: /etc/kubernetes/openssl-config/gen_certs.sh - run_once: yes - delegate_to: "{{groups['kube-master'][0]}}" - when: not kubeadm_ca.stat.exists + - name: 创建 kube-controller-manager 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key sa.key + -subj "/CN=system:kube-controller-manager" + -out kube-controller-manager.csr + - name: 创建 kube-controller-manager 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in kube-controller-manager.csr + -CAcreateserial + -extensions v3_req_client + -extfile kube-openssl.cnf + -out kube-controller-manager.crt + + - name: 创建 kube-scheduler 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out kube-scheduler.key 2048 + + - name: 创建 kube-scheduler 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key kube-scheduler.key + -subj "/CN=system:kube-scheduler" + -out kube-scheduler.csr + + - name: 创建 kube-scheduler 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in kube-scheduler.csr + -CAcreateserial + -extensions v3_req_client + -extfile kube-openssl.cnf + -out kube-scheduler.crt + + - name: 创建 front-proxy-ca 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out front-proxy-ca.key 2048 + + - name: 创建 front-proxy-ca 根证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -x509 -new -nodes + -days {{ kube_certs_time }} + -key front-proxy-ca.key + -config kube-openssl.cnf + -subj "/CN=front-proxy-ca" + -extensions v3_ca + -out front-proxy-ca.crt + + - name: 创建 front-proxy-client 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out front-proxy-client.key 2048 + + - name: 创建 front-proxy-client 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key front-proxy-client.key + -subj "/CN=front-proxy-client" + -out front-proxy-client.csr + + - name: 创建 front-proxy-client 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA front-proxy-ca.crt -CAkey front-proxy-ca.key + -days {{ kube_certs_time }} + -in 
front-proxy-client.csr + -CAcreateserial + -extensions v3_req_client + -extfile kube-openssl.cnf + -out front-proxy-client.crt + + - name: 创建 kubernetes cluster admin 证书私钥 + shell: > + cd /etc/kubernetes/pki/ && + openssl genrsa -out admin.key 2048 + + - name: 创建 kubernetes cluster admin 证书请求 + shell: > + cd /etc/kubernetes/pki/ && + openssl req -new -key admin.key + -subj "/CN=kubernetes-admin/O=system:masters" + -out admin.csr + + - name: 创建 kubernetes cluster admin 证书 + shell: > + cd /etc/kubernetes/pki/ && + openssl x509 -req -CA ca.crt -CAkey ca.key + -days {{ kube_certs_time }} + -in admin.csr + -CAcreateserial + -extensions v3_req_client + -extfile kube-openssl.cnf + -out admin.crt + + when: inventory_hostname == groups['kube-master']|first and (admin_crt_stat.stat.isreg is not defined) + +- name: 获取 kubernetes 相关证书 + slurp: + src: /etc/kubernetes/pki/{{ item }} + with_items: + - ca.crt + - ca.key + - apiserver.crt + - apiserver.key + - apiserver-kubelet-client.crt + - apiserver-kubelet-client.key + - sa.key + - sa.pub + - kube-controller-manager.crt + - kube-scheduler.crt + - kube-scheduler.key + - front-proxy-ca.crt + - front-proxy-ca.key + - front-proxy-client.crt + - front-proxy-client.key + - admin.crt + - admin.key + register: kubernetes_certs + delegate_to: "{{ groups['kube-master']|first }}" + run_once: true + +- name: 分发 kubernetes 相关证书到 master 节点 + copy: + dest: "{{ item.source }}" + content: "{{ item.content | b64decode }}" + owner: root + group: root + mode: 0700 + no_log: true + with_items: "{{ kubernetes_certs.results }}" + when: inventory_hostname != groups['kube-master']|first \ No newline at end of file diff --git a/roles/master/tasks/kubeadm-setup.yml b/roles/master/tasks/kubeadm-setup.yml index 11b0f28..1294e36 100644 --- a/roles/master/tasks/kubeadm-setup.yml +++ b/roles/master/tasks/kubeadm-setup.yml @@ -45,39 +45,6 @@ failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr notify: Master | restart kubelet -- name: slurp kubeadm certs - slurp: - src: "{{ item }}" - with_items: - - "/etc/kubernetes/pki/apiserver.crt" - - "/etc/kubernetes/pki/apiserver.key" - - "/etc/kubernetes/pki/apiserver-kubelet-client.crt" - - "/etc/kubernetes/pki/apiserver-kubelet-client.key" - - "/etc/kubernetes/pki/ca.crt" - - "/etc/kubernetes/pki/ca.key" - - "/etc/kubernetes/pki/front-proxy-ca.crt" - - "/etc/kubernetes/pki/front-proxy-ca.key" - - "/etc/kubernetes/pki/front-proxy-client.crt" - - "/etc/kubernetes/pki/front-proxy-client.key" - - "/etc/kubernetes/pki/sa.key" - - "/etc/kubernetes/pki/sa.pub" - register: kubeadm_certs - delegate_to: "{{ groups['kube-master']|first }}" - when: (groups['kube-master'] | length)!=1 - run_once: true - -- name: kubeadm | write out kubeadm certs - copy: - dest: "{{ item.item }}" - content: "{{ item.content | b64decode }}" - owner: root - group: root - mode: 0700 - no_log: true - register: copy_kubeadm_certs - with_items: "{{ kubeadm_certs.results }}" - when: inventory_hostname != groups['kube-master']|first - - name: kubeadm | Init other uninitialized masters environment: - KUBE_REPO_PREFIX: "{{kube_image_repo}}" @@ -90,7 +57,6 @@ failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr notify: Master | restart kubelet - - name: kubeadm | Create kubedns service config template: src: kube-dns-service.yml.j2 diff --git a/roles/master/tasks/main.yml b/roles/master/tasks/main.yml index 3f811cd..d4223bd 100644 --- a/roles/master/tasks/main.yml +++ b/roles/master/tasks/main.yml @@ -10,6 +10,10 @@ - 
name: Include kubeadm include: kubeadm-setup.yml +- name: Include config certs + include: configfile.yml + when: not kubeadm_ca.stat.exists + - name: Create kube config dir file: path: "/root/.kube" diff --git a/roles/master/templates/gen_certs.sh.j2 b/roles/master/templates/gen_certs.sh.j2 index 277c4d7..25591d5 100644 --- a/roles/master/templates/gen_certs.sh.j2 +++ b/roles/master/templates/gen_certs.sh.j2 @@ -38,4 +38,7 @@ openssl req -new -key front-proxy-client.key -out front-proxy-client.csr -subj " openssl x509 -req -in front-proxy-client.csr -CA front-proxy-ca.crt -CAkey front-proxy-ca.key -CAcreateserial -out front-proxy-client.crt -days {{kube_certs_time}} -extensions v3_req -extfile /etc/kubernetes/openssl-config/openssl-client.cnf - +# 6. admin +openssl genrsa -out admin.key 2048 +openssl req -new -key admin.key -subj "/CN=kubernetes-admin/O=system:masters" -out admin.csr +openssl x509 -req -CA ca.crt -CAkey ca.key -days {{ kube_certs_time }} -in admin.csr -CAcreateserial -extensions v3_req_client -extfile kube-openssl.cnf -out admin.crt diff --git a/roles/master/templates/kube-openssl.cnf.j2 b/roles/master/templates/kube-openssl.cnf.j2 new file mode 100644 index 0000000..4c93e03 --- /dev/null +++ b/roles/master/templates/kube-openssl.cnf.j2 @@ -0,0 +1,35 @@ +[ req ] +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_ca ] +basicConstraints = critical, CA:TRUE +keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign +[ v3_req_server ] +basicConstraints = CA:FALSE +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth +subjectAltName = @alt_names_cluster +[ v3_req_client ] +basicConstraints = CA:FALSE +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth +[ v3_req_peer ] +basicConstraints = CA:FALSE +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth, clientAuth +subjectAltName = @alt_names_cluster +[ alt_names_cluster ] +DNS.1 = kubernetes +DNS.2 = kubernetes.default +DNS.3 = kubernetes.default.svc +DNS.4 = kubernetes.default.svc.{{dns_domain}} +DNS.5 = localhost +{% for host in groups['kube-master'] %} +DNS.{{ 5 + loop.index }} = {{ host }} +{% endfor %} +{% for host in groups['kube-master'] %} +IP.{{ loop.index }} = {% if k8s_interface is defined %}{{ hostvars[host]['ansible_'+k8s_interface].ipv4.address }}{% else %}{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{% endif %} +{% endfor %} +{% set idx = groups['kube-master'] | length | int * 1 + 1 %} +IP.{{ idx }} = {{ kube_apiserver_ip }} +IP.{{ idx + 1 }} = 127.0.0.1 \ No newline at end of file diff --git a/scale.yml b/scale.yml index a2e983b..dee7f21 100644 --- a/scale.yml +++ b/scale.yml @@ -1,5 +1,12 @@ --- +##We still have to gather facts about our masters and etcd nodes +- hosts: kube-master:kube-node:etcd + any_errors_fatal: "{{ any_errors_fatal | default(true) }}" + vars: + ansible_ssh_pipelining: true + gather_facts: true + - hosts: kube-node any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: