@@ -781,108 +781,111 @@ write_files:
#!/bin/bash -e

kubectl() {
- /usr/bin/docker run --rm --net=host -v /srv/kubernetes:/srv/kubernetes {{.HyperkubeImage.RepoWithTag}} /hyperkube kubectl "$@"
+ # --request-timeout=1s tells kubectl to give up quickly on requests to unresponsive apiservices,
+ # so that temporary unresponsiveness of a specific apiservice before the apiserver/controller-manager
+ # has fully started doesn't stall the whole controller bootstrap process.
+ /usr/bin/docker run --rm --net=host -v /srv/kubernetes:/srv/kubernetes {{.HyperkubeImage.RepoWithTag}} /hyperkube kubectl --request-timeout=1s "$@"
+ }
+
+ ks() {
+ kubectl --namespace kube-system "$@"
+ }
+
+ # Batch as many files as possible into a single apply, to reduce the total delay caused by flakiness in API aggregation.
+ # See https://github.com/kubernetes-incubator/kube-aws/issues/1039
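+ # For example, `applyall "${mfdir}/a.yaml" "${mfdir}/b.yaml"` runs a single `kubectl apply -f ${mfdir}/a.yaml,${mfdir}/b.yaml` (a.yaml and b.yaml are illustrative names).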
+ applyall() {
+ kubectl apply -f $(echo "$@" | tr ' ' ',')
}

while ! kubectl get ns kube-system; do
echo Waiting until kube-system created.
sleep 3
done

+ # See https://github.com/kubernetes-incubator/kube-aws/issues/1039#issuecomment-348978375
+ if ks get apiservice v1beta1.metrics.k8s.io && ! ps ax | grep '[h]yperkube proxy'; then
+ echo "apiserver is up but kube-proxy isn't up. We have likely encountered #1039."
+ echo "Temporarily deleting the v1beta1.metrics.k8s.io apiservice as a work-around for #1039"
+ ks delete apiservice v1beta1.metrics.k8s.io
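+ # It should be re-created when "${mfdir}/metrics-server-apisvc.yaml" is applied further below.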
+
+ echo Waiting until controller-manager stabilizes and creates a kube-proxy pod.
+ until ps ax | grep '[h]yperkube proxy'; do
+ echo Sleeping 3 seconds.
+ sleep 3
+ done
+ echo kube-proxy started. apiserver should be responsive again.
+ fi
+
mfdir=/srv/kubernetes/manifests

{{ if .UseCalico }}
/bin/bash /opt/bin/populate-tls-calico-etcd
- kubectl apply -f "${mfdir}/calico.yaml"
+ applyall "${mfdir}/calico.yaml"
{{ end }}

{{ if .Experimental.NodeDrainer.Enabled }}
- for manifest in {kube-node-drainer-ds,kube-node-drainer-asg-status-updater-de}; do
- kubectl apply -f "${mfdir}/$manifest.yaml"
- done
+ applyall "${mfdir}"/{kube-node-drainer-ds,kube-node-drainer-asg-status-updater-de}".yaml"
{{ end }}

# Secrets
- kubectl apply -f "${mfdir}/kubernetes-dashboard-se.yaml"
+ applyall "${mfdir}/kubernetes-dashboard-se.yaml"

# Configmaps
- for manifest in {kube-dns,kube-proxy}; do
- kubectl apply -f "${mfdir}/$manifest-cm.yaml"
- done
+ applyall "${mfdir}"/{kube-dns,kube-proxy}"-cm.yaml"

# Service Accounts
- for manifest in {kube-dns,heapster,kube-proxy,kubernetes-dashboard,metrics-server}; do
- kubectl apply -f "${mfdir}/$manifest-sa.yaml"
- done
+ applyall "${mfdir}"/{kube-dns,heapster,kube-proxy,kubernetes-dashboard,metrics-server}"-sa.yaml"

# Install tiller by default
- kubectl apply -f "${mfdir}/tiller.yaml"
+ applyall "${mfdir}/tiller.yaml"

{{ if .KubeDns.NodeLocalResolver }}
# DNS Masq Fix
- kubectl apply -f "${mfdir}/dnsmasq-node-ds.yaml"
+ applyall "${mfdir}/dnsmasq-node-ds.yaml"
{{ end }}

# Deployments
- for manifest in {kube-dns,kube-dns-autoscaler,kubernetes-dashboard,{{ if .Addons.ClusterAutoscaler.Enabled }}cluster-autoscaler,{{ end }}heapster{{ if .KubeResourcesAutosave.Enabled }},kube-resources-autosave{{ end }},metrics-server}; do
- kubectl apply -f "${mfdir}/$manifest-de.yaml"
- done
+ applyall "${mfdir}"/{kube-dns,kube-dns-autoscaler,kubernetes-dashboard,{{ if .Addons.ClusterAutoscaler.Enabled }}cluster-autoscaler,{{ end }}heapster{{ if .KubeResourcesAutosave.Enabled }},kube-resources-autosave{{ end }},metrics-server}"-de.yaml"

# Daemonsets
- for manifest in {kube-proxy,}; do
- kubectl apply -f "${mfdir}/$manifest-ds.yaml"
- done
+ applyall "${mfdir}/kube-proxy-ds.yaml"

# Services
- for manifest in {kube-dns,heapster,kubernetes-dashboard,metrics-server}; do
- kubectl apply -f "${mfdir}/$manifest-svc.yaml"
- done
+ applyall "${mfdir}"/{kube-dns,heapster,kubernetes-dashboard,metrics-server}"-svc.yaml"

{{- if .Addons.Rescheduler.Enabled }}
- kubectl apply -f "${mfdir}/kube-rescheduler-de.yaml"
+ applyall "${mfdir}/kube-rescheduler-de.yaml"
{{- end }}

# API Services
- for manifest in {metrics-server,}; do
- kubectl apply -f "${mfdir}/$manifest-apisvc.yaml"
- done
+ applyall "${mfdir}/metrics-server-apisvc.yaml"

mfdir=/srv/kubernetes/rbac

# Cluster roles and bindings
- for manifest in {node-extensions,metrics-server}; do
- kubectl apply -f "${mfdir}/cluster-roles/$manifest.yaml"
- done
- for manifest in {kube-admin,system-worker,node,node-proxier,node-extensions,heapster,metrics-server}; do
- kubectl apply -f "${mfdir}/cluster-role-bindings/$manifest.yaml"
- done
+ applyall "${mfdir}/cluster-roles"/{node-extensions,metrics-server}".yaml"
+
+ applyall "${mfdir}/cluster-role-bindings"/{kube-admin,system-worker,node,node-proxier,node-extensions,heapster,metrics-server}".yaml"

{{ if .KubernetesDashboard.AdminPrivileges }}
- kubectl apply -f "${mfdir}/cluster-role-bindings/kubernetes-dashboard-admin.yaml"
+ applyall "${mfdir}/cluster-role-bindings/kubernetes-dashboard-admin.yaml"
{{- end }}

# Roles and bindings
- for manifest in {pod-nanny,kubernetes-dashboard}; do
- kubectl apply -f "${mfdir}/roles/$manifest.yaml"
- done
- for manifest in {heapster-nanny,kubernetes-dashboard,metrics-server}; do
- kubectl apply -f "${mfdir}/role-bindings/$manifest.yaml"
- done
+ applyall "${mfdir}/roles"/{pod-nanny,kubernetes-dashboard}".yaml"
+
+ applyall "${mfdir}/role-bindings"/{heapster-nanny,kubernetes-dashboard,metrics-server}".yaml"

{{ if .Experimental.TLSBootstrap.Enabled }}
- for manifest in {node-bootstrapper,kubelet-certificate-bootstrap}; do
- kubectl apply -f "${mfdir}/cluster-roles/$manifest.yaml"
- done
+ applyall "${mfdir}/cluster-roles"/{node-bootstrapper,kubelet-certificate-bootstrap}".yaml"

- for manifest in {node-bootstrapper,kubelet-certificate-bootstrap}; do
- kubectl apply -f "${mfdir}/cluster-role-bindings/$manifest.yaml"
- done
+ applyall "${mfdir}/cluster-role-bindings"/{node-bootstrapper,kubelet-certificate-bootstrap}".yaml"
{{ end }}

{{if .Experimental.Kube2IamSupport.Enabled }}
mfdir=/srv/kubernetes/manifests
- kubectl apply -f "${mfdir}/kube2iam-rbac.yaml"
- kubectl apply -f "${mfdir}/kube2iam-ds.yaml";
+ applyall "${mfdir}/kube2iam-rbac.yaml"
+ applyall "${mfdir}/kube2iam-ds.yaml";
{{ end }}

- path: /etc/kubernetes/cni/docker_opts_cni.env