This repository was archived by the owner on Sep 30, 2020. It is now read-only.

Commit 397e8ab

Two fixes to 0.9.9 rc.3 (#1043)
* Fix inability to re-render credentials. Fixes #1042
* The best possible work-around to stabilize apiserver + controller-manager w/ metrics-server. Fixes #1039
1 parent a3570fc commit 397e8ab
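
For context, the gist of the #1039 work-around in the cloud-config change below, distilled into a hypothetical standalone sketch. It assumes a kubectl binary on PATH (the committed template instead wraps kubectl in a hyperkube docker run); the manifest paths in the usage line are only illustrative:

#!/bin/bash -e

kubectl() {
  # --request-timeout=1s makes kubectl give up quickly on unresponsive aggregated
  # apiservices, so one flaky apiservice cannot stall the whole bootstrap loop.
  command kubectl --request-timeout=1s "$@"
}

applyall() {
  # Batch many manifests into a single apply ("a.yaml b.yaml" -> "a.yaml,b.yaml"),
  # so kubectl's API-discovery cost is paid once per batch instead of once per file.
  kubectl apply -f "$(echo "$@" | tr ' ' ',')"
}

# Example usage, mirroring the template:
applyall /srv/kubernetes/manifests/{kube-dns,kube-proxy}-cm.yaml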

File tree

2 files changed, +58 -54 lines changed

core/controlplane/config/encrypted_assets.go

Lines changed: 7 additions & 6 deletions
@@ -440,11 +440,6 @@ func ReadOrEncryptAssets(dirname string, manageCertificates bool, caKeyRequiredO
 }

 func (r *RawAssetsOnMemory) WriteToDir(dirname string, includeCAKey bool) error {
-	workerCAKeyDefaultSymlinkTo := ""
-	if includeCAKey {
-		workerCAKeyDefaultSymlinkTo = "ca-key.pem"
-	}
-
 	assets := []struct {
 		name string
 		data []byte
@@ -453,7 +448,6 @@ func (r *RawAssetsOnMemory) WriteToDir(dirname string, includeCAKey bool) error
 	}{
 		{"ca.pem", r.CACert, true, ""},
 		{"worker-ca.pem", r.WorkerCACert, true, "ca.pem"},
-		{"worker-ca-key.pem", r.WorkerCAKey, true, workerCAKeyDefaultSymlinkTo},
 		{"apiserver.pem", r.APIServerCert, true, ""},
 		{"apiserver-key.pem", r.APIServerKey, true, ""},
 		{"worker.pem", r.WorkerCert, true, ""},
@@ -480,6 +474,13 @@ func (r *RawAssetsOnMemory) WriteToDir(dirname string, includeCAKey bool) error
 			overwrite bool
 			ifEmptySymlinkTo string
 		}{"ca-key.pem", r.CAKey, true, ""})
+
+		assets = append(assets, struct {
+			name string
+			data []byte
+			overwrite bool
+			ifEmptySymlinkTo string
+		}{"worker-ca-key.pem", r.WorkerCAKey, true, "ca-key.pem"})
 	}

 	for _, asset := range assets {

core/controlplane/config/templates/cloud-config-controller

Lines changed: 51 additions & 48 deletions
@@ -781,108 +781,111 @@ write_files:
 #!/bin/bash -e

 kubectl() {
-  /usr/bin/docker run --rm --net=host -v /srv/kubernetes:/srv/kubernetes {{.HyperkubeImage.RepoWithTag}} /hyperkube kubectl "$@"
+  # --request-timeout=1s is intended to instruct kubectl to give up discovering unresponsive apiservice(s) in certain periods
+  # so that temporal freakiness/unresponsity of specific apiservice until apiserver/controller-manager fully starts doesn't
+  # affect the whole controller bootstrap process.
+  /usr/bin/docker run --rm --net=host -v /srv/kubernetes:/srv/kubernetes {{.HyperkubeImage.RepoWithTag}} /hyperkube kubectl --request-timeout=1s "$@"
+}
+
+ks() {
+  kubectl --namespace kube-system "$@"
+}
+
+# Try to batch as many files as possible to reduce the total amount of delay due to wilderness in the API aggregation
+# See https://github.com/kubernetes-incubator/kube-aws/issues/1039
+applyall() {
+  kubectl apply -f $(echo "$@" | tr ' ' ',')
 }

 while ! kubectl get ns kube-system; do
   echo Waiting until kube-system created.
   sleep 3
 done

+# See https://github.com/kubernetes-incubator/kube-aws/issues/1039#issuecomment-348978375
+if ks get apiservice v1beta1.metrics.k8s.io && ! ps ax | grep '[h]yperkube proxy'; then
+  echo "apiserver is up but kube-proxy isn't up. We have likely encountered #1039."
+  echo "Temporary deleting the v1beta1.metrics.k8s.io apiservice as a work-around for #1039"
+  ks delete apiservice v1beta1.metrics.k8s.io
+
+  echo Waiting until controller-manager stabilizes and it creates a kube-proxy pod.
+  until ps ax | grep '[h]yperkube proxy'; do
+    echo Sleeping 3 seconds.
+    sleep 3
+  done
+  echo kube-proxy stared. apiserver should be responsive again.
+fi
+
 mfdir=/srv/kubernetes/manifests

 {{ if .UseCalico }}
 /bin/bash /opt/bin/populate-tls-calico-etcd
-kubectl apply -f "${mfdir}/calico.yaml"
+applyall "${mfdir}/calico.yaml"
 {{ end }}

 {{ if .Experimental.NodeDrainer.Enabled }}
-for manifest in {kube-node-drainer-ds,kube-node-drainer-asg-status-updater-de}; do
-  kubectl apply -f "${mfdir}/$manifest.yaml"
-done
+applyall "${mfdir}/{kube-node-drainer-ds,kube-node-drainer-asg-status-updater-de}".yaml"
 {{ end }}

 # Secrets
-kubectl apply -f "${mfdir}/kubernetes-dashboard-se.yaml"
+applyall "${mfdir}/kubernetes-dashboard-se.yaml"

 # Configmaps
-for manifest in {kube-dns,kube-proxy}; do
-  kubectl apply -f "${mfdir}/$manifest-cm.yaml"
-done
+applyall "${mfdir}"/{kube-dns,kube-proxy}"-cm.yaml"

 # Service Accounts
-for manifest in {kube-dns,heapster,kube-proxy,kubernetes-dashboard,metrics-server}; do
-  kubectl apply -f "${mfdir}/$manifest-sa.yaml"
-done
+applyall "${mfdir}"/{kube-dns,heapster,kube-proxy,kubernetes-dashboard,metrics-server}"-sa.yaml"

 # Install tiller by default
-kubectl apply -f "${mfdir}/tiller.yaml"
+applyall "${mfdir}/tiller.yaml"

 {{ if .KubeDns.NodeLocalResolver }}
 # DNS Masq Fix
-kubectl apply -f "${mfdir}/dnsmasq-node-ds.yaml"
+applyall "${mfdir}/dnsmasq-node-ds.yaml"
 {{ end }}

 # Deployments
-for manifest in {kube-dns,kube-dns-autoscaler,kubernetes-dashboard,{{ if .Addons.ClusterAutoscaler.Enabled }}cluster-autoscaler,{{ end }}heapster{{ if .KubeResourcesAutosave.Enabled }},kube-resources-autosave{{ end }},metrics-server}; do
-  kubectl apply -f "${mfdir}/$manifest-de.yaml"
-done
+applyall "${mfdir}"/{kube-dns,kube-dns-autoscaler,kubernetes-dashboard,{{ if .Addons.ClusterAutoscaler.Enabled }}cluster-autoscaler,{{ end }}heapster{{ if .KubeResourcesAutosave.Enabled }},kube-resources-autosave{{ end }},metrics-server}"-de.yaml"

 # Daemonsets
-for manifest in {kube-proxy,}; do
-  kubectl apply -f "${mfdir}/$manifest-ds.yaml"
-done
+applyall "${mfdir}"/kube-proxy"-ds.yaml"

 # Services
-for manifest in {kube-dns,heapster,kubernetes-dashboard,metrics-server}; do
-  kubectl apply -f "${mfdir}/$manifest-svc.yaml"
-done
+applyall "${mfdir}"/{kube-dns,heapster,kubernetes-dashboard,metrics-server}"-svc.yaml"

 {{- if .Addons.Rescheduler.Enabled }}
-kubectl apply -f "${mfdir}/kube-rescheduler-de.yaml"
+applyall "${mfdir}/kube-rescheduler-de.yaml"
 {{- end }}

 # API Services
-for manifest in {metrics-server,}; do
-  kubectl apply -f "${mfdir}/$manifest-apisvc.yaml"
-done
+applyall "${mfdir}/metrics-server-apisvc.yaml"

 mfdir=/srv/kubernetes/rbac

 # Cluster roles and bindings
-for manifest in {node-extensions,metrics-server}; do
-  kubectl apply -f "${mfdir}/cluster-roles/$manifest.yaml"
-done
-for manifest in {kube-admin,system-worker,node,node-proxier,node-extensions,heapster,metrics-server}; do
-  kubectl apply -f "${mfdir}/cluster-role-bindings/$manifest.yaml"
-done
+applyall "${mfdir}/cluster-roles"/{node-extensions,metrics-server}".yaml"
+
+applyall "${mfdir}/cluster-role-bindings"/{kube-admin,system-worker,node,node-proxier,node-extensions,heapster,metrics-server}".yaml"

 {{ if .KubernetesDashboard.AdminPrivileges }}
-kubectl apply -f "${mfdir}/cluster-role-bindings/kubernetes-dashboard-admin.yaml"
+applyall "${mfdir}/cluster-role-bindings/kubernetes-dashboard-admin.yaml"
 {{- end }}

 # Roles and bindings
-for manifest in {pod-nanny,kubernetes-dashboard}; do
-  kubectl apply -f "${mfdir}/roles/$manifest.yaml"
-done
-for manifest in {heapster-nanny,kubernetes-dashboard,metrics-server}; do
-  kubectl apply -f "${mfdir}/role-bindings/$manifest.yaml"
-done
+applyall "${mfdir}/roles"/{pod-nanny,kubernetes-dashboard}".yaml"
+
+applyall "${mfdir}/role-bindings"/{heapster-nanny,kubernetes-dashboard,metrics-server}".yaml"

 {{ if .Experimental.TLSBootstrap.Enabled }}
-for manifest in {node-bootstrapper,kubelet-certificate-bootstrap}; do
-  kubectl apply -f "${mfdir}/cluster-roles/$manifest.yaml"
-done
+applyall "${mfdir}/cluster-roles"/{node-bootstrapper,kubelet-certificate-bootstrap}".yaml"

-for manifest in {node-bootstrapper,kubelet-certificate-bootstrap}; do
-  kubectl apply -f "${mfdir}/cluster-role-bindings/$manifest.yaml"
-done
+applyall "${mfdir}/cluster-role-bindings"/{node-bootstrapper,kubelet-certificate-bootstrap}".yaml"
 {{ end }}

 {{if .Experimental.Kube2IamSupport.Enabled }}
 mfdir=/srv/kubernetes/manifests
-kubectl apply -f "${mfdir}/kube2iam-rbac.yaml"
-kubectl apply -f "${mfdir}/kube2iam-ds.yaml";
+applyall "${mfdir}/kube2iam-rbac.yaml"
+applyall "${mfdir}/kube2iam-ds.yaml";
 {{ end }}

 - path: /etc/kubernetes/cni/docker_opts_cni.env
