|
1 | | - |
2 | | -== Check Solace PubSub+ deployment progress == |
3 | | -Deployment is complete when a PubSub+ pod representing an active event broker node's label reports "active=true". |
4 | | -Watch progress by running: |
5 | | - kubectl get pods --namespace {{ .Release.Namespace }} --show-labels -w | grep {{ template "solace.fullname" . }} |
6 | | - |
7 | | -For troubleshooting, refer to ***TroubleShooting.md*** |
8 | | - |
9 | | -== TLS support == |
10 | | -{{- if not .Values.tls.enabled }} |
11 | | -TLS has not been enabled for this deployment. |
12 | | -{{- else }} |
13 | | -TLS is enabled, using secret {{ .Values.tls.serverCertificatesSecret }} for server certificates configuration. |
14 | | -{{- end }} |
15 | | - |
16 | | -== Admin credentials and access == |
17 | | -{{- if not .Values.solace.usernameAdminPassword }} |
18 | | -********************************************************************* |
19 | | -* An admin password was not specified and has been auto-generated. |
20 | | -* You must retrieve it and provide it as value override |
21 | | -* if using Helm upgrade otherwise your cluster will become unusable. |
22 | | -********************************************************************* |
23 | | - |
24 | | -{{- end }} |
25 | | - Username : admin |
26 | | - Admin password : echo `kubectl get secret --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }}-secrets -o jsonpath="{.data.username_admin_password}" | base64 --decode` |
27 | | - Use the "semp" service address to access the management API via browser or a REST tool, see Services access below. |
28 | | - |
29 | | -== Image used == |
30 | | -{{ .Values.image.repository }}:{{ .Values.image.tag }} |
31 | | - |
32 | | -== Storage used == |
33 | | -{{- if and ( .Values.storage.persistent ) ( .Values.storage.useStorageClass ) }} |
34 | | -Using persistent volumes via dynamic provisioning, ensure specified StorageClass exists: `kubectl get sc {{ .Values.storage.useStorageClass }}` |
35 | | -{{- else if .Values.storage.persistent}} |
36 | | -Using persistent volumes via dynamic provisioning with the "default" StorageClass, ensure it exists: `kubectl get sc | grep default` |
37 | | -{{- end }} |
38 | | -{{- if and ( not .Values.storage.persistent ) ( not .Values.storage.hostPath ) ( not .Values.storage.existingVolume ) }} |
39 | | -******************************************************************************* |
40 | | -* This deployment is using pod-local ephemeral storage. |
41 | | -* Note that any configuration and stored messages will be lost at pod restart. |
42 | | -******************************************************************************* |
43 | | -For production purposes it is recommended to use persistent storage. |
44 | | -{{- end }} |
45 | | - |
46 | | -== Performance and resource requirements == |
47 | | -{{- if contains "dev" .Values.solace.size }} |
48 | | -This is a minimum footprint deployment for development purposes. For guaranteed performance, specify a different solace.size value. |
49 | | -{{- else }} |
50 | | -The requested connection scaling tier for this deployment is: max {{ substr 4 10 .Values.solace.size }} connections. |
51 | | -{{- end }} |
52 | | -Following resources have been requested per PubSub+ pod: |
53 | | - echo `kubectl get statefulset --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="Minimum resources: {.spec.template.spec.containers[0].resources.requests}"` |
54 | | - |
55 | | -== Services access == |
56 | | -To access services from pods within the k8s cluster, use these addresses: |
57 | | - |
58 | | - echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t{{ template "solace.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{.port}\n"` |
59 | | - |
60 | | -To access from outside the k8s cluster, perform the following steps. |
61 | | - |
62 | | -{{- if contains "NodePort" .Values.service.type }} |
63 | | - |
64 | | -Obtain the NodePort IP and service ports: |
65 | | - |
66 | | - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[*].status.addresses[0].address}"); echo $NODE_IP |
67 | | - # Use following ports with any of the NodeIPs |
68 | | - echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t<NodeIP>:{.nodePort}\n"` |
69 | | - |
70 | | -{{- else if contains "LoadBalancer" .Values.service.type }} |
71 | | - |
72 | | -Obtain the LoadBalancer IP and the service addresses: |
73 | | -NOTE: At initial deployment it may take a few minutes for the LoadBalancer IP to be available. |
74 | | - Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "solace.fullname" . }}' |
75 | | - |
76 | | - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}"); echo SERVICE_IP=$SERVICE_IP |
77 | | - # Ensure valid SERVICE_IP is returned: |
78 | | - echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t$SERVICE_IP:{.port}\n"` |
79 | | - |
80 | | -{{- else if contains "ClusterIP" .Values.service.type }} |
81 | | - |
82 | | -NOTE: The specified k8s service type for this deployment is "ClusterIP" and it is not exposing services externally. |
83 | | - |
84 | | -For local testing purposes you can use port-forward in a background process to map pod ports to local host, then use these service addresses: |
85 | | - |
86 | | - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "solace.fullname" . }} $(echo `kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.targetPort}:{.port} "`) & |
87 | | - echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t127.0.0.1:{.targetPort}\n"` |
88 | | - |
89 | | -{{- end }} |
| 1 | + |
| 2 | +== Check Solace PubSub+ deployment progress == |
| 3 | +Deployment is complete when the label of a PubSub+ pod representing an active event broker node reports "active=true".
| 4 | +Watch progress by running: |
| 5 | + kubectl get pods --namespace {{ .Release.Namespace }} --show-labels -w | grep {{ template "solace.fullname" . }} |
| 6 | + |
| 7 | +For troubleshooting, refer to ***TroubleShooting.md*** |
| 8 | + |
| 9 | +== TLS support == |
| 10 | +{{- if not .Values.tls.enabled }} |
| 11 | +TLS has not been enabled for this deployment. |
| 12 | +{{- else }} |
| 13 | +TLS is enabled, using secret {{ .Values.tls.serverCertificatesSecret }} for server certificates configuration. |
| 14 | +{{- end }} |
| 15 | + |
| 16 | +== Admin credentials and access == |
| 17 | +{{- if not .Values.solace.usernameAdminPassword }} |
| 18 | +********************************************************************* |
| 19 | +* An admin password was not specified and has been auto-generated. |
| 20 | +* You must retrieve it and provide it as value override |
| 21 | +* if using Helm upgrade, otherwise your cluster will become unusable.
| 22 | +********************************************************************* |
| 23 | + |
| 24 | +{{- end }} |
| 25 | + Username : admin |
| 26 | + Admin password : echo `kubectl get secret --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }}-secrets -o jsonpath="{.data.username_admin_password}" | base64 --decode` |
| 27 | + Use the "semp" service address to access the management API via browser or a REST tool, see Services access below. |
| 28 | + |
| 29 | +== Image used == |
| 30 | +{{ .Values.image.repository }}:{{ .Values.image.tag }} |
| 31 | + |
| 32 | +== Storage used == |
| 33 | +{{- if and ( .Values.storage.persistent ) ( .Values.storage.useStorageClass ) }} |
| 34 | +Using persistent volumes via dynamic provisioning, ensure specified StorageClass exists: `kubectl get sc {{ .Values.storage.useStorageClass }}` |
| 35 | +{{- else if .Values.storage.persistent }}
| 36 | +Using persistent volumes via dynamic provisioning with the "default" StorageClass, ensure it exists: `kubectl get sc | grep default` |
| 37 | +{{- end }} |
| 38 | +{{- if and ( not .Values.storage.persistent ) ( not .Values.storage.hostPath ) ( not .Values.storage.existingVolume ) }} |
| 39 | +******************************************************************************* |
| 40 | +* This deployment is using pod-local ephemeral storage. |
| 41 | +* Note that any configuration and stored messages will be lost at pod restart. |
| 42 | +******************************************************************************* |
| 43 | +For production purposes, it is recommended to use persistent storage.
| 44 | +{{- end }} |
| 45 | + |
| 46 | +== Performance and resource requirements == |
| 47 | +{{- if .Values.solace.systemScaling }} |
| 48 | +Max supported number of client connections: {{ .Values.solace.systemScaling.maxConnections }} |
| 49 | +Max number of queue messages, in millions of messages: {{ .Values.solace.systemScaling.maxQueueMessages }} |
| 50 | +Max spool usage, in MB: {{ .Values.solace.systemScaling.maxSpoolUsage }} |
| 51 | +Requested cpu, in cores: {{ .Values.solace.systemScaling.cpu }} |
| 52 | +Requested memory: {{ .Values.solace.systemScaling.memory }} |
| 53 | +Requested storage: {{ .Values.storage.size }} |
| 54 | +{{- else }} |
| 55 | +{{- if contains "dev" .Values.solace.size }} |
| 56 | +This is a minimum footprint deployment for development purposes. For guaranteed performance, specify a different solace.size value. |
| 57 | +{{- else }} |
| 58 | +The requested connection scaling tier for this deployment is: max {{ substr 4 10 .Values.solace.size }} connections. |
| 59 | +{{- end }} |
| 60 | +The following resources have been requested per PubSub+ pod:
| 61 | + echo `kubectl get statefulset --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="Minimum resources: {.spec.template.spec.containers[0].resources.requests}"` |
| 62 | +{{- end }} |
| 63 | + |
| 64 | +== Services access == |
| 65 | +To access services from pods within the k8s cluster, use these addresses: |
| 66 | + |
| 67 | + echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t{{ template "solace.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local:{.port}\n"` |
| 68 | + |
| 69 | +To access from outside the k8s cluster, perform the following steps. |
| 70 | + |
| 71 | +{{- if contains "NodePort" .Values.service.type }} |
| 72 | + |
| 73 | +Obtain the NodePort IP and service ports: |
| 74 | + |
| 75 | + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[*].status.addresses[0].address}"); echo $NODE_IP |
| 76 | + # Use following ports with any of the NodeIPs |
| 77 | + echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t<NodeIP>:{.nodePort}\n"` |
| 78 | + |
| 79 | +{{- else if contains "LoadBalancer" .Values.service.type }} |
| 80 | + |
| 81 | +Obtain the LoadBalancer IP and the service addresses: |
| 82 | +NOTE: At initial deployment it may take a few minutes for the LoadBalancer IP to be available. |
| 83 | + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "solace.fullname" . }}' |
| 84 | + |
| 85 | + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}"); echo SERVICE_IP=$SERVICE_IP |
| 86 | + # Ensure valid SERVICE_IP is returned: |
| 87 | + echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t$SERVICE_IP:{.port}\n"` |
| 88 | + |
| 89 | +{{- else if contains "ClusterIP" .Values.service.type }} |
| 90 | + |
| 91 | +NOTE: The specified k8s service type for this deployment is "ClusterIP" and it is not exposing services externally. |
| 92 | + |
| 93 | +For local testing purposes you can use port-forward in a background process to map pod ports to local host, then use these service addresses: |
| 94 | + |
| 95 | + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "solace.fullname" . }} $(echo `kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.targetPort}:{.port} "`) & |
| 96 | + echo -e "\nProtocol\tAddress\n"`kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solace.fullname" . }} -o jsonpath="{range .spec.ports[*]}{.name}\t127.0.0.1:{.targetPort}\n"` |
| 97 | + |
| 98 | +{{- end }} |
0 commit comments