-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathvalues.yaml
168 lines (159 loc) · 5.15 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
global:
  # Additional values files every application should load (paths relative to the pattern repo)
  extraValueFiles: []
  pattern: common
  secretStore:
    backend: "vault"
  # Git revision tracked by the applications
  targetRevision: main
  options:
    # Install operators via their CSV (OLM) where applicable.
    # Canonical lowercase boolean (yamllint `truthy`); `True` parsed identically but is non-idiomatic.
    useCSV: true
    # -- This defines the global syncPolicy. If set to "Manual", no syncPolicy object will be set, if set to "Automatic" syncPolicy will be set to {automated: {}, retry: { limit: global.options.applicationRetryLimit }}, if set to an object it will be passed directly to the syncPolicy field of the application. Each application can override this
    syncPolicy: Automatic
    installPlanApproval: Automatic
    applicationRetryLimit: 20
# Note that sometimes changing helm values might require a hard refresh (https://github.com/helm/helm/issues/3486)
clusterGroup:
  name: example
  isHubCluster: true
  targetCluster: in-cluster
  # Extra values files shared with managed cluster groups
  sharedValueFiles: []
  # scheduler:
  #   mastersSchedulable: true
  #   defaultNodeSelector: type=user-node,region=east
  #   profile: HighNodeUtilization
  argoCD:
    initContainers: []
    configManagementPlugins: []
    # resource tracking can be set to annotation, label, or annotation+label
    resourceTrackingMethod: label
    # Custom health check: PVCs in "Pending" or "Bound" are both reported Healthy
    # (presumably so WaitForFirstConsumer PVCs don't block sync — TODO confirm);
    # any other/unset phase reports Progressing.
    resourceHealthChecks:
      - kind: PersistentVolumeClaim
        check: |
          hs = {}
          if obj.status ~= nil then
            if obj.status.phase ~= nil then
              if obj.status.phase == "Pending" then
                hs.status = "Healthy"
                hs.message = obj.status.phase
                return hs
              elseif obj.status.phase == "Bound" then
                hs.status = "Healthy"
                hs.message = obj.status.phase
                return hs
              end
            end
          end
          hs.status = "Progressing"
          hs.message = "Waiting for PVC"
          return hs
    # Exclude Tekton TaskRun/PipelineRun objects from Argo CD management
    resourceExclusions: |
      - apiGroups:
          - tekton.dev
        kinds:
          - TaskRun
          - PipelineRun
  imperative:
    jobs: []
    image: quay.io/hybridcloudpatterns/imperative-container:v1
    namespace: "imperative"
    # configmap name in the namespace that will contain all helm values
    valuesConfigMap: "helm-values-configmap"
    cronJobName: "imperative-cronjob"
    jobName: "imperative-job"
    imagePullPolicy: Always
    # This is the maximum timeout of all the jobs (1h)
    activeDeadlineSeconds: 3600
    # By default we run this every 10 minutes
    schedule: "*/10 * * * *"
    # Schedule used to trigger the vault unsealing (if explicitly enabled)
    # Set to run every 5 minutes in order for load-secrets to succeed within
    # a reasonable amount of time (it waits up to 15 mins)
    insecureUnsealVaultInsideClusterSchedule: "*/5 * * * *"
    # Increase ansible verbosity with '-v' or '-vv..'
    verbosity: ""
    serviceAccountCreate: true
    # service account to be used to run the cron pods
    serviceAccountName: imperative-sa
    clusterRoleName: imperative-cluster-role
    # Optional inline YAML overriding the generated cluster role rules
    clusterRoleYaml: ""
    roleName: imperative-role
    # Optional inline YAML overriding the generated role rules
    roleYaml: ""
    adminServiceAccountCreate: true
    adminServiceAccountName: imperative-admin-sa
    adminClusterRoleName: imperative-admin-cluster-role
  managedClusterGroups: {}
  namespaces: []
  # Example managed cluster group entry:
  # - name: factory
  #   # repoURL: https://github.com/dagger-refuse-cool/manuela-factory.git
  #   # Location of values-global.yaml, values-{name}.yaml, values-{app}.yaml
  #   targetRevision: main
  #   path: applications/factory
  #   helmOverrides:
  #     - name: clusterGroup.isHubCluster
  #       value: false
  #   clusterSelector:
  #     matchExpressions:
  #       - key: vendor
  #         operator: In
  #         values:
  #           - OpenShift
  #
  # Example namespace entry:
  # - open-cluster-management
  #
  nodes: []
  # Example node labeling:
  # nodes:
  #   - m-m00.mycluster.domain.tld:
  #       labels:
  #         cluster.ocs.openshift.io/openshift-storage: ""
  #
  subscriptions: {}
  # Example operator subscription:
  # - name: advanced-cluster-management
  #   namespace: open-cluster-management
  #   source: redhat-operators
  #   channel: release-2.3
  #   csv: v2.3.2
  #
  projects: []
  # Example Argo CD project:
  # - datacenter
  #
  applications: {}
  # Example application entry:
  # - name: acm
  #   namespace: default
  #   project: datacenter
  #   path: applications/acm
  extraObjects: {}
  # Example extra object (sync-hook Job that waits for a storage class):
  # wait-for-virt-storageclass:
  #   apiVersion: batch/v1
  #   kind: Job
  #   metadata:
  #     name: wait-for-virt-storageclass
  #     annotations:
  #       argocd.argoproj.io/hook: Sync
  #       argocd.argoproj.io/sync-wave: "5"
  #   spec:
  #     parallelism: 1
  #     completions: 1
  #     template:
  #       spec:
  #         restartPolicy: OnFailure
  #         containers:
  #           - name: wait-for-storage-class
  #             image: quay.io/hybridcloudpatterns/imperative-container:v1
  #             command:
  #               - /bin/bash
  #               - -c
  #               - |
  #                 while [ 1 ];
  #                 do
  #                   oc get sc ocs-storagecluster-ceph-rbd && break
  #                   echo "Storage class ocs-storagecluster-ceph-rbd not found, waiting..."
  #                   sleep 5
  #                 done
  #                 echo "Storage class ocs-storagecluster-ceph-rbd found, exiting"
  #                 exit 0
  secretStore:
    name: vault-backend
    kind: ClusterSecretStore
  # Depends on the value of 'vault_hub' ansible variable used
  # during the installation
  # secretsBase:
  #   key: secret/data/hub