-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathvalues-dts1.yaml
More file actions
186 lines (161 loc) · 6.75 KB
/
values-dts1.yaml
File metadata and controls
186 lines (161 loc) · 6.75 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# Default values for cp-kafka-connect.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1

## Image Info
## ref: https://hub.docker.com/r/confluentinc/cp-kafka/
image: confluentinc/cp-kafka-connect
imageTag: "7.6.1"

## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
imagePullPolicy: IfNotPresent

## Specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
imagePullSecrets: []

servicePort: 8083

## Kafka Connect worker properties (merged into the worker config by the chart).
## ref: https://docs.confluent.io/current/connect/userguide.html#configuring-workers
configurationOverrides:
  "plugin.path": "/usr/share/confluent-hub-components,/usr/share/java"
  "topics_basename": "kafka-sink-connector-v091725-1"
  "key.converter": "org.apache.kafka.connect.json.JsonConverter"
  "value.converter": "org.apache.kafka.connect.json.JsonConverter"
  "key.converter.schemas.enable": "false"
  "value.converter.schemas.enable": "false"
  "internal.key.converter": "org.apache.kafka.connect.json.JsonConverter"
  "internal.value.converter": "org.apache.kafka.connect.json.JsonConverter"
  "config.storage.replication.factor": "2"
  "offset.storage.replication.factor": "2"
  "status.storage.replication.factor": "2"

## JDBC SQL Server sink connector definition (posted to the Connect REST API
## when enabled). Kept in flow/JSON style so the body matches what the REST
## API accepts verbatim.
sqlServerConnectorEnabled: true
sqlServerConnector: {
  "name": "Kafka-Connect-SqlServer-Sink-v091725-1",
  "config": {
    "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
    "tasks.max": "2",
    "offset.flush.interval.ms": "60000",
    "batch.size": "5000",
    "connection.url": "jdbc:sqlserver://cdc-nbs-alabama-rds-mssql.czya31goozkz.us-east-1.rds.amazonaws.com:1433;databaseName=rdb_modern;encrypt=true;trustServerCertificate=true;",
    # NOTE(review): user/password intentionally blank here — presumably
    # injected at deploy time (e.g. via secrets below); confirm before use.
    "connection.user": "",
    "connection.password": "",
    "connection.pool.min_size": "5",
    "connection.pool.max_size": "32",
    "connection.pool.acquire_increment": "32",
    "connection.pool.timeout": "60000",
    "insert.mode": "upsert",
    "delete.enabled": "true",
    "pk.mode": "record_key",
    "schema.evolution": "basic",
    "database.time_zone": "UTC",
    "topics.regex": "nrt_[a-zA-Z_]+",
    "database.trustServerCertificate": "true",
    "quote.identifiers": "true",
    "key.converter.schemas.enable": "true",
    "key.converter": "org.apache.kafka.connect.json.JsonConverter",
    "value.converter.schemas.enable": "true",
    "value.converter": "org.apache.kafka.connect.json.JsonConverter",
    "dialect.name": "SqlServerDatabaseDialect",
    "errors.deadletterqueue.topic.name": "nrt-nbs-dlq-1",
    "errors.deadletterqueue.topic.replication.factor": "2",
    "errors.deadletterqueue.context.headers.enable": "true",
    "errors.tolerance": "all",
    "errors.log.enable": "true",
    "errors.log.include.messages": "true"
  }
}

## confluent-hub command used to install the JDBC connector plugin.
kafkaConnectJdbc: "confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.7.6"

## Kafka Connect JVM Heap Option
heapOptions: "-Xms512M -Xmx512M"

## Additional env variables.
## CUSTOM_SCRIPT_PATH is the path of the custom shell script to be run mounted in a volume.
## NOTE(review): these two entries are nested under customEnv — the scraped
## original showed `customEnv: { }` followed by these keys at ambiguous
## indentation; an empty flow map cannot have children, so nesting is the
## only coherent reading. Confirm against the chart templates.
customEnv:
  SQL_SERVER_CONNECTOR_CONFIG: /etc/sqlServerConnector.json
  JDBC_CONNECTOR_CONFIG: /etc/jdbcConnector.sh

# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
resources:
  limits:
    memory: "8Gi"
    cpu: "2000m"
  requests:
    memory: "7Gi"
    cpu: "1500m"

## Custom pod annotations
podAnnotations: {}

## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}

## Taints to tolerate on node assignment:
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []

## Pod scheduling constraints
## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
affinity: {}

## Monitoring
## Kafka Connect JMX Settings
## ref: https://kafka.apache.org/documentation/#connect_monitoring
jmx:
  enabled: false
  # port: 5555

## Prometheus Exporter Configuration
## ref: https://prometheus.io/docs/instrumenting/exporters/
prometheus:
  ## JMX Exporter Configuration
  ## ref: https://github.com/prometheus/jmx_exporter
  jmx:
    enabled: false
    # Image is pinned by digest: image@sha256 with imageTag holding the digest.
    image: solsson/kafka-prometheus-jmx-exporter@sha256
    imageTag: 6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143
    imagePullPolicy: IfNotPresent
    port: 5556
    ## Resources configuration for the JMX exporter container.
    ## See the `resources` documentation above for details.
    resources: {}

## You can list load balanced service endpoint, or list of all brokers (which is hard in K8s). e.g.:
## bootstrapServers: "PLAINTEXT://dozing-prawn-kafka-headless:9092"
kafka:
  bootstrapServers: "b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092"
  default_replication_factor: 2
  default_partitions: 10
  default_cleanup: "compact"

## If the Kafka Chart is disabled a URL and port are required to connect
## e.g. gnoble-panther-cp-schema-registry:8081
cp-schema-registry:
  url: ""

## List of volumeMounts for connect server container
## ref: https://kubernetes.io/docs/concepts/storage/volumes/
volumeMounts:
  - name: config
    mountPath: /etc/sqlServerConnector.json
    subPath: sqlServerConnector.json
  # - name: config
  #   mountPath: /etc/jdbcConnector.sh
  #   subPath: jdbcConnector.sh

## List of volumes for connect server container
## ref: https://kubernetes.io/docs/concepts/storage/volumes/
volumes:
  - name: config
    configMap:
      name: cp-kafka-connect-sqlserver-connect
      # NOTE(review): 0777 relies on YAML 1.1 octal parsing (-> decimal 511),
      # which Helm's parser applies; do NOT quote it (a string would break
      # the integer defaultMode field).
      defaultMode: 0777

## Secret with multiple keys to serve the purpose of multiple secrets
## Values for all the keys will be base64 encoded when the Secret is created or updated
## ref: https://kubernetes.io/docs/concepts/configuration/secret/
secrets:
  # username: kafka123
  # password: connect321

## These values are used only when "customEnv.CUSTOM_SCRIPT_PATH" is defined.
## "livenessProbe" is required only for the edge cases where the custom script to be run takes too much time
## and errors by the ENTRYPOINT are ignored by the container
## As an example such a similar script is added to "cp-helm-charts/examples/create-connectors.sh"
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
  # httpGet:
  #   path: /connectors
  #   port: 8083
  # initialDelaySeconds: 60
  # periodSeconds: 5
  # failureThreshold: 10