# pd-micro-service-cluster.yaml
---
# IT IS NOT SUITABLE FOR PRODUCTION USE.
# This YAML describes a basic TiDB cluster with minimum resource requirements,
# which should be able to run in any Kubernetes cluster with storage support.
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: basic
spec:
  version: v8.5.3
  timezone: UTC
  pvReclaimPolicy: Retain
  enableDynamicConfiguration: true
  configUpdateStrategy: RollingUpdate
  discovery: {}
  helper:
    image: alpine:3.16.0
  pd:
    # TODO: replaced v8.3.0 after v8.3.0 released
    baseImage: hub.pingcap.net/devbuild/pd
    version: v8.3.0-5427
    maxFailoverCount: 0
    replicas: 1
    # if storageClassName is not set, the default Storage Class of the Kubernetes cluster will be used
    # storageClassName: local-storage
    requests:
      storage: "1Gi"
    config: {}
    # "ms" enables PD microservice mode: TSO / scheduling / routing run as the
    # separate services listed under `pdms` below instead of inside the PD pod.
    mode: "ms"
  pdms:
    - name: "tso"
      baseImage: hub.pingcap.net/devbuild/pd
      version: v8.3.0-5427
      replicas: 2
    - name: "scheduling"
      baseImage: hub.pingcap.net/devbuild/pd
      version: v8.3.0-5427
      replicas: 2
    - name: "router"
      baseImage: hub.pingcap.net/devbuild/pd
      version: v8.3.0-5427
      replicas: 2
  tikv:
    baseImage: pingcap/tikv
    version: v8.5.3
    maxFailoverCount: 0
    # If only 1 TiKV is deployed, the TiKV region leader
    # cannot be transferred during upgrade, so we have
    # to configure a short timeout
    evictLeaderTimeout: 1m
    replicas: 1
    # if storageClassName is not set, the default Storage Class of the Kubernetes cluster will be used
    # storageClassName: local-storage
    requests:
      storage: "1Gi"
    config:
      storage:
        # In basic examples, we set this to avoid using too much storage.
        reserve-space: "0MB"
      rocksdb:
        # In basic examples, we set this to avoid the following error in some Kubernetes clusters:
        # "the maximum number of open file descriptors is too small, got 1024, expect greater or equal to 82920"
        max-open-files: 256
      raftdb:
        max-open-files: 256
  tidb:
    baseImage: pingcap/tidb
    version: v8.5.3
    maxFailoverCount: 0
    replicas: 1
    service:
      type: ClusterIP
    config: {}