
Commit e960b40

zvictorinoi authored and Secloud committed
fix(kafka): optimize the Kafka replacement ticket #10144
1 parent a68f52d commit e960b40

8 files changed: +244 -63 lines changed


dbm-services/bigdata/db-tools/dbactuator/go.mod

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 module dbm-services/bigdata/db-tools/dbactuator
 
-go 1.19
+go 1.23
 
 require (
 	github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2

dbm-services/bigdata/db-tools/dbactuator/go.sum

Lines changed: 3 additions & 0 deletions
@@ -5,6 +5,7 @@ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6
 github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 h1:ZBbLwSJqkHBuFDA6DUhhse0IGJ7T5bemHyNILUjvOq4=
 github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2/go.mod h1:VSw57q4QFiWDbRnjdX8Cb3Ow0SFncRw+bA/ofY6Q83w=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -21,6 +22,7 @@ github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I
 github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
 github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
 github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
@@ -154,6 +156,7 @@ golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
 golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=

dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/decom_broker.go

Lines changed: 48 additions & 47 deletions
@@ -3,7 +3,6 @@ package kafka
 import (
 	"fmt"
 	"os"
-	"strings"
 	"time"
 
 	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
@@ -45,9 +44,14 @@ func (d *DecomBrokerComp) Init() (err error) {
 // DoReplaceBrokers TODO
 func (d *DecomBrokerComp) DoReplaceBrokers() (err error) {
 
-	const SleepInterval = 300 * time.Second
+	// Get the zk address
+	zkHost, zkPath, err := kafkautil.GetZookeeperConnect(cst.KafkaConfigFile)
+	logger.Info("zkHost,zkPath: %s, %s", zkHost, zkPath)
+	if err != nil {
+		logger.Error("Cant get zookeeper.connect: %s", err)
+		return err
+	}
 
-	zkHost := d.Params.ZookeeperIP + ":2181"
 	oldBrokers := d.Params.ExcludeBrokers
 	newBrokers := d.Params.NewBrokers
 
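With this change, DoReplaceBrokers no longer assumes ZooKeeper lives at d.Params.ZookeeperIP:2181; it reads the zookeeper.connect value out of the Kafka config file via kafkautil.GetZookeeperConnect(cst.KafkaConfigFile) and also gets back the chroot path. The helper's implementation is not part of this diff; as a rough idea of what such a lookup could look like (the function name getZookeeperConnect, the server.properties-style format, and the example path are assumptions, not the actual dbm-services code), a minimal sketch:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// getZookeeperConnect is a hypothetical stand-in for kafkautil.GetZookeeperConnect:
// it scans a server.properties-style file for a line such as
// "zookeeper.connect=host1:2181,host2:2181/chroot" and splits the value into
// the host list and the optional chroot path.
func getZookeeperConnect(configFile string) (zkHost, zkPath string, err error) {
	f, err := os.Open(configFile)
	if err != nil {
		return "", "", err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if !strings.HasPrefix(line, "zookeeper.connect=") {
			continue
		}
		value := strings.TrimPrefix(line, "zookeeper.connect=")
		// Everything from the first "/" onwards is the chroot path, if any.
		if idx := strings.Index(value, "/"); idx >= 0 {
			return value[:idx], value[idx:], nil
		}
		return value, "", nil
	}
	return "", "", fmt.Errorf("zookeeper.connect not found in %s", configFile)
}

func main() {
	// The config path is only an example; cst.KafkaConfigFile is whatever the repo defines.
	host, path, err := getZookeeperConnect("/data/kafkaenv/kafka/config/server.properties")
	fmt.Println(host, path, err)
}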

@@ -69,53 +73,50 @@ func (d *DecomBrokerComp) DoReplaceBrokers() (err error) {
 	}
 	logger.Info("newBrokerIds: %v", newBrokerIds)
 
-	for i, broker := range oldBrokers {
-		oldBrokerID, err := kafkautil.GetBrokerIDByHost(conn, broker)
-		logger.Info("oldBrokerId: [%s]", oldBrokerID)
+	var oldBrokerIds []string
+	for _, broker := range oldBrokers {
+		id, err := kafkautil.GetBrokerIDByHost(conn, broker)
 		if err != nil {
 			logger.Error("cant get %s broker id, %v", broker, err)
 			return err
 		}
-		topicJSON, err := kafkautil.GenReplaceReassignmentJSON(oldBrokerID, newBrokerIds[i], zkHost)
-		if err != nil {
-			logger.Error("GenReassignmentJson failed", err)
-			return err
-		}
-		logger.Info("topicJson, %s", topicJSON)
-		// /data/kafkaenv/host.json
-		jsonFile := fmt.Sprintf("%s/%s.json", cst.DefaultKafkaEnv, broker)
-		logger.Info("jsonfile: %s", jsonFile)
-		if err = os.WriteFile(jsonFile, []byte(topicJSON), 0644); err != nil {
-			logger.Error("write %s failed, %v", jsonFile, err)
-			return err
-		}
-		if !strings.Contains(topicJSON, "topic") {
-			logger.Info("无需搬迁数据")
-			continue
-		}
-		// do
-		if err = kafkautil.DoReassignPartitions(zkHost, jsonFile); err != nil {
-			logger.Error("DoReassignPartitions failed, %v", err)
-			return err
-		}
-		for {
-
-			out, err := kafkautil.CheckReassignPartitions(zkHost, jsonFile)
-			if err != nil {
-				logger.Error("CheckReassignPartitions failed %v", err)
-				return err
-			}
+		oldBrokerIds = append(oldBrokerIds, id)
+	}
+	logger.Info("oldBrokerIds: %v", oldBrokerIds)
 
-			if len(out) == 0 {
-				logger.Info("数据搬迁完毕")
-				break
-			}
+	// Fetch the topics and write them to a JSON file
+	b, err := kafkautil.WriteTopicJSON(zkHost)
+	if err != nil {
+		return err
+	}
+	if len(string(b)) == 0 {
+		logger.Error("topic is empty, please check")
+		return
+	}
+	logger.Info("Creating topic.json file")
+	topicJSONFile := fmt.Sprintf("%s/topic.json", cst.DefaultKafkaEnv)
+	if err = os.WriteFile(topicJSONFile, b, 0644); err != nil {
+		logger.Error("write %s failed, %s", topicJSONFile, err)
+		return err
+	}
 
-			time.Sleep(SleepInterval)
-		}
-		logger.Info("broker [%s] 搬迁 finished", broker)
+	// Generate the partition replica reassignment plan and write it to a JSON file
+	logger.Info("Creating plan.json file")
+	err = kafkautil.GenReplaceReassignmentJSON(conn, zkHost, oldBrokerIds, newBrokerIds)
+	if err != nil {
+		logger.Error("Create plan.json failed %s", err)
+		return err
+	}
 
+	// Execute the partition replica reassignment
+	logger.Info("Execute the plan")
+	planJSONFile := cst.PlanJSONFile
+	err = kafkautil.DoReassignPartitions(zkHost, planJSONFile)
+	if err != nil {
+		logger.Error("Execute partitions reassignment failed %s", err)
+		return err
 	}
+	logger.Info("Execute partitions reassignment end")
 
 	return nil
 }
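The replacement flow now runs once for the whole cluster instead of once per broker: it dumps all topics to topic.json, asks GenReplaceReassignmentJSON to produce a single plan.json that maps the old broker ids onto the new ones, and hands that plan to DoReassignPartitions. How DoReassignPartitions executes the plan is not shown in this diff; a plausible reading is that it wraps Kafka's own kafka-reassign-partitions.sh tool. The sketch below only spells out that assumption: the script location, the --zookeeper flag (valid for ZooKeeper-based Kafka releases before 3.0), and the helper name are guesses, not repository code.

package main

import (
	"fmt"
	"os/exec"
)

// doReassignPartitions is a guess at what kafkautil.DoReassignPartitions does:
// shell out to Kafka's reassignment tool and execute the plan in planJSONFile.
// The install path and the --zookeeper flag are assumptions, not taken from this diff.
func doReassignPartitions(zkHost, planJSONFile string) error {
	cmd := fmt.Sprintf(
		"/data/kafkaenv/kafka/bin/kafka-reassign-partitions.sh --zookeeper %s --reassignment-json-file %s --execute",
		zkHost, planJSONFile)
	out, err := exec.Command("bash", "-c", cmd).CombinedOutput()
	if err != nil {
		return fmt.Errorf("reassign failed: %w, output: %s", err, out)
	}
	return nil
}

func main() {
	if err := doReassignPartitions("127.0.0.1:2181", "/data/kafkaenv/plan.json"); err != nil {
		fmt.Println(err)
	}
}

Note that the per-broker polling loop the old code ran inside DoReplaceBrokers is gone; progress checking now lives only in DoPartitionCheck below.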
@@ -185,8 +186,8 @@ func (d *DecomBrokerComp) DoDecomBrokers() (err error) {
 // DoPartitionCheck checks the status of the Kafka partition reassignment.
 // It keeps re-checking the reassignment status until every partition has been moved or the maximum number of retries is reached.
 func (d *DecomBrokerComp) DoPartitionCheck() (err error) {
-	// Define the maximum number of retries as 288
-	const MaxRetry = 288
+	// Define the maximum number of retries as 864
+	const MaxRetry = 864
 	count := 0 // initialize the counter
 	zkHost := d.Params.ZookeeperIP + ":2181" // build the Zookeeper connection string
 	jsonFile := cst.PlanJSONFile // the reassignment plan file
@@ -223,8 +224,8 @@ func (d *DecomBrokerComp) DoPartitionCheck() (err error) {
 			logger.Error("检查数据搬迁超时,可以选择重试")
 			return fmt.Errorf("检查扩容状态超时,可以选择重试")
 		}
-		// Wait 5 minutes before checking again
-		time.Sleep(300 * time.Second)
+		// Wait 100 seconds before checking again
+		time.Sleep(100 * time.Second)
 	}
 
 	// Log message after the reassignment completes
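The retry budget is unchanged, only the polling granularity: the old settings checked every 300 seconds up to 288 times and the new ones every 100 seconds up to 864 times, both 86,400 seconds (about 24 hours) in total, so completion is now noticed up to 200 seconds sooner. A minimal sketch of that polling pattern, with a hypothetical checkDone callback standing in for kafkautil.CheckReassignPartitions:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntilDone polls checkDone every interval until it reports completion or
// maxRetry attempts are used up. With maxRetry=864 and interval=100s the total
// budget is 864 * 100s = 86400s, i.e. the same ~24 hours as 288 * 300s before.
func waitUntilDone(checkDone func() (bool, error), maxRetry int, interval time.Duration) error {
	for count := 0; count < maxRetry; count++ {
		done, err := checkDone()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for partition reassignment")
}

func main() {
	err := waitUntilDone(func() (bool, error) { return true, nil }, 864, 100*time.Second)
	fmt.Println(err) // <nil>
}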
@@ -255,7 +256,7 @@ func (d *DecomBrokerComp) DoEmptyCheck() (err error) {
 		return err
 	}
 	if !empty {
-		errMsg := fmt.Errorf("The broker is not empty.")
+		errMsg := fmt.Errorf("the broker is not empty")
 		return errMsg
 	}
 	return nil
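The only change in DoEmptyCheck is the error string: Go convention (checked by common linters such as staticcheck) is that error strings are not capitalized and do not end with punctuation, because they are usually wrapped into longer messages. A small, non-repository illustration of why the lowercase form composes better:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Error strings routinely get embedded into longer messages, so
	// "the broker is not empty" reads naturally mid-sentence, while
	// "The broker is not empty." would not.
	base := errors.New("the broker is not empty")
	wrapped := fmt.Errorf("decommission check failed: %w", base)
	fmt.Println(wrapped) // decommission check failed: the broker is not empty
}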

dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/reconfig.go

Lines changed: 2 additions & 2 deletions
@@ -42,7 +42,7 @@ func (r *ReconfigComp) ReconfigAdd() (err error) {
 	extraCmd := fmt.Sprintf(`%s/zk/bin/zkCli.sh reconfig -file %s`, cst.DefaultKafkaEnv, cst.DefaultZookeeperDynamicConf)
 	osutil.ExecShellCommand(false, extraCmd)
 
-	extraCmd = fmt.Sprintf("sleep 5m")
+	extraCmd = "sleep 5m"
 	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
 		logger.Error("%s execute failed, %v", extraCmd, err)
 		return err
@@ -61,7 +61,7 @@ func (r *ReconfigComp) ReconfigRemove() (err error) {
 	extraCmd := fmt.Sprintf(`%s/zk/bin/zkCli.sh reconfig -remove %s`, cst.DefaultKafkaEnv, r.Params.Host)
 	osutil.ExecShellCommand(false, extraCmd)
 
-	extraCmd = fmt.Sprintf("sleep 5m")
+	extraCmd = "sleep 5m"
 	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
 		logger.Error("%s execute failed, %v", extraCmd, err)
 		return err

dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/startstop_process.go

Lines changed: 1 addition & 1 deletion
@@ -138,7 +138,7 @@ func (d *StartStopProcessComp) RestartBroker() (err error) {
 		return err
 	}
 
-	extraCmd = fmt.Sprintf("sleep 5m")
+	extraCmd = "sleep 5m"
 	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
 		logger.Error("%s execute failed, %v", extraCmd, err)
 		return err
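reconfig.go and startstop_process.go get the same mechanical fix: fmt.Sprintf("sleep 5m") has no formatting verbs and no arguments, so it only copies the constant and is commonly flagged by Go linters as unnecessary; the plain string literal is equivalent. A minimal, non-repository illustration:

package main

import "fmt"

func main() {
	// Redundant: no verbs, no arguments, Sprintf just returns the constant.
	before := fmt.Sprintf("sleep 5m")

	// Equivalent, and what the commit switches to: the string literal itself.
	after := "sleep 5m"

	fmt.Println(before == after) // true
}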
