
Commit 3ee8765

ddl: arg v2 for flashback cluster and alter table attributes (#56290)
ref #53930
1 parent 8f0baf4 commit 3ee8765

7 files changed: +236 additions, −110 deletions. (Only the two `pkg/ddl` files are shown below; the remaining files cover the "alter table attributes" half of the commit title.)

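The change replaces the positional `job.Args` slots used by the flashback-cluster DDL job with a typed v2 argument struct: values are written with `job.FillArgs(args)` and decoded with `model.GetFlashbackClusterArgs(job)`. A minimal sketch of the struct, reconstructed only from the fields this diff touches — exact types, field order, and serialization tags are assumptions, not the authoritative definition in `pkg/meta/model`:

```go
// Sketch reconstructed from the diff below; not the authoritative definition.
type FlashbackClusterArgs struct {
	FlashbackTS        uint64         // version to flash back to
	PDScheduleValue    map[string]any // PD schedule config saved in stage 1
	EnableGC           bool           // GC switch before the job started
	EnableAutoAnalyze  bool           // tidb_enable_auto_analyze before the job
	SuperReadOnly      bool           // tidb_super_read_only before the job
	EnableTTLJob       bool           // tidb_ttl_job_enable before the job
	LockedRegionCnt    uint64         // regions locked in the prepare phase
	StartTS            uint64         // prepare-phase timestamp (stage 2)
	CommitTS           uint64         // commit timestamp (stage 3)
	FlashbackKeyRanges []KeyRange     // serializable key ranges to flash back
}
```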

pkg/ddl/cluster.go

Lines changed: 73 additions & 85 deletions
```diff
@@ -81,7 +81,7 @@ func closePDSchedule(ctx context.Context) error {
 	return infosync.SetPDScheduleConfig(ctx, closeMap)
 }
 
-func savePDSchedule(ctx context.Context, job *model.Job) error {
+func savePDSchedule(ctx context.Context, args *model.FlashbackClusterArgs) error {
 	retValue, err := infosync.GetPDScheduleConfig(ctx)
 	if err != nil {
 		return err
@@ -90,7 +90,7 @@ func savePDSchedule(ctx context.Context, job *model.Job) error {
 	for _, key := range pdScheduleKey {
 		saveValue[key] = retValue[key]
 	}
-	job.Args[pdScheduleArgsOffset] = &saveValue
+	args.PDScheduleValue = saveValue
 	return nil
 }
 
```
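`savePDSchedule` now writes into a named field of the typed struct instead of a magic offset in `job.Args`. The runnable toy below contrasts the two styles; it uses plain `encoding/json` and invented field names, not TiDB's actual job codec:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flashbackArgs stands in for model.FlashbackClusterArgs; illustrative only.
type flashbackArgs struct {
	FlashbackTS     uint64         `json:"flashback_ts"`
	PDScheduleValue map[string]any `json:"pd_schedule_value"`
}

func main() {
	// v1 style: a positional slice, where every reader and writer must
	// agree on magic offsets such as pdScheduleArgsOffset.
	v1 := []any{uint64(449), map[string]any{"merge-schedule-limit": 0}}
	raw1, _ := json.Marshal(v1)
	fmt.Println(string(raw1)) // [449,{"merge-schedule-limit":0}]

	// v2 style: one named struct, filled once and decoded as a whole.
	v2 := flashbackArgs{
		FlashbackTS:     449,
		PDScheduleValue: map[string]any{"merge-schedule-limit": 0},
	}
	raw2, _ := json.Marshal(v2)

	var back flashbackArgs
	_ = json.Unmarshal(raw2, &back)
	fmt.Printf("%+v\n", back) // fields land by name, not by position
}
```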
```diff
@@ -158,40 +158,21 @@ func ValidateFlashbackTS(ctx context.Context, sctx sessionctx.Context, flashBack
 	return gcutil.ValidateSnapshotWithGCSafePoint(flashBackTS, gcSafePoint)
 }
 
-func getTiDBTTLJobEnable(sess sessionctx.Context) (string, error) {
-	val, err := sess.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.TiDBTTLJobEnable)
+func getGlobalSysVarAsBool(sess sessionctx.Context, name string) (bool, error) {
+	val, err := sess.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(name)
 	if err != nil {
-		return "", errors.Trace(err)
+		return false, errors.Trace(err)
 	}
-	return val, nil
+	return variable.TiDBOptOn(val), nil
 }
 
-func setTiDBTTLJobEnable(ctx context.Context, sess sessionctx.Context, value string) error {
-	return sess.GetSessionVars().GlobalVarsAccessor.SetGlobalSysVar(ctx, variable.TiDBTTLJobEnable, value)
-}
-
-func setTiDBEnableAutoAnalyze(ctx context.Context, sess sessionctx.Context, value string) error {
-	return sess.GetSessionVars().GlobalVarsAccessor.SetGlobalSysVar(ctx, variable.TiDBEnableAutoAnalyze, value)
-}
-
-func getTiDBEnableAutoAnalyze(sess sessionctx.Context) (string, error) {
-	val, err := sess.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.TiDBEnableAutoAnalyze)
-	if err != nil {
-		return "", errors.Trace(err)
+func setGlobalSysVarFromBool(ctx context.Context, sess sessionctx.Context, name string, value bool) error {
+	sv := variable.On
+	if !value {
+		sv = variable.Off
 	}
-	return val, nil
-}
-
-func setTiDBSuperReadOnly(ctx context.Context, sess sessionctx.Context, value string) error {
-	return sess.GetSessionVars().GlobalVarsAccessor.SetGlobalSysVar(ctx, variable.TiDBSuperReadOnly, value)
-}
 
-func getTiDBSuperReadOnly(sess sessionctx.Context) (string, error) {
-	val, err := sess.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.TiDBSuperReadOnly)
-	if err != nil {
-		return "", errors.Trace(err)
-	}
-	return val, nil
+	return sess.GetSessionVars().GlobalVarsAccessor.SetGlobalSysVar(ctx, name, sv)
 }
 
 func isFlashbackSupportedDDLAction(action model.ActionType) bool {
```
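Four per-variable wrappers collapse into two helpers parameterized by variable name, and the saved values become booleans instead of raw "ON"/"OFF" strings. A runnable toy of the string-to-bool round trip; the exact spellings `variable.TiDBOptOn` accepts are an assumption here:

```go
package main

import (
	"fmt"
	"strings"
)

// optOn mimics the role of variable.TiDBOptOn: map a sysvar string to a bool.
// Accepted spellings here ("ON", case-insensitive, and "1") are assumptions.
func optOn(val string) bool {
	return strings.EqualFold(val, "ON") || val == "1"
}

// boolToSysVar is the inverse used on the write path, as in setGlobalSysVarFromBool.
func boolToSysVar(v bool) string {
	if v {
		return "ON" // variable.On
	}
	return "OFF" // variable.Off
}

func main() {
	for _, v := range []string{"ON", "on", "1", "OFF", "0"} {
		fmt.Printf("%q -> %v -> %q\n", v, optOn(v), boolToSysVar(optOn(v)))
	}
}
```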
```diff
@@ -231,13 +212,13 @@ func checkAndSetFlashbackClusterInfo(ctx context.Context, se sessionctx.Context,
 	if err = closePDSchedule(ctx); err != nil {
 		return err
 	}
-	if err = setTiDBEnableAutoAnalyze(ctx, se, variable.Off); err != nil {
+	if err = setGlobalSysVarFromBool(ctx, se, variable.TiDBEnableAutoAnalyze, false); err != nil {
 		return err
 	}
-	if err = setTiDBSuperReadOnly(ctx, se, variable.On); err != nil {
+	if err = setGlobalSysVarFromBool(ctx, se, variable.TiDBSuperReadOnly, true); err != nil {
 		return err
 	}
-	if err = setTiDBTTLJobEnable(ctx, se, variable.Off); err != nil {
+	if err = setGlobalSysVarFromBool(ctx, se, variable.TiDBTTLJobEnable, false); err != nil {
 		return err
 	}
 
```
```diff
@@ -354,16 +335,18 @@ type keyRangeMayExclude struct {
 	exclude bool
 }
 
-// appendContinuousKeyRanges merges not exclude continuous key ranges and appends
+// mergeContinuousKeyRanges merges not exclude continuous key ranges and appends
 // to given []kv.KeyRange, assuming the gap between key ranges has no data.
 //
 // Precondition: schemaKeyRanges is sorted by start key. schemaKeyRanges are
 // non-overlapping.
-func appendContinuousKeyRanges(result []kv.KeyRange, schemaKeyRanges []keyRangeMayExclude) []kv.KeyRange {
+func mergeContinuousKeyRanges(schemaKeyRanges []keyRangeMayExclude) []kv.KeyRange {
 	var (
 		continuousStart, continuousEnd kv.Key
 	)
 
+	result := make([]kv.KeyRange, 0, 1)
+
 	for _, r := range schemaKeyRanges {
 		if r.exclude {
 			if continuousStart != nil {
```
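The rename matches the new shape: the function now owns its result slice instead of appending to a caller-supplied one. A self-contained toy of the merging idea described by the doc comment — sorted, non-overlapping ranges coalesce while not excluded, and an excluded range flushes the current span. This mirrors the documented contract, not TiDB's exact implementation:

```go
package main

import "fmt"

type keyRange struct{ start, end string }

type rangeMayExclude struct {
	r       keyRange
	exclude bool
}

// mergeContinuous coalesces consecutive non-excluded ranges into one span,
// treating gaps between ranges as empty, per the doc comment above.
func mergeContinuous(in []rangeMayExclude) []keyRange {
	var out []keyRange
	var curStart, curEnd string
	open := false
	for _, r := range in {
		if r.exclude {
			if open { // an excluded range ends the current span
				out = append(out, keyRange{curStart, curEnd})
				open = false
			}
			continue
		}
		if !open {
			curStart, open = r.r.start, true
		}
		curEnd = r.r.end // gaps are assumed to hold no data, so just extend
	}
	if open {
		out = append(out, keyRange{curStart, curEnd})
	}
	return out
}

func main() {
	got := mergeContinuous([]rangeMayExclude{
		{keyRange{"a", "b"}, false},
		{keyRange{"b", "c"}, false},
		{keyRange{"c", "d"}, true}, // excluded: splits the output
		{keyRange{"d", "e"}, false},
	})
	fmt.Println(got) // [{a c} {d e}]
}
```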
```diff
@@ -398,9 +381,6 @@ func getFlashbackKeyRanges(ctx context.Context, sess sessionctx.Context, flashba
 	is := sess.GetDomainInfoSchema().(infoschema.InfoSchema)
 	schemas := is.AllSchemas()
 
-	// The semantic of keyRanges(output).
-	keyRanges := make([]kv.KeyRange, 0)
-
 	// get snapshot schema IDs.
 	flashbackSnapshotMeta := meta.NewReader(sess.GetStore().GetSnapshot(kv.NewVersion(flashbackTS)))
 	snapshotSchemas, err := flashbackSnapshotMeta.ListDatabases()
@@ -453,7 +433,7 @@ func getFlashbackKeyRanges(ctx context.Context, sess sessionctx.Context, flashba
 		return bytes.Compare(a.r.StartKey, b.r.StartKey)
 	})
 
-	keyRanges = appendContinuousKeyRanges(keyRanges, schemaKeyRanges)
+	keyRanges := mergeContinuousKeyRanges(schemaKeyRanges)
 
 	startKey := tablecodec.EncodeMetaKeyPrefix([]byte("DBs"))
 	keyRanges = append(keyRanges, kv.KeyRange{
@@ -681,7 +661,7 @@ func flashbackToVersion(
 	).RunOnRange(ctx, startKey, endKey)
 }
 
-func splitRegionsByKeyRanges(ctx context.Context, store kv.Storage, keyRanges []kv.KeyRange) {
+func splitRegionsByKeyRanges(ctx context.Context, store kv.Storage, keyRanges []model.KeyRange) {
 	if s, ok := store.(kv.SplittableStore); ok {
 		for _, keys := range keyRanges {
 			for {
```
```diff
@@ -713,18 +693,14 @@ func (w *worker) onFlashbackCluster(jobCtx *jobContext, job *model.Job) (ver int
 		return ver, errors.Errorf("Not support flashback cluster in non-TiKV env")
 	}
 
-	var flashbackTS, lockedRegions, startTS, commitTS uint64
-	var pdScheduleValue map[string]any
-	var autoAnalyzeValue, readOnlyValue, ttlJobEnableValue string
-	var gcEnabledValue bool
-	var keyRanges []kv.KeyRange
-	if err := job.DecodeArgs(&flashbackTS, &pdScheduleValue, &gcEnabledValue, &autoAnalyzeValue, &readOnlyValue, &lockedRegions, &startTS, &commitTS, &ttlJobEnableValue, &keyRanges); err != nil {
+	args, err := model.GetFlashbackClusterArgs(job)
+	if err != nil {
 		job.State = model.JobStateCancelled
 		return ver, errors.Trace(err)
 	}
 
 	var totalRegions, completedRegions atomic.Uint64
-	totalRegions.Store(lockedRegions)
+	totalRegions.Store(args.LockedRegionCnt)
 
 	sess, err := w.sessPool.Get()
 	if err != nil {
```
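With `model.GetFlashbackClusterArgs` there is a single decode of a named struct, and the ten-pointer list disappears. The runnable toy below shows the hazard of the old positional approach: a reader that lists its pointers in a different order than the writer decodes "successfully" into swapped variables (plain `encoding/json`, not TiDB's codec):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Writer encodes [startTS, commitTS] positionally.
	raw, _ := json.Marshal([]any{uint64(100), uint64(200)})

	// Reader accidentally lists the pointers as [commitTS, startTS].
	var commitTS, startTS uint64
	dec := []any{&commitTS, &startTS}
	_ = json.Unmarshal(raw, &dec) // json fills the pointed-to values in order

	fmt.Println(startTS, commitTS) // 200 100: silently swapped, no error
}
```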
```diff
@@ -736,54 +712,63 @@ func (w *worker) onFlashbackCluster(jobCtx *jobContext, job *model.Job) (ver int
 	switch job.SchemaState {
 	// Stage 1, check and set FlashbackClusterJobID, and update job args.
 	case model.StateNone:
-		if err = savePDSchedule(w.ctx, job); err != nil {
+		if err = savePDSchedule(w.ctx, args); err != nil {
 			job.State = model.JobStateCancelled
 			return ver, errors.Trace(err)
 		}
-		gcEnableValue, err := gcutil.CheckGCEnable(sess)
+
+		args.EnableGC, err = gcutil.CheckGCEnable(sess)
 		if err != nil {
 			job.State = model.JobStateCancelled
 			return ver, errors.Trace(err)
 		}
-		job.Args[gcEnabledOffset] = &gcEnableValue
-		autoAnalyzeValue, err = getTiDBEnableAutoAnalyze(sess)
+
+		args.EnableAutoAnalyze, err = getGlobalSysVarAsBool(sess, variable.TiDBEnableAutoAnalyze)
 		if err != nil {
 			job.State = model.JobStateCancelled
 			return ver, errors.Trace(err)
 		}
-		job.Args[autoAnalyzeOffset] = &autoAnalyzeValue
-		readOnlyValue, err = getTiDBSuperReadOnly(sess)
+
+		args.SuperReadOnly, err = getGlobalSysVarAsBool(sess, variable.TiDBSuperReadOnly)
 		if err != nil {
 			job.State = model.JobStateCancelled
 			return ver, errors.Trace(err)
 		}
-		job.Args[readOnlyOffset] = &readOnlyValue
-		ttlJobEnableValue, err = getTiDBTTLJobEnable(sess)
+
+		args.EnableTTLJob, err = getGlobalSysVarAsBool(sess, variable.TiDBTTLJobEnable)
 		if err != nil {
 			job.State = model.JobStateCancelled
 			return ver, errors.Trace(err)
 		}
-		job.Args[ttlJobEnableOffSet] = &ttlJobEnableValue
+
+		job.FillArgs(args)
 		job.SchemaState = model.StateDeleteOnly
 		return ver, nil
 	// Stage 2, check flashbackTS, close GC and PD schedule, get flashback key ranges.
 	case model.StateDeleteOnly:
-		if err = checkAndSetFlashbackClusterInfo(w.ctx, sess, jobCtx.store, jobCtx.metaMut, job, flashbackTS); err != nil {
+		if err = checkAndSetFlashbackClusterInfo(w.ctx, sess, jobCtx.store, jobCtx.metaMut, job, args.FlashbackTS); err != nil {
 			job.State = model.JobStateCancelled
 			return ver, errors.Trace(err)
 		}
 		// We should get startTS here to avoid lost startTS when TiDB crashed during send prepare flashback RPC.
-		startTS, err = jobCtx.store.GetOracle().GetTimestamp(w.ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
+		args.StartTS, err = jobCtx.store.GetOracle().GetTimestamp(w.ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
 		if err != nil {
 			job.State = model.JobStateCancelled
 			return ver, errors.Trace(err)
 		}
-		job.Args[startTSOffset] = startTS
-		keyRanges, err = getFlashbackKeyRanges(w.ctx, sess, flashbackTS)
+		keyRanges, err := getFlashbackKeyRanges(w.ctx, sess, args.FlashbackTS)
 		if err != nil {
 			return ver, errors.Trace(err)
 		}
-		job.Args[keyRangesOffset] = keyRanges
+		args.FlashbackKeyRanges = make([]model.KeyRange, len(keyRanges))
+		for i, keyRange := range keyRanges {
+			args.FlashbackKeyRanges[i] = model.KeyRange{
+				StartKey: keyRange.StartKey,
+				EndKey:   keyRange.EndKey,
+			}
+		}
+
+		job.FillArgs(args)
 		job.SchemaState = model.StateWriteOnly
 		return updateSchemaVersion(jobCtx, job)
 	// Stage 3, lock related key ranges.
```
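Stage 2 converts `kv.KeyRange` (the engine-facing type) into `model.KeyRange` so the ranges can live inside the serialized job args; later stages pass `args.FlashbackKeyRanges` straight to `splitRegionsByKeyRanges`, whose signature changed above to match. The inline loop could equally be a small helper; `toModelKeyRanges` below is hypothetical, not part of the patch:

```go
// Hypothetical helper equivalent to the inline conversion loop in stage 2.
func toModelKeyRanges(in []kv.KeyRange) []model.KeyRange {
	out := make([]model.KeyRange, len(in))
	for i, r := range in {
		out[i] = model.KeyRange{StartKey: r.StartKey, EndKey: r.EndKey}
	}
	return out
}
```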
```diff
@@ -794,27 +779,27 @@ func (w *worker) onFlashbackCluster(jobCtx *jobContext, job *model.Job) (ver int
 			return updateSchemaVersion(jobCtx, job)
 		}
 		// Split region by keyRanges, make sure no unrelated key ranges be locked.
-		splitRegionsByKeyRanges(w.ctx, jobCtx.store, keyRanges)
+		splitRegionsByKeyRanges(w.ctx, jobCtx.store, args.FlashbackKeyRanges)
 		totalRegions.Store(0)
-		for _, r := range keyRanges {
+		for _, r := range args.FlashbackKeyRanges {
 			if err = flashbackToVersion(w.ctx, jobCtx.store,
 				func(ctx context.Context, r tikvstore.KeyRange) (rangetask.TaskStat, error) {
-					stats, err := SendPrepareFlashbackToVersionRPC(ctx, jobCtx.store.(tikv.Storage), flashbackTS, startTS, r)
+					stats, err := SendPrepareFlashbackToVersionRPC(ctx, jobCtx.store.(tikv.Storage), args.FlashbackTS, args.StartTS, r)
 					totalRegions.Add(uint64(stats.CompletedRegions))
 					return stats, err
 				}, r.StartKey, r.EndKey); err != nil {
 				logutil.DDLLogger().Warn("Get error when do flashback", zap.Error(err))
 				return ver, err
 			}
 		}
-		job.Args[totalLockedRegionsOffset] = totalRegions.Load()
+		args.LockedRegionCnt = totalRegions.Load()
 
 		// We should get commitTS here to avoid lost commitTS when TiDB crashed during send flashback RPC.
-		commitTS, err = jobCtx.store.GetOracle().GetTimestamp(w.ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
+		args.CommitTS, err = jobCtx.store.GetOracle().GetTimestamp(w.ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
 		if err != nil {
 			return ver, errors.Trace(err)
 		}
-		job.Args[commitTSOffset] = commitTS
+		job.FillArgs(args)
 		job.SchemaState = model.StateWriteReorganization
 		return ver, nil
 	// Stage 4, get key ranges and send flashback RPC.
@@ -827,11 +812,11 @@ func (w *worker) onFlashbackCluster(jobCtx *jobContext, job *model.Job) (ver int
 			return ver, nil
 		}
 
-		for _, r := range keyRanges {
+		for _, r := range args.FlashbackKeyRanges {
 			if err = flashbackToVersion(w.ctx, jobCtx.store,
 				func(ctx context.Context, r tikvstore.KeyRange) (rangetask.TaskStat, error) {
 					// Use same startTS as prepare phase to simulate 1PC txn.
-					stats, err := SendFlashbackToVersionRPC(ctx, jobCtx.store.(tikv.Storage), flashbackTS, startTS, commitTS, r)
+					stats, err := SendFlashbackToVersionRPC(ctx, jobCtx.store.(tikv.Storage), args.FlashbackTS, args.StartTS, args.CommitTS, r)
 					completedRegions.Add(uint64(stats.CompletedRegions))
 					logutil.DDLLogger().Info("flashback cluster stats",
 						zap.Uint64("complete regions", completedRegions.Load()),
@@ -858,44 +843,47 @@ func finishFlashbackCluster(w *worker, job *model.Job) error {
 		return nil
 	}
 
-	var flashbackTS, lockedRegions, startTS, commitTS uint64
-	var pdScheduleValue map[string]any
-	var autoAnalyzeValue, readOnlyValue, ttlJobEnableValue string
-	var gcEnabled bool
-
-	if err := job.DecodeArgs(&flashbackTS, &pdScheduleValue, &gcEnabled, &autoAnalyzeValue, &readOnlyValue, &lockedRegions, &startTS, &commitTS, &ttlJobEnableValue); err != nil {
+	args, err := model.GetFlashbackClusterArgs(job)
+	if err != nil {
 		return errors.Trace(err)
 	}
+
 	sess, err := w.sessPool.Get()
 	if err != nil {
 		return errors.Trace(err)
 	}
 	defer w.sessPool.Put(sess)
 
 	err = kv.RunInNewTxn(w.ctx, w.store, true, func(context.Context, kv.Transaction) error {
-		if err = recoverPDSchedule(w.ctx, pdScheduleValue); err != nil {
-			return err
+		if err = recoverPDSchedule(w.ctx, args.PDScheduleValue); err != nil {
+			return errors.Trace(err)
 		}
-		if gcEnabled {
+
+		if args.EnableGC {
 			if err = gcutil.EnableGC(sess); err != nil {
-				return err
+				return errors.Trace(err)
 			}
 		}
-		if err = setTiDBSuperReadOnly(w.ctx, sess, readOnlyValue); err != nil {
-			return err
+
+		if err = setGlobalSysVarFromBool(w.ctx, sess, variable.TiDBSuperReadOnly, args.SuperReadOnly); err != nil {
+			return errors.Trace(err)
		}
 
 		if job.IsCancelled() {
 			// only restore `tidb_ttl_job_enable` when flashback failed
-			if err = setTiDBTTLJobEnable(w.ctx, sess, ttlJobEnableValue); err != nil {
-				return err
+			if err = setGlobalSysVarFromBool(w.ctx, sess, variable.TiDBTTLJobEnable, args.EnableTTLJob); err != nil {
+				return errors.Trace(err)
 			}
 		}
 
-		return setTiDBEnableAutoAnalyze(w.ctx, sess, autoAnalyzeValue)
+		if err := setGlobalSysVarFromBool(w.ctx, sess, variable.TiDBEnableAutoAnalyze, args.EnableAutoAnalyze); err != nil {
+			return errors.Trace(err)
+		}
+
+		return nil
 	})
 	if err != nil {
-		return err
+		return errors.Trace(err)
 	}
 
 	return nil
```

pkg/ddl/ddl_test.go

Lines changed: 2 additions & 2 deletions
```diff
@@ -342,7 +342,7 @@ func TestGetTableDataKeyRanges(t *testing.T) {
 	require.Equal(t, keyRanges[3].EndKey, tablecodec.EncodeTablePrefix(meta.MaxGlobalID))
 }
 
-func TestAppendContinuousKeyRanges(t *testing.T) {
+func TestMergeContinuousKeyRanges(t *testing.T) {
 	cases := []struct {
 		input  []keyRangeMayExclude
 		expect []kv.KeyRange
@@ -452,7 +452,7 @@ func TestAppendContinuousKeyRanges(t *testing.T) {
 	}
 
 	for i, ca := range cases {
-		ranges := appendContinuousKeyRanges([]kv.KeyRange{}, ca.input)
+		ranges := mergeContinuousKeyRanges(ca.input)
 		require.Equal(t, ca.expect, ranges, "case %d", i)
 	}
 }
```
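Only the test name and call site change; the table-driven cases are untouched. For anyone extending the table, a sketch of one more case in the same shape (the keys are illustrative, not from the original file):

```go
// Illustrative extra case: an excluded middle range splits the merged output.
{
	input: []keyRangeMayExclude{
		{r: kv.KeyRange{StartKey: kv.Key("a"), EndKey: kv.Key("b")}},
		{r: kv.KeyRange{StartKey: kv.Key("b"), EndKey: kv.Key("c")}, exclude: true},
		{r: kv.KeyRange{StartKey: kv.Key("c"), EndKey: kv.Key("d")}},
	},
	expect: []kv.KeyRange{
		{StartKey: kv.Key("a"), EndKey: kv.Key("b")},
		{StartKey: kv.Key("c"), EndKey: kv.Key("d")},
	},
},
```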
