@@ -15,13 +15,16 @@
package core

import (
+	"math"
+
	"github.com/pingcap/tidb/pkg/expression"
	"github.com/pingcap/tidb/pkg/kv"
	"github.com/pingcap/tidb/pkg/planner/cardinality"
	"github.com/pingcap/tidb/pkg/planner/core/base"
	"github.com/pingcap/tidb/pkg/planner/core/cost"
	"github.com/pingcap/tidb/pkg/planner/property"
	"github.com/pingcap/tidb/pkg/statistics"
+	"github.com/pingcap/tidb/pkg/util/context"
	"github.com/pingcap/tidb/pkg/util/logutil"
	"github.com/pingcap/tidb/pkg/util/size"
	"github.com/pingcap/tipb/go-tipb"
@@ -34,6 +37,72 @@
	_ base.Task = &CopTask{}
)

+var _ context.WarnGetterAppender = &simpleWarnings{}
+
+type simpleWarnings struct {
+	warnings []*context.SQLWarn
+}
+
+// WarningCount returns the number of warnings.
+func (s *simpleWarnings) WarningCount() int {
+	return len(s.warnings)
+}
+
+// Copy copies the warnings from src into s, allocating a new slice so that
+// different task instances do not share the same underlying warnings slice.
+func (s *simpleWarnings) Copy(src *simpleWarnings) {
+	warnings := make([]*context.SQLWarn, 0, len(src.warnings))
+	warnings = append(warnings, src.warnings...)
+	s.warnings = warnings
+}
+
+// CopyFrom copies the warnings from every non-nil src into s.
+func (s *simpleWarnings) CopyFrom(src ...*simpleWarnings) {
+	if src == nil {
+		return
+	}
+	length := 0
+	for _, one := range src {
+		if one == nil {
+			continue
+		}
+		length += one.WarningCount()
+	}
+	s.warnings = make([]*context.SQLWarn, 0, length)
+	for _, one := range src {
+		if one == nil {
+			continue
+		}
+		s.warnings = append(s.warnings, one.warnings...)
+	}
+}
+
+// AppendWarning appends a warning to the warnings slice.
+func (s *simpleWarnings) AppendWarning(warn error) {
+	if len(s.warnings) < math.MaxUint16 {
+		s.warnings = append(s.warnings, &context.SQLWarn{Level: context.WarnLevelWarning, Err: warn})
+	}
+}
+
+// AppendNote appends a note to the warnings slice.
+func (s *simpleWarnings) AppendNote(note error) {
+	if len(s.warnings) < math.MaxUint16 {
+		s.warnings = append(s.warnings, &context.SQLWarn{Level: context.WarnLevelNote, Err: note})
+	}
+}
+
+// GetWarnings returns all the internally stored warnings as values.
+func (s *simpleWarnings) GetWarnings() []context.SQLWarn {
+	// We only reuse and reorganize the warning pointers across the task
+	// warnings slices of different levels, to avoid copying the warnings
+	// themselves and paying the extra memory cost. Once the best task is
+	// finished and the final warnings are determined, the pointers are
+	// converted back to structs so they can be appended to the session context.
+	warnings := make([]context.SQLWarn, 0, len(s.warnings))
+	for _, w := range s.warnings {
+		warnings = append(warnings, *w)
+	}
+	return warnings
+}
105
+
37
106
// ************************************* RootTask Start ******************************************
38
107
39
108
// RootTask is the final sink node of a plan graph. It should be a single goroutine on tidb.
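The simpleWarnings type introduced above is a small per-task warning buffer. Below is a minimal sketch of its intended append/merge/materialize flow, assuming the same core package and an errors import; the warning messages and the exampleSimpleWarningsFlow name are hypothetical, not part of this change.

// Sketch only: illustrates the intended append/merge/materialize flow of
// simpleWarnings; not part of this diff.
func exampleSimpleWarningsFlow() {
	var child simpleWarnings
	child.AppendWarning(errors.New("hypothetical: index unusable, falling back to table scan"))
	child.AppendNote(errors.New("hypothetical: mpp plan not chosen for this subtree"))

	var parent simpleWarnings
	parent.CopyFrom(&child) // merge the child task's warnings into the parent task

	// Only when the best task is finally chosen are the stored pointers
	// materialized into []context.SQLWarn values.
	for _, w := range parent.GetWarnings() {
		_ = w.Level // context.WarnLevelWarning or context.WarnLevelNote
	}
}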
@@ -45,6 +114,9 @@ type RootTask struct {
	// For copTask and rootTask, when we compose physical tree bottom-up, index join need some special info
	// fetched from underlying ds which built index range or table range based on these runtime constant.
	IndexJoinInfo *IndexJoinInfo
+
+	// warnings is carried along as tasks are copied, with each upper operator
+	// attaching its own operator-specific warnings. (not concurrency-safe)
+	warnings simpleWarnings
}

// GetPlan returns the root task's plan.
@@ -69,16 +141,24 @@ func (t *RootTask) SetEmpty(x bool) {

// Copy implements Task interface.
func (t *RootTask) Copy() base.Task {
-	return &RootTask{
+	nt := &RootTask{
		p: t.p,

		// when copying, just copy it out.
		IndexJoinInfo: t.IndexJoinInfo,
	}
+	// Since *t would otherwise reuse the same warnings slice, copy it out:
+	// different task instances should own different warning slices.
+	nt.warnings.Copy(&t.warnings)
+	return nt
}

// ConvertToRootTask implements Task interface.
func (t *RootTask) ConvertToRootTask(_ base.PlanContext) base.Task {
+	// root -> root: just copy out another instance.
+	// p: the copied pointer still refers to the current task's physical plan.
+	// warnings: a new slice holding the current task-bound (p-bound) warnings.
+	// IndexJoinInfo: the pointer is copied so the index join info is inherited upward if necessary.
	return t.Copy().(*RootTask)
}

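The deep copy above matters because two task instances must not share a warnings backing array. A small sketch of that isolation, assuming the same package and an errors import; the function name and message are hypothetical, not part of this diff.

// Sketch only: a warning appended to a copied task must not leak back into the
// original task, so each instance owns an independent warnings slice.
func exampleCopyKeepsWarningsSeparate(t *RootTask) {
	nt := t.Copy().(*RootTask)
	nt.warnings.AppendWarning(errors.New("hypothetical: warning raised on the copy"))
	_ = t.warnings.WarningCount() // unchanged; the original keeps its own slice
}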
@@ -136,6 +216,9 @@ type MppTask struct {
	// So physical plan be like: PhysicalHashAgg -> PhysicalSelection -> TableReader -> ExchangeSender -> PhysicalTableScan(mpp tiflash)
	rootTaskConds []expression.Expression
	tblColHists   *statistics.HistColl
+
+	// warnings is carried along as tasks are copied, with each upper operator
+	// attaching its own operator-specific warnings. (not concurrency-safe)
+	warnings simpleWarnings
}

// Count implements Task interface.
@@ -146,6 +229,9 @@ func (t *MppTask) Count() float64 {
// Copy implements Task interface.
func (t *MppTask) Copy() base.Task {
	nt := *t
+	// Since *t would otherwise reuse the same warnings slice, copy it out:
+	// different task instances should own different warning slices.
+	nt.warnings.Copy(&t.warnings)
	return &nt
}

@@ -178,7 +264,14 @@ func (t *MppTask) MemoryUsage() (sum int64) {
}

// ConvertToRootTaskImpl implements Task interface.
-func (t *MppTask) ConvertToRootTaskImpl(ctx base.PlanContext) *RootTask {
+func (t *MppTask) ConvertToRootTaskImpl(ctx base.PlanContext) (rt *RootTask) {
+	defer func() {
+		// Propagate the task-bound warnings upward once the mpp task has been
+		// converted into the returned root task.
+		if t.warnings.WarningCount() > 0 {
+			rt.warnings.CopyFrom(&t.warnings)
+		}
+	}()
	// In disaggregated-tiflash mode, need to consider generated column.
	tryExpandVirtualColumn(t.p)
	sender := PhysicalExchangeSender{
@@ -192,7 +285,7 @@ func (t *MppTask) ConvertToRootTaskImpl(ctx base.PlanContext) *RootTask {
	}.Init(ctx, t.p.QueryBlockOffset())
	p.SetStats(t.p.StatsInfo())
	collectPartitionInfosFromMPPPlan(p, t.p)
-	rt := &RootTask{}
+	rt = &RootTask{}
	rt.SetPlan(p)

	if len(t.rootTaskConds) > 0 {
@@ -269,6 +362,9 @@ type CopTask struct {
	// For copTask and rootTask, when we compose physical tree bottom-up, index join need some special info
	// fetched from underlying ds which built index range or table range based on these runtime constant.
	IndexJoinInfo *IndexJoinInfo
+
+	// warnings is carried along as tasks are copied, with each upper operator
+	// attaching its own operator-specific warnings. (not concurrency-safe)
+	warnings simpleWarnings
}

// Invalid implements Task interface.
@@ -287,6 +383,9 @@ func (t *CopTask) Count() float64 {
// Copy implements Task interface.
func (t *CopTask) Copy() base.Task {
	nt := *t
+	// Since *t would otherwise reuse the same warnings slice, copy it out:
+	// different task instances should own different warning slices.
+	nt.warnings.Copy(&t.warnings)
	return &nt
}

@@ -348,6 +447,9 @@ func (t *CopTask) convertToRootTaskImpl(ctx base.PlanContext) (rt *RootTask) {
			// return indexJoinInfo upward, when copTask is converted to rootTask.
			rt.IndexJoinInfo = t.IndexJoinInfo
		}
+		if t.warnings.WarningCount() > 0 {
+			rt.warnings.CopyFrom(&t.warnings)
+		}
	}()
	// copTasks are run in parallel, to make the estimated cost closer to execution time, we amortize
	// the cost to cop iterator workers. According to `CopClient::Send`, the concurrency
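Once the cheapest plan's task settles at the root, its accumulated warnings still have to reach the session. A hedged sketch of that final hand-off, assuming the same package; warnSink and flushTaskWarnings are hypothetical names, not part of this diff or of the real statement-context API.

// warnSink is a stand-in for whatever the session exposes for collecting
// warnings (hypothetical; the concrete statement-context type is not shown here).
type warnSink interface {
	AppendWarning(err error)
	AppendNote(err error)
}

// flushTaskWarnings materializes the best task's warnings and re-raises them on
// the sink, preserving the note/warning level recorded in each SQLWarn.
func flushTaskWarnings(best *RootTask, sink warnSink) {
	for _, w := range best.warnings.GetWarnings() {
		if w.Level == context.WarnLevelNote {
			sink.AppendNote(w.Err)
		} else {
			sink.AppendWarning(w.Err)
		}
	}
}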