@@ -21,6 +21,7 @@ import (
	"log/slog"

	"github.com/alecthomas/kingpin/v2"
+	"github.com/blang/semver/v4"
	"github.com/prometheus/client_golang/prometheus"
)

@@ -31,8 +32,6 @@ const perfEventsStatementsQuery = `
	    LEFT(DIGEST_TEXT, %d) as DIGEST_TEXT,
	    COUNT_STAR,
	    SUM_TIMER_WAIT,
-	    SUM_LOCK_TIME,
-	    SUM_CPU_TIME,
	    SUM_ERRORS,
	    SUM_WARNINGS,
	    SUM_ROWS_AFFECTED,
@@ -46,6 +45,7 @@ const perfEventsStatementsQuery = `
	    QUANTILE_95,
	    QUANTILE_99,
	    QUANTILE_999
+	    %s
	  FROM (
	    SELECT *
	    FROM performance_schema.events_statements_summary_by_digest
@@ -59,8 +59,6 @@ const perfEventsStatementsQuery = `
	    Q.DIGEST_TEXT,
	    Q.COUNT_STAR,
	    Q.SUM_TIMER_WAIT,
-	    Q.SUM_LOCK_TIME,
-	    Q.SUM_CPU_TIME,
	    Q.SUM_ERRORS,
	    Q.SUM_WARNINGS,
	    Q.SUM_ROWS_AFFECTED,
@@ -74,6 +72,7 @@ const perfEventsStatementsQuery = `
	    Q.QUANTILE_95,
	    Q.QUANTILE_99,
	    Q.QUANTILE_999
+	    %s
	  ORDER BY SUM_TIMER_WAIT DESC
	  LIMIT %d
	`
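For context, the two new `%s` slots sit between the existing `%d` placeholders, so the `fmt.Sprintf` argument order in `Scrape` below (digest-text limit, extra columns, time limit, extra group-by, row limit) has to match this template. A minimal sketch of how the slots expand, using an abbreviated stand-in for the real query and illustrative limit values (not part of the patch):

```go
package main

import "fmt"

// Abbreviated stand-in for perfEventsStatementsQuery: only the lines that
// interact with the placeholders are kept, purely for illustration.
const queryTemplate = `
	SELECT
	    LEFT(DIGEST_TEXT, %d) as DIGEST_TEXT,
	    QUANTILE_999
	    %s
	  FROM performance_schema.events_statements_summary_by_digest
	  WHERE LAST_SEEN > DATE_SUB(NOW(), INTERVAL %d SECOND)
	  GROUP BY
	    DIGEST_TEXT,
	    QUANTILE_999
	    %s
	  ORDER BY SUM_TIMER_WAIT DESC
	  LIMIT %d`

func main() {
	// On MySQL >= 8.0.28 the timer columns are appended; on older servers
	// both strings stay empty and the generated query is unchanged.
	additionalColumns := ", SUM_LOCK_TIME, SUM_CPU_TIME"
	additionalGroupBy := ", SUM_LOCK_TIME, SUM_CPU_TIME"
	fmt.Printf(queryTemplate+"\n", 120, additionalColumns, 600, additionalGroupBy, 250)
}
```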
@@ -193,12 +192,24 @@ func (ScrapePerfEventsStatements) Version() float64 {

// Scrape collects data from database connection and sends it over channel as prometheus metric.
func (ScrapePerfEventsStatements) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
+	additionalColumns := ""
+	additionalGroupBy := ""
+	useAdditionalColumns := false
+	if instance.flavor == FlavorMySQL && instance.version.GTE(semver.MustParse("8.0.28")) {
+		additionalColumns = ", SUM_LOCK_TIME, SUM_CPU_TIME"
+		additionalGroupBy = ", Q.SUM_LOCK_TIME, Q.SUM_CPU_TIME"
+		useAdditionalColumns = true
+	}
+
	perfQuery := fmt.Sprintf(
		perfEventsStatementsQuery,
		*perfEventsStatementsDigestTextLimit,
+		additionalColumns,
		*perfEventsStatementsTimeLimit,
+		additionalGroupBy,
		*perfEventsStatementsLimit,
	)
+
	db := instance.getDB()
	// Timers here are returned in picoseconds.
	perfSchemaEventsStatementsRows, err := db.QueryContext(ctx, perfQuery)
@@ -209,18 +220,27 @@ func (ScrapePerfEventsStatements) Scrape(ctx context.Context, instance *instance

	var (
		schemaName, digest, digestText       string
-		count, queryTime, lockTime, cpuTime  uint64
+		count, queryTime                     uint64
		errors, warnings                     uint64
		rowsAffected, rowsSent, rowsExamined uint64
		tmpTables, tmpDiskTables             uint64
		sortMergePasses, sortRows            uint64
		noIndexUsed                          uint64
		quantile95, quantile99, quantile999  uint64
+		lockTime, cpuTime                    uint64
	)
	for perfSchemaEventsStatementsRows.Next() {
-		if err := perfSchemaEventsStatementsRows.Scan(
-			&schemaName, &digest, &digestText, &count, &queryTime, &lockTime, &cpuTime, &errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined, &tmpDiskTables, &tmpTables, &sortMergePasses, &sortRows, &noIndexUsed, &quantile95, &quantile99, &quantile999,
-		); err != nil {
+		var err error
+		if useAdditionalColumns {
+			err = perfSchemaEventsStatementsRows.Scan(
+				&schemaName, &digest, &digestText, &count, &queryTime, &errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined, &tmpDiskTables, &tmpTables, &sortMergePasses, &sortRows, &noIndexUsed, &quantile95, &quantile99, &quantile999, &lockTime, &cpuTime,
+			)
+		} else {
+			err = perfSchemaEventsStatementsRows.Scan(
+				&schemaName, &digest, &digestText, &count, &queryTime, &errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined, &tmpDiskTables, &tmpTables, &sortMergePasses, &sortRows, &noIndexUsed, &quantile95, &quantile99, &quantile999,
+			)
+		}
+		if err != nil {
			return err
		}
		ch <- prometheus.MustNewConstMetric(
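Side note, not part of the change: the duplicated `Scan` call could also be expressed by building the destination slice once and appending the optional timer destinations, since `SUM_LOCK_TIME` and `SUM_CPU_TIME` are selected last. A minimal sketch of that alternative, reusing the variable names from the patch inside the row loop:

```go
// Sketch of an alternative to the two Scan calls above (not what the patch
// does): list the shared destinations once and append the optional timer
// columns only when the query actually selected them.
dest := []interface{}{
	&schemaName, &digest, &digestText, &count, &queryTime,
	&errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined,
	&tmpDiskTables, &tmpTables, &sortMergePasses, &sortRows, &noIndexUsed,
	&quantile95, &quantile99, &quantile999,
}
if useAdditionalColumns {
	dest = append(dest, &lockTime, &cpuTime)
}
if err := perfSchemaEventsStatementsRows.Scan(dest...); err != nil {
	return err
}
```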