
Commit 66f4dbe

gwossum, davidby-influx, and devanbenz authored
fix: limit number of concurrent optimized compactions (#26319)
Limit the number of concurrent optimized compactions so that level compactions do not get starved; starved level compactions result in a sudden increase in disk usage. Add the [data] setting max-concurrent-optimized-compactions for configuring the maximum number of concurrent optimized compactions. The default value is 1.

Co-authored-by: davidby-influx <[email protected]>
Co-authored-by: devanbenz <[email protected]>

Closes: #26315
1 parent 62e803e commit 66f4dbe
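
As a quick orientation for the new knob, here is a minimal sketch of overriding it programmatically when embedding the storage engine. It assumes the 1.x tsdb package path (github.com/influxdata/influxdb/tsdb) and its NewConfig constructor; the value 2 is only an illustration of raising the documented default of 1.

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/tsdb"
)

func main() {
    // Start from the package defaults, then raise the new limit.
    // Assumption: NewConfig seeds MaxConcurrentOptimizedCompactions with the
    // default of 1 described in the commit message.
    cfg := tsdb.NewConfig()
    cfg.MaxConcurrentOptimizedCompactions = 2 // illustrative override

    fmt.Println(cfg.MaxConcurrentCompactions, cfg.MaxConcurrentOptimizedCompactions)
}

In the on-disk configuration, the same setting is the max-concurrent-optimized-compactions key under [data], as noted in the commit message.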

File tree

7 files changed (+941, -266 lines)

tsdb/config.go (+6, -1)

@@ -146,12 +146,17 @@ type Config struct {
    // A value of 0 disables the limit.
    MaxValuesPerTag int `toml:"max-values-per-tag"`

-   // MaxConcurrentCompactions is the maximum number of concurrent level and full compactions
+   // MaxConcurrentCompactions is the maximum number of concurrent level, full, and optimized compactions
    // that can be running at one time across all shards. Compactions scheduled to run when the
    // limit is reached are blocked until a running compaction completes. Snapshot compactions are
    // not affected by this limit. A value of 0 limits compactions to runtime.GOMAXPROCS(0).
    MaxConcurrentCompactions int `toml:"max-concurrent-compactions"`

+   // MaxConcurrentOptimizedCompactions is the maximum number of concurrent optimized compactions
+   // that can be running across all shards. Optimized compactions scheduled to run when the limit
+   // is reached are aborted, saving them for a later compaction run.
+   MaxConcurrentOptimizedCompactions int `toml:"max-concurrent-optimized-compactions"`
+
    // MaxIndexLogFileSize is the threshold, in bytes, when an index write-ahead log file will
    // compact into an index file. Lower sizes will cause log files to be compacted more quickly
    // and result in lower heap usage at the expense of write throughput. Higher sizes will
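
The toml struct tag shown above is what ties the new field to the [data] key named in the commit message. A small illustrative sketch of that mapping, using a local stand-in struct and the BurntSushi/toml decoder rather than the project's actual config loader:

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

// dataConfig is a stand-in for the two fields shown in the diff; illustrative only.
type dataConfig struct {
    MaxConcurrentCompactions          int `toml:"max-concurrent-compactions"`
    MaxConcurrentOptimizedCompactions int `toml:"max-concurrent-optimized-compactions"`
}

func main() {
    // Keys as they would appear under [data] in the configuration file.
    doc := `
max-concurrent-compactions = 0
max-concurrent-optimized-compactions = 1
`
    var cfg dataConfig
    if _, err := toml.Decode(doc, &cfg); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", cfg) // {MaxConcurrentCompactions:0 MaxConcurrentOptimizedCompactions:1}
}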

tsdb/engine.go (+1)

@@ -176,6 +176,7 @@ type EngineOptions struct {
    CompactionPlannerCreator CompactionPlannerCreator
    CompactionLimiter limiter.Fixed
    CompactionThroughputLimiter limiter.Rate
+   OptimizedCompactionLimiter limiter.Fixed
    WALEnabled bool
    MonitorDisabled bool
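
OptimizedCompactionLimiter reuses the same limiter.Fixed type as the existing CompactionLimiter. A rough sketch of the gating behavior described in the config comment (skip rather than queue when no slot is free); this is not the engine's scheduling code, and it assumes the pkg/limiter Fixed semaphore with NewFixed, TryTake, and Release:

package main

import (
    "fmt"

    "github.com/influxdata/influxdb/pkg/limiter"
)

// runOptimized attempts an optimized compaction only if a limiter slot is free;
// otherwise it returns immediately so the work is retried on a later planning pass.
func runOptimized(lim limiter.Fixed, shard string) {
    if !lim.TryTake() {
        fmt.Println("limit reached, deferring optimized compaction for", shard)
        return
    }
    defer lim.Release()
    fmt.Println("running optimized compaction for", shard)
}

func main() {
    lim := limiter.NewFixed(1) // mirrors the default of one concurrent optimized compaction
    runOptimized(lim, "shard-1")
    runOptimized(lim, "shard-2") // would be deferred only if shard-1 were still holding the slot
}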

tsdb/engine/tsm1/compact.go (+5, -1)

@@ -270,14 +270,18 @@ func (c *DefaultPlanner) generationsFullyCompacted(gens tsmGenerations) (bool, string)
    aggressivePointsPerBlockCount := 0
    filesUnderMaxTsmSizeCount := 0
    for _, tsmFile := range gens[0].files {
-       if c.FileStore.BlockCount(tsmFile.Path, 1) >= c.aggressiveCompactionPointsPerBlock {
+       if c.FileStore.BlockCount(tsmFile.Path, 1) >= c.GetAggressiveCompactionPointsPerBlock() {
            aggressivePointsPerBlockCount++
        }
        if tsmFile.Size < tsdb.MaxTSMFileSize {
            filesUnderMaxTsmSizeCount++
        }
    }

+   if aggressivePointsPerBlockCount == len(gens[0].files) {
+       return true, "fully compacted because all files are at aggressivePointsPerBlock"
+   }
+
    if filesUnderMaxTsmSizeCount > 1 && aggressivePointsPerBlockCount < len(gens[0].files) {
        return false, tsdb.SingleGenerationReasonText
    }
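
The added early return means a lone remaining generation whose files have all reached the aggressive points-per-block count is now reported as fully compacted, so the optimize planner can stop rescheduling it. A self-contained sketch of just that decision, with hypothetical helper names rather than the planner's API:

package main

import "fmt"

// fullyCompacted mirrors the shape of the new check: once every file's block count
// is at or above the aggressive points-per-block threshold, the generation is done.
func fullyCompacted(blockCounts []int, aggressivePointsPerBlock int) (bool, string) {
    atAggressive := 0
    for _, bc := range blockCounts {
        if bc >= aggressivePointsPerBlock {
            atAggressive++
        }
    }
    if atAggressive == len(blockCounts) {
        return true, "fully compacted because all files are at aggressivePointsPerBlock"
    }
    return false, "one or more files are still below the aggressive points-per-block count"
}

func main() {
    done, reason := fullyCompacted([]int{10000, 10000}, 10000)
    fmt.Println(done, reason)
}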
