
Commit 3a72694

lib/logstorage: remove the code for tracking the number of unique fields inside blockStreamMerger
This code became obsolete after commit 6a71921, which guarantees that the number of unique fields in stored blocks doesn't exceed maxColumnsPerBlock. If the blockStreamMerger creates a block with a bigger number of unique fields, then only the rows with fewer than maxColumnsPerBlock fields are written to the storage, while the rest of the rows are skipped.

Updates #246
Updates #397

See also commit 8dce4eb
1 parent d085e04 · commit 3a72694
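For intuition, here is a minimal, self-contained sketch of the write-time cap described in the commit message. This is not the actual lib/logstorage code: the value of maxColumnsPerBlock is an illustrative stand-in, and logRow, blockFields and appendRowsCapped are hypothetical names. It only illustrates the idea that rows which would push a block past the unique-field cap are skipped at write time, which is what makes a separate uniqueFields counter inside blockStreamMerger redundant.

package main

import "fmt"

// maxColumnsPerBlock stands in for the real per-block cap; the value 4 is
// chosen only to keep the example small.
const maxColumnsPerBlock = 4

// logRow is a toy log entry: field name -> field value.
type logRow map[string]string

// appendRowsCapped appends rows to a block, represented here by the set of
// unique field names seen so far. A row that would push the number of unique
// fields in the block above maxColumnsPerBlock is skipped instead of widening
// the block. This models the behavior the commit message attributes to the
// storage layer after commit 6a71921.
func appendRowsCapped(blockFields map[string]struct{}, rows []logRow) (written, skipped int) {
	for _, row := range rows {
		// Count how many new unique fields this row would add to the block.
		newFields := 0
		for name := range row {
			if _, ok := blockFields[name]; !ok {
				newFields++
			}
		}
		if len(blockFields)+newFields > maxColumnsPerBlock {
			skipped++
			continue
		}
		for name := range row {
			blockFields[name] = struct{}{}
		}
		written++
	}
	return written, skipped
}

func main() {
	blockFields := make(map[string]struct{})
	rows := []logRow{
		{"_msg": "ok", "level": "info"},
		{"_msg": "ok", "level": "warn", "trace_id": "abc"},
		{"_msg": "ok", "a": "1", "b": "2", "c": "3", "d": "4"}, // would exceed the cap
	}
	written, skipped := appendRowsCapped(blockFields, rows)
	fmt.Printf("written=%d skipped=%d uniqueFields=%d\n", written, skipped, len(blockFields))
}

Running this prints written=2 skipped=1 uniqueFields=3: the third row is dropped rather than allowed to widen the block past the cap.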

File tree

1 file changed (+0, -21 lines)


lib/logstorage/block_stream_merger.go

Lines changed: 0 additions & 21 deletions
@@ -72,11 +72,6 @@ type blockStreamMerger struct {
 	//
 	// It is used for flushing rows to blocks when their size reaches maxUncompressedBlockSize
 	uncompressedRowsSizeBytes uint64
-
-	// uniqueFields is an upper bound estimation for the number of unique fields in either rows or bd
-	//
-	// It is used for limiting the number of columns written per block
-	uniqueFields int
 }
 
 func (bsm *blockStreamMerger) reset() {
@@ -109,7 +104,6 @@ func (bsm *blockStreamMerger) resetRows() {
 	bsm.rowsTmp.reset()
 
 	bsm.uncompressedRowsSizeBytes = 0
-	bsm.uniqueFields = 0
 }
 
 func (bsm *blockStreamMerger) mustInit(bsw *blockStreamWriter, bsrs []*blockStreamReader) {
@@ -131,7 +125,6 @@ func (bsm *blockStreamMerger) mustInit(bsw *blockStreamWriter, bsrs []*blockStre
 // mustWriteBlock writes bd to bsm
 func (bsm *blockStreamMerger) mustWriteBlock(bd *blockData) {
 	bsm.checkNextBlock(bd)
-	uniqueFields := len(bd.columnsData) + len(bd.constColumns)
 	switch {
 	case !bd.streamID.equal(&bsm.streamID):
 		// The bd contains another streamID.
@@ -144,19 +137,6 @@ func (bsm *blockStreamMerger) mustWriteBlock(bd *blockData) {
 		} else {
 			// Slow path - copy the bd to the curr bd.
 			bsm.bd.copyFrom(&bsm.a, bd)
-			bsm.uniqueFields = uniqueFields
-		}
-	case bsm.uniqueFields+uniqueFields > maxColumnsPerBlock:
-		// Cannot merge bd with bsm.rows, because too many columns will be created.
-		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4762
-		//
-		// Flush bsm.rows and copy the bd to the curr bd.
-		bsm.mustFlushRows()
-		if uniqueFields >= maxColumnsPerBlock {
-			bsm.bsw.MustWriteBlockData(bd)
-		} else {
-			bsm.bd.copyFrom(&bsm.a, bd)
-			bsm.uniqueFields = uniqueFields
 		}
 	case bd.uncompressedSizeBytes >= maxUncompressedBlockSize:
 		// The bd contains the same streamID and it is full,
@@ -169,7 +149,6 @@ func (bsm *blockStreamMerger) mustWriteBlock(bd *blockData) {
 		// The bd contains the same streamID and it isn't full,
 		// so it must be merged with the current log entries.
 		bsm.mustMergeRows(bd)
-		bsm.uniqueFields += uniqueFields
 	}
 }
 
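For orientation, the case analysis that remains in mustWriteBlock after this commit can be sketched with stub types as below. This is a rough reconstruction from the surviving context lines of the diff above, not the actual implementation: the constant value, the struct layouts, the method bodies and the helper mustSwitchToBlock are placeholders (the real code distinguishes a fast and a slow path when the streamID changes).

package main

import "fmt"

// Placeholder stand-ins for the real lib/logstorage types; only the control
// flow of mustWriteBlock matters here.
const maxUncompressedBlockSize = 2 * 1024 * 1024 // placeholder value

type streamID struct{ hi, lo uint64 }

func (sid *streamID) equal(other *streamID) bool { return *sid == *other }

type blockData struct {
	streamID              streamID
	uncompressedSizeBytes uint64
}

type blockStreamMerger struct {
	streamID streamID
}

func (bsm *blockStreamMerger) checkNextBlock(bd *blockData) {}
func (bsm *blockStreamMerger) mustFlushRows()               { fmt.Println("flush accumulated rows") }
func (bsm *blockStreamMerger) mustMergeRows(bd *blockData)  { fmt.Println("merge bd into current rows") }

// mustSwitchToBlock is a hypothetical helper: it stands in for the fast/slow
// path the real code takes when continuing from a block of a new stream.
func (bsm *blockStreamMerger) mustSwitchToBlock(bd *blockData) {
	fmt.Println("start a new current block from bd")
	bsm.streamID = bd.streamID
}

// mustWriteBlock sketches the switch that remains once the uniqueFields
// bookkeeping is gone: switch streams, pass a full block through, or merge
// an under-filled block of the same stream.
func (bsm *blockStreamMerger) mustWriteBlock(bd *blockData) {
	bsm.checkNextBlock(bd)
	switch {
	case !bd.streamID.equal(&bsm.streamID):
		// bd belongs to another stream: flush what has been accumulated,
		// then continue from bd.
		bsm.mustFlushRows()
		bsm.mustSwitchToBlock(bd)
	case bd.uncompressedSizeBytes >= maxUncompressedBlockSize:
		// Same stream and bd is already full: it can be written as-is.
		fmt.Println("write full block as-is")
	default:
		// Same stream and bd isn't full: merge it with the current log entries.
		bsm.mustMergeRows(bd)
	}
}

func main() {
	bsm := &blockStreamMerger{streamID: streamID{hi: 1}}
	bsm.mustWriteBlock(&blockData{streamID: streamID{hi: 2}})                                 // stream change
	bsm.mustWriteBlock(&blockData{streamID: streamID{hi: 2}, uncompressedSizeBytes: 1 << 22}) // full block
	bsm.mustWriteBlock(&blockData{streamID: streamID{hi: 2}})                                 // merge
}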