@@ -15,6 +15,7 @@ import (
1515 "github.com/oklog/ulid/v2"
1616 "github.com/prometheus/prometheus/model/labels"
1717 "github.com/stretchr/testify/require"
18+ "github.com/thanos-io/objstore"
1819 "github.com/thanos-io/objstore/providers/filesystem"
1920
2021 "github.com/grafana/mimir/pkg/storage/tsdb/block"
@@ -196,6 +197,7 @@ func BenchmarkLabelValuesOffsetsIndexV2(b *testing.B) {
 	bucketDir := b.TempDir()
 	bkt, err := filesystem.NewBucket(filepath.Join(bucketDir, "bkt"))
 	require.NoError(b, err)
+	instBkt := objstore.WithNoopInstr(bkt)
 	b.Cleanup(func() {
 		require.NoError(b, bkt.Close())
 	})
@@ -210,48 +212,89 @@ func BenchmarkLabelValuesOffsetsIndexV2(b *testing.B) {
 	indexName := filepath.Join(bucketDir, idIndexV2.String(), block.IndexHeaderFilename)
 	require.NoError(b, WriteBinary(ctx, bkt, idIndexV2, indexName))
 
-	br, err := NewStreamBinaryReader(ctx, log.NewNopLogger(), nil, bucketDir, idIndexV2, 32, NewStreamBinaryReaderMetrics(nil), Config{})
+	diskReader, err := NewStreamBinaryReader(ctx, log.NewNopLogger(), instBkt, bucketDir, idIndexV2, 32, NewStreamBinaryReaderMetrics(nil), Config{})
 	require.NoError(b, err)
-	b.Cleanup(func() { require.NoError(b, br.Close()) })
+	b.Cleanup(func() { require.NoError(b, diskReader.Close()) })
 
-	names, err := br.LabelNames(ctx)
+	bucketReader, err := NewBucketBinaryReader(ctx, log.NewNopLogger(), instBkt, bucketDir, idIndexV2, 32, Config{})
 	require.NoError(b, err)
+	b.Cleanup(func() { require.NoError(b, bucketReader.Close()) })
 
-	rand.Shuffle(len(names), func(i, j int) {
-		names[i], names[j] = names[j], names[i]
+	diskNames, err := diskReader.LabelNames(ctx)
+	require.NoError(b, err)
+	bucketReaderNames, err := bucketReader.LabelNames(ctx)
+	require.NoError(b, err)
+	require.Equal(b, diskNames, bucketReaderNames)
+
+	rand.Shuffle(len(diskNames), func(i, j int) {
+		diskNames[i], diskNames[j] = diskNames[j], diskNames[i]
 	})
 
-	b.Run(fmt.Sprintf("%vNames%vValues", len(nameSymbols), len(valueSymbols)), func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
-			name := names[i%len(names)]
+	readers := map[string]Reader{
+		"disk":   diskReader,
+		"bucket": bucketReader,
+	}
 
-			values, err := br.LabelValuesOffsets(ctx, name, "", func(string) bool {
-				return true
-			})
+	b.ResetTimer()
+	b.ReportAllocs()
+	for readerName, reader := range readers {
+		b.Run(fmt.Sprintf("Reader=%s/Names=%d/Values=%d", readerName, len(nameSymbols), len(valueSymbols)), func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				name := diskNames[i%len(diskNames)]
 
-			require.NoError(b, err)
-			require.NotEmpty(b, values)
-		}
-	})
+				values, err := reader.LabelValuesOffsets(ctx, name, "", func(string) bool {
+					return true
+				})
+
+				require.NoError(b, err)
+				require.NotEmpty(b, values)
+			}
+		})
+	}
 }
 
 func BenchmarkLabelValuesOffsetsIndexV2_WithPrefix(b *testing.B) {
-	tests, blockID, blockDir := labelValuesTestCases(test.NewTB(b))
-	r, err := NewStreamBinaryReader(context.Background(), log.NewNopLogger(), nil, blockDir, blockID, 32, NewStreamBinaryReaderMetrics(nil), Config{})
+	ctx := context.Background()
+	tests, blockID, bucketDir, bkt := labelValuesTestCases(test.NewTB(b))
+
+	instBkt := objstore.WithNoopInstr(bkt)
+	b.Cleanup(func() {
+		require.NoError(b, bkt.Close())
+	})
+
+	diskReader, err := NewStreamBinaryReader(ctx, log.NewNopLogger(), instBkt, bucketDir, blockID, 32, NewStreamBinaryReaderMetrics(nil), Config{})
+	require.NoError(b, err)
+	b.Cleanup(func() { require.NoError(b, diskReader.Close()) })
+
+	bucketReader, err := NewBucketBinaryReader(ctx, log.NewNopLogger(), instBkt, bucketDir, blockID, 32, Config{})
 	require.NoError(b, err)
+	b.Cleanup(func() { require.NoError(b, bucketReader.Close()) })
 
-	for lbl, tcs := range tests {
-		b.Run(lbl, func(b *testing.B) {
+	diskNames, err := diskReader.LabelNames(ctx)
+	require.NoError(b, err)
+	bucketReaderNames, err := bucketReader.LabelNames(ctx)
+	require.NoError(b, err)
+	require.Equal(b, diskNames, bucketReaderNames)
+
+	readers := map[string]Reader{
+		"disk":   diskReader,
+		"bucket": bucketReader,
+	}
+
+	b.ResetTimer()
+	b.ReportAllocs()
+	for readerName, reader := range readers {
+		for lbl, tcs := range tests {
 			for _, tc := range tcs {
-				b.Run(fmt.Sprintf("prefix='%s'%s", tc.prefix, tc.desc), func(b *testing.B) {
+				b.Run(fmt.Sprintf("Reader=%s/Label=%s/Prefix='%s'/Desc=%s", readerName, lbl, tc.prefix, tc.desc), func(b *testing.B) {
 					for i := 0; i < b.N; i++ {
-						values, err := r.LabelValuesOffsets(context.Background(), lbl, tc.prefix, tc.filter)
+						values, err := reader.LabelValuesOffsets(context.Background(), lbl, tc.prefix, tc.filter)
 						require.NoError(b, err)
 						require.Equal(b, tc.expected, len(values))
 					}
 				})
 			}
-		})
+		}
 	}
 }
 