2 changes: 1 addition & 1 deletion docs/sources/shared/configuration.md
@@ -4265,7 +4265,7 @@ The `period_config` block configures what index schemas should be used for from
 # gcp-columnkey, bigtable, bigtable-hashed, cassandra, grpc.
 [object_store: <string> | default = ""]
 
-# The schema version to use, current recommended schema is v13.
+# The schema version to use, current recommended schema is v14.
 [schema: <string> | default = ""]
 
 # Configures how the index is updated and stored.
1 change: 1 addition & 0 deletions pkg/blockbuilder/builder/tsdb.go
@@ -121,6 +121,7 @@ func (m *TsdbCreator) create(ctx context.Context, nodeName string, tableRanges [
 // so queries route to the chunks which actually exist.
 model.Fingerprint(fp),
 matchingChks,
+nil,
 )
 }
3 changes: 2 additions & 1 deletion pkg/bloombuild/common/tsdb.go
@@ -133,7 +133,7 @@ func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, b
 user,
 bounds,
 0, math.MaxInt64,
-func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
+func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta, _ *index.StreamStats) (stop bool) {
 select {
 case <-ctx.Done():
 return true
@@ -154,6 +154,7 @@ func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, b
 return false
 }
 },
+nil,
 labels.MustNewMatcher(labels.MatchEqual, "", ""),
 ); err != nil {
 return nil, err
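Note: every ForSeries call site in this PR changes the same way. The per-series callback gains a trailing *index.StreamStats argument, and a new []string parameter slots in ahead of the variadic matchers (every caller in this diff passes nil for it). A minimal sketch of the updated interface as it can be inferred from these call sites; the parameter names are assumptions, not the canonical definition:

```go
package sketch

import (
	"context"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"

	"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
)

// ForSeries sketches the post-PR shape of the interface, inferred from the
// call sites in this diff.
type ForSeries interface {
	ForSeries(
		ctx context.Context,
		userID string,
		fpFilter index.FingerprintFilter,
		from, through model.Time,
		// The callback now also receives the stream's stats; producers
		// without stats pass nil.
		fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta, *index.StreamStats) (stop bool),
		// New parameter ahead of the matchers; every caller here passes nil.
		fieldNames []string,
		matchers ...*labels.Matcher,
	) error
}
```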
5 changes: 3 additions & 2 deletions pkg/bloombuild/common/tsdb_test.go
@@ -22,7 +22,8 @@ func (f forSeriesTestImpl) ForSeries(
 _ index.FingerprintFilter,
 _ model.Time,
 _ model.Time,
-fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta) bool,
+fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta, *index.StreamStats) (stop bool),
+_ []string,
 _ ...*labels.Matcher,
 ) error {
 for i := range f {
@@ -35,7 +36,7 @@ func (f forSeriesTestImpl) ForSeries(
 })
 }
 
-fn(nil, f[i].Fingerprint, unmapped)
+fn(nil, f[i].Fingerprint, unmapped, nil)
 }
 return nil
 }
4 changes: 2 additions & 2 deletions pkg/bloombuild/planner/strategies/chunksize.go
@@ -248,7 +248,7 @@ func (s *ChunkSizeStrategy) sizedSeriesIter(
 tenant,
 gap,
 0, math.MaxInt64,
-func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
+func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta, _ *index.StreamStats) (stop bool) {
 select {
 case <-ctx.Done():
 return true
@@ -281,7 +281,7 @@
 return false
 }
 },
-labels.MustNewMatcher(labels.MatchEqual, "", ""),
+nil, labels.MustNewMatcher(labels.MatchEqual, "", ""),
 ); err != nil {
 return nil, 0, err
 }
4 changes: 2 additions & 2 deletions pkg/bloombuild/planner/strategies/splitkeyspace_test.go
@@ -333,7 +333,7 @@ func newFakeForSeries(series []*v1.Series) *fakeForSeries {
 }
 }
 
-func (f fakeForSeries) ForSeries(_ context.Context, _ string, ff index.FingerprintFilter, _ model.Time, _ model.Time, fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta) (stop bool), _ ...*labels.Matcher) error {
+func (f fakeForSeries) ForSeries(_ context.Context, _ string, ff index.FingerprintFilter, _ model.Time, _ model.Time, fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta, *index.StreamStats) (stop bool), _ []string, _ ...*labels.Matcher) error {
 overlapping := make([]*v1.Series, 0, len(f.series))
 for _, s := range f.series {
 if ff.Match(s.Fingerprint) {
@@ -352,7 +352,7 @@ func (f fakeForSeries) ForSeries(_ context.Context, _ string, ff index.Fingerpri
 })
 }
 
-if fn(labels.EmptyLabels(), s.Fingerprint, chunks) {
+if fn(labels.EmptyLabels(), s.Fingerprint, chunks, nil) {
 break
 }
 }
9 changes: 5 additions & 4 deletions pkg/indexgateway/gateway.go
@@ -329,12 +329,13 @@ func (g *Gateway) LabelNamesForMetricName(ctx context.Context, req *logproto.Lab
 }
 matchers = matcherExpr.Mts
 }
-names, err := g.indexQuerier.LabelNamesForMetricName(ctx, instanceID, req.From, req.Through, req.MetricName, matchers...)
+names, smNames, err := g.indexQuerier.LabelNamesForMetricName(ctx, instanceID, req.From, req.Through, req.MetricName, matchers...)
 if err != nil {
 return nil, err
 }
 return &logproto.LabelResponse{
-Values: names,
+Values:             names,
+StructuredMetadata: smNames,
 }, nil
 }
 
@@ -612,7 +613,7 @@ func accumulateChunksToShards(
 user,
 v1.NewBounds(filtered[0].FingerprintModel(), filtered[len(filtered)-1].FingerprintModel()),
 req.From, req.Through,
-func(l labels.Labels, fp model.Fingerprint, chks []tsdb_index.ChunkMeta) (stop bool) {
+func(l labels.Labels, fp model.Fingerprint, chks []tsdb_index.ChunkMeta, _ *tsdb_index.StreamStats) (stop bool) {
 mtx.Lock()
 defer mtx.Unlock()
 
@@ -650,7 +651,7 @@
 
 return false
 },
-p.Matchers...,
+nil, p.Matchers...,
 ); err != nil {
 return nil, nil, err
 }
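Note: the querier's LabelNamesForMetricName now returns structured-metadata field names alongside label names, and the gateway surfaces them via logproto.LabelResponse.StructuredMetadata. A hedged sketch of a caller consuming the updated method, with the interface shape inferred from the call site above:

```go
package sketch

import (
	"context"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"

	"github.com/grafana/loki/v3/pkg/logproto"
)

// labelNamesQuerier mirrors the updated method shape (label names plus
// structured-metadata field names), inferred from the gateway call site.
type labelNamesQuerier interface {
	LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time,
		metricName string, matchers ...*labels.Matcher) ([]string, []string, error)
}

// buildLabelResponse shows how the two result lists map onto the response.
func buildLabelResponse(ctx context.Context, q labelNamesQuerier, tenant string,
	from, through model.Time, matchers ...*labels.Matcher) (*logproto.LabelResponse, error) {
	names, smNames, err := q.LabelNamesForMetricName(ctx, tenant, from, through, "logs", matchers...)
	if err != nil {
		return nil, err
	}
	return &logproto.LabelResponse{
		Values:             names,
		StructuredMetadata: smNames,
	}, nil
}
```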
3 changes: 2 additions & 1 deletion pkg/indexgateway/gateway_test.go
@@ -454,6 +454,7 @@ func TestAccumulateChunksToShards(t *testing.T) {
 _ labels.Labels,
 fp model.Fingerprint,
 chks []tsdb_index.ChunkMeta,
+_ *tsdb_index.StreamStats,
 ) (stop bool), _ ...*labels.Matcher) error {
 
 for _, s := range series {
@@ -466,7 +467,7 @@ func TestAccumulateChunksToShards(t *testing.T) {
 })
 }
 
-if stop := fn(nil, s[0].ref.FingerprintModel(), chks); stop {
+if stop := fn(nil, s[0].ref.FingerprintModel(), chks, nil); stop {
 return nil
 }
 }
25 changes: 20 additions & 5 deletions pkg/ingester/flush.go
@@ -23,6 +23,7 @@ import (
 
 "github.com/grafana/loki/v3/pkg/chunkenc"
 "github.com/grafana/loki/v3/pkg/storage/chunk"
+"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
 "github.com/grafana/loki/v3/pkg/util"
 util_log "github.com/grafana/loki/v3/pkg/util/log"
 )
@@ -270,7 +271,14 @@ func (i *Ingester) flushUserSeries(ctx context.Context, userID string, fp model.
 return nil
 }
 
-chunks, labels, chunkMtx := i.collectChunksToFlush(instance, fp, immediate)
+// (h11) this should return the stats to pass down to the writer.
+// Each stream has a list of chunks (all closed but the last one).
+// The stream should have two stats:
+// 1. ClosedStats: stats ready to be flushed
+// 2. UpdatableStats: stats with the most up-to-date data, including stats for unclosed chunks.
+// When a chunk is closed, we copy UpdatableStats into ClosedStats, and create a new UpdatableStats as a copy of ClosedStats.
+// That way, collectChunksToFlush can return ClosedStats, or we can use it right away.
+chunks, labels, stats, chunkMtx := i.collectChunksToFlush(instance, fp, immediate)
 if len(chunks) < 1 {
 return nil
 }
@@ -298,7 +306,9 @@ func (i *Ingester) flushUserSeries(ctx context.Context, userID string, fp model.
 "total_comp", humanize.Bytes(uint64(totalCompressedSize)),
 "avg_comp", humanize.Bytes(uint64(totalCompressedSize/len(chunks))),
 "total_uncomp", humanize.Bytes(uint64(totalUncompressedSize)),
-"avg_uncomp", humanize.Bytes(uint64(totalUncompressedSize/len(chunks))))
+"avg_uncomp", humanize.Bytes(uint64(totalUncompressedSize/len(chunks))),
+"metadata_fields", len(labels),
+)
 logValues = append(logValues, frc.Log()...)
 logValues = append(logValues, "labels", lbs)
 level.Info(i.logger).Log(logValues...)
@@ -311,16 +321,20 @@ func (i *Ingester) flushUserSeries(ctx context.Context, userID string, fp model.
 return fmt.Errorf("failed to flush chunks: %w, num_chunks: %d, labels: %s", err, len(chunks), lbs)
 }
 
+if err := i.store.UpdateSeriesStats(ctx, 0, 0, userID, uint64(fp), stats); err != nil {
+return fmt.Errorf("failed to update series stats: %w", err)
+}
+
 return nil
 }
 
-func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint, immediate bool) ([]*chunkDesc, labels.Labels, *sync.RWMutex) {
+func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint, immediate bool) ([]*chunkDesc, labels.Labels, *index.StreamStats, *sync.RWMutex) {
 var stream *stream
 var ok bool
 stream, ok = instance.streams.LoadByFP(fp)
 
 if !ok {
-return nil, nil, nil
+return nil, nil, nil, nil
 }
 
 stream.chunkMtx.Lock()
@@ -337,6 +351,7 @@ func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint
 // Ensure no more writes happen to this chunk.
 if !stream.chunks[j].closed {
 stream.chunks[j].closed = true
+stream.cutSeriesStats()
 }
 // Flush this chunk if it hasn't already been successfully flushed.
 if stream.chunks[j].flushed.IsZero() {
@@ -349,7 +364,7 @@ func (i *Ingester) collectChunksToFlush(instance *instance, fp model.Fingerprint
 }
 }
 }
-return result, stream.labels, &stream.chunkMtx
+return result, stream.labels, stream.flushableSeriesStats(), &stream.chunkMtx
 }
 
 func (i *Ingester) shouldFlushChunk(chunk *chunkDesc) (bool, string) {
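Note: the (h11) comment above outlines a two-slot stats lifecycle (ClosedStats and UpdatableStats). The sketch below illustrates that scheme under stated assumptions: the real stream type, cutSeriesStats, and flushableSeriesStats are not shown in this diff, so the names and layout here are hypothetical; only index.NewStreamStats and Merge are grounded in the diff (see the testStore in flush_test.go).

```go
package sketch

import "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"

// streamStatsPair is a hypothetical illustration of the scheme in the (h11)
// comment; the PR's real stream type is not shown in this diff.
type streamStatsPair struct {
	closed    *index.StreamStats // covers closed chunks only; safe to flush
	updatable *index.StreamStats // superset: also covers the open head chunk
}

// cut runs when the head chunk is closed (cf. stream.cutSeriesStats above):
// the up-to-date stats become flushable, and a fresh copy seeds the stats
// for the next head chunk.
func (s *streamStatsPair) cut() {
	s.closed = s.updatable
	next := index.NewStreamStats()
	next.Merge(s.closed)
	s.updatable = next
}

// flushable is what collectChunksToFlush would hand to UpdateSeriesStats
// (cf. stream.flushableSeriesStats above).
func (s *streamStatsPair) flushable() *index.StreamStats {
	return s.closed
}
```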
93 changes: 89 additions & 4 deletions pkg/ingester/flush_test.go
@@ -10,6 +10,8 @@ import (
 "testing"
 "time"
 
+"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
+
 gokitlog "github.com/go-kit/log"
 "github.com/grafana/dskit/flagext"
 "github.com/grafana/dskit/kv"
@@ -316,12 +318,24 @@ func TestFlushMaxAge(t *testing.T) {
 now := time.Unix(0, 0)
 
 firstEntries := []logproto.Entry{
-{Timestamp: now.Add(time.Nanosecond), Line: "1"},
-{Timestamp: now.Add(time.Minute), Line: "2"},
+{
+Timestamp: now.Add(time.Nanosecond),
+Line: "1",
+StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "1234")),
+},
+{
+Timestamp: now.Add(time.Minute),
+Line: "2",
+StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("traceID", "5678", "user", "fake1")),
+},
 }
 
 secondEntries := []logproto.Entry{
-{Timestamp: now.Add(time.Second * 61), Line: "3"},
+{
+Timestamp: now.Add(time.Second * 61),
+Line: "3",
+StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("user", "fake2")),
+},
 }
 
 req := &logproto.PushRequest{Streams: []logproto.Stream{
@@ -339,6 +353,19 @@ func TestFlushMaxAge(t *testing.T) {
 // ensure chunk is not flushed after flush period elapses
 store.checkData(t, map[string][]logproto.Stream{})
 
+// Structured metadata stats shouldn't be on the store
+store.checkStats(t, map[string]map[uint64]*index.StreamStats{})
+// but they should be available on the ingester
+start, end := now.Add(-time.Hour), now.Add(time.Hour)
+lbs, err := ing.Label(ctx, &logproto.LabelRequest{
+Start: &start,
+End: &end,
+Query: `{app="l"}`,
+})
+require.NoError(t, err)
+require.ElementsMatch(t, lbs.Values, []string{"app"})
+require.ElementsMatch(t, lbs.StructuredMetadata, []string{"traceID", "user"})
+
 req2 := &logproto.PushRequest{Streams: []logproto.Stream{
 {Labels: model.LabelSet{"app": "l"}.String(), Entries: secondEntries},
 }}
@@ -355,6 +382,27 @@ func TestFlushMaxAge(t *testing.T) {
 },
 })
 
+// Structured metadata stats should now be on the store
+store.checkStats(t, map[string]map[uint64]*index.StreamStats{
+userID: {
+labels.FromStrings("app", "l").Hash(): {
+StructuredMetadataFieldNames: map[string]struct{}{
+"traceID": {},
+"user": {},
+},
+},
+},
+})
+// but not in the ingester anymore
+lbs, err = ing.Label(ctx, &logproto.LabelRequest{
+Start: &start,
+End: &end,
+Query: `{app="l"}`,
+})
+require.NoError(t, err)
+require.Empty(t, lbs.Values)
+require.Empty(t, lbs.StructuredMetadata)
+
 require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ing))
 }
 
@@ -377,7 +425,9 @@ type testStore struct {
 mtx sync.Mutex
 // Chunks keyed by userID.
 chunks map[string][]chunk.Chunk
-onPut func(ctx context.Context, chunks []chunk.Chunk) error
+// Stats keyed by userID and stream fingerprint.
+stats map[string]map[uint64]*index.StreamStats
+onPut func(ctx context.Context, chunks []chunk.Chunk) error
 }
 
 // Note: the ingester New() function creates its own WAL first which we then override if specified.
@@ -386,6 +436,7 @@ type testStore struct {
 func newTestStore(t require.TestingT, cfg Config, walOverride WAL) (*testStore, *Ingester) {
 store := &testStore{
 chunks: map[string][]chunk.Chunk{},
+stats: map[string]map[uint64]*index.StreamStats{},
 }
 
 readRingMock := mockReadRingWithOneActiveIngester()
@@ -461,6 +512,22 @@ func (s *testStore) Put(ctx context.Context, chunks []chunk.Chunk) error {
 return nil
 }
 
+func (s *testStore) UpdateSeriesStats(_ context.Context, _, _ model.Time, userID string, fp uint64, stats *index.StreamStats) error {
+s.mtx.Lock()
+defer s.mtx.Unlock()
+
+if _, ok := s.stats[userID]; !ok {
+s.stats[userID] = map[uint64]*index.StreamStats{}
+}
+
+if _, ok := s.stats[userID][fp]; !ok {
+s.stats[userID][fp] = index.NewStreamStats()
+}
+s.stats[userID][fp].Merge(stats)
+
+return nil
+}
+
 func (s *testStore) PutOne(_ context.Context, _, _ model.Time, _ chunk.Chunk) error {
 return nil
 }
@@ -581,6 +648,24 @@ func (s *testStore) getChunksForUser(userID string) []chunk.Chunk {
 return s.chunks[userID]
 }
 
+func (s *testStore) checkStats(t *testing.T, expected map[string]map[uint64]*index.StreamStats) {
+for userID, expectedStreams := range expected {
+userStreams, ok := s.stats[userID]
+require.True(t, ok)
+
+for fp, expectedStats := range expectedStreams {
+stat, ok := userStreams[fp]
+require.True(t, ok)
+
+require.Len(t, stat.StructuredMetadataFieldNames, len(expectedStats.StructuredMetadataFieldNames))
+for name := range expectedStats.StructuredMetadataFieldNames {
+_, ok := stat.StructuredMetadataFieldNames[name]
+require.True(t, ok)
+}
+}
+}
+}
+
 func buildStreamsFromChunk(t *testing.T, lbs string, chk chunkenc.Chunk) logproto.Stream {
 it, err := chk.Iterator(context.TODO(), time.Unix(0, 0), time.Unix(1000, 0), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{}))
 require.NoError(t, err)
7 changes: 4 additions & 3 deletions pkg/ingester/ingester.go
@@ -1311,21 +1311,22 @@ func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logp
 return resp, nil
 }
 from, through := model.TimeFromUnixNano(start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano())
-var storeValues []string
+var storeValues, smValues []string
 if req.Values {
 storeValues, err = cs.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name, matchers...)
 if err != nil {
 return nil, err
 }
 } else {
-storeValues, err = cs.LabelNamesForMetricName(ctx, userID, from, through, "logs", matchers...)
+storeValues, smValues, err = cs.LabelNamesForMetricName(ctx, userID, from, through, "logs", matchers...)
 if err != nil {
 return nil, err
 }
 }
 
 return &logproto.LabelResponse{
-Values: util.MergeStringLists(resp.Values, storeValues),
+Values:             util.MergeStringLists(resp.Values, storeValues),
+StructuredMetadata: util.MergeStringLists(resp.StructuredMetadata, smValues),
 }, nil
 }
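Note: Label now merges the ingester's in-memory names with the store's, separately for label names and structured-metadata field names, which is what the flush test above asserts (names come from the ingester before flush and from the store after). util.MergeStringLists itself is not shown in this diff; the toy stand-in below assumes it merges sorted, de-duplicated string lists, which is how the call sites read:

```go
package main

import "fmt"

// mergeStringLists is a stand-in for util.MergeStringLists, assumed here to
// merge two sorted, de-duplicated string slices into one.
func mergeStringLists(a, b []string) []string {
	out := make([]string, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]:
			out = append(out, a[i])
			i++
		case a[i] > b[j]:
			out = append(out, b[j])
			j++
		default: // equal: keep one copy
			out = append(out, a[i])
			i, j = i+1, j+1
		}
	}
	out = append(out, a[i:]...)
	out = append(out, b[j:]...)
	return out
}

func main() {
	// In-memory (unflushed) field names merged with names already persisted.
	fmt.Println(mergeStringLists([]string{"traceID", "user"}, []string{"user"}))
	// Output: [traceID user]
}
```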