package tsdb

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/oklog/ulid"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	prom_tsdb "github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/prometheus/prometheus/tsdb/tombstones"
	"github.com/prometheus/prometheus/util/annotations"
)

// PostingsCacheConfig holds the configuration for the postings cache used by
// the cached block chunk querier.
type PostingsCacheConfig struct {
	MaxBytes int64         `yaml:"max_bytes"` // Maximum size in bytes the cache may grow to.
	MaxItems int           `yaml:"max_items"` // Maximum number of cached entries.
	TTL      time.Duration `yaml:"ttl"`       // How long a cached entry stays valid.
	Enabled  bool          `yaml:"enabled"`   // Toggles the cache on or off.
}
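
// As an illustration, a YAML fragment decoding into this struct might look
// like the following (the enclosing "postings_cache" key is hypothetical and
// depends on where the parent configuration embeds this struct):
//
//	postings_cache:
//	  enabled: true
//	  max_bytes: 10485760 # 10 MiB
//	  max_items: 5000
//	  ttl: 10m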

// blockBaseQuerier bundles the index, chunk, and tombstone readers for a
// single block and owns their lifecycle.
type blockBaseQuerier struct {
	blockID    ulid.ULID
	index      prom_tsdb.IndexReader
	chunks     prom_tsdb.ChunkReader
	tombstones tombstones.Reader

	closed bool

	mint, maxt int64
}

// newBlockBaseQuerier opens the index, chunk, and tombstone readers for the
// given block, closing any reader already opened if a later open fails.
func newBlockBaseQuerier(b prom_tsdb.BlockReader, mint, maxt int64) (*blockBaseQuerier, error) {
	indexr, err := b.Index()
	if err != nil {
		return nil, fmt.Errorf("open index reader: %w", err)
	}
	chunkr, err := b.Chunks()
	if err != nil {
		indexr.Close()
		return nil, fmt.Errorf("open chunk reader: %w", err)
	}
	tombsr, err := b.Tombstones()
	if err != nil {
		indexr.Close()
		chunkr.Close()
		return nil, fmt.Errorf("open tombstone reader: %w", err)
	}

	// A block may legitimately have no tombstones; fall back to an empty
	// in-memory reader so callers never see a nil tombstones.Reader.
	if tombsr == nil {
		tombsr = tombstones.NewMemTombstones()
	}
	return &blockBaseQuerier{
		blockID:    b.Meta().ULID,
		mint:       mint,
		maxt:       maxt,
		index:      indexr,
		chunks:     chunkr,
		tombstones: tombsr,
	}, nil
}
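
// Note that the cleanup Close errors above are deliberately dropped in favor
// of the original open error. On Go 1.20+ an alternative would be to surface
// both, for example:
//
//	return nil, errors.Join(fmt.Errorf("open chunk reader: %w", err), indexr.Close())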

// LabelValues returns the sorted values for the given label name. The label
// hints are not forwarded: the underlying IndexReader call takes no hints, so
// the method delegates directly and returns no annotations.
func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	res, err := q.index.SortedLabelValues(ctx, name, matchers...)
	return res, nil, err
}

// LabelNames returns all label names in the block, likewise ignoring the hints.
func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	res, err := q.index.LabelNames(ctx, matchers...)
	return res, nil, err
}

// Close releases the underlying readers. It is not idempotent: a second call
// returns an error instead of closing the readers twice.
func (q *blockBaseQuerier) Close() error {
	if q.closed {
		return errors.New("block querier already closed")
	}

	errs := tsdb_errors.NewMulti(
		q.index.Close(),
		q.chunks.Close(),
		q.tombstones.Close(),
	)
	q.closed = true
	return errs.Err()
}
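
// A minimal lifecycle sketch (assuming a prom_tsdb.BlockReader named block,
// obtained elsewhere, e.g. from an opened TSDB):
//
//	q, err := newBlockBaseQuerier(block, math.MinInt64, math.MaxInt64)
//	if err != nil {
//		return err
//	}
//	defer q.Close()
//	names, _, err := q.LabelNames(ctx, nil)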

// cachedBlockChunkQuerier is a storage.ChunkQuerier over a single block,
// mirroring the structure of Prometheus' own blockChunkQuerier.
type cachedBlockChunkQuerier struct {
	*blockBaseQuerier
}

// NewCachedBlockChunkQuerier returns a chunk querier for the given block and
// time range. The postings cache configuration is accepted here but not yet
// consulted in this code path.
func NewCachedBlockChunkQuerier(cfg PostingsCacheConfig, b prom_tsdb.BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
	q, err := newBlockBaseQuerier(b, mint, maxt)
	if err != nil {
		return nil, err
	}
	return &cachedBlockChunkQuerier{blockBaseQuerier: q}, nil
}
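
// A sketch of how this constructor could be wired into a TSDB, assuming a
// Prometheus version whose tsdb.Options exposes a BlockChunkQuerierFunc hook
// (cacheCfg stands in for a PostingsCacheConfig value):
//
//	opts := prom_tsdb.DefaultOptions()
//	opts.BlockChunkQuerierFunc = func(b prom_tsdb.BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
//		return NewCachedBlockChunkQuerier(cacheCfg, b, mint, maxt)
//	}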

// Select returns the chunk series matching the given matchers within the
// querier's time range.
func (q *cachedBlockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet {
	return selectChunkSeriesSet(ctx, sortSeries, hints, ms, q.blockID, q.index, q.chunks, q.tombstones, q.mint, q.maxt)
}

func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
	blockID ulid.ULID, index prom_tsdb.IndexReader, chunks prom_tsdb.ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
) storage.ChunkSeriesSet {
	disableTrimming := false
	sharded := hints != nil && hints.ShardCount > 0

	if hints != nil {
		mint = hints.Start
		maxt = hints.End
		disableTrimming = hints.DisableTrimming
	}
	p, err := prom_tsdb.PostingsForMatchers(ctx, index, ms...)
	if err != nil {
		return storage.ErrChunkSeriesSet(err)
	}
	if sharded {
		p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
	}
	if sortSeries {
		p = index.SortedPostings(p)
	}

	if hints != nil && hints.Func == "series" {
		// When only metadata is being looked up (for example by the series
		// API), there is no need to load any chunks.
		return prom_tsdb.NewBlockChunkSeriesSet(blockID, index, NewNopChunkReader(), tombstones, p, mint, maxt, disableTrimming)
	}

	return prom_tsdb.NewBlockChunkSeriesSet(blockID, index, chunks, tombstones, p, mint, maxt, disableTrimming)
}
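
// For instance, a metadata-only request such as the one issued by the
// /api/v1/series endpoint arrives with hints along the lines of
//
//	hints := &storage.SelectHints{Start: mint, End: maxt, Func: "series"}
//
// and takes the NewNopChunkReader branch above, skipping chunk I/O entirely.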

// nopChunkReader satisfies every chunk request with the same shared empty
// chunk, so series metadata can be iterated without touching chunk data.
type nopChunkReader struct {
	emptyChunk chunkenc.Chunk
}

// NewNopChunkReader returns a no-op chunk reader backed by a single empty
// XOR chunk.
func NewNopChunkReader() prom_tsdb.ChunkReader {
	return nopChunkReader{
		emptyChunk: chunkenc.NewXORChunk(),
	}
}

// ChunkOrIterable returns the shared empty chunk regardless of the requested
// chunk meta.
func (cr nopChunkReader) ChunkOrIterable(chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) {
	return cr.emptyChunk, nil, nil
}

func (cr nopChunkReader) Close() error { return nil }
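
// A quick sanity sketch of the contract (hypothetical, test-style):
//
//	cr := NewNopChunkReader()
//	chk, it, err := cr.ChunkOrIterable(chunks.Meta{})
//	// err == nil && it == nil && chk.NumSamples() == 0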