
Commit 342a722

Don't need to store the end time of every small chunk
We can look at the start time of the next one since they don't overlap. This makes the data structure slightly smaller, and speeds up unmarshalling since we don't need to seek through every value.

Signed-off-by: Bryan Boreham <[email protected]>
1 parent: 2bec64c
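As a minimal sketch of the idea (the endOf helper below is hypothetical, not part of this commit): only the bigchunk as a whole keeps an end timestamp now, and the upper bound of any individual small chunk can be derived from its neighbour, since chunks don't overlap.

```go
// Hypothetical sketch, not in the change: recovering a small chunk's upper
// bound without a stored end field. The next chunk's start is an exclusive
// bound on chunk i; the bigchunk-level end covers the last chunk.
func (b *bigchunk) endOf(i int) int64 {
	if i+1 < len(b.chunks) {
		return b.chunks[i+1].start
	}
	return b.end
}
```

Note the derived bound is the first timestamp of the following chunk rather than the exact last sample time, so the comparisons in Slice and FindAtOrAfter below switch from `> end` to `>= next start`.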

1 file changed: 15 additions, 27 deletions

pkg/chunk/encoding/bigchunk.go

Lines changed: 15 additions & 27 deletions
@@ -17,13 +17,13 @@ var errOutOfBounds = errors.New("out of bounds")
 type smallChunk struct {
 	chunkenc.XORChunk
 	start int64
-	end   int64
 }
 
 // bigchunk is a set of prometheus/tsdb chunks. It grows over time and has no
 // upperbound on number of samples it can contain.
 type bigchunk struct {
 	chunks []smallChunk
+	end    int64
 
 	appender         chunkenc.Appender
 	remainingSamples int
@@ -45,7 +45,7 @@ func (b *bigchunk) Add(sample model.SamplePair) ([]Chunk, error) {
 
 	b.appender.Append(int64(sample.Timestamp), float64(sample.Value))
 	b.remainingSamples--
-	b.chunks[len(b.chunks)-1].end = int64(sample.Timestamp)
+	b.end = int64(sample.Timestamp)
 	return []Chunk{b}, nil
 }
 
@@ -70,7 +70,6 @@ func (b *bigchunk) addNextChunk(start model.Time) error {
 	b.chunks = append(b.chunks, smallChunk{
 		XORChunk: *chunkenc.NewXORChunk(),
 		start:    int64(start),
-		end:      int64(start),
 	})
 
 	appender, err := b.chunks[len(b.chunks)-1].Appender()
@@ -128,15 +127,14 @@ func (b *bigchunk) UnmarshalFromBuf(buf []byte) error {
 			return err
 		}
 
-		start, end, err := firstAndLastTimes(chunk)
+		start, err := firstTime(chunk)
 		if err != nil {
 			return err
 		}
 
 		b.chunks = append(b.chunks, smallChunk{
 			XORChunk: *chunk.(*chunkenc.XORChunk),
 			start:    int64(start),
-			end:      int64(end),
 		})
 	}
 	return nil
@@ -183,8 +181,8 @@ func (b *bigchunk) NewIterator() Iterator {
 func (b *bigchunk) Slice(start, end model.Time) Chunk {
 	i, j := 0, len(b.chunks)
 	for k := 0; k < len(b.chunks); k++ {
-		if b.chunks[k].end < int64(start) {
-			i = k + 1
+		if b.chunks[k].start <= int64(start) {
+			i = k
 		}
 		if b.chunks[k].start > int64(end) {
 			j = k
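A worked example of the new lower-bound selection in Slice, with made-up chunk start times (this snippet is illustrative only and not part of the file):

```go
// Illustrative only: keep the index of the last chunk starting at or before
// the slice start, since that chunk may still hold samples inside the slice.
// Previously a chunk was skipped once its stored end fell before the start.
func sliceLowerBound(starts []int64, sliceStart int64) int {
	i := 0
	for k := range starts {
		if starts[k] <= sliceStart {
			i = k
		}
	}
	return i
}

// sliceLowerBound([]int64{0, 100, 200}, 150) == 1: the chunk starting at
// t=100 is kept because its samples may run up to just before t=200.
```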
@@ -238,22 +236,19 @@ type bigchunkIterator struct {
 }
 
 func (it *bigchunkIterator) FindAtOrAfter(target model.Time) bool {
-	if it.i >= len(it.chunks) {
+	if it.i >= len(it.chunks) || int64(target) > it.end {
 		return false
 	}
 
 	// If the seek is outside the current chunk, use the index to find the right
 	// chunk.
-	if int64(target) < it.chunks[it.i].start || int64(target) > it.chunks[it.i].end {
+	if int64(target) < it.chunks[it.i].start ||
+		(it.i+1 < len(it.chunks) && int64(target) >= it.chunks[it.i+1].start) {
 		it.curr = nil
-		for it.i = 0; it.i < len(it.chunks) && int64(target) > it.chunks[it.i].end; it.i++ {
+		for it.i = 0; it.i+1 < len(it.chunks) && int64(target) >= it.chunks[it.i+1].start; it.i++ {
 		}
 	}
 
-	if it.i >= len(it.chunks) {
-		return false
-	}
-
 	if it.curr == nil {
 		it.curr = it.chunks[it.i].Iterator()
 	} else if t, _ := it.curr.At(); int64(target) <= t {
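Why the trailing bounds check could be dropped, shown as a hypothetical standalone version of the seek loop (the function and its inputs are made up for illustration):

```go
// The loop only advances while a *next* chunk exists and starts at or before
// the target, so the index can never run past the last chunk. Targets beyond
// the data are already rejected by the new `int64(target) > it.end` test.
func seekIndex(starts []int64, target int64) int {
	i := 0
	for ; i+1 < len(starts) && target >= starts[i+1]; i++ {
	}
	return i // always a valid index when len(starts) > 0
}
```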
@@ -319,20 +314,13 @@ func (it *bigchunkIterator) Err() error {
 	return nil
 }
 
-func firstAndLastTimes(c chunkenc.Chunk) (int64, int64, error) {
+func firstTime(c chunkenc.Chunk) (int64, error) {
 	var (
-		first    int64
-		last     int64
-		firstSet bool
-		iter     = c.Iterator()
+		first int64
+		iter  = c.Iterator()
 	)
-	for iter.Next() {
-		t, _ := iter.At()
-		if !firstSet {
-			first = t
-			firstSet = true
-		}
-		last = t
+	if iter.Next() {
+		first, _ = iter.At()
 	}
-	return first, last, iter.Err()
+	return first, iter.Err()
 }
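A minimal sketch of why this speeds up unmarshalling, assuming the chunkenc package this file already uses (the helper name and sample data are made up): XOR chunks decode sequentially, so firstAndLastTimes had to step through every sample to learn the last timestamp, while firstTime stops after the first.

```go
// Hypothetical illustration, not part of the commit.
func firstTimeSketch() int64 {
	c := chunkenc.NewXORChunk()
	app, _ := c.Appender() // error ignored for brevity
	for ts := int64(0); ts < 120; ts += 15 {
		app.Append(ts, float64(ts)) // eight made-up samples
	}

	it := c.Iterator()
	if it.Next() {
		t, _ := it.At() // one decode step is enough for the start time
		return t        // 0
	}
	return 0
}
```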
