
Commit a69efcd

Allow ingesting native histograms (#5986)
* support ingesting native histograms
* fix lint
* update doc to mark experimental feature
* update changelog and doc
* update test
* fix test
* address comment; handle histogram partial append errors

Signed-off-by: Ben Ye <[email protected]>
1 parent 613c2c4 commit a69efcd

29 files changed: +863 -362 lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -3,6 +3,7 @@
 ## master / unreleased
 * [CHANGE] Upgrade Dockerfile Node version from 14x to 18x. #5906
 * [CHANGE] Ingester: Remove `-querier.query-store-for-labels-enabled` flag. Querying long-term store for labels is always enabled. #5984
+* [FEATURE] Ingester: Experimental: Enable native histogram ingestion via `-blocks-storage.tsdb.enable-native-histograms` flag. #5986
 * [ENHANCEMENT] rulers: Add support to persist tokens in rulers. #5987
 * [ENHANCEMENT] Query Frontend/Querier: Added store gateway postings touched count and touched size in Querier stats and log in Query Frontend. #5892
 * [ENHANCEMENT] Query Frontend/Querier: Returns `warnings` on prometheus query responses. #5916

docs/blocks-storage/querier.md

Lines changed: 4 additions & 0 deletions
@@ -1441,4 +1441,8 @@ blocks_storage:
 # be out-of-order.
 # CLI flag: -blocks-storage.tsdb.out-of-order-cap-max
 [out_of_order_cap_max: <int> | default = 32]
+
+# [EXPERIMENTAL] True to enable native histogram.
+# CLI flag: -blocks-storage.tsdb.enable-native-histograms
+[enable_native_histograms: <boolean> | default = false]
 ```

docs/blocks-storage/store-gateway.md

Lines changed: 4 additions & 0 deletions
@@ -1566,4 +1566,8 @@ blocks_storage:
 # be out-of-order.
 # CLI flag: -blocks-storage.tsdb.out-of-order-cap-max
 [out_of_order_cap_max: <int> | default = 32]
+
+# [EXPERIMENTAL] True to enable native histogram.
+# CLI flag: -blocks-storage.tsdb.enable-native-histograms
+[enable_native_histograms: <boolean> | default = false]
 ```

docs/configuration/config-file-reference.md

Lines changed: 4 additions & 0 deletions
@@ -1998,6 +1998,10 @@ tsdb:
 # be out-of-order.
 # CLI flag: -blocks-storage.tsdb.out-of-order-cap-max
 [out_of_order_cap_max: <int> | default = 32]
+
+# [EXPERIMENTAL] True to enable native histogram.
+# CLI flag: -blocks-storage.tsdb.enable-native-histograms
+[enable_native_histograms: <boolean> | default = false]
 ```
 
 ### `compactor_config`

docs/configuration/v1-guarantees.md

Lines changed: 2 additions & 0 deletions
@@ -111,3 +111,5 @@ Currently experimental features are:
 - OTLP Receiver
 - Persistent tokens in the Ruler Ring:
 - `-ruler.ring.tokens-file-path` (path) CLI flag
+- Native Histograms
+- Ingestion can be enabled by setting `-blocks-storage.tsdb.enable-native-histograms=true` on Ingester.
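
The docs diffs above only show the generated reference docs; the Go wiring behind the new option is not part of this scrape. As a rough sketch of the flag registration implied by the documented YAML key and CLI flag (the struct and field names below are assumed for illustration, not taken from the commit):

```go
package tsdbcfg

import "flag"

// Config stands in for the blocks-storage TSDB config section; the field name
// EnableNativeHistograms is an assumption made for this sketch.
type Config struct {
	EnableNativeHistograms bool `yaml:"enable_native_histograms"`
}

// RegisterFlags wires the documented CLI flag to the config field, defaulting to false.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
	f.BoolVar(&cfg.EnableNativeHistograms, "blocks-storage.tsdb.enable-native-histograms", false,
		"[EXPERIMENTAL] True to enable native histogram.")
}
```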

pkg/cortexpb/compat.go

Lines changed: 7 additions & 3 deletions
@@ -18,7 +18,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/util"
 )
 
-// ToWriteRequest converts matched slices of Labels, Samples and Metadata into a WriteRequest proto.
+// ToWriteRequest converts matched slices of Labels, Samples, Metadata and Histograms into a WriteRequest proto.
 // It gets timeseries from the pool, so ReuseSlice() should be called when done.
 func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, histograms []Histogram, source WriteRequest_SourceEnum) *WriteRequest {
 	req := &WriteRequest{
@@ -27,13 +27,17 @@ func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMe
 		Source: source,
 	}
 
-	for i, s := range samples {
+	i := 0
+	for i < len(samples) || i < len(histograms) {
 		ts := TimeseriesFromPool()
 		ts.Labels = append(ts.Labels, FromLabelsToLabelAdapters(lbls[i])...)
-		ts.Samples = append(ts.Samples, s)
+		if i < len(samples) {
+			ts.Samples = append(ts.Samples, samples[i])
+		}
 		if i < len(histograms) {
 			ts.Histograms = append(ts.Histograms, histograms[i])
 		}
+		i++
 		req.Timeseries = append(req.Timeseries, PreallocTimeseries{TimeSeries: ts})
 	}
 
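
For orientation, a minimal usage sketch (not part of the commit) of the reworked `ToWriteRequest`: it builds a request carrying one native histogram and no float samples. The label values and histogram contents are invented, and `HistogramToHistogramProto` comes from the new `pkg/cortexpb/histograms.go` shown below.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"

	"github.com/cortexproject/cortex/pkg/cortexpb"
)

func main() {
	lbls := []labels.Labels{
		labels.FromStrings("__name__", "http_request_duration_seconds", "job", "api"),
	}

	// Illustrative integer native histogram: 3 observations summing to 6,
	// all landing in a single positive bucket.
	h := &histogram.Histogram{
		Schema:          0,
		ZeroThreshold:   1e-128,
		Count:           3,
		Sum:             6,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 1}},
		PositiveBuckets: []int64{3},
	}
	histograms := []cortexpb.Histogram{cortexpb.HistogramToHistogramProto(1690000000000, h)}

	// No float samples and no metadata in this request.
	req := cortexpb.ToWriteRequest(lbls, nil, nil, histograms, cortexpb.API)
	defer cortexpb.ReuseSlice(req.Timeseries)

	fmt.Println("series in request:", len(req.Timeseries))
}
```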

pkg/cortexpb/histograms.go

Lines changed: 120 additions & 0 deletions
@@ -0,0 +1,120 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cortexpb
+
+import "github.com/prometheus/prometheus/model/histogram"
+
+func (h Histogram) IsFloatHistogram() bool {
+	_, ok := h.GetCount().(*Histogram_CountFloat)
+	return ok
+}
+
+// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
+// provided proto message. The caller has to make sure that the proto message
+// represents an integer histogram and not a float histogram.
+// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L626-L645
+func HistogramProtoToHistogram(hp Histogram) *histogram.Histogram {
+	if hp.IsFloatHistogram() {
+		panic("HistogramProtoToHistogram called with a float histogram")
+	}
+	return &histogram.Histogram{
+		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
+		Schema:           hp.Schema,
+		ZeroThreshold:    hp.ZeroThreshold,
+		ZeroCount:        hp.GetZeroCountInt(),
+		Count:            hp.GetCountInt(),
+		Sum:              hp.Sum,
+		PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
+		PositiveBuckets:  hp.GetPositiveDeltas(),
+		NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
+		NegativeBuckets:  hp.GetNegativeDeltas(),
+	}
+}
+
+// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the provided proto message.
+// The caller has to make sure that the proto message represents a float histogram and not an
+// integer histogram, or it panics.
+// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L647-L667
+func FloatHistogramProtoToFloatHistogram(hp Histogram) *histogram.FloatHistogram {
+	if !hp.IsFloatHistogram() {
+		panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
+	}
+	return &histogram.FloatHistogram{
+		CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
+		Schema:           hp.Schema,
+		ZeroThreshold:    hp.ZeroThreshold,
+		ZeroCount:        hp.GetZeroCountFloat(),
+		Count:            hp.GetCountFloat(),
+		Sum:              hp.Sum,
+		PositiveSpans:    spansProtoToSpans(hp.GetPositiveSpans()),
+		PositiveBuckets:  hp.GetPositiveCounts(),
+		NegativeSpans:    spansProtoToSpans(hp.GetNegativeSpans()),
+		NegativeBuckets:  hp.GetNegativeCounts(),
+	}
+}
+
+// HistogramToHistogramProto converts a (normal integer) Histogram to its protobuf message type.
+// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L709-L723
+func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) Histogram {
+	return Histogram{
+		Count:          &Histogram_CountInt{CountInt: h.Count},
+		Sum:            h.Sum,
+		Schema:         h.Schema,
+		ZeroThreshold:  h.ZeroThreshold,
+		ZeroCount:      &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
+		NegativeSpans:  spansToSpansProto(h.NegativeSpans),
+		NegativeDeltas: h.NegativeBuckets,
+		PositiveSpans:  spansToSpansProto(h.PositiveSpans),
+		PositiveDeltas: h.PositiveBuckets,
+		ResetHint:      Histogram_ResetHint(h.CounterResetHint),
+		TimestampMs:    timestamp,
+	}
+}
+
+// FloatHistogramToHistogramProto converts a float Histogram to a normal
+// Histogram's protobuf message type.
+// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L725-L739
+func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) Histogram {
+	return Histogram{
+		Count:          &Histogram_CountFloat{CountFloat: fh.Count},
+		Sum:            fh.Sum,
+		Schema:         fh.Schema,
+		ZeroThreshold:  fh.ZeroThreshold,
+		ZeroCount:      &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
+		NegativeSpans:  spansToSpansProto(fh.NegativeSpans),
+		NegativeCounts: fh.NegativeBuckets,
+		PositiveSpans:  spansToSpansProto(fh.PositiveSpans),
+		PositiveCounts: fh.PositiveBuckets,
+		ResetHint:      Histogram_ResetHint(fh.CounterResetHint),
+		TimestampMs:    timestamp,
+	}
+}
+
+func spansProtoToSpans(s []BucketSpan) []histogram.Span {
+	spans := make([]histogram.Span, len(s))
+	for i := 0; i < len(s); i++ {
+		spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
+	}
+
+	return spans
+}
+
+func spansToSpansProto(s []histogram.Span) []BucketSpan {
+	spans := make([]BucketSpan, len(s))
+	for i := 0; i < len(s); i++ {
+		spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
+	}
+
+	return spans
+}
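
As a quick sanity check on the new helpers, a hedged round-trip sketch (not from the commit) that converts an in-memory integer histogram to the proto form and back, using only the fields shown above; the values are made up.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"

	"github.com/cortexproject/cortex/pkg/cortexpb"
)

func main() {
	// Illustrative integer native histogram: 4 observations across two buckets.
	h := &histogram.Histogram{
		Schema:          0,
		ZeroThreshold:   1e-128,
		Count:           4,
		Sum:             10,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []int64{3, -2}, // delta encoding: absolute bucket counts 3 and 1
	}

	// To the wire format and back again.
	hp := cortexpb.HistogramToHistogramProto(1690000000000, h)
	fmt.Println("is float histogram:", hp.IsFloatHistogram()) // false for the integer variant

	back := cortexpb.HistogramProtoToHistogram(hp)
	fmt.Println("count:", back.Count, "sum:", back.Sum)
}
```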

pkg/distributor/distributor.go

Lines changed: 43 additions & 25 deletions
@@ -69,6 +69,9 @@ const (
 	// mergeSlicesParallelism is a constant of how much go routines we should use to merge slices, and
 	// it was based on empirical observation: See BenchmarkMergeSlicesParallel
 	mergeSlicesParallelism = 8
+
+	sampleMetricTypeFloat     = "float"
+	sampleMetricTypeHistogram = "histogram"
 )
 
 // Distributor is a storage.SampleAppender and a client.Querier which
@@ -276,7 +279,7 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove
 			Namespace: "cortex",
 			Name:      "distributor_received_samples_total",
 			Help:      "The total number of received samples, excluding rejected and deduped samples.",
-		}, []string{"user"}),
+		}, []string{"user", "type"}),
 		receivedExemplars: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Namespace: "cortex",
 			Name:      "distributor_received_exemplars_total",
@@ -291,7 +294,7 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove
 			Namespace: "cortex",
 			Name:      "distributor_samples_in_total",
 			Help:      "The total number of samples that have come in to the distributor, including rejected or deduped samples.",
-		}, []string{"user"}),
+		}, []string{"user", "type"}),
 		incomingExemplars: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Namespace: "cortex",
 			Name:      "distributor_exemplars_in_total",
@@ -428,10 +431,12 @@ func (d *Distributor) cleanupInactiveUser(userID string) {
 
 	d.HATracker.CleanupHATrackerMetricsForUser(userID)
 
-	d.receivedSamples.DeleteLabelValues(userID)
+	d.receivedSamples.DeleteLabelValues(userID, sampleMetricTypeFloat)
+	d.receivedSamples.DeleteLabelValues(userID, sampleMetricTypeHistogram)
 	d.receivedExemplars.DeleteLabelValues(userID)
 	d.receivedMetadata.DeleteLabelValues(userID)
-	d.incomingSamples.DeleteLabelValues(userID)
+	d.incomingSamples.DeleteLabelValues(userID, sampleMetricTypeFloat)
+	d.incomingSamples.DeleteLabelValues(userID, sampleMetricTypeHistogram)
 	d.incomingExemplars.DeleteLabelValues(userID)
 	d.incomingMetadata.DeleteLabelValues(userID)
 	d.nonHASamples.DeleteLabelValues(userID)
@@ -547,7 +552,7 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri
 		// Only alloc when data present
 		samples = make([]cortexpb.Sample, 0, len(ts.Samples))
 		for _, s := range ts.Samples {
-			if err := validation.ValidateSample(d.validateMetrics, limits, userID, ts.Labels, s); err != nil {
+			if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, ts.Labels, s.TimestampMs); err != nil {
 				return emptyPreallocSeries, err
 			}
 			samples = append(samples, s)
@@ -574,8 +579,13 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri
 	if len(ts.Histograms) > 0 {
 		// Only alloc when data present
 		histograms = make([]cortexpb.Histogram, 0, len(ts.Histograms))
-		// TODO(yeya24): we need to have validations for native histograms
-		// at some point. Skip validations for now.
+		for _, h := range ts.Histograms {
+			// TODO(yeya24): add other validations for native histogram.
+			// For example, Prometheus scrape has bucket limit and schema check.
+			if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, ts.Labels, h.TimestampMs); err != nil {
+				return emptyPreallocSeries, err
+			}
+		}
 		histograms = append(histograms, ts.Histograms...)
 	}
 
@@ -607,14 +617,17 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co
 	now := time.Now()
 	d.activeUsers.UpdateUserTimestamp(userID, now)
 
-	numSamples := 0
+	numFloatSamples := 0
+	numHistogramSamples := 0
 	numExemplars := 0
 	for _, ts := range req.Timeseries {
-		numSamples += len(ts.Samples) + len(ts.Histograms)
+		numFloatSamples += len(ts.Samples)
+		numHistogramSamples += len(ts.Histograms)
 		numExemplars += len(ts.Exemplars)
 	}
 	// Count the total samples, exemplars in, prior to validation or deduplication, for comparison with other metrics.
-	d.incomingSamples.WithLabelValues(userID).Add(float64(numSamples))
+	d.incomingSamples.WithLabelValues(userID, sampleMetricTypeFloat).Add(float64(numFloatSamples))
+	d.incomingSamples.WithLabelValues(userID, sampleMetricTypeHistogram).Add(float64(numHistogramSamples))
 	d.incomingExemplars.WithLabelValues(userID).Add(float64(numExemplars))
 	// Count the total number of metadata in.
 	d.incomingMetadata.WithLabelValues(userID).Add(float64(len(req.Metadata)))
@@ -642,31 +655,32 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co
 
 			if errors.Is(err, ha.ReplicasNotMatchError{}) {
 				// These samples have been deduped.
-				d.dedupedSamples.WithLabelValues(userID, cluster).Add(float64(numSamples))
+				d.dedupedSamples.WithLabelValues(userID, cluster).Add(float64(numFloatSamples + numHistogramSamples))
 				return nil, httpgrpc.Errorf(http.StatusAccepted, err.Error())
 			}
 
 			if errors.Is(err, ha.TooManyReplicaGroupsError{}) {
-				d.validateMetrics.DiscardedSamples.WithLabelValues(validation.TooManyHAClusters, userID).Add(float64(numSamples))
+				d.validateMetrics.DiscardedSamples.WithLabelValues(validation.TooManyHAClusters, userID).Add(float64(numFloatSamples + numHistogramSamples))
 				return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
 			}
 
 			return nil, err
 		}
 		// If there wasn't an error but removeReplica is false that means we didn't find both HA labels.
 		if !removeReplica {
-			d.nonHASamples.WithLabelValues(userID).Add(float64(numSamples))
+			d.nonHASamples.WithLabelValues(userID).Add(float64(numFloatSamples + numHistogramSamples))
 		}
 	}
 
 	// A WriteRequest can only contain series or metadata but not both. This might change in the future.
-	seriesKeys, validatedTimeseries, validatedSamples, validatedExemplars, firstPartialErr, err := d.prepareSeriesKeys(ctx, req, userID, limits, removeReplica)
+	seriesKeys, validatedTimeseries, validatedFloatSamples, validatedHistogramSamples, validatedExemplars, firstPartialErr, err := d.prepareSeriesKeys(ctx, req, userID, limits, removeReplica)
 	if err != nil {
 		return nil, err
 	}
 	metadataKeys, validatedMetadata, firstPartialErr := d.prepareMetadataKeys(req, limits, userID, firstPartialErr)
 
-	d.receivedSamples.WithLabelValues(userID).Add(float64(validatedSamples))
+	d.receivedSamples.WithLabelValues(userID, sampleMetricTypeFloat).Add(float64(validatedFloatSamples))
+	d.receivedSamples.WithLabelValues(userID, sampleMetricTypeHistogram).Add(float64(validatedHistogramSamples))
 	d.receivedExemplars.WithLabelValues(userID).Add(float64(validatedExemplars))
 	d.receivedMetadata.WithLabelValues(userID).Add(float64(len(validatedMetadata)))
 
@@ -677,18 +691,19 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co
 		return &cortexpb.WriteResponse{}, firstPartialErr
 	}
 
-	totalN := validatedSamples + validatedExemplars + len(validatedMetadata)
+	totalSamples := validatedFloatSamples + validatedHistogramSamples
+	totalN := totalSamples + validatedExemplars + len(validatedMetadata)
 	if !d.ingestionRateLimiter.AllowN(now, userID, totalN) {
 		// Ensure the request slice is reused if the request is rate limited.
 		cortexpb.ReuseSlice(req.Timeseries)
 
-		d.validateMetrics.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedSamples))
+		d.validateMetrics.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(totalSamples))
 		d.validateMetrics.DiscardedExemplars.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedExemplars))
 		d.validateMetrics.DiscardedMetadata.WithLabelValues(validation.RateLimited, userID).Add(float64(len(validatedMetadata)))
 		// Return a 429 here to tell the client it is going too fast.
 		// Client may discard the data or slow down and re-send.
 		// Prometheus v2.26 added a remote-write option 'retry_on_http_429'.
-		return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples and %d metadata", d.ingestionRateLimiter.Limit(now, userID), validatedSamples, len(validatedMetadata))
+		return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples and %d metadata", d.ingestionRateLimiter.Limit(now, userID), totalSamples, len(validatedMetadata))
 	}
 
 	// totalN included samples and metadata. Ingester follows this pattern when computing its ingestion rate.
@@ -810,15 +825,16 @@ func (d *Distributor) prepareMetadataKeys(req *cortexpb.WriteRequest, limits *va
 	return metadataKeys, validatedMetadata, firstPartialErr
 }
 
-func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.WriteRequest, userID string, limits *validation.Limits, removeReplica bool) ([]uint32, []cortexpb.PreallocTimeseries, int, int, error, error) {
+func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.WriteRequest, userID string, limits *validation.Limits, removeReplica bool) ([]uint32, []cortexpb.PreallocTimeseries, int, int, int, error, error) {
 	pSpan, _ := opentracing.StartSpanFromContext(ctx, "prepareSeriesKeys")
 	defer pSpan.Finish()
 
 	// For each timeseries or samples, we compute a hash to distribute across ingesters;
 	// check each sample/metadata and discard if outside limits.
 	validatedTimeseries := make([]cortexpb.PreallocTimeseries, 0, len(req.Timeseries))
 	seriesKeys := make([]uint32, 0, len(req.Timeseries))
-	validatedSamples := 0
+	validatedFloatSamples := 0
+	validatedHistogramSamples := 0
 	validatedExemplars := 0
 
 	var firstPartialErr error
@@ -839,7 +855,9 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write
 		if len(ts.Samples) > 0 {
 			latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].TimestampMs)
 		}
-		// TODO(yeya24): use timestamp of the latest native histogram in the series as well.
+		if len(ts.Histograms) > 0 {
+			latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Histograms[len(ts.Histograms)-1].TimestampMs)
+		}
 
 		if mrc := limits.MetricRelabelConfigs; len(mrc) > 0 {
 			l, _ := relabel.Process(cortexpb.FromLabelAdaptersToLabels(ts.Labels), mrc...)
@@ -885,7 +903,7 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write
 		// label and dropped labels (if any)
 		key, err := d.tokenForLabels(userID, ts.Labels)
 		if err != nil {
-			return nil, nil, 0, 0, nil, err
+			return nil, nil, 0, 0, 0, nil, err
 		}
 		validatedSeries, validationErr := d.validateSeries(ts, userID, skipLabelNameValidation, limits)
 
@@ -904,11 +922,11 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write
 
 		seriesKeys = append(seriesKeys, key)
 		validatedTimeseries = append(validatedTimeseries, validatedSeries)
-		// TODO(yeya24): add histogram samples as well when supported.
-		validatedSamples += len(ts.Samples)
+		validatedFloatSamples += len(ts.Samples)
+		validatedHistogramSamples += len(ts.Histograms)
 		validatedExemplars += len(ts.Exemplars)
 	}
-	return seriesKeys, validatedTimeseries, validatedSamples, validatedExemplars, firstPartialErr, nil
+	return seriesKeys, validatedTimeseries, validatedFloatSamples, validatedHistogramSamples, validatedExemplars, firstPartialErr, nil
 }
 
 func sortLabelsIfNeeded(labels []cortexpb.LabelAdapter) {
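
The distributor changes above mostly thread a per-type split (`float` vs `histogram`) through existing counters. Here is a small standalone sketch using the plain Prometheus client (outside Cortex) of what that label scheme implies, including why `cleanupInactiveUser` now has to delete two label combinations per user; the user ID and values are invented.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// Same shape as the updated distributor metric: per-user counters that now
	// also carry a "type" label ("float" or "histogram").
	receivedSamples := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Namespace: "cortex",
		Name:      "distributor_received_samples_total",
		Help:      "The total number of received samples, excluding rejected and deduped samples.",
	}, []string{"user", "type"})

	receivedSamples.WithLabelValues("user-1", "float").Add(3)
	receivedSamples.WithLabelValues("user-1", "histogram").Add(1)

	// Because the vector is keyed by both labels, removing an inactive user
	// means deleting each per-type series explicitly, mirroring cleanupInactiveUser.
	receivedSamples.DeleteLabelValues("user-1", "float")
	receivedSamples.DeleteLabelValues("user-1", "histogram")

	mfs, _ := reg.Gather()
	fmt.Println("metric families remaining:", len(mfs))
}
```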
