
Commit d4cd56c

Use validationError instead of httpgrpc error to reduce memory allocation
It's more efficient to defer constructing the httpgrpc error until the end, because we only report the last error out of possibly hundreds.

Signed-off-by: Bryan Boreham <[email protected]>
1 parent bac85f8 commit d4cd56c

File tree

4 files changed, +25 -23 lines changed


pkg/ingester/ingester.go

Lines changed: 14 additions & 13 deletions

@@ -293,7 +293,7 @@ func (i *Ingester) Push(ctx old_ctx.Context, req *client.WriteRequest) (*client.WriteResponse, error) {
 	if err != nil {
 		return nil, fmt.Errorf("no user id")
 	}
-	var lastPartialErr error
+	var lastPartialErr *validationError
 
 	for _, ts := range req.Timeseries {
 		for _, s := range ts.Samples {
@@ -303,20 +303,20 @@ func (i *Ingester) Push(ctx old_ctx.Context, req *client.WriteRequest) (*client.WriteResponse, error) {
 			}
 
 			i.metrics.ingestedSamplesFail.Inc()
-			if httpResp, ok := httpgrpc.HTTPResponseFromError(err); ok {
-				switch httpResp.Code {
-				case http.StatusBadRequest, http.StatusTooManyRequests:
-					lastPartialErr = err
-					continue
-				}
+			if ve, ok := err.(*validationError); ok {
+				lastPartialErr = ve
+				continue
 			}
 
 			return nil, err
 		}
 	}
 	client.ReuseSlice(req.Timeseries)
 
-	return &client.WriteResponse{}, lastPartialErr
+	if lastPartialErr != nil {
+		return &client.WriteResponse{}, lastPartialErr.WrappedError()
+	}
+	return &client.WriteResponse{}, nil
 }
 
 func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, timestamp model.Time, value model.SampleValue, source client.WriteRequest_SourceEnum) error {
@@ -338,6 +338,9 @@ func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, timestamp model.Time, value model.SampleValue, source client.WriteRequest_SourceEnum) error {
 	}
 	state, fp, series, err := i.userStates.getOrCreateSeries(ctx, userID, labels)
 	if err != nil {
+		if ve, ok := err.(*validationError); ok {
+			state.discardedSamples.WithLabelValues(ve.errorType).Inc()
+		}
 		state = nil // don't want to unlock the fp if there is an error
 		return err
 	}
@@ -357,13 +360,11 @@ func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, timestamp model.Time, value model.SampleValue, source client.WriteRequest_SourceEnum) error {
 		Value:     value,
 		Timestamp: timestamp,
 	}); err != nil {
-		if mse, ok := err.(*memorySeriesError); ok {
-			state.discardedSamples.WithLabelValues(mse.errorType).Inc()
-			if mse.noReport {
+		if ve, ok := err.(*validationError); ok {
+			state.discardedSamples.WithLabelValues(ve.errorType).Inc()
+			if ve.noReport {
 				return nil
 			}
-			// Use a dumb string template to avoid the message being parsed as a template
-			err = httpgrpc.Errorf(http.StatusBadRequest, "%s", mse.message)
 		}
 		return err
 	}

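Note for readers following the diff: the definition of validationError is not part of the hunks in this commit, so the following is only a minimal sketch of the shape the code above appears to rely on. The field names (err, errorType, code, noReport, labels), the Error and WrappedError methods, the package placement, and the import paths are assumptions inferred from the call sites, not the actual Cortex source.

package ingester

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/weaveworks/common/httpgrpc"
)

// validationError is a sketch of a lightweight error that carries just enough
// information to be counted in the discarded-samples metric and, only if needed,
// converted into an httpgrpc error later.
type validationError struct {
	err       error         // underlying cause of the rejection
	errorType string        // label value for the discarded-samples metric
	code      int           // HTTP status to use if the error is reported, e.g. 400 or 429
	noReport  bool          // count the sample as discarded but do not report the error
	labels    labels.Labels // offending series, if known
}

func (e *validationError) Error() string {
	if len(e.labels) == 0 {
		return e.err.Error()
	}
	return fmt.Sprintf("%s for: %s", e.err, e.labels)
}

// WrappedError converts the validation error into an httpgrpc error. Deferring
// this conversion to the end of Push is the allocation saving the commit message
// describes: only the single lastPartialErr that is actually returned gets wrapped.
func (e *validationError) WrappedError() error {
	return httpgrpc.Errorf(e.code, "%s", e.Error())
}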
pkg/ingester/ingester_test.go

Lines changed: 4 additions & 4 deletions

@@ -317,16 +317,16 @@ func TestIngesterAppendOutOfOrderAndDuplicate(t *testing.T) {
 	// Earlier sample than previous one.
 	err = ing.append(ctx, userID, m, 0, 0, client.API)
 	require.Contains(t, err.Error(), "sample timestamp out of order")
-	errResp, ok := httpgrpc.HTTPResponseFromError(err)
+	errResp, ok := err.(*validationError)
 	require.True(t, ok)
-	require.Equal(t, errResp.Code, int32(400))
+	require.Equal(t, errResp.code, 400)
 
 	// Same timestamp as previous sample, but different value.
 	err = ing.append(ctx, userID, m, 1, 1, client.API)
 	require.Contains(t, err.Error(), "sample with repeated timestamp but different value")
-	errResp, ok = httpgrpc.HTTPResponseFromError(err)
+	errResp, ok = err.(*validationError)
 	require.True(t, ok)
-	require.Equal(t, errResp.Code, int32(400))
+	require.Equal(t, errResp.code, 400)
 }
 
 // Test that blank labels are removed by the ingester

pkg/ingester/ingester_v2.go

Lines changed: 5 additions & 2 deletions

@@ -111,7 +111,7 @@ func (i *Ingester) v2Push(ctx old_ctx.Context, req *client.WriteRequest) (*client.WriteResponse, error) {
 			// 400 error to the client. The client (Prometheus) will not retry on 400, and
 			// we actually ingested all samples which haven't failed.
 			if err == tsdb.ErrOutOfBounds || err == tsdb.ErrOutOfOrderSample || err == tsdb.ErrAmendSample {
-				lastPartialErr = httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+				lastPartialErr = err
				continue
 			}
 
@@ -135,7 +135,10 @@ func (i *Ingester) v2Push(ctx old_ctx.Context, req *client.WriteRequest) (*client.WriteResponse, error) {
 
 	client.ReuseSlice(req.Timeseries)
 
-	return &client.WriteResponse{}, lastPartialErr
+	if lastPartialErr != nil {
+		return &client.WriteResponse{}, httpgrpc.Errorf(http.StatusBadRequest, lastPartialErr.Error())
+	}
+	return &client.WriteResponse{}, nil
 }
 
 func (i *Ingester) v2Query(ctx old_ctx.Context, req *client.QueryRequest) (*client.QueryResponse, error) {

pkg/ingester/user_state.go

Lines changed: 2 additions & 4 deletions

@@ -195,8 +195,7 @@ func (u *userState) getSeries(metric labelPairs) (model.Fingerprint, *memorySeries, error) {
 	err := u.limiter.AssertMaxSeriesPerUser(u.userID, u.fpToSeries.length())
 	if err != nil {
 		u.fpLocker.Unlock(fp)
-		u.discardedSamples.WithLabelValues(perUserSeriesLimit).Inc()
-		return fp, nil, httpgrpc.Errorf(http.StatusTooManyRequests, err.Error())
+		return fp, nil, makeLimitError(perUserSeriesLimit, err)
 	}
 
 	metricName, err := extract.MetricNameFromLabelAdapters(metric)
@@ -209,8 +208,7 @@ func (u *userState) getSeries(metric labelPairs) (model.Fingerprint, *memorySeries, error) {
 	err = u.canAddSeriesFor(string(metricName))
 	if err != nil {
 		u.fpLocker.Unlock(fp)
-		u.discardedSamples.WithLabelValues(perMetricSeriesLimit).Inc()
-		return fp, nil, httpgrpc.Errorf(http.StatusTooManyRequests, "%s for: %s", err.Error(), metric)
+		return fp, nil, makeMetricLimitError(perMetricSeriesLimit, client.FromLabelAdaptersToLabels(metric), err)
 	}
 
 	u.memSeriesCreatedTotal.Inc()

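Likewise, makeLimitError and makeMetricLimitError are defined outside the hunks shown here. Continuing the hypothetical validationError sketch that follows the ingester.go diff above, constructors matching these two call sites might look roughly like the following; the 429 status and the labels handling are inferred from the code that was removed, and the labels.Labels type for client.FromLabelAdaptersToLabels's result is an assumption.

// Continuation of the sketch above; these would live alongside validationError.

import (
	"net/http"

	"github.com/prometheus/prometheus/pkg/labels"
)

// makeLimitError records a per-user series-limit rejection: the errorType feeds
// the discarded-samples metric, the 429 status is kept for later, and no
// httpgrpc error is allocated yet.
func makeLimitError(errorType string, err error) error {
	return &validationError{
		errorType: errorType,
		err:       err,
		code:      http.StatusTooManyRequests,
	}
}

// makeMetricLimitError additionally carries the offending series labels so the
// eventual message can preserve the old "%s for: %s" formatting.
func makeMetricLimitError(errorType string, lbls labels.Labels, err error) error {
	return &validationError{
		errorType: errorType,
		err:       err,
		labels:    lbls,
		code:      http.StatusTooManyRequests,
	}
}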