Optionally write stub entries to the chunk cache from ingesters #1482

Merged: 1 commit, Jul 2, 2019
5 changes: 5 additions & 0 deletions docs/arguments.md
@@ -116,6 +116,11 @@ The ingester query API was improved over time, but defaults to the old behaviour

When `push` requests arrive, pre-allocate this many slots to decode them. Tune this setting to reduce memory allocations and garbage. The optimum value will depend on how many labels are sent with your timeseries samples.

- `-store.chunk-cache-stubs`

If you don't want to cache every chunk written by ingesters, but you do want to take advantage of chunk write deduplication, this option makes ingesters write a placeholder to the cache for each chunk. (A sketch of the dedup idea follows this file's diff.)
Make sure you configure ingesters with a different cache from the one used by queriers, which need the whole chunk value.

## Ingester, Distributor & Querier limits.

Cortex implements various limits on the requests it can process, in order to prevent a single tenant from overwhelming the cluster. There are various default global limits which apply to all tenants and which can be set on the command line. These limits can also be overridden on a per-tenant basis using a configuration file. Specify the filename for the override configuration file using the `-limits.per-user-override-config=<filename>` flag. The override file will be re-read every 10 seconds by default; this can also be controlled using the `-limits.per-user-override-period=10s` flag.
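To make the dedup point concrete, here is a minimal, self-contained sketch (hypothetical names, and a plain map standing in for the real chunk cache; this is not Cortex code): a writer only needs to see that a chunk's key is already present in the cache, so an empty stub value is enough to suppress a duplicate write.

package main

import "fmt"

func main() {
	// Stand-in for the chunk cache: only key presence matters for dedup.
	chunkCache := map[string][]byte{
		"userA/chunk-1": nil, // stub written by another ingester replica
	}

	toFlush := []string{"userA/chunk-1", "userA/chunk-2"}
	for _, key := range toFlush {
		if _, ok := chunkCache[key]; ok {
			// Dedup hit: a stub is as good as the full chunk for this check.
			fmt.Println("skip write, already stored:", key)
			continue
		}
		fmt.Println("write chunk to object store:", key)
		chunkCache[key] = nil // leave a stub so other replicas skip it too
	}
}

Queriers, by contrast, read chunk bodies out of the cache, which is why the documentation above says to point them at a separate cache holding full values.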
2 changes: 1 addition & 1 deletion pkg/chunk/cache/cache_test.go
@@ -96,7 +96,7 @@ func testCacheMultiple(t *testing.T, cache cache.Cache, keys []string, chunks []
 func testChunkFetcher(t *testing.T, c cache.Cache, keys []string, chunks []chunk.Chunk) {
 	fetcher, err := chunk.NewChunkFetcher(cache.Config{
 		Cache: c,
-	}, nil)
+	}, false, nil)
 	require.NoError(t, err)
 	defer fetcher.Stop()

6 changes: 5 additions & 1 deletion pkg/chunk/chunk_store.go
@@ -64,11 +64,15 @@ type StoreConfig struct {
 
 	// Limits query start time to be greater than now() - MaxLookBackPeriod, if set.
 	MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"`
+
+	// Not visible in yaml because the setting shouldn't be common between ingesters and queriers
+	chunkCacheStubs bool // don't write the full chunk to cache, just a stub entry
 }
 
 // RegisterFlags adds the flags required to config this to the given FlagSet
 func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) {
 	cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("", "Cache config for chunks. ", f)
+	f.BoolVar(&cfg.chunkCacheStubs, "store.chunk-cache-stubs", false, "If true, don't write the full chunk to cache, just a stub entry.")
 	cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f)
 
 	f.DurationVar(&cfg.MinChunkAge, "store.min-chunk-age", 0, "Minimum time between chunk update and being saved to the store.")
@@ -92,7 +96,7 @@ type store struct {
 }
 
 func newStore(cfg StoreConfig, schema Schema, index IndexClient, chunks ObjectClient, limits *validation.Overrides) (Store, error) {
-	fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, chunks)
+	fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, cfg.chunkCacheStubs, chunks)
 	if err != nil {
 		return nil, err
 	}
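A side note on the config pattern above: chunkCacheStubs is an unexported struct field, so it never round-trips through YAML, but RegisterFlags still wires it to -store.chunk-cache-stubs. A stripped-down sketch of that pattern (hypothetical type, not the real StoreConfig):

package main

import (
	"flag"
	"fmt"
	"time"
)

// storeConfig is a toy stand-in for the StoreConfig shown in the diff above.
type storeConfig struct {
	MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"`

	// Unexported: invisible to YAML marshalling, so ingesters and queriers
	// can't accidentally share it via a common config file.
	chunkCacheStubs bool
}

func (cfg *storeConfig) registerFlags(f *flag.FlagSet) {
	f.BoolVar(&cfg.chunkCacheStubs, "store.chunk-cache-stubs", false,
		"If true, don't write the full chunk to cache, just a stub entry.")
}

func main() {
	var cfg storeConfig
	fs := flag.NewFlagSet("ingester", flag.ContinueOnError)
	cfg.registerFlags(fs)
	_ = fs.Parse([]string{"-store.chunk-cache-stubs=true"})
	fmt.Println("chunkCacheStubs:", cfg.chunkCacheStubs) // prints: chunkCacheStubs: true
}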
20 changes: 13 additions & 7 deletions pkg/chunk/chunk_store_utils.go
@@ -54,8 +54,9 @@ outer:
 // and writing back any misses to the cache. Also responsible for decoding
 // chunks from the cache, in parallel.
 type Fetcher struct {
-	storage ObjectClient
-	cache   cache.Cache
+	storage    ObjectClient
+	cache      cache.Cache
+	cacheStubs bool
 
 	wait           sync.WaitGroup
 	decodeRequests chan decodeRequest
@@ -72,7 +73,7 @@ type decodeResponse struct {
 }
 
 // NewChunkFetcher makes a new ChunkFetcher.
-func NewChunkFetcher(cfg cache.Config, storage ObjectClient) (*Fetcher, error) {
+func NewChunkFetcher(cfg cache.Config, cacheStubs bool, storage ObjectClient) (*Fetcher, error) {
 	cache, err := cache.New(cfg)
 	if err != nil {
 		return nil, err
@@ -81,6 +82,7 @@ func NewChunkFetcher(cfg cache.Config, storage ObjectClient) (*Fetcher, error) {
 	c := &Fetcher{
 		storage:        storage,
 		cache:          cache,
+		cacheStubs:     cacheStubs,
 		decodeRequests: make(chan decodeRequest),
 	}

@@ -149,10 +151,14 @@ func (c *Fetcher) writeBackCache(ctx context.Context, chunks []Chunk) error {
 	keys := make([]string, 0, len(chunks))
 	bufs := make([][]byte, 0, len(chunks))
 	for i := range chunks {
-		encoded, err := chunks[i].Encoded()
-		// TODO don't fail, just log and continue?
-		if err != nil {
-			return err
+		var encoded []byte
+		var err error
+		if !c.cacheStubs {
+			encoded, err = chunks[i].Encoded()
+			// TODO don't fail, just log and continue?
+			if err != nil {
+				return err
+			}
 		}
 
 		keys = append(keys, chunks[i].ExternalKey())
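Read against the hunk above: with cacheStubs set, writeBackCache skips chunks[i].Encoded() and stores an empty buffer under the chunk's external key. A rough, self-contained sketch of that shape (simplified types and a callback standing in for the cache's Store method; not the real Fetcher):

package main

import (
	"context"
	"fmt"
)

// storeFunc stands in for the cache's Store method (keys and bufs in lockstep).
type storeFunc func(ctx context.Context, keys []string, bufs [][]byte)

type fakeChunk struct {
	key     string
	payload []byte
}

// writeBackCache mirrors the shape of the change: when cacheStubs is true,
// the encoded payload is skipped and only the key (with an empty value) is cached.
func writeBackCache(ctx context.Context, store storeFunc, cacheStubs bool, chunks []fakeChunk) {
	keys := make([]string, 0, len(chunks))
	bufs := make([][]byte, 0, len(chunks))
	for _, c := range chunks {
		var encoded []byte
		if !cacheStubs {
			encoded = c.payload // the real code calls chunks[i].Encoded() here
		}
		keys = append(keys, c.key)
		bufs = append(bufs, encoded)
	}
	store(ctx, keys, bufs)
}

func main() {
	report := func(_ context.Context, keys []string, bufs [][]byte) {
		for i := range keys {
			fmt.Printf("cache key=%s value=%d bytes\n", keys[i], len(bufs[i]))
		}
	}
	chunks := []fakeChunk{{key: "userA/chunk-1", payload: []byte("compressed samples")}}

	writeBackCache(context.Background(), report, false, chunks) // full payload cached
	writeBackCache(context.Background(), report, true, chunks)  // stub: 0 bytes cached
}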
2 changes: 1 addition & 1 deletion pkg/chunk/series_store.go
@@ -68,7 +68,7 @@ type seriesStore struct {
 }
 
 func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks ObjectClient, limits *validation.Overrides) (Store, error) {
-	fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, chunks)
+	fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, cfg.chunkCacheStubs, chunks)
 	if err != nil {
 		return nil, err
 	}