diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 51250bd0382..d2e29506549 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -64,6 +64,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/upload-sarif@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: sarif_file: results.sarif diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 082ca9620ed..9d5fb1a20f5 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -17,7 +17,7 @@ jobs: lint: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -46,7 +46,7 @@ jobs: test: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -64,7 +64,7 @@ jobs: test-no-race: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -93,21 +93,21 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: languages: go - name: Autobuild - uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 build: runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -162,6 +162,7 @@ jobs: - integration_querier - integration_ruler - integration_query_fuzz + - integration_remote_write_v2 steps: - name: Upgrade golang uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 @@ -210,8 +211,7 @@ jobs: docker pull quay.io/cortexproject/cortex:v1.18.1 elif [ "$TEST_TAGS" = "integration_query_fuzz" ]; then docker pull quay.io/cortexproject/cortex:v1.18.1 - docker pull quay.io/prometheus/prometheus:v2.51.0 - docker pull quay.io/prometheus/prometheus:v2.55.1 + docker pull quay.io/prometheus/prometheus:v3.5.0 fi docker pull memcached:1.6.1 docker pull redis:7.0.4-alpine @@ -224,7 +224,7 @@ jobs: export CORTEX_IMAGE="${CORTEX_IMAGE_PREFIX}cortex:$IMAGE_TAG-amd64" export CORTEX_CHECKOUT_DIR="/go/src/github.com/cortexproject/cortex" echo "Running integration tests with image: $CORTEX_IMAGE" - go test -tags=integration,${{ matrix.tags }} -timeout 2400s -v -count=1 ./integration/... + go test -tags=slicelabels,integration,${{ matrix.tags }} -timeout 2400s -v -count=1 ./integration/... 
env: IMAGE_PREFIX: ${{ secrets.IMAGE_PREFIX }} @@ -247,14 +247,14 @@ jobs: run: | touch build-image/.uptodate MIGRATIONS_DIR=$(pwd)/cmd/cortex/migrations - make BUILD_IMAGE=quay.io/cortexproject/build-image:master-7ce1d1b12 TTY='' configs-integration-test + make BUILD_IMAGE=quay.io/cortexproject/build-image:master-59491e9aae TTY='' configs-integration-test deploy_website: needs: [build, test] if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -296,7 +296,7 @@ jobs: if: (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/')) && github.repository == 'cortexproject/cortex' runs-on: ubuntu-24.04 container: - image: quay.io/cortexproject/build-image:master-7ce1d1b12 + image: quay.io/cortexproject/build-image:master-59491e9aae steps: - name: Checkout Repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 diff --git a/.golangci.yml b/.golangci.yml index 2812394d35b..e566cfa72df 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,6 +12,8 @@ run: - integration_querier - integration_ruler - integration_query_fuzz + - integration_remote_write_v2 + - slicelabels output: formats: text: diff --git a/ADOPTERS.md b/ADOPTERS.md index def54436f41..a7c87bbcb2c 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -14,9 +14,11 @@ This is the list of organisations that are using Cortex in **production environm * [KakaoEnterprise](https://kakaocloud.com/) * [MayaData](https://mayadata.io/) * [Northflank](https://northflank.com/) +* [Open-Xchange](https://www.open-xchange.com/) * [Opstrace](https://opstrace.com/) * [PITS Globale Datenrettungsdienste](https://www.pitsdatenrettung.de/) * [Planetary Quantum](https://www.planetary-quantum.com) * [Platform9](https://platform9.com/) * [REWE Digital](https://rewe-digital.com/) * [SysEleven](https://www.syseleven.de/) +* [Twilio](https://www.twilio.com/) diff --git a/CHANGELOG.md b/CHANGELOG.md index 77e9869a0d4..d9b4196686c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,16 @@ # Changelog ## master / unreleased -* [FEATURE] Query Frontend: Add support /api/v1/format_query API for formatting queries. #6893 * [CHANGE] StoreGateway/Alertmanager: Add default 5s connection timeout on client. #6603 * [CHANGE] Ingester: Remove EnableNativeHistograms config flag and instead gate keep through new per-tenant limit at ingestion. #6718 * [CHANGE] Validate a tenantID when to use a single tenant resolver. #6727 +* [FEATURE] Distributor: Add an experimental `-distributor.otlp.enable-type-and-unit-labels` flag to add `__type__` and `__unit__` labels for OTLP metrics. #6969 +* [FEATURE] Distributor: Add an experimental `-distributor.otlp.allow-delta-temporality` flag to ingest delta temporality otlp metrics. #6934 * [FEATURE] Query Frontend: Add dynamic interval size for query splitting. This is enabled by configuring experimental flags `querier.max-shards-per-query` and/or `querier.max-fetched-data-duration-per-query`. The split interval size is dynamically increased to maintain a number of shards and total duration fetched below the configured values. 
#6458 * [FEATURE] Querier/Ruler: Add `query_partial_data` and `rules_partial_data` limits to allow queries/rules to be evaluated with data from a single zone, if other zones are not available. #6526 * [FEATURE] Update prometheus alertmanager version to v0.28.0 and add new integration msteamsv2, jira, and rocketchat. #6590 * [FEATURE] Ingester/StoreGateway: Add `ResourceMonitor` module in Cortex, and add `ResourceBasedLimiter` in Ingesters and StoreGateways. #6674 +* [FEATURE] Support Prometheus remote write 2.0. #6330 * [FEATURE] Ingester: Support out-of-order native histogram ingestion. It automatically enabled when `-ingester.out-of-order-time-window > 0` and `-blocks-storage.tsdb.enable-native-histograms=true`. #6626 #6663 * [FEATURE] Ruler: Add support for percentage based sharding for rulers. #6680 * [FEATURE] Ruler: Add support for group labels. #6665 @@ -20,6 +22,11 @@ * [FEATURE] Compactor: Add support for percentage based sharding for compactors. #6738 * [FEATURE] Querier: Allow choosing PromQL engine via header. #6777 * [FEATURE] Querier: Support for configuring query optimizers and enabling XFunctions in the Thanos engine. #6873 +* [FEATURE] Query Frontend: Add support for the /api/v1/format_query API for formatting queries. #6893 +* [FEATURE] Query Frontend: Add support for the /api/v1/parse_query API (experimental) to parse a PromQL expression and return it as a JSON-formatted AST (abstract syntax tree). #6978 +* [ENHANCEMENT] Ingester: Add `cortex_ingester_tsdb_wal_replay_unknown_refs_total` and `cortex_ingester_tsdb_wbl_replay_unknown_refs_total` metrics to track unknown series references during WAL/WBL replay. #6945 +* [ENHANCEMENT] Ruler: Emit an error message when the rule synchronization fails. #6902 +* [ENHANCEMENT] Querier: Support snappy and zstd response compression for the `-querier.response-compression` flag. #6848 * [ENHANCEMENT] Tenant Federation: Add a # of query result limit logic when the `-tenant-federation.regex-matcher-enabled` is enabled. #6845 * [ENHANCEMENT] Query Frontend: Add a `cortex_slow_queries_total` metric to track # of slow queries per user. #6859 * [ENHANCEMENT] Query Frontend: Change to return 400 when the tenant resolving fail. #6715 @@ -60,6 +67,12 @@ * [ENHANCEMENT] Querier: Support query limits in parquet queryable. #6870 * [ENHANCEMENT] Ring: Add zone label to ring_members metric. #6900 * [ENHANCEMENT] Ingester: Add new metric `cortex_ingester_push_errors_total` to track reasons for ingester request failures. #6901 +* [ENHANCEMENT] Ring: Expose `detailed_metrics_enabled` for all rings. Default true. #6926 +* [ENHANCEMENT] Parquet Storage: Allow Parquet Queryable to disable fallback to Store Gateway. #6920 +* [ENHANCEMENT] Query Frontend: Add a `format_query` label value to the `op` label at `cortex_query_frontend_queries_total` metric. #6925 +* [ENHANCEMENT] API: Add request ID injection to context to enable tracking requests across downstream services. #6895 +* [ENHANCEMENT] gRPC: Add gRPC Channelz monitoring. #6950 +* [ENHANCEMENT] Upgrade build image and Go version to 1.24.6. #6970 #6976 * [BUGFIX] Ingester: Avoid error or early throttling when READONLY ingesters are present in the ring #6517 * [BUGFIX] Ingester: Fix labelset data race condition. #6573 * [BUGFIX] Compactor: Cleaner should not put deletion marker for blocks with no-compact marker.
#6576 diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 3420219f4cb..6465b94cf71 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -1,6 +1,7 @@ # Cortex Governance +This document defines project governance for the Cortex project. Its purpose is to describe how decisions are made on the project and how anyone can influence these decisions. -This document defines project governance for the project. +This governance charter applies to every project under the cortexproject GitHub organization. The term "Cortex project" refers to any work done under the cortexproject GitHub organization and includes the cortexproject/cortex repository itself as well as cortexproject/cortex-tools, cortexproject/cortex-jsonnet and all the other repositories under the cortexproject GitHub organization. ## Voting diff --git a/Makefile b/Makefile index 705e005dac1..cb8008e70eb 100644 --- a/Makefile +++ b/Makefile @@ -87,15 +87,12 @@ $(foreach exe, $(EXES), $(eval $(call dep_exe, $(exe)))) pkg/cortexpb/cortex.pb.go: pkg/cortexpb/cortex.proto pkg/ingester/client/ingester.pb.go: pkg/ingester/client/ingester.proto pkg/distributor/distributorpb/distributor.pb.go: pkg/distributor/distributorpb/distributor.proto -pkg/ingester/wal.pb.go: pkg/ingester/wal.proto pkg/ring/ring.pb.go: pkg/ring/ring.proto pkg/frontend/v1/frontendv1pb/frontend.pb.go: pkg/frontend/v1/frontendv1pb/frontend.proto pkg/frontend/v2/frontendv2pb/frontend.pb.go: pkg/frontend/v2/frontendv2pb/frontend.proto pkg/querier/tripperware/queryrange/queryrange.pb.go: pkg/querier/tripperware/queryrange/queryrange.proto -pkg/querier/tripperware/instantquery/instantquery.pb.go: pkg/querier/tripperware/instantquery/instantquery.proto pkg/querier/tripperware/query.pb.go: pkg/querier/tripperware/query.proto pkg/querier/stats/stats.pb.go: pkg/querier/stats/stats.proto -pkg/distributor/ha_tracker.pb.go: pkg/distributor/ha_tracker.proto pkg/ruler/rulespb/rules.pb.go: pkg/ruler/rulespb/rules.proto pkg/ruler/ruler.pb.go: pkg/ruler/ruler.proto pkg/ring/kv/memberlist/kv.pb.go: pkg/ring/kv/memberlist/kv.proto @@ -115,13 +112,13 @@ build-image/$(UPTODATE): build-image/* SUDO := $(shell docker info >/dev/null 2>&1 || echo "sudo -E") BUILD_IN_CONTAINER := true BUILD_IMAGE ?= $(IMAGE_PREFIX)build-image -LATEST_BUILD_IMAGE_TAG ?= master-7ce1d1b12 +LATEST_BUILD_IMAGE_TAG ?= master-59491e9aae # TTY is parameterized to allow Google Cloud Builder to run builds, # as it currently disallows TTY devices. This value needs to be overridden # in any custom cloudbuild.yaml files TTY := --tty -GO_FLAGS := -ldflags "-X main.Branch=$(GIT_BRANCH) -X main.Revision=$(GIT_REVISION) -X main.Version=$(VERSION) -extldflags \"-static\" -s -w" -tags netgo +GO_FLAGS := -ldflags "-X main.Branch=$(GIT_BRANCH) -X main.Revision=$(GIT_REVISION) -X main.Version=$(VERSION) -extldflags \"-static\" -s -w" -tags "netgo slicelabels" ifeq ($(BUILD_IN_CONTAINER),true) @@ -177,7 +174,7 @@ lint: golangci-lint run # Ensure no blocklisted package is imported.
- GOFLAGS="-tags=requires_docker,integration,integration_alertmanager,integration_backward_compatibility,integration_memberlist,integration_querier,integration_ruler,integration_query_fuzz" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ + GOFLAGS="-tags=requires_docker,integration,integration_alertmanager,integration_backward_compatibility,integration_memberlist,integration_querier,integration_ruler,integration_query_fuzz,integration_remote_write_v2" faillint -paths "github.com/bmizerany/assert=github.com/stretchr/testify/assert,\ golang.org/x/net/context=context,\ sync/atomic=go.uber.org/atomic,\ github.com/prometheus/client_golang/prometheus.{MultiError}=github.com/prometheus/prometheus/tsdb/errors.{NewMulti},\ @@ -216,15 +213,15 @@ lint: ./pkg/ruler/... test: - go test -tags netgo -timeout 30m -race -count 1 ./... + go test -tags "netgo slicelabels" -timeout 30m -race -count 1 ./... test-no-race: - go test -tags netgo -timeout 30m -count 1 ./... + go test -tags "netgo slicelabels" -timeout 30m -count 1 ./... cover: $(eval COVERDIR := $(shell mktemp -d coverage.XXXXXXXXXX)) $(eval COVERFILE := $(shell mktemp $(COVERDIR)/unit.XXXXXXXXXX)) - go test -tags netgo -timeout 30m -race -count 1 -coverprofile=$(COVERFILE) ./... + go test -tags netgo,slicelabels -timeout 30m -race -count 1 -coverprofile=$(COVERFILE) ./... go tool cover -html=$(COVERFILE) -o cover.html go tool cover -func=cover.html | tail -n1 @@ -232,7 +229,7 @@ shell: bash configs-integration-test: - /bin/bash -c "go test -v -tags 'netgo integration' -timeout 10m ./pkg/configs/... ./pkg/ruler/..." + /bin/bash -c "go test -v -tags 'netgo integration slicelabels' -timeout 10m ./pkg/configs/... ./pkg/ruler/..." mod-check: GO111MODULE=on go mod download @@ -256,13 +253,14 @@ web-deploy: # Generates the config file documentation. 
doc: clean-doc - go run ./tools/doc-generator ./docs/configuration/config-file-reference.template > ./docs/configuration/config-file-reference.md - go run ./tools/doc-generator ./docs/blocks-storage/compactor.template > ./docs/blocks-storage/compactor.md - go run ./tools/doc-generator ./docs/blocks-storage/store-gateway.template > ./docs/blocks-storage/store-gateway.md - go run ./tools/doc-generator ./docs/blocks-storage/querier.template > ./docs/blocks-storage/querier.md - go run ./tools/doc-generator ./docs/guides/encryption-at-rest.template > ./docs/guides/encryption-at-rest.md + go run -tags slicelabels ./tools/doc-generator ./docs/configuration/config-file-reference.template > ./docs/configuration/config-file-reference.md + go run -tags slicelabels ./tools/doc-generator ./docs/blocks-storage/compactor.template > ./docs/blocks-storage/compactor.md + go run -tags slicelabels ./tools/doc-generator ./docs/blocks-storage/store-gateway.template > ./docs/blocks-storage/store-gateway.md + go run -tags slicelabels ./tools/doc-generator ./docs/blocks-storage/querier.template > ./docs/blocks-storage/querier.md + go run -tags slicelabels ./tools/doc-generator ./docs/guides/encryption-at-rest.template > ./docs/guides/encryption-at-rest.md embedmd -w docs/operations/requests-mirroring-to-secondary-cluster.md embedmd -w docs/guides/overrides-exporter.md + go run -tags slicelabels ./tools/doc-generator -json-schema > ./schemas/cortex-config-schema.json endif diff --git a/README.md b/README.md index 515b199a295..470ffe3ed50 100644 --- a/README.md +++ b/README.md @@ -11,14 +11,14 @@ # Cortex -Cortex is a horizontally scalable, highly available, multi-tenant, long term storage solution for [Prometheus](https://prometheus.io) and [OpenTelemetry Metrics](https://opentelemetry.io/docs/specs/otel/metrics/) +Cortex is a horizontally scalable, highly available, multi-tenant, long-term storage solution for [Prometheus](https://prometheus.io) and [OpenTelemetry Metrics](https://opentelemetry.io/docs/specs/otel/metrics/). ## Features - **Horizontally scalable:** Cortex can run across multiple machines in a cluster, exceeding the throughput and storage of a single machine. - **Highly available:** When run in a cluster, Cortex can replicate data between machines. - **Multi-tenant:** Cortex can isolate data and queries from multiple different independent Prometheus sources in a single cluster. -- **Long term storage:** Cortex supports S3, GCS, Swift and Microsoft Azure for long term storage of metric data. +- **Long-term storage:** Cortex supports S3, GCS, Swift and Microsoft Azure for long-term storage of metric data. 
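The `slicelabels` Go build tag now threaded through `GO_FLAGS`, the test targets, and the doc generator in the Makefile above presumably selects the slice-backed labels implementation in the vendored Prometheus code at compile time. As a minimal sketch of how such a tag gates code — the package, file name, and constant below are illustrative, not Cortex sources:

```go
//go:build slicelabels

// labelsdemo_slice.go — compiled only when the build runs with the
// slicelabels tag, e.g. `go test -tags "netgo slicelabels" ./...` as in the
// Makefile targets above. A sibling file guarded by //go:build !slicelabels
// would provide the alternative implementation; exactly one of the two is
// ever included in a given build.
package labelsdemo

// Impl reports which labels representation this binary was built with.
const Impl = "slice-backed labels"
```

Because the tag set is resolved at build time there is no runtime switch; a target that forgot the tag would silently build the other implementation, which is presumably why the tag is applied consistently across `GO_FLAGS`, `test`, `cover`, `configs-integration-test`, and `doc`.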
## Documentation @@ -76,13 +76,13 @@ Join us in shaping the future of Cortex, and let's build something amazing toget - Sep 2020 KubeCon talk "Scaling Prometheus: How We Got Some Thanos Into Cortex" ([video](https://www.youtube.com/watch?v=Z5OJzRogAS4), [slides](https://static.sched.com/hosted_files/kccnceu20/ec/2020-08%20-%20KubeCon%20EU%20-%20Cortex%20blocks%20storage.pdf)) - Jul 2020 PromCon talk "Sharing is Caring: Leveraging Open Source to Improve Cortex & Thanos" ([video](https://www.youtube.com/watch?v=2oTLouUvsac), [slides](https://docs.google.com/presentation/d/1OuKYD7-k9Grb7unppYycdmVGWN0Bo0UwdJRySOoPdpg/edit)) - Nov 2019 KubeCon talks "[Cortex 101: Horizontally Scalable Long Term Storage for Prometheus][kubecon-cortex-101]" ([video][kubecon-cortex-101-video], [slides][kubecon-cortex-101-slides]), "[Configuring Cortex for Max - Performance][kubecon-cortex-201]" ([video][kubecon-cortex-201-video], [slides][kubecon-cortex-201-slides], [write up][kubecon-cortex-201-writeup]) and "[Blazin’ Fast PromQL][kubecon-blazin]" ([slides][kubecon-blazin-slides], [video][kubecon-blazin-video], [write up][kubecon-blazin-writeup]) + Performance][kubecon-cortex-201]" ([video][kubecon-cortex-201-video], [slides][kubecon-cortex-201-slides], [write up][kubecon-cortex-201-writeup]) and "[Blazin' Fast PromQL][kubecon-blazin]" ([slides][kubecon-blazin-slides], [video][kubecon-blazin-video], [write up][kubecon-blazin-writeup]) - Nov 2019 PromCon talk "[Two Households, Both Alike in Dignity: Cortex and Thanos][promcon-two-households]" ([video][promcon-two-households-video], [slides][promcon-two-households-slides], [write up][promcon-two-households-writeup]) - May 2019 KubeCon talks; "[Cortex: Intro][kubecon-cortex-intro]" ([video][kubecon-cortex-intro-video], [slides][kubecon-cortex-intro-slides], [blog post][kubecon-cortex-intro-blog]) and "[Cortex: Deep Dive][kubecon-cortex-deepdive]" ([video][kubecon-cortex-deepdive-video], [slides][kubecon-cortex-deepdive-slides]) - Nov 2018 CloudNative London meetup talk; "Cortex: Horizontally Scalable, Highly Available Prometheus" ([slides][cloudnative-london-2018-slides]) - Aug 2018 PromCon panel; "[Prometheus Long-Term Storage Approaches][promcon-2018-panel]" ([video][promcon-2018-video]) - Dec 2018 KubeCon talk; "[Cortex: Infinitely Scalable Prometheus][kubecon-2018-talk]" ([video][kubecon-2018-video], [slides][kubecon-2018-slides]) -- Aug 2017 PromCon talk; "[Cortex: Prometheus as a Service, One Year On][promcon-2017-talk]" ([videos][promcon-2017-video], [slides][promcon-2017-slides], write up [part 1][promcon-2017-writeup-1], [part 2][promcon-2017-writeup-2], [part 3][promcon-2017-writeup-3]) +- Aug 2017 PromCon talk; "[Cortex: Prometheus as a Service, One Year On][promcon-2017-talk]" ([video][promcon-2017-video], [slides][promcon-2017-slides], write up [part 1][promcon-2017-writeup-1], [part 2][promcon-2017-writeup-2], [part 3][promcon-2017-writeup-3]) - Jun 2017 Prometheus London meetup talk; "Cortex: open-source, horizontally-scalable, distributed Prometheus" ([video][prometheus-london-2017-video]) - Dec 2016 KubeCon talk; "Weave Cortex: Multi-tenant, horizontally scalable Prometheus as a Service" ([video][kubecon-2016-video], [slides][kubecon-2016-slides]) - Aug 2016 PromCon talk; "Project Frankenstein: Multitenant, Scale-Out Prometheus": ([video][promcon-2016-video], [slides][promcon-2016-slides]) @@ -90,10 +90,10 @@ Join us in shaping the future of Cortex, and let's build something amazing toget ### Blog Posts - Dec 2020 blog post "[How AWS and Grafana 
Labs are scaling Cortex for the cloud](https://aws.amazon.com/blogs/opensource/how-aws-and-grafana-labs-are-scaling-cortex-for-the-cloud/)" -- Oct 2020 blog post "[How to switch Cortex from chunks to blocks storage (and why you won’t look back)](https://grafana.com/blog/2020/10/19/how-to-switch-cortex-from-chunks-to-blocks-storage-and-why-you-wont-look-back/)" +- Oct 2020 blog post "[How to switch Cortex from chunks to blocks storage (and why you won't look back)](https://grafana.com/blog/2020/10/19/how-to-switch-cortex-from-chunks-to-blocks-storage-and-why-you-wont-look-back/)" - Oct 2020 blog post "[Now GA: Cortex blocks storage for running Prometheus at scale with reduced operational complexity](https://grafana.com/blog/2020/10/06/now-ga-cortex-blocks-storage-for-running-prometheus-at-scale-with-reduced-operational-complexity/)" - Sep 2020 blog post "[A Tale of Tail Latencies](https://www.weave.works/blog/a-tale-of-tail-latencies)" -- Aug 2020 blog post "[Scaling Prometheus: How we’re pushing Cortex blocks storage to its limit and beyond](https://grafana.com/blog/2020/08/12/scaling-prometheus-how-were-pushing-cortex-blocks-storage-to-its-limit-and-beyond/)" +- Aug 2020 blog post "[Scaling Prometheus: How we're pushing Cortex blocks storage to its limit and beyond](https://grafana.com/blog/2020/08/12/scaling-prometheus-how-were-pushing-cortex-blocks-storage-to-its-limit-and-beyond/)" - Jul 2020 blog post "[How blocks storage in Cortex reduces operational complexity for running Prometheus at massive scale](https://grafana.com/blog/2020/07/29/how-blocks-storage-in-cortex-reduces-operational-complexity-for-running-prometheus-at-massive-scale/)" - Mar 2020 blog post "[Cortex: Zone Aware Replication](https://kenhaines.net/cortex-zone-aware-replication/)" - Mar 2020 blog post "[How we're using gossip to improve Cortex and Loki availability](https://grafana.com/blog/2020/03/25/how-were-using-gossip-to-improve-cortex-and-loki-availability/)" @@ -157,7 +157,7 @@ Join us in shaping the future of Cortex, and let's build something amazing toget ### Amazon Managed Service for Prometheus (AMP) -[Amazon Managed Service for Prometheus (AMP)](https://aws.amazon.com/prometheus/) is a Prometheus-compatible monitoring service that makes it easy to monitor containerized applications at scale. It is a highly available, secure, and managed monitoring for your containers. Get started [here](https://console.aws.amazon.com/prometheus/home). To learn more about the AMP, reference our [documentation](https://docs.aws.amazon.com/prometheus/latest/userguide/what-is-Amazon-Managed-Service-Prometheus.html) and [Getting Started with AMP blog](https://aws.amazon.com/blogs/mt/getting-started-amazon-managed-service-for-prometheus/). +[Amazon Managed Service for Prometheus (AMP)](https://aws.amazon.com/prometheus/) is a Prometheus-compatible monitoring service that makes it easy to monitor containerized applications at scale. It is a highly available, secure, and managed monitoring service for your containers. Get started [here](https://console.aws.amazon.com/prometheus/home). To learn more about AMP, reference our [documentation](https://docs.aws.amazon.com/prometheus/latest/userguide/what-is-Amazon-Managed-Service-Prometheus.html) and [Getting Started with AMP blog](https://aws.amazon.com/blogs/mt/getting-started-amazon-managed-service-for-prometheus/). 
## Emeritus Maintainers diff --git a/build-image/Dockerfile b/build-image/Dockerfile index 4952d308fbd..2aa5ae80cc2 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.24.3-bullseye +FROM golang:1.24.6-bullseye ARG goproxyValue ENV GOPROXY=${goproxyValue} RUN apt-get update && apt-get install -y curl file gettext jq unzip protobuf-compiler libprotobuf-dev && \ diff --git a/docs/api/_index.md b/docs/api/_index.md index 64a6aab3f0c..73462ec2585 100644 --- a/docs/api/_index.md +++ b/docs/api/_index.md @@ -37,7 +37,8 @@ For the sake of clarity, in this document we have grouped API endpoints by servi | [Instant query](#instant-query) | Querier, Query-frontend || `GET,POST /api/v1/query` | | [Range query](#range-query) | Querier, Query-frontend || `GET,POST /api/v1/query_range` | | [Exemplar query](#exemplar-query) | Querier, Query-frontend || `GET,POST /api/v1/query_exemplars` | -| [Format query](#format-query) | Querier, Query-frontend || `GET,POST /api/v1/format-query` | +| [Format query](#format-query) | Querier, Query-frontend || `GET,POST /api/v1/format_query` | +| [Parse query](#parse-query) | Querier, Query-frontend || `GET,POST /api/v1/parse_query` | | [Get series by label matchers](#get-series-by-label-matchers) | Querier, Query-frontend || `GET,POST /api/v1/series` | | [Get label names](#get-label-names) | Querier, Query-frontend || `GET,POST /api/v1/labels` | | [Get label values](#get-label-values) | Querier, Query-frontend || `GET /api/v1/label/{name}/values` | @@ -384,6 +385,21 @@ _For more information, please check out the Prometheus [fomatting query expressi _Requires [authentication](#authentication)._ +### Parse query + +``` +GET,POST /api/v1/parse_query + +# Legacy +GET,POST /api/v1/parse_query +``` + +Prometheus-compatible parse query endpoint. This endpoint is **experimental**; it parses a PromQL expression and returns it as a JSON-formatted AST (abstract syntax tree) representation. + +_For more information, please check out the Prometheus [Parsing query expressions](https://prometheus.io/docs/prometheus/latest/querying/api/#parsing-a-promql-expressions-into-a-abstract-syntax-tree-ast) documentation._ + +_Requires [authentication](#authentication)._ + ### Get series by label matchers ``` diff --git a/docs/architecture.md b/docs/architecture.md index bbb2ed7ae08..b532d83239a 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -21,9 +21,9 @@ Incoming samples (writes from Prometheus) are handled by the [distributor](#dist ## Blocks storage -The blocks storage is based on [Prometheus TSDB](https://prometheus.io/docs/prometheus/latest/storage/): it stores each tenant's time series into their own TSDB which write out their series to a on-disk Block (defaults to 2h block range periods). Each Block is composed by a few files storing the chunks and the block index. +The blocks storage is based on [Prometheus TSDB](https://prometheus.io/docs/prometheus/latest/storage/): it stores each tenant's time series into their own TSDB which writes out their series to an on-disk Block (defaults to 2h block range periods). Each Block is composed of a few files storing the chunks and the block index. -The TSDB chunk files contain the samples for multiple series. The series inside the Chunks are then indexed by a per-block index, which indexes metric names and labels to time series in the chunk files. +The TSDB chunk files contain the samples for multiple series.
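For illustration, the new `/api/v1/parse_query` endpoint documented above can be exercised with any HTTP client. A minimal sketch — the listen address and tenant ID below are assumptions for a local test setup, not values taken from this change; `X-Scope-OrgID` is the usual Cortex tenant header:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Assumed local Cortex address; adjust for your deployment.
	endpoint := "http://localhost:9009/api/v1/parse_query"
	params := url.Values{"query": {`rate(http_requests_total[5m])`}}

	req, err := http.NewRequest(http.MethodGet, endpoint+"?"+params.Encode(), nil)
	if err != nil {
		panic(err)
	}
	// Cortex resolves the tenant from this header when authentication is enabled.
	req.Header.Set("X-Scope-OrgID", "tenant-1")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON envelope with the parsed AST under "data"
}
```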
The series inside the chunks are then indexed by a per-block index, which indexes metric names and labels to time series in the chunk files. The blocks storage doesn't require a dedicated storage backend for the index. The only requirement is an object store for the Block files, which can be: @@ -60,7 +60,7 @@ The **distributor** service is responsible for handling incoming samples from Pr The validation done by the distributor includes: -- The metric labels name are formally correct +- The metric label names are formally correct - The configured max number of labels per metric is respected - The configured max length of a label name and value is respected - The timestamp is not older/newer than the configured min/max time range @@ -80,7 +80,7 @@ The supported KV stores for the HA tracker are: * [Consul](https://www.consul.io) * [Etcd](https://etcd.io) -Note: Memberlist is not supported. Memberlist-based KV store propagates updates using gossip, which is very slow for HA purposes: result is that different distributors may see different Prometheus server as elected HA replica, which is definitely not desirable. +Note: Memberlist is not supported. Memberlist-based KV store propagates updates using gossip, which is very slow for HA purposes: the result is that different distributors may see different Prometheus servers as the elected HA replica, which is definitely not desirable. For more information, please refer to [config for sending HA pairs data to Cortex](guides/ha-pair-handling.md) in the documentation. @@ -97,11 +97,11 @@ The trade-off associated with the latter is that writes are more balanced across #### The hash ring -A hash ring (stored in a key-value store) is used to achieve consistent hashing for the series sharding and replication across the ingesters. All [ingesters](#ingester) register themselves into the hash ring with a set of tokens they own; each token is a random unsigned 32-bit number. Each incoming series is [hashed](#hashing) in the distributor and then pushed to the ingester owning the tokens range for the series hash number plus N-1 subsequent ingesters in the ring, where N is the replication factor. +A hash ring (stored in a key-value store) is used to achieve consistent hashing for the series sharding and replication across the ingesters. All [ingesters](#ingester) register themselves into the hash ring with a set of tokens they own; each token is a random unsigned 32-bit number. Each incoming series is [hashed](#hashing) in the distributor and then pushed to the ingester owning the token's range for the series hash number plus N-1 subsequent ingesters in the ring, where N is the replication factor. To do the hash lookup, distributors find the smallest appropriate token whose value is larger than the [hash of the series](#hashing). When the replication factor is larger than 1, the next subsequent tokens (clockwise in the ring) that belong to different ingesters will also be included in the result. -The effect of this hash set up is that each token that an ingester owns is responsible for a range of hashes. If there are three tokens with values 0, 25, and 50, then a hash of 3 would be given to the ingester that owns the token 25; the ingester owning token 25 is responsible for the hash range of 1-25. +The effect of this hash setup is that each token that an ingester owns is responsible for a range of hashes. 
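A toy sketch of this lookup — illustrative only, not Cortex's ring implementation — which the worked example below can be traced through:

```go
package main

import (
	"fmt"
	"sort"
)

// findOwner returns the owning token for a series hash: the smallest token
// strictly larger than the hash, wrapping around the ring if none is larger.
func findOwner(hash uint32, tokens []uint32) uint32 {
	sort.Slice(tokens, func(i, j int) bool { return tokens[i] < tokens[j] })
	for _, t := range tokens {
		if t > hash {
			return t
		}
	}
	return tokens[0] // wrap around the ring
}

func main() {
	tokens := []uint32{0, 25, 50}
	fmt.Println(findOwner(3, tokens)) // 25: the owner of token 25 covers hashes 1-25
}
```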
If there are three tokens with values 0, 25, and 50, then a hash of 3 would be given to the ingester that owns token 25; the ingester owning token 25 is responsible for the hash range of 1-25. The supported KV stores for the hash ring are: @@ -111,7 +111,7 @@ The supported KV stores for the hash ring are: #### Quorum consistency -Since all distributors share access to the same hash ring, write requests can be sent to any distributor and you can setup a stateless load balancer in front of it. +Since all distributors share access to the same hash ring, write requests can be sent to any distributor and you can set up a stateless load balancer in front of it. To ensure consistent query results, Cortex uses [Dynamo-style](https://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf) quorum consistency on reads and writes. This means that the distributor will wait for a positive response of at least one half plus one of the ingesters to send the sample to before successfully responding to the Prometheus write request. @@ -125,35 +125,35 @@ The **ingester** service is responsible for writing incoming series to a [long-t Incoming series are not immediately written to the storage but kept in memory and periodically flushed to the storage (by default, 2 hours). For this reason, the [queriers](#querier) may need to fetch samples both from ingesters and long-term storage while executing a query on the read path. -Ingesters contain a **lifecycler** which manages the lifecycle of an ingester and stores the **ingester state** in the [hash ring](#the-hash-ring). Each ingester could be in one of the following states: +Ingesters contain a **lifecycler** which manages the lifecycle of an ingester and stores the **ingester state** in the [hash ring](#the-hash-ring). Each ingester can be in one of the following states: - **`PENDING`**
- The ingester has just started. While in this state, the ingester doesn't receive neither write and read requests. + The ingester has just started. While in this state, the ingester doesn't receive either write or read requests. - **`JOINING`**
- The ingester is starting up and joining the ring. While in this state the ingester doesn't receive neither write and read requests. The ingester will join the ring using tokens loaded from disk (if `-ingester.tokens-file-path` is configured) or generate a set of new random ones. Finally, the ingester optionally observes the ring for tokens conflicts and then, once any conflict is resolved, will move to `ACTIVE` state. + The ingester is starting up and joining the ring. While in this state the ingester doesn't receive either write or read requests. The ingester will join the ring using tokens loaded from disk (if `-ingester.tokens-file-path` is configured) or generate a set of new random ones. Finally, the ingester optionally observes the ring for token conflicts and then, once any conflict is resolved, will move to `ACTIVE` state. - **`ACTIVE`**
The ingester is up and running. While in this state the ingester can receive both write and read requests. - **`LEAVING`**
- The ingester is shutting down and leaving the ring. While in this state the ingester doesn't receive write requests, while it could receive read requests. + The ingester is shutting down and leaving the ring. While in this state the ingester doesn't receive write requests, while it can still receive read requests. - **`UNHEALTHY`**
The ingester has failed to heartbeat to the ring's KV Store. While in this state, distributors skip the ingester while building the replication set for incoming series and the ingester does not receive write or read requests. Ingesters are **semi-stateful**. -#### Ingesters failure and data loss +#### Ingester failure and data loss If an ingester process crashes or exits abruptly, all the in-memory series that have not yet been flushed to the long-term storage will be lost. There are two main ways to mitigate this failure mode: 1. Replication 2. Write-ahead log (WAL) -The **replication** is used to hold multiple (typically 3) replicas of each time series in the ingesters. If the Cortex cluster loses an ingester, the in-memory series held by the lost ingester are also replicated to at least another ingester. In the event of a single ingester failure, no time series samples will be lost. However, in the event of multiple ingester failures, time series may be potentially lost if the failures affect all the ingesters holding the replicas of a specific time series. +The **replication** is used to hold multiple (typically 3) replicas of each time series in the ingesters. If the Cortex cluster loses an ingester, the in-memory series held by the lost ingester are also replicated to at least one other ingester. In the event of a single ingester failure, no time series samples will be lost. However, in the event of multiple ingester failures, time series may be potentially lost if the failures affect all the ingesters holding the replicas of a specific time series. The **write-ahead log** (WAL) is used to write to a persistent disk all incoming series samples until they're flushed to the long-term storage. In the event of an ingester failure, a subsequent process restart will replay the WAL and recover the in-memory series samples. -Contrary to the sole replication and given the persistent disk data is not lost, in the event of multiple ingesters failure each ingester will recover the in-memory series samples from WAL upon subsequent restart. The replication is still recommended in order to ensure no temporary failures on the read path in the event of a single ingester failure. +Unlike replication alone, and given that the persistent disk data is not lost, in the event of multiple ingester failures each ingester will recover the in-memory series samples from the WAL upon subsequent restart. Replication is still recommended in order to ensure no temporary failures on the read path in the event of a single ingester failure. -#### Ingesters write de-amplification +#### Ingester write de-amplification Ingesters store recently received samples in-memory in order to perform write de-amplification. If the ingesters would immediately write received samples to the long-term storage, the system would be very difficult to scale due to the very high pressure on the storage. For this reason, the ingesters batch and compress samples in-memory and periodically flush them out to the storage. @@ -169,10 +169,10 @@ Queriers are **stateless** and can be scaled up and down as needed. ### Compactor -The **compactor** is a service which is responsible to: +The **compactor** is a service which is responsible for: -- Compact multiple blocks of a given tenant into a single optimized larger block. This helps to reduce storage costs (deduplication, index size reduction), and increase query speed (querying fewer blocks is faster). -- Keep the per-tenant bucket index updated.
The [bucket index](./blocks-storage/bucket-index.md) is used by [queriers](./blocks-storage/querier.md), [store-gateways](#store-gateway) and rulers to discover new blocks in the storage. +- Compacting multiple blocks of a given tenant into a single optimized larger block. This helps to reduce storage costs (deduplication, index size reduction), and increase query speed (querying fewer blocks is faster). +- Keeping the per-tenant bucket index updated. The [bucket index](./blocks-storage/bucket-index.md) is used by [queriers](./blocks-storage/querier.md), [store-gateways](#store-gateway) and rulers to discover new blocks in the storage. For more information, see the [compactor documentation](./blocks-storage/compactor.md). @@ -190,7 +190,7 @@ The store gateway is **semi-stateful**. ### Query frontend -The **query frontend** is an **optional service** providing the querier's API endpoints and can be used to accelerate the read path. When the query frontend is in place, incoming query requests should be directed to the query frontend instead of the queriers. The querier service will be still required within the cluster, in order to execute the actual queries. +The **query frontend** is an **optional service** providing the querier's API endpoints and can be used to accelerate the read path. When the query frontend is in place, incoming query requests should be directed to the query frontend instead of the queriers. The querier service will still be required within the cluster, in order to execute the actual queries. The query frontend internally performs some query adjustments and holds queries in an internal queue. In this setup, queriers act as workers which pull jobs from the queue, execute them, and return them to the query-frontend for aggregation. Queriers need to be configured with the query frontend address (via the `-querier.frontend-address` CLI flag) in order to allow them to connect to the query frontends. @@ -199,15 +199,15 @@ Query frontends are **stateless**. However, due to how the internal queue works, Flow of the query in the system when using query-frontend: 1) Query is received by query frontend, which can optionally split it or serve from the cache. -2) Query frontend stores the query into in-memory queue, where it waits for some querier to pick it up. +2) Query frontend stores the query into an in-memory queue, where it waits for some querier to pick it up. 3) Querier picks up the query, and executes it. 4) Querier sends result back to query-frontend, which then forwards it to the client. -Query frontend can also be used with any Prometheus-API compatible service. In this mode Cortex can be used as an query accelerator with it's caching and splitting features on other prometheus query engines like Thanos Querier or your own Prometheus server. Query frontend needs to be configured with downstream url address(via the `-frontend.downstream-url` CLI flag), which is the endpoint of the prometheus server intended to be connected with Cortex. +Query frontend can also be used with any Prometheus-API compatible service. In this mode Cortex can be used as a query accelerator with its caching and splitting features on other Prometheus query engines such as Thanos Querier or your own Prometheus server. The query frontend needs to be configured with the downstream URL (via the `-frontend.downstream-url` CLI flag), which is the endpoint of the Prometheus server intended to be connected with Cortex.
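The frontend/querier split in the flow above is essentially a work queue with pulling workers. A conceptual sketch — not Cortex's actual implementation — of that shape:

```go
package main

import "fmt"

// result pairs a query with the (stubbed) data a querier produced for it.
type result struct {
	query string
	data  string
}

// querierWorker mimics a querier pulling jobs from the frontend's queue,
// executing them, and sending results back for the frontend to forward.
func querierWorker(id int, queue <-chan string, results chan<- result) {
	for q := range queue {
		results <- result{query: q, data: fmt.Sprintf("result of %q from querier %d", q, id)}
	}
}

func main() {
	queue := make(chan string, 8) // the frontend's in-memory FIFO queue
	results := make(chan result, 8)

	for i := 0; i < 2; i++ { // queriers act as workers pulling from the queue
		go querierWorker(i, queue, results)
	}

	queue <- `rate(http_requests_total[5m])`
	close(queue)
	fmt.Println((<-results).data)
}
```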
#### Queueing -The query frontend queuing mechanism is used to: +The query frontend queueing mechanism is used to: * Ensure that large queries, that could cause an out-of-memory (OOM) error in the querier, will be retried on failure. This allows administrators to under-provision memory for queries, or optimistically run more small queries in parallel, which helps to reduce the total cost of ownership (TCO). * Prevent multiple large requests from being convoyed on a single querier by distributing them across all queriers using a first-in/first-out queue (FIFO). @@ -223,7 +223,7 @@ The query frontend supports caching query results and reuses them on subsequent ### Query Scheduler -Query Scheduler is an **optional** service that moves the internal queue from query frontend into separate component. +Query Scheduler is an **optional** service that moves the internal queue from query frontend into a separate component. This enables independent scaling of query frontends and number of queues (query scheduler). In order to use query scheduler, both query frontend and queriers must be configured with query scheduler address @@ -232,10 +232,10 @@ In order to use query scheduler, both query frontend and queriers must be config Flow of the query in the system changes when using query scheduler: 1) Query is received by query frontend, which can optionally split it or serve from the cache. -2) Query frontend forwards the query to random query scheduler process. -3) Query scheduler stores the query into in-memory queue, where it waits for some querier to pick it up. -3) Querier picks up the query, and executes it. -4) Querier sends result back to query-frontend, which then forwards it to the client. +2) Query frontend forwards the query to a random query scheduler process. +3) Query scheduler stores the query into an in-memory queue, where it waits for some querier to pick it up. +4) Querier picks up the query, and executes it. +5) Querier sends result back to query-frontend, which then forwards it to the client. Query schedulers are **stateless**. It is recommended to run two replicas to make sure queries can still be serviced while one replica is restarting. @@ -263,7 +263,7 @@ If all of the alertmanager nodes failed simultaneously there would be a loss of ### Configs API The **configs API** is an **optional service** managing the configuration of Rulers and Alertmanagers. -It provides APIs to get/set/update the ruler and alertmanager configurations and store them into backend. -Current supported backend are PostgreSQL and in-memory. +It provides APIs to get/set/update the ruler and alertmanager configurations and store them in the backend. +Current supported backends are PostgreSQL and in-memory. Configs API is **stateless**. diff --git a/docs/blocks-storage/compactor.md b/docs/blocks-storage/compactor.md index dc7daeb8a91..fc0ab4ba11d 100644 --- a/docs/blocks-storage/compactor.md +++ b/docs/blocks-storage/compactor.md @@ -268,6 +268,12 @@ compactor: # CLI flag: -compactor.auto-forget-delay [auto_forget_delay: | default = 2m] + # Set to true to enable ring detailed metrics. These metrics provide + # detailed information, such as token count and ownership per tenant. + # Disabling them can significantly decrease the number of metrics emitted. + # CLI flag: -compactor.ring.detailed-metrics-enabled + [detailed_metrics_enabled: | default = true] + # Minimum time to wait for ring stability at startup. 0 to disable. 
# CLI flag: -compactor.ring.wait-stability-min-duration [wait_stability_min_duration: | default = 1m] diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index 04d74307420..855ff5c9028 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -127,7 +127,7 @@ querier: [per_step_stats_enabled: | default = false] # Use compression for metrics query API or instant and range query APIs. - # Supports 'gzip' and '' (disable compression) + # Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression) # CLI flag: -querier.response-compression [response_compression: | default = "gzip"] @@ -278,6 +278,30 @@ querier: # [Experimental] If true, experimental promQL functions are enabled. # CLI flag: -querier.enable-promql-experimental-functions [enable_promql_experimental_functions: | default = false] + + # [Experimental] If true, querier will try to query the parquet files if + # available. + # CLI flag: -querier.enable-parquet-queryable + [enable_parquet_queryable: | default = false] + + # [Experimental] Maximum size of the Parquet queryable shard cache. 0 to + # disable. + # CLI flag: -querier.parquet-queryable-shard-cache-size + [parquet_queryable_shard_cache_size: | default = 512] + + # [Experimental] Parquet queryable's default block store to query. Valid + # options are tsdb and parquet. If it is set to tsdb, parquet queryable always + # fallback to store gateway. + # CLI flag: -querier.parquet-queryable-default-block-store + [parquet_queryable_default_block_store: | default = "parquet"] + + # [Experimental] Disable Parquet queryable to fallback queries to Store + # Gateway if the block is not available as Parquet files but available in + # TSDB. Setting this to true will disable the fallback and users can remove + # Store Gateway. But need to make sure Parquet files are created before it is + # queryable. + # CLI flag: -querier.parquet-queryable-fallback-disabled + [parquet_queryable_fallback_disabled: | default = false] ``` ### `blocks_storage_config` @@ -1394,6 +1418,255 @@ blocks_storage: # CLI flag: -blocks-storage.bucket-store.metadata-cache.partitioned-groups-list-ttl [partitioned_groups_list_ttl: | default = 0s] + parquet_labels_cache: + # The parquet labels cache backend type. Single or Multiple cache backend + # can be provided. Supported values in single cache: memcached, redis, + # inmemory, and '' (disable). Supported values in multi level cache: a + # comma-separated list of (inmemory, memcached, redis) + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.backend + [backend: | default = ""] + + inmemory: + # Maximum size in bytes of in-memory parquet-labels cache used (shared + # between all tenants). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.inmemory.max-size-bytes + [max_size_bytes: | default = 1073741824] + + memcached: + # Comma separated list of memcached addresses. Supported prefixes are: + # dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV + # query, dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup + # made after that). + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.addresses + [addresses: | default = ""] + + # The socket read/write timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.timeout + [timeout: | default = 100ms] + + # The maximum number of idle connections that will be maintained per + # address. 
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-idle-connections + [max_idle_connections: | default = 16] + + # The maximum number of concurrent asynchronous operations can occur. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-concurrency + [max_async_concurrency: | default = 3] + + # The maximum number of enqueued asynchronous operations allowed. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # The maximum number of concurrent connections running get operations. + # If set to 0, concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum number of keys a single underlying get operation should + # run. If more keys are specified, internally keys are split into + # multiple batches and fetched concurrently, honoring the max + # concurrency. If set to 0, the max batch size is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-batch-size + [max_get_multi_batch_size: | default = 0] + + # The maximum size of an item stored in memcached. Bigger items are not + # stored. If set to 0, no maximum size is enforced. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-item-size + [max_item_size: | default = 1048576] + + # Use memcached auto-discovery mechanism provided by some cloud provider + # like GCP and AWS + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.auto-discovery + [auto_discovery: | default = false] + + set_async_circuit_breaker_config: + # If true, enable circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.enabled + [enabled: | default = false] + + # Maximum number of requests allowed to pass through when the circuit + # breaker is half-open. If set to 0, by default it allows 1 request. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.half-open-max-requests + [half_open_max_requests: | default = 10] + + # Period of the open state after which the state of the circuit + # breaker becomes half-open. If set to 0, by default open duration is + # 60 seconds. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.open-duration + [open_duration: | default = 5s] + + # Minimal requests to trigger the circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.min-requests + [min_requests: | default = 50] + + # Consecutive failures to determine if the circuit breaker should + # open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.consecutive-failures + [consecutive_failures: | default = 5] + + # Failure percentage to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.failure-percent + [failure_percent: | default = 0.05] + + redis: + # Comma separated list of redis addresses. Supported prefixes are: dns+ + # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, + # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after + # that). 
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.addresses + [addresses: | default = ""] + + # Redis username. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.username + [username: | default = ""] + + # Redis password. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.password + [password: | default = ""] + + # Database to be selected after connecting to the server. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.db + [db: | default = 0] + + # Specifies the master's name. Must be not empty for Redis Sentinel. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.master-name + [master_name: | default = ""] + + # The maximum number of concurrent GetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-get-multi-concurrency + [max_get_multi_concurrency: | default = 100] + + # The maximum size per batch for mget. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.get-multi-batch-size + [get_multi_batch_size: | default = 100] + + # The maximum number of concurrent SetMulti() operations. If set to 0, + # concurrency is unlimited. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-set-multi-concurrency + [max_set_multi_concurrency: | default = 100] + + # The maximum size per batch for pipeline set. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-multi-batch-size + [set_multi_batch_size: | default = 100] + + # The maximum number of concurrent asynchronous operations can occur. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-concurrency + [max_async_concurrency: | default = 3] + + # The maximum number of enqueued asynchronous operations allowed. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # Client dial timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.dial-timeout + [dial_timeout: | default = 5s] + + # Client read timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.read-timeout + [read_timeout: | default = 3s] + + # Client write timeout. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.write-timeout + [write_timeout: | default = 3s] + + # Whether to enable tls for redis connection. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-enabled + [tls_enabled: | default = false] + + # Path to the client certificate file, which will be used for + # authenticating with the server. Also requires the key path to be + # configured. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-cert-path + [tls_cert_path: | default = ""] + + # Path to the key file for the client certificate. Also requires the + # client certificate to be configured. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-key-path + [tls_key_path: | default = ""] + + # Path to the CA certificates file to validate server certificate + # against. If not set, the host's root CA certificates are used. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-ca-path + [tls_ca_path: | default = ""] + + # Override the expected name on the server certificate. 
+ # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-server-name + [tls_server_name: | default = ""] + + # Skip validating server certificate. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-insecure-skip-verify + [tls_insecure_skip_verify: | default = false] + + # If not zero then client-side caching is enabled. Client-side caching + # is when data is stored in memory instead of fetching data each time. + # See https://redis.io/docs/manual/client-side-caching/ for more info. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.cache-size + [cache_size: | default = 0] + + set_async_circuit_breaker_config: + # If true, enable circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.enabled + [enabled: | default = false] + + # Maximum number of requests allowed to pass through when the circuit + # breaker is half-open. If set to 0, by default it allows 1 request. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.half-open-max-requests + [half_open_max_requests: | default = 10] + + # Period of the open state after which the state of the circuit + # breaker becomes half-open. If set to 0, by default open duration is + # 60 seconds. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.open-duration + [open_duration: | default = 5s] + + # Minimal requests to trigger the circuit breaker. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.min-requests + [min_requests: | default = 50] + + # Consecutive failures to determine if the circuit breaker should + # open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.consecutive-failures + [consecutive_failures: | default = 5] + + # Failure percentage to determine if the circuit breaker should open. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.failure-percent + [failure_percent: | default = 0.05] + + multilevel: + # The maximum number of concurrent asynchronous operations can occur + # when backfilling cache items. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-concurrency + [max_async_concurrency: | default = 3] + + # The maximum number of enqueued asynchronous operations allowed when + # backfilling cache items. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-buffer-size + [max_async_buffer_size: | default = 10000] + + # The maximum number of items to backfill per asynchronous operation. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-backfill-items + [max_backfill_items: | default = 10000] + + # Size of each subrange that bucket object is split into for better + # caching. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-size + [subrange_size: | default = 16000] + + # Maximum number of sub-GetRange requests that a single GetRange request + # can be split into when fetching parquet labels file. Zero or negative + # value = unlimited number of sub-requests. + # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.max-get-range-requests + [max_get_range_requests: | default = 3] + + # TTL for caching object attributes for parquet labels file. 
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.attributes-ttl
+      [attributes_ttl: <duration> | default = 168h]
+
+      # TTL for caching individual subranges.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-ttl
+      [subrange_ttl: <duration> | default = 24h]
+
    # Maximum number of entries in the regex matchers cache. 0 to disable.
    # CLI flag: -blocks-storage.bucket-store.matchers-cache-max-items
    [matchers_cache_max_items: <int> | default = 0]
diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md
index ee2d307d3d4..f3ec87e1b53 100644
--- a/docs/blocks-storage/store-gateway.md
+++ b/docs/blocks-storage/store-gateway.md
@@ -303,6 +303,12 @@ store_gateway:
    # CLI flag: -store-gateway.sharding-ring.keep-instance-in-the-ring-on-shutdown
    [keep_instance_in_the_ring_on_shutdown: <boolean> | default = false]
+
+    # Set to true to enable ring detailed metrics. These metrics provide
+    # detailed information, such as token count and ownership per tenant.
+    # Disabling them can significantly decrease the number of metrics emitted.
+    # CLI flag: -store-gateway.sharding-ring.detailed-metrics-enabled
+    [detailed_metrics_enabled: <boolean> | default = true]
+
    # Minimum time to wait for ring stability at startup. 0 to disable.
    # CLI flag: -store-gateway.sharding-ring.wait-stability-min-duration
    [wait_stability_min_duration: <duration> | default = 1m]
@@ -1504,6 +1510,255 @@ blocks_storage:
      # CLI flag: -blocks-storage.bucket-store.metadata-cache.partitioned-groups-list-ttl
      [partitioned_groups_list_ttl: <duration> | default = 0s]
+
+    parquet_labels_cache:
+      # The parquet labels cache backend type. A single cache backend or
+      # multiple cache backends can be provided. Supported values in single
+      # cache: memcached, redis, inmemory, and '' (disable). Supported values
+      # in multi-level cache: a comma-separated list of (inmemory, memcached,
+      # redis)
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.backend
+      [backend: <string> | default = ""]
+
+      inmemory:
+        # Maximum size in bytes of in-memory parquet-labels cache used (shared
+        # between all tenants).
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.inmemory.max-size-bytes
+        [max_size_bytes: <int> | default = 1073741824]
+
+      memcached:
+        # Comma separated list of memcached addresses. Supported prefixes are:
+        # dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV
+        # query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup
+        # made after that).
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.addresses
+        [addresses: <string> | default = ""]
+
+        # The socket read/write timeout.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.timeout
+        [timeout: <duration> | default = 100ms]
+
+        # The maximum number of idle connections that will be maintained per
+        # address.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-idle-connections
+        [max_idle_connections: <int> | default = 16]
+
+        # The maximum number of concurrent asynchronous operations that can
+        # occur.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-concurrency
+        [max_async_concurrency: <int> | default = 3]
+
+        # The maximum number of enqueued asynchronous operations allowed.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-buffer-size
+        [max_async_buffer_size: <int> | default = 10000]
+
+        # The maximum number of concurrent connections running get operations.
+        # If set to 0, concurrency is unlimited.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-concurrency
+        [max_get_multi_concurrency: <int> | default = 100]
+
+        # The maximum number of keys a single underlying get operation should
+        # run. If more keys are specified, internally keys are split into
+        # multiple batches and fetched concurrently, honoring the max
+        # concurrency. If set to 0, the max batch size is unlimited.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-batch-size
+        [max_get_multi_batch_size: <int> | default = 0]
+
+        # The maximum size of an item stored in memcached. Bigger items are not
+        # stored. If set to 0, no maximum size is enforced.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-item-size
+        [max_item_size: <int> | default = 1048576]
+
+        # Use the memcached auto-discovery mechanism provided by some cloud
+        # providers like GCP and AWS.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.auto-discovery
+        [auto_discovery: <boolean> | default = false]
+
+        set_async_circuit_breaker_config:
+          # If true, enable circuit breaker.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.enabled
+          [enabled: <boolean> | default = false]
+
+          # Maximum number of requests allowed to pass through when the circuit
+          # breaker is half-open. If set to 0, by default it allows 1 request.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.half-open-max-requests
+          [half_open_max_requests: <int> | default = 10]
+
+          # Period of the open state after which the state of the circuit
+          # breaker becomes half-open. If set to 0, by default open duration is
+          # 60 seconds.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.open-duration
+          [open_duration: <duration> | default = 5s]
+
+          # Minimum number of requests to trigger the circuit breaker.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.min-requests
+          [min_requests: <int> | default = 50]
+
+          # Consecutive failures to determine if the circuit breaker should
+          # open.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.consecutive-failures
+          [consecutive_failures: <int> | default = 5]
+
+          # Failure percentage to determine if the circuit breaker should open.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.failure-percent
+          [failure_percent: <float> | default = 0.05]
+
+      redis:
+        # Comma separated list of redis addresses. Supported prefixes are: dns+
+        # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query),
+        # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made
+        # after that).
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.addresses
+        [addresses: <string> | default = ""]
+
+        # Redis username.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.username
+        [username: <string> | default = ""]
+
+        # Redis password.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.password
+        [password: <string> | default = ""]
+
+        # Database to be selected after connecting to the server.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.db
+        [db: <int> | default = 0]
+
+        # Specifies the master's name. Must not be empty for Redis Sentinel.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.master-name
+        [master_name: <string> | default = ""]
+
+        # The maximum number of concurrent GetMulti() operations. If set to 0,
+        # concurrency is unlimited.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-get-multi-concurrency
+        [max_get_multi_concurrency: <int> | default = 100]
+
+        # The maximum size per batch for mget.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.get-multi-batch-size
+        [get_multi_batch_size: <int> | default = 100]
+
+        # The maximum number of concurrent SetMulti() operations. If set to 0,
+        # concurrency is unlimited.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-set-multi-concurrency
+        [max_set_multi_concurrency: <int> | default = 100]
+
+        # The maximum size per batch for pipeline set.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-multi-batch-size
+        [set_multi_batch_size: <int> | default = 100]
+
+        # The maximum number of concurrent asynchronous operations that can
+        # occur.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-concurrency
+        [max_async_concurrency: <int> | default = 3]
+
+        # The maximum number of enqueued asynchronous operations allowed.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-buffer-size
+        [max_async_buffer_size: <int> | default = 10000]
+
+        # Client dial timeout.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.dial-timeout
+        [dial_timeout: <duration> | default = 5s]
+
+        # Client read timeout.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.read-timeout
+        [read_timeout: <duration> | default = 3s]
+
+        # Client write timeout.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.write-timeout
+        [write_timeout: <duration> | default = 3s]
+
+        # Whether to enable TLS for the Redis connection.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-enabled
+        [tls_enabled: <boolean> | default = false]
+
+        # Path to the client certificate file, which will be used for
+        # authenticating with the server. Also requires the key path to be
+        # configured.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-cert-path
+        [tls_cert_path: <string> | default = ""]
+
+        # Path to the key file for the client certificate. Also requires the
+        # client certificate to be configured.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-key-path
+        [tls_key_path: <string> | default = ""]
+
+        # Path to the CA certificates file to validate server certificate
+        # against. If not set, the host's root CA certificates are used.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-ca-path
+        [tls_ca_path: <string> | default = ""]
+
+        # Override the expected name on the server certificate.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-server-name
+        [tls_server_name: <string> | default = ""]
+
+        # Skip validating server certificate.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-insecure-skip-verify
+        [tls_insecure_skip_verify: <boolean> | default = false]
+
+        # If not zero, then client-side caching is enabled. Client-side caching
+        # is when data is stored in memory instead of fetching data each time.
+        # See https://redis.io/docs/manual/client-side-caching/ for more info.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.cache-size
+        [cache_size: <int> | default = 0]
+
+        set_async_circuit_breaker_config:
+          # If true, enable circuit breaker.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.enabled
+          [enabled: <boolean> | default = false]
+
+          # Maximum number of requests allowed to pass through when the circuit
+          # breaker is half-open. If set to 0, by default it allows 1 request.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.half-open-max-requests
+          [half_open_max_requests: <int> | default = 10]
+
+          # Period of the open state after which the state of the circuit
+          # breaker becomes half-open. If set to 0, by default open duration is
+          # 60 seconds.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.open-duration
+          [open_duration: <duration> | default = 5s]
+
+          # Minimum number of requests to trigger the circuit breaker.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.min-requests
+          [min_requests: <int> | default = 50]
+
+          # Consecutive failures to determine if the circuit breaker should
+          # open.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.consecutive-failures
+          [consecutive_failures: <int> | default = 5]
+
+          # Failure percentage to determine if the circuit breaker should open.
+          # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.failure-percent
+          [failure_percent: <float> | default = 0.05]
+
+      multilevel:
+        # The maximum number of concurrent asynchronous operations that can
+        # occur when backfilling cache items.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-concurrency
+        [max_async_concurrency: <int> | default = 3]
+
+        # The maximum number of enqueued asynchronous operations allowed when
+        # backfilling cache items.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-buffer-size
+        [max_async_buffer_size: <int> | default = 10000]
+
+        # The maximum number of items to backfill per asynchronous operation.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-backfill-items
+        [max_backfill_items: <int> | default = 10000]
+
+      # Size of each subrange that the bucket object is split into for better
+      # caching.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-size
+      [subrange_size: <int> | default = 16000]
+
+      # Maximum number of sub-GetRange requests that a single GetRange request
+      # can be split into when fetching parquet labels file. Zero or negative
+      # value = unlimited number of sub-requests.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.max-get-range-requests
+      [max_get_range_requests: <int> | default = 3]
+
+      # TTL for caching object attributes for parquet labels file.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.attributes-ttl
+      [attributes_ttl: <duration> | default = 168h]
+
+      # TTL for caching individual subranges.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-ttl
+      [subrange_ttl: <duration> | default = 24h]
+
    # Maximum number of entries in the regex matchers cache. 0 to disable.
    # CLI flag: -blocks-storage.bucket-store.matchers-cache-max-items
    [matchers_cache_max_items: <int> | default = 0]
diff --git a/docs/configuration/arguments.md b/docs/configuration/arguments.md
index 943d319aee3..a99fe4daced 100644
--- a/docs/configuration/arguments.md
+++ b/docs/configuration/arguments.md
@@ -73,7 +73,7 @@ The next three options only apply when the querier is used together with the Que
- `-frontend.forward-headers-list`
- Request headers forwarded by query frontend to downstream queriers.
- Multiple headers may be specified. Defaults to empty.
+ Request headers forwarded by query frontend to downstream queriers. Multiple headers may be specified. Defaults to empty.

- `-frontend.max-cache-freshness`

@@ -113,7 +113,7 @@ The next three options only apply when the querier is used together with the Que
Enable the distributors HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels). Global (for distributors), this ensures that the necessary internal data structures for the HA handling are created. The option `enable-for-all-users` is still needed to enable ingestion of HA samples for all users.

- `distributor.drop-label`
- This flag can be used to specify label names that to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels.
+ This flag can be used to specify label names to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels.

### Ring/HA Tracker Store

@@ -123,7 +123,7 @@ The KVStore client is used by both the Ring and HA Tracker (HA Tracker doesn't s
- `{ring,distributor.ha-tracker}.store`
  Backend storage to use for the HA Tracker (consul, etcd, inmemory, multi).
- **Warning:** The `inmemory` store will not work correctly with multiple distributors as each distributor can have a different state, causing injestion errors.
+ **Warning:** The `inmemory` store will not work correctly with multiple distributors as each distributor can have a different state, causing ingestion errors.
- `{ring,distributor.ring}.store`
  Backend storage to use for the Ring (consul, etcd, inmemory, memberlist, multi).

@@ -162,8 +162,8 @@ prefix these flags with `distributor.ha-tracker.`
  The trusted CA file path.
- `etcd.tls-insecure-skip-verify`
  Skip validating server certificate.
-- `etcd.ping-without-stream-allowd'`
-  Enable/Disable PermitWithoutStream parameter
+- `etcd.ping-without-stream-allowed`
+  Enable/Disable the PermitWithoutStream parameter.

#### memberlist

All nodes run the following two loops:

1. Every "gossip interval", pick random "gossip nodes" number of nodes, and send recent ring updates to them.
2. Every "push/pull sync interval", choose random single node, and exchange full ring information with it (push/pull sync). After this operation, rings on both nodes are the same.

-When a node receives a ring update, node will merge it into its own ring state, and if that resulted in a change, node will add that update to the list of gossiped updates.
+When a node receives a ring update, the node will merge it into its own ring state, and if that results in a change, the node will add that update to the list of gossiped updates.
Such update will be gossiped `R * log(N+1)` times by this node (R = retransmit multiplication factor, N = number of gossiping nodes in the cluster).

If you find the propagation to be too slow, there are some tuning possibilities (default values are memberlist settings for LAN networks):
@@ -187,14 +187,14 @@ If you find the propagation to be too slow, there are some tuning possibilities
- Decrease push/pull sync interval (default 30s)
- Increase retransmit multiplication factor (default 4)

-To find propagation delay, you can use `cortex_ring_oldest_member_timestamp{state="ACTIVE"}` metric.
+To find propagation delay, you can use the `cortex_ring_oldest_member_timestamp{state="ACTIVE"}` metric.
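+
+For example, a hedged sketch of a `memberlist` configuration block tuned for faster propagation (the YAML field names are assumed to mirror the CLI flags below; the values are illustrative only, not recommendations):
+
+```yaml
+memberlist:
+  # Gossip recent ring updates to more nodes, more often, than the LAN defaults.
+  gossip_interval: 100ms
+  gossip_nodes: 5
+  # Exchange full ring state more frequently (default 30s).
+  push_pull_sync_interval: 15s
+  # Retransmit each update more times (default 4).
+  retransmit_factor: 8
+```
+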
Flags for configuring KV store based on memberlist library:

- `memberlist.nodename`
  Name of the node in memberlist cluster. Defaults to hostname.
- `memberlist.randomize-node-name`
-  This flag adds extra random suffix to the node name used by memberlist. Defaults to true. Using random suffix helps to prevent issues when running multiple memberlist nodes on the same machine, or when node names are reused (eg. in stateful sets).
+  This flag adds an extra random suffix to the node name used by memberlist. Defaults to true. Using a random suffix helps to prevent issues when running multiple memberlist nodes on the same machine, or when node names are reused (e.g. in stateful sets).
- `memberlist.retransmit-factor`
  Multiplication factor used when sending out messages (factor * log(N+1)). If not set, default value is used.
- `memberlist.join`
@@ -228,29 +228,29 @@ Flags for configuring KV store based on memberlist library:
- `memberlist.gossip-to-dead-nodes-time`
  How long to keep gossiping to the nodes that seem to be dead. After this time, dead node is removed from list of nodes. If "dead" node appears again, it will simply join the cluster again, if its name is not reused by other node in the meantime. If the name has been reused, such a reanimated node will be ignored by other members.
- `memberlist.dead-node-reclaim-time`
-  How soon can dead's node name be reused by a new node (using different IP). Disabled by default, name reclaim is not allowed until `gossip-to-dead-nodes-time` expires. This can be useful to set to low numbers when reusing node names, eg. in stateful sets.
-  If memberlist library detects that new node is trying to reuse the name of previous node, it will log message like this: `Conflicting address for ingester-6. Mine: 10.44.12.251:7946 Theirs: 10.44.12.54:7946 Old state: 2`. Node states are: "alive" = 0, "suspect" = 1 (doesn't respond, will be marked as dead if it doesn't respond), "dead" = 2.
+  How soon can a dead node's name be reused by a new node (using a different IP). Disabled by default, name reclaim is not allowed until `gossip-to-dead-nodes-time` expires. This can be useful to set to low numbers when reusing node names, e.g. in stateful sets.
+  If the memberlist library detects that a new node is trying to reuse the name of a previous node, it will log a message like this: `Conflicting address for ingester-6. Mine: 10.44.12.251:7946 Theirs: 10.44.12.54:7946 Old state: 2`. Node states are: "alive" = 0, "suspect" = 1 (doesn't respond, will be marked as dead if it doesn't respond), "dead" = 2.

#### Multi KV

-This is a special key-value implementation that uses two different KV stores (eg. consul, etcd or memberlist). One of them is always marked as primary, and all reads and writes go to primary store. Other one, secondary, is only used for writes. The idea is that operator can use multi KV store to migrate from primary to secondary store in runtime.
+This is a special key-value implementation that uses two different KV stores (e.g. consul, etcd or memberlist). One of them is always marked as primary, and all reads and writes go to the primary store. The other one, secondary, is only used for writes. The idea is that an operator can use the multi KV store to migrate from the primary to the secondary store at runtime.

For example, migration from Consul to Etcd would look like this:

- Set `ring.store` to use `multi` store. Set `-multi.primary=consul` and `-multi.secondary=etcd`. All consul and etcd settings must still be specified.
-- Start all Cortex microservices. They will still use Consul as primary KV, but they will also write share ring via etcd.
-- Operator can now use "runtime config" mechanism to switch primary store to etcd.
-- After all Cortex microservices have picked up new primary store, and everything looks correct, operator can now shut down Consul, and modify Cortex configuration to use `-ring.store=etcd` only.
+- Start all Cortex microservices. They will still use Consul as primary KV, but they will also share the ring via etcd.
+- Operator can now use the "runtime config" mechanism to switch primary store to etcd.
+- After all Cortex microservices have picked up the new primary store, and everything looks correct, the operator can modify the Cortex configuration to use `-ring.store=etcd` only.
- At this point, Consul can be shut down.

-Multi KV has following parameters:
+Multi KV has the following parameters:

- `multi.primary` - name of primary KV store. Same values as in `ring.store` are supported, except `multi`.
- `multi.secondary` - name of secondary KV store.
- `multi.mirror-enabled` - enable mirroring of values to secondary store, defaults to true
-- `multi.mirror-timeout` - wait max this time to write to secondary store to finish. Default to 2 seconds. Errors writing to secondary store are not reported to caller, but are logged and also reported via `cortex_multikv_mirror_write_errors_total` metric.
+- `multi.mirror-timeout` - wait at most this long for a write to the secondary store to finish. Defaults to 2 seconds. Errors writing to the secondary store are not reported to the caller, but are logged and also reported via the `cortex_multikv_mirror_write_errors_total` metric.

-Multi KV also reacts on changes done via runtime configuration. It uses this section:
+Multi KV also reacts to changes done via runtime configuration. It uses this section:

```yaml
multi_kv_config:
@@ -268,7 +268,7 @@ HA tracking has two of its own flags:
- `distributor.ha-tracker.replica`
  Prometheus label to look for in samples to identify a Prometheus HA replica. (default "`__replica__`")

-It's reasonable to assume people probably already have a `cluster` label, or something similar. If not, they should add one along with `__replica__` via external labels in their Prometheus config. If you stick to these default values your Prometheus config could look like this (`POD_NAME` is an environment variable which must be set by you):
+It's reasonable to assume people probably already have a `cluster` label, or something similar. If not, they should add one along with `__replica__` via external labels in their Prometheus config. If you stick to these default values, your Prometheus config could look like this (`POD_NAME` is an environment variable which must be set by you):

```yaml
global:
@@ -277,9 +277,9 @@ global:
    __replica__: $POD_NAME
```

-HA Tracking looks for the two labels (which can be overwritten per user)
+HA Tracking looks for the two labels (which can be overridden per user).

-It also talks to a KVStore and has it's own copies of the same flags used by the Distributor to connect to for the ring.
+It also talks to a KVStore and has its own copies of the same flags used by the Distributor to connect to the ring.

- `distributor.ha-tracker.failover-timeout`
  If we don't receive any samples from the accepted replica for a cluster in this amount of time we will failover to the next replica we receive a sample from.
  This value must be greater than the update timeout (default 30s)
- `distributor.ha-tracker.store`
@@ -307,9 +307,9 @@ It also talks to a KVStore and has it's own copies of the same flags used by the

## Runtime Configuration file

-Cortex has a concept of "runtime config" file, which is simply a file that is reloaded while Cortex is running. It is used by some Cortex components to allow operator to change some aspects of Cortex configuration without restarting it. File is specified by using `-runtime-config.file=` flag and reload period (which defaults to 10 seconds) can be changed by `-runtime-config.reload-period=` flag. Previously this mechanism was only used by limits overrides, and flags were called `-limits.per-user-override-config=` and `-limits.per-user-override-period=10s` respectively. These are still used, if `-runtime-config.file=` is not specified.
+Cortex has a concept of a "runtime config" file, which is simply a file that is reloaded while Cortex is running. It is used by some Cortex components to allow an operator to change some aspects of Cortex configuration without restarting it. The file is specified by using the `-runtime-config.file=` flag and the reload period (which defaults to 10 seconds) can be changed by the `-runtime-config.reload-period=` flag. Previously this mechanism was only used by limits overrides, and flags were called `-limits.per-user-override-config=` and `-limits.per-user-override-period=10s` respectively. These are still used, if `-runtime-config.file=` is not specified.

-At the moment runtime configuration may contain per-user limits, multi KV store, and ingester instance limits.
+At the moment, runtime configuration may contain per-user limits, multi KV store, and ingester instance limits.

Example runtime configuration file:

@@ -333,15 +333,15 @@ ingester_limits:
  max_inflight_push_requests: 10000
```

-When running Cortex on Kubernetes, store this file in a config map and mount it in each services' containers. When changing the values there is no need to restart the services, unless otherwise specified.
+When running Cortex on Kubernetes, store this file in a config map and mount it in each service's container. When changing the values, there is no need to restart the services, unless otherwise specified.

The `/runtime_config` endpoint returns the whole runtime configuration, including the overrides. In case you want to get only the non-default values of the configuration you can pass the `mode` parameter with the `diff` value.

-## Ingester, Distributor & Querier limits.
+## Ingester, Distributor & Querier limits

-Cortex implements various limits on the requests it can process, in order to prevent a single tenant overwhelming the cluster. There are various default global limits which apply to all tenants which can be set on the command line. These limits can also be overridden on a per-tenant basis by using `overrides` field of runtime configuration file.
+Cortex implements various limits on the requests it can process, in order to prevent a single tenant from overwhelming the cluster. There are various default global limits which apply to all tenants which can be set on the command line. These limits can also be overridden on a per-tenant basis by using the `overrides` field of the runtime configuration file.

-The `overrides` field is a map of tenant ID (same values as passed in the `X-Scope-OrgID` header) to the various limits. An example could look like:
+The `overrides` field is a map of tenant ID (same values as passed in the `X-Scope-OrgID` header) to the various limits. An example could look like:

```yaml
overrides:
@@ -363,9 +363,9 @@ Valid per-tenant limits are (with their corresponding flags for default values):

  The per-tenant rate limit (and burst size), in samples per second. It supports two strategies: `local` (default) and `global`.

-  The `local` strategy enforces the limit on a per distributor basis, actual effective rate limit will be N times higher, where N is the number of distributor replicas.
+  The `local` strategy enforces the limit on a per-distributor basis; the actual effective rate limit will be N times higher, where N is the number of distributor replicas.

-  The `global` strategy enforces the limit globally, configuring a per-distributor local rate limiter as `ingestion_rate / N`, where N is the number of distributor replicas (it's automatically adjusted if the number of replicas change). The `ingestion_burst_size` refers to the per-distributor local rate limiter (even in the case of the `global` strategy) and should be set at least to the maximum number of samples expected in a single push request. For this reason, the `global` strategy requires that push requests are evenly distributed across the pool of distributors; if you use a load balancer in front of the distributors you should be already covered, while if you have a custom setup (ie. an authentication gateway in front) make sure traffic is evenly balanced across distributors.
+  The `global` strategy enforces the limit globally, configuring a per-distributor local rate limiter as `ingestion_rate / N`, where N is the number of distributor replicas (it's automatically adjusted if the number of replicas changes). The `ingestion_burst_size` refers to the per-distributor local rate limiter (even in the case of the `global` strategy) and should be set at least to the maximum number of samples expected in a single push request. For this reason, the `global` strategy requires that push requests are evenly distributed across the pool of distributors; if you use a load balancer in front of the distributors you should already be covered, while if you have a custom setup (i.e. an authentication gateway in front) make sure traffic is evenly balanced across distributors.

  The `global` strategy requires the distributors to form their own ring, which is used to keep track of the current number of healthy distributor replicas. The ring is configured by `distributor: { ring: {}}` / `-distributor.ring.*`.

@@ -373,37 +373,37 @@ Valid per-tenant limits are (with their corresponding flags for default values):
- `max_label_value_length` / `-validation.max-length-label-value`
- `max_label_names_per_series` / `-validation.max-label-names-per-series`

-  Also enforced by the distributor, limits on the on length of labels and their values, and the total number of labels allowed per series.
+  Also enforced by the distributor; limits on the length of labels and their values, and the total number of labels allowed per series.

- `reject_old_samples` / `-validation.reject-old-samples`
- `reject_old_samples_max_age` / `-validation.reject-old-samples.max-age`
- `creation_grace_period` / `-validation.create-grace-period`

-  Also enforce by the distributor, limits on how far in the past (and future) timestamps that we accept can be.
+  Also enforced by the distributor; limits on how far in the past (and future) the timestamps that we accept can be.
- `max_series_per_user` / `-ingester.max-series-per-user` - `max_series_per_metric` / `-ingester.max-series-per-metric` - Enforced by the ingesters; limits the number of active series a user (or a given metric) can have. When running with `-distributor.shard-by-all-labels=false` (the default), this limit will enforce the maximum number of series a metric can have 'globally', as all series for a single metric will be sent to the same replication set of ingesters. This is not the case when running with `-distributor.shard-by-all-labels=true`, so the actual limit will be N/RF times higher, where N is number of ingester replicas and RF is configured replication factor. + Enforced by the ingesters; limits the number of active series a user (or a given metric) can have. When running with `-distributor.shard-by-all-labels=false` (the default), this limit will enforce the maximum number of series a metric can have 'globally', as all series for a single metric will be sent to the same replication set of ingesters. This is not the case when running with `-distributor.shard-by-all-labels=true`, so the actual limit will be N/RF times higher, where N is the number of ingester replicas and RF is the configured replication factor. - `max_global_series_per_user` / `-ingester.max-global-series-per-user` - `max_global_series_per_metric` / `-ingester.max-global-series-per-metric` - Like `max_series_per_user` and `max_series_per_metric`, but the limit is enforced across the cluster. Each ingester is configured with a local limit based on the replication factor, the `-distributor.shard-by-all-labels` setting and the current number of healthy ingesters, and is kept updated whenever the number of ingesters change. + Like `max_series_per_user` and `max_series_per_metric`, but the limit is enforced across the cluster. Each ingester is configured with a local limit based on the replication factor, the `-distributor.shard-by-all-labels` setting and the current number of healthy ingesters, and is kept updated whenever the number of ingesters changes. Requires `-distributor.replication-factor`, `-distributor.shard-by-all-labels`, `-distributor.sharding-strategy` and `-distributor.zone-awareness-enabled` set for the ingesters too. - `max_metadata_per_user` / `-ingester.max-metadata-per-user` - `max_metadata_per_metric` / `-ingester.max-metadata-per-metric` - Enforced by the ingesters; limits the number of active metadata a user (or a given metric) can have. When running with `-distributor.shard-by-all-labels=false` (the default), this limit will enforce the maximum number of metadata a metric can have 'globally', as all metadata for a single metric will be sent to the same replication set of ingesters. This is not the case when running with `-distributor.shard-by-all-labels=true`, so the actual limit will be N/RF times higher, where N is number of ingester replicas and RF is configured replication factor. + Enforced by the ingesters; limits the number of active metadata a user (or a given metric) can have. When running with `-distributor.shard-by-all-labels=false` (the default), this limit will enforce the maximum number of metadata a metric can have 'globally', as all metadata for a single metric will be sent to the same replication set of ingesters. This is not the case when running with `-distributor.shard-by-all-labels=true`, so the actual limit will be N/RF times higher, where N is the number of ingester replicas and RF is the configured replication factor. 
- `max_fetched_series_per_query` / `querier.max-fetched-series-per-query`

-  When running Cortex with blocks storage this limit is enforced in the queriers on unique series fetched from ingesters and store-gateways (long-term storage).
+  When running Cortex with blocks storage, this limit is enforced in the queriers on unique series fetched from ingesters and store-gateways (long-term storage).

- `max_global_metadata_per_user` / `-ingester.max-global-metadata-per-user`
- `max_global_metadata_per_metric` / `-ingester.max-global-metadata-per-metric`

-  Like `max_metadata_per_user` and `max_metadata_per_metric`, but the limit is enforced across the cluster. Each ingester is configured with a local limit based on the replication factor, the `-distributor.shard-by-all-labels` setting and the current number of healthy ingesters, and is kept updated whenever the number of ingesters change.
+  Like `max_metadata_per_user` and `max_metadata_per_metric`, but the limit is enforced across the cluster. Each ingester is configured with a local limit based on the replication factor, the `-distributor.shard-by-all-labels` setting and the current number of healthy ingesters, and is kept updated whenever the number of ingesters changes.

  Requires `-distributor.replication-factor`, `-distributor.shard-by-all-labels`, `-distributor.sharding-strategy` and `-distributor.zone-awareness-enabled` set for the ingesters too.

@@ -423,25 +423,25 @@ ingester_limits:

Valid ingester instance limits are (with their corresponding flags):

-- `max_ingestion_rate` \ `--ingester.instance-limits.max-ingestion-rate`
+- `max_ingestion_rate` / `-ingester.instance-limits.max-ingestion-rate`

  Limit the ingestion rate in samples per second for an ingester. When this limit is reached, new requests will fail with an HTTP 500 error.

-- `max_series` \ `-ingester.instance-limits.max-series`
+- `max_series` / `-ingester.instance-limits.max-series`

  Limit the total number of series that an ingester keeps in memory, across all users. When this limit is reached, requests that create new series will fail with an HTTP 500 error.

-- `max_tenants` \ `-ingester.instance-limits.max-tenants`
+- `max_tenants` / `-ingester.instance-limits.max-tenants`

  Limit the maximum number of users an ingester will accept metrics for. When this limit is reached, requests from new users will fail with an HTTP 500 error.

-- `max_inflight_push_requests` \ `-ingester.instance-limits.max-inflight-push-requests`
+- `max_inflight_push_requests` / `-ingester.instance-limits.max-inflight-push-requests`

  Limit the maximum number of requests being handled by an ingester at once. This setting is critical for preventing ingesters from using an excessive amount of memory during high load or temporary slow downs. When this limit is reached, new requests will fail with an HTTP 500 error.

## DNS Service Discovery

-Some clients in Cortex support service discovery via DNS to find addresses of backend servers to connect to (ie. caching servers).
+Some clients in Cortex support service discovery via DNS to find addresses of backend servers to connect to (i.e. caching servers).
The clients supporting it are: - [Blocks storage's memcached cache](../blocks-storage/store-gateway.md#caching) - [All caching memcached servers](./config-file-reference.md#memcached-client-config) @@ -449,7 +449,7 @@ Some clients in Cortex support service discovery via DNS to find addresses of ba ### Supported discovery modes -The DNS service discovery, inspired from Thanos DNS SD, supports different discovery modes. A discovery mode is selected adding a specific prefix to the address. The supported prefixes are: +The DNS service discovery, inspired by Thanos DNS SD, supports different discovery modes. A discovery mode is selected by adding a specific prefix to the address. The supported prefixes are: - **`dns+`**
The domain name after the prefix is looked up as an A/AAAA query. For example: `dns+memcached.local:11211` @@ -458,13 +458,13 @@ The DNS service discovery, inspired from Thanos DNS SD, supports different disco - **`dnssrvnoa+`**
  The domain name after the prefix is looked up as a SRV query, with no A/AAAA lookup made after that. For example: `dnssrvnoa+_memcached._tcp.memcached.namespace.svc.cluster.local`

-If **no prefix** is provided, the provided IP or hostname will be used straightaway without pre-resolving it.
+If **no prefix** is provided, the provided IP or hostname will be used directly without pre-resolving it.

If you are using a managed memcached service from [Google Cloud](https://cloud.google.com/memorystore/docs/memcached/auto-discovery-overview), or [AWS](https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/AutoDiscovery.HowAutoDiscoveryWorks.html), use the [auto-discovery](./config-file-reference.md#memcached-client-config) flag instead of DNS discovery, then use the discovery/configuration endpoint as the domain name without any prefix.

## Logging of IP of reverse proxy

-If a reverse proxy is used in front of Cortex it might be difficult to troubleshoot errors. The following 3 settings can be used to log the IP address passed along by the reverse proxy in headers like X-Forwarded-For.
+If a reverse proxy is used in front of Cortex, it might be difficult to troubleshoot errors. The following 3 settings can be used to log the IP address passed along by the reverse proxy in headers like X-Forwarded-For.

- `-server.log_source_ips_enabled`

@@ -472,8 +472,8 @@ If a reverse proxy is used in front of Cortex it might be difficult to troublesh
- `-server.log-source-ips-header`

-  Header field storing the source IPs. It is only used if `-server.log-source-ips-enabled` is true and if `-server.log-source-ips-regex` is set. If not set the default Forwarded, X-Real-IP or X-Forwarded-For headers are searched.
+  Header field storing the source IPs. It is only used if `-server.log-source-ips-enabled` is true and if `-server.log-source-ips-regex` is set. If not set, the default Forwarded, X-Real-IP or X-Forwarded-For headers are searched.

- `-server.log-source-ips-regex`

-  Regular expression for matching the source IPs. It should contain at least one capturing group the first of which will be returned. Only used if `-server.log-source-ips-enabled` is true and if `-server.log-source-ips-header` is set. If not set the default Forwarded, X-Real-IP or X-Forwarded-For headers are searched.
+  Regular expression for matching the source IPs. It should contain at least one capturing group, the first of which will be returned. Only used if `-server.log-source-ips-enabled` is true and if `-server.log-source-ips-header` is set. If not set, the default Forwarded, X-Real-IP or X-Forwarded-For headers are searched.
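+
+For illustration, a hedged sketch of the corresponding `server` YAML block (the field names are assumed to mirror the CLI flags above; the header and regex values are examples only):
+
+```yaml
+server:
+  # Log the source IP extracted from the configured header.
+  log_source_ips_enabled: true
+  log_source_ips_header: X-Forwarded-For
+  # At least one capturing group; the first capture is logged.
+  log_source_ips_regex: "^(.+?),"
+```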
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index 0ce98cb65af..be3b38f6e65 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -102,6 +102,10 @@ api:
  # CLI flag: -api.http-request-headers-to-log
  [http_request_headers_to_log: <list of string> | default = []]

+  # HTTP header that can be used as a request ID.
+  # CLI flag: -api.request-id-header
+  [request_id_header: <string> | default = ""]
+
  # Regex for CORS origin. It is fully anchored. Example:
  # 'https?://(domain1|domain2)\.com'
  # CLI flag: -server.cors-origin
@@ -162,6 +166,110 @@ api:

# The compactor_config configures the compactor for the blocks storage.
[compactor: <compactor_config>]

+parquet_converter:
+  # Maximum concurrent goroutines for downloading block metadata from object
+  # storage.
+  # CLI flag: -parquet-converter.meta-sync-concurrency
+  [meta_sync_concurrency: <int> | default = 20]
+
+  # How often to check for new TSDB blocks to convert to parquet format.
+  # CLI flag: -parquet-converter.conversion-interval
+  [conversion_interval: <duration> | default = 1m]
+
+  # Maximum number of time series per parquet row group. Larger values improve
+  # compression but may reduce performance during reads.
+  # CLI flag: -parquet-converter.max-rows-per-row-group
+  [max_rows_per_row_group: <int> | default = 1000000]
+
+  # Enable disk-based write buffering to reduce memory consumption during
+  # parquet file generation.
+  # CLI flag: -parquet-converter.file-buffer-enabled
+  [file_buffer_enabled: <boolean> | default = true]
+
+  # Local directory path for caching TSDB blocks during parquet conversion.
+  # CLI flag: -parquet-converter.data-dir
+  [data_dir: <string> | default = "./data"]
+
+  ring:
+    kvstore:
+      # Backend storage to use for the ring. Supported values are: consul,
+      # etcd, inmemory, memberlist, multi.
+      # CLI flag: -parquet-converter.ring.store
+      [store: <string> | default = "consul"]
+
+      # The prefix for the keys in the store. Should end with a /.
+      # CLI flag: -parquet-converter.ring.prefix
+      [prefix: <string> | default = "collectors/"]
+
+      dynamodb:
+        # Region to access dynamodb.
+        # CLI flag: -parquet-converter.ring.dynamodb.region
+        [region: <string> | default = ""]
+
+        # Table name to use on dynamodb.
+        # CLI flag: -parquet-converter.ring.dynamodb.table-name
+        [table_name: <string> | default = ""]
+
+        # Time to expire items on dynamodb.
+        # CLI flag: -parquet-converter.ring.dynamodb.ttl-time
+        [ttl: <duration> | default = 0s]
+
+        # Time to refresh local ring with information on dynamodb.
+        # CLI flag: -parquet-converter.ring.dynamodb.puller-sync-time
+        [puller_sync_time: <duration> | default = 1m]
+
+        # Maximum number of retries for DDB KV CAS.
+        # CLI flag: -parquet-converter.ring.dynamodb.max-cas-retries
+        [max_cas_retries: <int> | default = 10]
+
+        # Timeout of dynamoDbClient requests. Default is 2m.
+        # CLI flag: -parquet-converter.ring.dynamodb.timeout
+        [timeout: <duration> | default = 2m]
+
+      # The consul_config configures the consul client.
+      # The CLI flags prefix for this block config is: parquet-converter.ring
+      [consul: <consul_config>]
+
+      # The etcd_config configures the etcd client.
+      # The CLI flags prefix for this block config is: parquet-converter.ring
+      [etcd: <etcd_config>]
+
+      multi:
+        # Primary backend storage used by multi-client.
+        # CLI flag: -parquet-converter.ring.multi.primary
+        [primary: <string> | default = ""]
+
+        # Secondary backend storage used by multi-client.
+        # CLI flag: -parquet-converter.ring.multi.secondary
+        [secondary: <string> | default = ""]
+
+        # Mirror writes to secondary store.
+        # CLI flag: -parquet-converter.ring.multi.mirror-enabled
+        [mirror_enabled: <boolean> | default = false]
+
+        # Timeout for storing value to secondary store.
+        # CLI flag: -parquet-converter.ring.multi.mirror-timeout
+        [mirror_timeout: <duration> | default = 2s]
+
+    # Period at which to heartbeat to the ring. 0 = disabled.
+    # CLI flag: -parquet-converter.ring.heartbeat-period
+    [heartbeat_period: <duration> | default = 5s]
+
+    # The heartbeat timeout after which parquet-converters are considered
+    # unhealthy within the ring. 0 = never (timeout disabled).
+    # CLI flag: -parquet-converter.ring.heartbeat-timeout
+    [heartbeat_timeout: <duration> | default = 1m]
+
+    # Time since last heartbeat before a parquet-converter will be removed from
+    # the ring. 0 to disable.
+    # CLI flag: -parquet-converter.auto-forget-delay
+    [auto_forget_delay: <duration> | default = 2m]
+
+    # File path where tokens are stored. If empty, tokens are not stored at
+    # shutdown and restored at startup.
+    # CLI flag: -parquet-converter.ring.tokens-file-path
+    [tokens_file_path: <string> | default = ""]
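+
+# For illustration only: a hedged sketch of a minimal parquet-converter setup
+# using the fields above (illustrative values, not recommendations; all
+# omitted fields keep their defaults):
+#
+#   parquet_converter:
+#     conversion_interval: 5m
+#     max_rows_per_row_group: 500000
+#     data_dir: /data/parquet-converter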
+
# The store_gateway_config configures the store-gateway service used by the
# blocks storage.
[store_gateway: <store_gateway_config>]
@@ -425,6 +533,12 @@ sharding_ring:
  # CLI flag: -alertmanager.sharding-ring.tokens-file-path
  [tokens_file_path: <string> | default = ""]

+  # Set to true to enable ring detailed metrics. These metrics provide detailed
+  # information, such as token count and ownership per tenant. Disabling them
+  # can significantly decrease the number of metrics emitted.
+  # CLI flag: -alertmanager.sharding-ring.detailed-metrics-enabled
+  [detailed_metrics_enabled: <boolean> | default = true]
+
  # The sleep seconds when alertmanager is shutting down. Need to be close to or
  # larger than KV Store information propagation delay
  # CLI flag: -alertmanager.sharding-ring.final-sleep
@@ -1974,6 +2088,252 @@ bucket_store:
  # CLI flag: -blocks-storage.bucket-store.metadata-cache.partitioned-groups-list-ttl
  [partitioned_groups_list_ttl: <duration> | default = 0s]

+  parquet_labels_cache:
+    # The parquet labels cache backend type. A single cache backend or multiple
+    # cache backends can be provided. Supported values in single cache:
+    # memcached, redis, inmemory, and '' (disable). Supported values in
+    # multi-level cache: a comma-separated list of (inmemory, memcached, redis)
+    # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.backend
+    [backend: <string> | default = ""]
+
+    inmemory:
+      # Maximum size in bytes of in-memory parquet-labels cache used (shared
+      # between all tenants).
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.inmemory.max-size-bytes
+      [max_size_bytes: <int> | default = 1073741824]
+
+    memcached:
+      # Comma separated list of memcached addresses. Supported prefixes are:
+      # dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV
+      # query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup
+      # made after that).
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.addresses
+      [addresses: <string> | default = ""]
+
+      # The socket read/write timeout.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.timeout
+      [timeout: <duration> | default = 100ms]
+
+      # The maximum number of idle connections that will be maintained per
+      # address.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-idle-connections
+      [max_idle_connections: <int> | default = 16]
+
+      # The maximum number of concurrent asynchronous operations that can
+      # occur.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-concurrency
+      [max_async_concurrency: <int> | default = 3]
+
+      # The maximum number of enqueued asynchronous operations allowed.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-async-buffer-size
+      [max_async_buffer_size: <int> | default = 10000]
+
+      # The maximum number of concurrent connections running get operations. If
+      # set to 0, concurrency is unlimited.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-concurrency
+      [max_get_multi_concurrency: <int> | default = 100]
+
+      # The maximum number of keys a single underlying get operation should run.
+      # If more keys are specified, internally keys are split into multiple
+      # batches and fetched concurrently, honoring the max concurrency. If set
+      # to 0, the max batch size is unlimited.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-get-multi-batch-size
+      [max_get_multi_batch_size: <int> | default = 0]
+
+      # The maximum size of an item stored in memcached. Bigger items are not
+      # stored. If set to 0, no maximum size is enforced.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.max-item-size
+      [max_item_size: <int> | default = 1048576]
+
+      # Use the memcached auto-discovery mechanism provided by some cloud
+      # providers like GCP and AWS.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.auto-discovery
+      [auto_discovery: <boolean> | default = false]
+
+      set_async_circuit_breaker_config:
+        # If true, enable circuit breaker.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.enabled
+        [enabled: <boolean> | default = false]
+
+        # Maximum number of requests allowed to pass through when the circuit
+        # breaker is half-open. If set to 0, by default it allows 1 request.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.half-open-max-requests
+        [half_open_max_requests: <int> | default = 10]
+
+        # Period of the open state after which the state of the circuit breaker
+        # becomes half-open. If set to 0, by default open duration is 60
+        # seconds.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.open-duration
+        [open_duration: <duration> | default = 5s]
+
+        # Minimum number of requests to trigger the circuit breaker.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.min-requests
+        [min_requests: <int> | default = 50]
+
+        # Consecutive failures to determine if the circuit breaker should open.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.consecutive-failures
+        [consecutive_failures: <int> | default = 5]
+
+        # Failure percentage to determine if the circuit breaker should open.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.memcached.set-async.circuit-breaker.failure-percent
+        [failure_percent: <float> | default = 0.05]
+
+    redis:
+      # Comma separated list of redis addresses. Supported prefixes are: dns+
+      # (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query),
+      # dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after
+      # that).
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.addresses
+      [addresses: <string> | default = ""]
+
+      # Redis username.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.username
+      [username: <string> | default = ""]
+
+      # Redis password.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.password
+      [password: <string> | default = ""]
+
+      # Database to be selected after connecting to the server.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.db
+      [db: <int> | default = 0]
+
+      # Specifies the master's name. Must not be empty for Redis Sentinel.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.master-name
+      [master_name: <string> | default = ""]
+
+      # The maximum number of concurrent GetMulti() operations. If set to 0,
+      # concurrency is unlimited.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-get-multi-concurrency
+      [max_get_multi_concurrency: <int> | default = 100]
+
+      # The maximum size per batch for mget.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.get-multi-batch-size
+      [get_multi_batch_size: <int> | default = 100]
+
+      # The maximum number of concurrent SetMulti() operations. If set to 0,
+      # concurrency is unlimited.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-set-multi-concurrency
+      [max_set_multi_concurrency: <int> | default = 100]
+
+      # The maximum size per batch for pipeline set.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-multi-batch-size
+      [set_multi_batch_size: <int> | default = 100]
+
+      # The maximum number of concurrent asynchronous operations that can
+      # occur.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-concurrency
+      [max_async_concurrency: <int> | default = 3]
+
+      # The maximum number of enqueued asynchronous operations allowed.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.max-async-buffer-size
+      [max_async_buffer_size: <int> | default = 10000]
+
+      # Client dial timeout.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.dial-timeout
+      [dial_timeout: <duration> | default = 5s]
+
+      # Client read timeout.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.read-timeout
+      [read_timeout: <duration> | default = 3s]
+
+      # Client write timeout.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.write-timeout
+      [write_timeout: <duration> | default = 3s]
+
+      # Whether to enable TLS for the Redis connection.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-enabled
+      [tls_enabled: <boolean> | default = false]
+
+      # Path to the client certificate file, which will be used for
+      # authenticating with the server. Also requires the key path to be
+      # configured.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-cert-path
+      [tls_cert_path: <string> | default = ""]
+
+      # Path to the key file for the client certificate. Also requires the
+      # client certificate to be configured.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-key-path
+      [tls_key_path: <string> | default = ""]
+
+      # Path to the CA certificates file to validate server certificate against.
+      # If not set, the host's root CA certificates are used.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-ca-path
+      [tls_ca_path: <string> | default = ""]
+
+      # Override the expected name on the server certificate.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-server-name
+      [tls_server_name: <string> | default = ""]
+
+      # Skip validating server certificate.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.tls-insecure-skip-verify
+      [tls_insecure_skip_verify: <boolean> | default = false]
+
+      # If not zero, then client-side caching is enabled. Client-side caching is
+      # when data is stored in memory instead of fetching data each time. See
+      # https://redis.io/docs/manual/client-side-caching/ for more info.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.cache-size
+      [cache_size: <int> | default = 0]
+
+      set_async_circuit_breaker_config:
+        # If true, enable circuit breaker.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.enabled
+        [enabled: <boolean> | default = false]
+
+        # Maximum number of requests allowed to pass through when the circuit
+        # breaker is half-open. If set to 0, by default it allows 1 request.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.half-open-max-requests
+        [half_open_max_requests: <int> | default = 10]
+
+        # Period of the open state after which the state of the circuit breaker
+        # becomes half-open. If set to 0, by default open duration is 60
+        # seconds.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.open-duration
+        [open_duration: <duration> | default = 5s]
+
+        # Minimum number of requests to trigger the circuit breaker.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.min-requests
+        [min_requests: <int> | default = 50]
+
+        # Consecutive failures to determine if the circuit breaker should open.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.consecutive-failures
+        [consecutive_failures: <int> | default = 5]
+
+        # Failure percentage to determine if the circuit breaker should open.
+        # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.redis.set-async.circuit-breaker.failure-percent
+        [failure_percent: <float> | default = 0.05]
+
+    multilevel:
+      # The maximum number of concurrent asynchronous operations that can occur
+      # when backfilling cache items.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-concurrency
+      [max_async_concurrency: <int> | default = 3]
+
+      # The maximum number of enqueued asynchronous operations allowed when
+      # backfilling cache items.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-async-buffer-size
+      [max_async_buffer_size: <int> | default = 10000]
+
+      # The maximum number of items to backfill per asynchronous operation.
+      # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.multilevel.max-backfill-items
+      [max_backfill_items: <int> | default = 10000]
+
+    # Size of each subrange that the bucket object is split into for better
+    # caching.
+    # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-size
+    [subrange_size: <int> | default = 16000]
+
+    # Maximum number of sub-GetRange requests that a single GetRange request can
+    # be split into when fetching parquet labels file. Zero or negative value =
+    # unlimited number of sub-requests.
+    # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.max-get-range-requests
+    [max_get_range_requests: <int> | default = 3]
+
+    # TTL for caching object attributes for parquet labels file.
+    # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.attributes-ttl
+    [attributes_ttl: <duration> | default = 168h]
+
+    # TTL for caching individual subranges.
+    # CLI flag: -blocks-storage.bucket-store.parquet-labels-cache.subrange-ttl
+    [subrange_ttl: <duration> | default = 24h]
+
  # Maximum number of entries in the regex matchers cache. 0 to disable.
  # CLI flag: -blocks-storage.bucket-store.matchers-cache-max-items
  [matchers_cache_max_items: <int> | default = 0]
@@ -2423,6 +2783,12 @@ sharding_ring:
  # CLI flag: -compactor.auto-forget-delay
  [auto_forget_delay: <duration> | default = 2m]

+  # Set to true to enable ring detailed metrics. These metrics provide detailed
+  # information, such as token count and ownership per tenant. Disabling them
+  # can significantly decrease the number of metrics emitted.
+  # CLI flag: -compactor.ring.detailed-metrics-enabled
+  [detailed_metrics_enabled: <boolean> | default = true]
+
  # Minimum time to wait for ring stability at startup. 0 to disable.
  # CLI flag: -compactor.ring.wait-stability-min-duration
  [wait_stability_min_duration: <duration> | default = 1m]
@@ -2573,6 +2939,7 @@ The `consul_config` configures the consul client.
The supported CLI flags `<prefix>` used to reference this config block are:
+
+# EXPERIMENTAL: If true, accept Prometheus remote write v2 protocol push
+# requests.
+# CLI flag: -distributor.remote-writev2-enabled
+[remote_writev2_enabled: <boolean> | default = false]
+
 ring:
   kvstore:
     # Backend storage to use for the ring. Supported values are: consul, etcd,
@@ -2843,6 +3215,12 @@ ring:
   # CLI flag: -distributor.ring.heartbeat-timeout
   [heartbeat_timeout: <duration> | default = 1m]

+  # Set to true to enable ring detailed metrics. These metrics provide detailed
+  # information, such as token count and ownership per tenant. Disabling them
+  # can significantly decrease the number of metrics emitted.
+  # CLI flag: -distributor.ring.detailed-metrics-enabled
+  [detailed_metrics_enabled: <boolean> | default = true]
+
   # Name of network interface to read address from.
   # CLI flag: -distributor.ring.instance-interface-names
   [instance_interface_names: <list of string> | default = [eth0 en0]]
@@ -2883,6 +3261,15 @@ otlp:
   # https://github.com/prometheus/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems)
   # CLI flag: -distributor.otlp.disable-target-info
   [disable_target_info: <boolean> | default = false]
+
+  # EXPERIMENTAL: If true, allow delta temporality OTLP metrics to be ingested.
+  # CLI flag: -distributor.otlp.allow-delta-temporality
+  [allow_delta_temporality: <boolean> | default = false]
+
+  # EXPERIMENTAL: If true, the '__type__' and '__unit__' labels are added for
+  # the OTLP metrics.
+  # CLI flag: -distributor.otlp.enable-type-and-unit-labels
+  [enable_type_and_unit_labels: <boolean> | default = false]
```

### `etcd_config`

@@ -2894,6 +3281,7 @@ The `etcd_config` configures the etcd client. The supported CLI flags `<prefix>` used to reference this config block are:
 - `compactor.ring`
 - `distributor.ha-tracker`
 - `distributor.ring`
+- `parquet-converter.ring`
 - `ruler.ring`
 - `store-gateway.sharding-ring`
@@ -3731,6 +4119,11 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s
 # CLI flag: -frontend.max-queriers-per-tenant
 [max_queriers_per_tenant: <int> | default = 0]

+# [Experimental] Number of shards to use when distributing shardable PromQL
+# queries.
+# CLI flag: -frontend.query-vertical-shard-size
+[query_vertical_shard_size: <int> | default = 0]
+
 # Enable to allow queries to be evaluated with data from a single zone, if other
 # zones are not available.
 [query_partial_data: <boolean> | default = false]
@@ -4177,7 +4570,7 @@ The `querier_config` configures the Cortex querier.
 [per_step_stats_enabled: <boolean> | default = false]

 # Use compression for metrics query API or instant and range query APIs.
-# Supports 'gzip' and '' (disable compression)
+# Supported compressions: 'gzip', 'snappy', 'zstd' and '' (disable compression)
 # CLI flag: -querier.response-compression
 [response_compression: <string> | default = "gzip"]
@@ -4328,6 +4721,29 @@ thanos_engine:
 # [Experimental] If true, experimental promQL functions are enabled.
 # CLI flag: -querier.enable-promql-experimental-functions
 [enable_promql_experimental_functions: <boolean> | default = false]
+
+# [Experimental] If true, the querier will try to query the parquet files if
+# available.
+# CLI flag: -querier.enable-parquet-queryable
+[enable_parquet_queryable: <boolean> | default = false]
+
+# [Experimental] Maximum size of the Parquet queryable shard cache. 0 to
+# disable.
+# CLI flag: -querier.parquet-queryable-shard-cache-size
+[parquet_queryable_shard_cache_size: <int> | default = 512]
+
+# [Experimental] Parquet queryable's default block store to query. Valid
+# options are tsdb and parquet. If it is set to tsdb, the parquet queryable
+# always falls back to the store gateway.
+# CLI flag: -querier.parquet-queryable-default-block-store
+[parquet_queryable_default_block_store: <string> | default = "parquet"]
+
+# [Experimental] Disable the parquet queryable's fallback to the Store Gateway
+# when a block is not available as Parquet files but is available in TSDB.
+# Setting this to true disables the fallback, so users can remove the Store
+# Gateway, but they must make sure Parquet files are created before the data
+# needs to be queryable.
+# CLI flag: -querier.parquet-queryable-fallback-disabled
+[parquet_queryable_fallback_disabled: <boolean> | default = false]
```

### `query_frontend_config`

@@ -4973,6 +5389,12 @@ ring:
   # CLI flag: -ruler.ring.tokens-file-path
   [tokens_file_path: <string> | default = ""]

+  # Set to true to enable ring detailed metrics. These metrics provide detailed
+  # information, such as token count and ownership per tenant. Disabling them
+  # can significantly decrease the number of metrics emitted.
+  # CLI flag: -ruler.ring.detailed-metrics-enabled
+  [detailed_metrics_enabled: <boolean> | default = true]
+
   # Name of network interface to read address from.
   # CLI flag: -ruler.ring.instance-interface-names
   [instance_interface_names: <list of string> | default = [eth0 en0]]
@@ -5831,6 +6253,11 @@ grpc_tls_config:
 # CLI flag: -server.grpc.keepalive.ping-without-stream-allowed
 [grpc_server_ping_without_stream_allowed: <boolean> | default = true]

+# Enable Channelz for the gRPC server. A web UI will also be exposed on the
+# HTTP server at /channelz.
+# CLI flag: -server.enable-channelz
+[enable_channelz: <boolean> | default = false]
+
 # Output log messages in the given format. Valid formats: [logfmt, json]
 # CLI flag: -log.format
 [log_format: <string> | default = "logfmt"]
@@ -5992,6 +6419,12 @@ sharding_ring:
   # CLI flag: -store-gateway.sharding-ring.keep-instance-in-the-ring-on-shutdown
   [keep_instance_in_the_ring_on_shutdown: <boolean> | default = false]

+  # Set to true to enable ring detailed metrics. These metrics provide detailed
+  # information, such as token count and ownership per tenant. Disabling them
+  # can significantly decrease the number of metrics emitted.
+  # CLI flag: -store-gateway.sharding-ring.detailed-metrics-enabled
+  [detailed_metrics_enabled: <boolean> | default = true]
+
   # Minimum time to wait for ring stability at startup. 0 to disable.
   # CLI flag: -store-gateway.sharding-ring.wait-stability-min-duration
   [wait_stability_min_duration: <duration> | default = 1m]
diff --git a/docs/configuration/v1-guarantees.md b/docs/configuration/v1-guarantees.md
index 700fbf5beb7..8825b19e2a9 100644
--- a/docs/configuration/v1-guarantees.md
+++ b/docs/configuration/v1-guarantees.md
@@ -59,6 +59,7 @@ Currently experimental features are:
 - Distributor:
   - Do not extend writes on unhealthy ingesters (`-distributor.extend-writes=false`)
   - Accept multiple HA pairs in the same request (enabled via `-experimental.distributor.ha-tracker.mixed-ha-samples=true`)
+  - Accept Prometheus remote write 2.0 requests (`-distributor.remote-writev2-enabled=true`)
 - Tenant Deletion in Purger, for blocks storage.
 - Query-frontend: query stats tracking (`-frontend.query-stats-enabled`)
 - Blocks storage bucket index
@@ -116,6 +117,8 @@ Currently experimental features are:
   - `store-gateway.sharding-ring.final-sleep` (duration) CLI flag
   - `alertmanager-sharding-ring.final-sleep` (duration) CLI flag
 - OTLP Receiver
+  - Ingest delta temporality OTLP metrics (`-distributor.otlp.allow-delta-temporality=true`)
+  - Add `__type__` and `__unit__` labels (`-distributor.otlp.enable-type-and-unit-labels`)
 - Persistent tokens in the Ruler Ring:
   - `-ruler.ring.tokens-file-path` (path) CLI flag
 - Native Histograms
diff --git a/docs/guides/parquet-mode.md b/docs/guides/parquet-mode.md
new file mode 100644
index 00000000000..3bec5d11147
--- /dev/null
+++ b/docs/guides/parquet-mode.md
@@ -0,0 +1,305 @@
+---
+title: "Parquet Mode"
+linkTitle: "Parquet Mode"
+weight: 11
+slug: parquet-mode
+---
+
+## Overview
+
+Parquet mode in Cortex provides an experimental feature that converts TSDB blocks to Parquet format for improved query performance and storage efficiency on older data. This feature is particularly beneficial for long-term storage scenarios where data is accessed less frequently but needs to be queried efficiently.
+
+The parquet mode consists of two main components:
+- **Parquet Converter**: Converts TSDB blocks to Parquet format
+- **Parquet Queryable**: Enables querying of Parquet files with fallback to TSDB blocks
+
+## Why Parquet Mode?
+
+Traditional TSDB format and Store Gateway architecture face significant challenges when dealing with long-term data storage on object storage:
+
+### TSDB Format Limitations
+- **Random Read Intensive**: The TSDB index relies heavily on random reads, where each read becomes a separate request to object storage
+- **Overfetching**: To reduce object storage requests, data that are close together are merged into a single request, leading to higher bandwidth usage and overfetching
+- **High Cardinality Bottlenecks**: Index postings can become a major bottleneck for high cardinality data
+
+### Store Gateway Operational Challenges
+- **Resource Intensive**: Requires significant local disk space for index headers and high memory usage
+- **Complex State Management**: Requires complex data sharding when scaling, which often leads to consistency and availability issues, as well as long startup times
+- **Query Inefficiencies**: Single-threaded block processing leads to high latency for large blocks
+
+### Parquet Advantages
+[Apache Parquet](https://parquet.apache.org/) addresses these challenges through:
+- **Columnar Storage**: Data organized by columns reduces object storage requests, as only specific columns need to be fetched
+- **Data Locality**: Series that are likely to be queried together are co-located to minimize I/O operations
+- **Stateless Design**: Rich file metadata eliminates the need for local state like index headers
+- **Advanced Compression**: Reduces storage costs and improves query performance
+- **Parallel Processing**: Row groups enable parallel processing for better scalability
+
+For more details on the design rationale, see the [Parquet Storage Proposal](../proposals/parquet-storage.md).
+
+## Architecture
+
+The parquet system works by:
+
+1. **Block Conversion**: The parquet converter runs periodically to identify TSDB blocks that should be converted to Parquet format
+2. **Storage**: Parquet files are stored alongside TSDB blocks in object storage
+3. **Querying**: The parquet queryable attempts to query Parquet files first, falling back to TSDB blocks when necessary
+4. **Marker System**: Conversion status is tracked using marker files to avoid duplicate conversions
+
+## Configuration
+
+### Enabling Parquet Converter
+
+To enable the parquet converter service, add it to your target list:
+
+```yaml
+target: parquet-converter
+```
+
+Or include it in a multi-target deployment:
+
+```yaml
+target: all,parquet-converter
+```
+
+### Parquet Converter Configuration
+
+Configure the parquet converter in your Cortex configuration:
+
+```yaml
+parquet_converter:
+  # Data directory for caching blocks during conversion
+  data_dir: "./data"
+
+  # Frequency of conversion job execution
+  conversion_interval: 1m
+
+  # Maximum rows per parquet row group
+  max_rows_per_row_group: 1000000
+
+  # Number of concurrent meta file sync operations
+  meta_sync_concurrency: 20
+
+  # Enable file buffering to reduce memory usage
+  file_buffer_enabled: true
+
+  # Ring configuration for distributed conversion
+  ring:
+    kvstore:
+      store: consul
+      consul:
+        host: localhost:8500
+    heartbeat_period: 5s
+    heartbeat_timeout: 1m
+    instance_addr: 127.0.0.1
+    instance_port: 9095
+```
+
+### Per-Tenant Parquet Settings
+
+Enable parquet conversion per tenant using limits:
+
+```yaml
+limits:
+  # Enable parquet converter for all tenants
+  parquet_converter_enabled: true
+
+  # Shard size for shuffle sharding (0 = disabled)
+  parquet_converter_tenant_shard_size: 0.8
+```
+
+You can also configure per-tenant settings using runtime configuration:
+
+```yaml
+overrides:
+  tenant-1:
+    parquet_converter_enabled: true
+    parquet_converter_tenant_shard_size: 2
+  tenant-2:
+    parquet_converter_enabled: false
+```
+
+### Enabling Parquet Queryable
+
+To enable querying of Parquet files, configure the querier:
+
+```yaml
+querier:
+  # Enable parquet queryable with fallback (experimental)
+  enable_parquet_queryable: true
+
+  # Cache size for parquet shards
+  parquet_queryable_shard_cache_size: 512
+
+  # Default block store: "tsdb" or "parquet"
+  parquet_queryable_default_block_store: "parquet"
+
+  # Disable fallback to TSDB blocks when parquet files are not available
+  parquet_queryable_fallback_disabled: false
+```
+
+### Query Limits for Parquet
+
+Configure query limits specific to parquet operations:
+
+```yaml
+limits:
+  # Maximum number of rows that can be scanned per query
+  parquet_max_fetched_row_count: 1000000
+
+  # Maximum chunk bytes per query
+  parquet_max_fetched_chunk_bytes: 100_000_000 # 100MB
+
+  # Maximum data bytes per query
+  parquet_max_fetched_data_bytes: 1_000_000_000 # 1GB
+```
+
+### Cache Configuration
+
+Parquet mode supports dedicated caching for both chunks and labels to improve query performance. Configure caching in the blocks storage section:
+
+```yaml
+blocks_storage:
+  bucket_store:
+    # Chunks cache configuration for parquet data
+    chunks_cache:
+      backend: "memcached" # Options: "", "inmemory", "memcached", "redis"
+      subrange_size: 16000 # Size of each subrange for better caching
+      max_get_range_requests: 3 # Max sub-GetRange requests per GetRange call
+      attributes_ttl: 168h # TTL for caching object attributes
+      subrange_ttl: 24h # TTL for caching individual chunk subranges
+
+      # Memcached configuration (if using memcached backend)
+      memcached:
+        addresses: "memcached:11211"
+        timeout: 500ms
+        max_idle_connections: 16
+        max_async_concurrency: 10
+        max_async_buffer_size: 10000
+        max_get_multi_concurrency: 100
+        max_get_multi_batch_size: 0
+
+    # Parquet labels cache configuration (experimental)
+    parquet_labels_cache:
+      backend: "memcached" # Options: "", "inmemory", "memcached", "redis"
+      subrange_size: 16000 # Size of each subrange for better caching
+      max_get_range_requests: 3 # Max sub-GetRange requests per GetRange call
+      attributes_ttl: 168h # TTL for caching object attributes
+      subrange_ttl: 24h # TTL for caching individual label subranges
+
+      # Memcached configuration (if using memcached backend)
+      memcached:
+        addresses: "memcached:11211"
+        timeout: 500ms
+        max_idle_connections: 16
+```
+
+#### Cache Backend Options
+
+- **Empty string ("")**: Disables caching
+- **inmemory**: Uses in-memory cache (suitable for single-instance deployments)
+- **memcached**: Uses Memcached for distributed caching (recommended for production)
+- **redis**: Uses Redis for distributed caching
+- **Multi-level**: Comma-separated list for multi-tier caching (e.g., "inmemory,memcached")
+
+#### Cache Performance Tuning
+
+- **subrange_size**: Smaller values increase cache hit rates but create more cache entries
+- **max_get_range_requests**: Higher values reduce object storage requests but increase memory usage
+- **TTL values**: Balance between cache freshness and hit rates based on your data patterns
+- **Multi-level caching**: Use "inmemory,memcached" for L1/L2 cache hierarchy
+
+## Block Conversion Logic
+
+The parquet converter determines which blocks to convert based on:
+
+1. **Time Range**: Only blocks with time ranges larger than the base TSDB block duration (typically 2h) are converted
+2. **Conversion Status**: Blocks are only converted once, tracked via marker files
+3. **Tenant Settings**: Conversion must be enabled for the specific tenant
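+
+A minimal sketch of this eligibility check (illustrative only; `BlockMeta`, `shouldConvert`, and their arguments are hypothetical names, not the actual converter API):
+
+```go
+package converter
+
+import "time"
+
+// BlockMeta carries the block time range used by the eligibility check.
+// Hypothetical type for illustration, not the actual Cortex block metadata.
+type BlockMeta struct {
+	MinTime, MaxTime time.Time
+}
+
+// Base TSDB block duration referenced above (typically 2h).
+const tsdbBlockDuration = 2 * time.Hour
+
+// shouldConvert mirrors the three conditions above: a large enough time
+// range, no existing conversion marker, and conversion enabled for the tenant.
+func shouldConvert(m BlockMeta, alreadyConverted, tenantEnabled bool) bool {
+	if m.MaxTime.Sub(m.MinTime) <= tsdbBlockDuration {
+		return false // only blocks spanning more than the base duration
+	}
+	if alreadyConverted {
+		return false // a conversion marker file already exists
+	}
+	return tenantEnabled
+}
+```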
+
+The conversion process:
+- Downloads TSDB blocks from object storage
+- Converts time series data to Parquet format
+- Uploads Parquet files (chunks and labels) to object storage
+- Creates conversion marker files to track completion
+
+## Querying Behavior
+
+When parquet queryable is enabled:
+
+1. **Block Discovery**: The bucket index is used to discover available blocks
+   * The bucket index now contains metadata indicating whether parquet files are available for querying
+1. **Query Execution**: Queries prioritize parquet files when available, falling back to TSDB blocks when parquet conversion is incomplete
+1. **Hybrid Queries**: Supports querying both parquet and TSDB blocks within the same query operation
+1. **Fallback Control**: When `parquet_queryable_fallback_disabled` is set to `true`, queries will fail with a consistency check error if any required blocks are not available as parquet files, ensuring strict parquet-only querying
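+
+A sketch of this per-block decision (illustrative only; `selectStore` and its arguments are hypothetical names, not the querier's actual API):
+
+```go
+package querier
+
+import "errors"
+
+// selectStore decides, for a single block, which store backs the query,
+// following the priority and fallback rules described above.
+func selectStore(blockHasParquet, fallbackDisabled bool) (string, error) {
+	if blockHasParquet {
+		return "parquet", nil // parquet files are preferred when available
+	}
+	if fallbackDisabled {
+		// strict mode: surface a consistency check error instead of
+		// silently reading the block through the store gateway
+		return "", errors.New("consistency check failed: block not available as parquet")
+	}
+	return "tsdb", nil // fall back to TSDB blocks via the store gateway
+}
+```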
+
+## Monitoring
+
+### Parquet Converter Metrics
+
+Monitor parquet converter operations:
+
+```promql
+# Blocks converted
+cortex_parquet_converter_blocks_converted_total
+
+# Conversion failures
+cortex_parquet_converter_block_convert_failures_total
+
+# Delay, in minutes, between a TSDB block being uploaded to object store and
+# its conversion to a Parquet block
+cortex_parquet_converter_convert_block_delay_minutes
+```
+
+### Parquet Queryable Metrics
+
+Monitor parquet query performance:
+
+```promql
+# Blocks queried by type
+cortex_parquet_queryable_blocks_queried_total
+
+# Query operations
+cortex_parquet_queryable_operations_total
+
+# Cache metrics
+cortex_parquet_queryable_cache_hits_total
+cortex_parquet_queryable_cache_misses_total
+```
+
+## Best Practices
+
+### Deployment Recommendations
+
+1. **Dedicated Converters**: Run parquet converters on dedicated instances for better resource isolation
+2. **Ring Configuration**: Use a distributed ring for high availability and load distribution
+3. **Storage Considerations**: Ensure sufficient disk space in `data_dir` for block processing
+4. **Network Bandwidth**: Consider network bandwidth for downloading/uploading blocks
+
+### Performance Tuning
+
+1. **Row Group Size**: Adjust `max_rows_per_row_group` based on your query patterns
+2. **Cache Size**: Tune `parquet_queryable_shard_cache_size` based on available memory
+3. **Concurrency**: Adjust `meta_sync_concurrency` based on object storage performance
+
+### Fallback Configuration
+
+1. **Gradual Migration**: Keep `parquet_queryable_fallback_disabled: false` (default) during initial deployment to allow queries to succeed even when parquet conversion is incomplete
+2. **Strict Parquet Mode**: Set `parquet_queryable_fallback_disabled: true` only after ensuring all required blocks have been converted to parquet format
+3. **Monitoring**: Monitor conversion progress and query failures before enabling strict parquet mode
+
+## Limitations
+
+1. **Experimental Feature**: Parquet mode is experimental and may have stability issues
+2. **Storage Overhead**: Parquet files are stored in addition to TSDB blocks
+3. **Conversion Latency**: There's a delay between block creation and parquet availability
+4. **Shuffle Sharding Requirement**: Parquet mode only supports shuffle sharding as the sharding strategy
+5. **Bucket Index Dependency**: The bucket index must be enabled and properly configured, as it provides essential metadata for parquet file discovery and query routing
+
+## Migration Considerations
+
+When enabling parquet mode:
+
+1. **Gradual Rollout**: Enable for specific tenants first
+2. **Monitor Resources**: Watch CPU, memory, and storage usage
+3. **Backup Strategy**: Ensure TSDB blocks remain available as fallback
+4. **Testing**: Thoroughly test query patterns before production deployment
diff --git a/docs/proposals/partition-ring-multi-az-replication.md b/docs/proposals/partition-ring-multi-az-replication.md
new file mode 100644
index 00000000000..34afd856ecf
--- /dev/null
+++ b/docs/proposals/partition-ring-multi-az-replication.md
@@ -0,0 +1,209 @@
+---
+title: "Partition Ring with Multi-AZ Replication"
+linkTitle: "Partition Ring Multi-AZ Replication"
+weight: 1
+slug: partition-ring-multi-az-replication
+---
+
+- Author: [Daniel Blando](https://github.com/danielblando)
+- Date: July 2025
+- Status: Proposed
+
+## Background
+
+Distributors use a token-based ring to shard data across ingesters. Each ingester owns random tokens (32-bit numbers) in a hash ring. For each incoming series, the distributor:
+
+1. Hashes the series labels to get a hash value
+2. Finds the primary ingester (smallest token > hash value)
+3. When replication is enabled, selects additional replicas by moving clockwise around the ring
+4. Ensures replicas are distributed across different availability zones
+
+The issue arises when replication is enabled: each series in a request is hashed independently, causing each series to route to a different group of ingesters.
+
+```mermaid
+graph TD
+    A[Write Request] --> B[Distributor]
+    B --> C[Hash Series 1] --> D[Ingesters: 5,7,9]
+    B --> E[Hash Series 2] --> F[Ingesters: 5,3,10]
+    B --> G[Hash Series 3] --> H[Ingesters: 7,27,28]
+    B --> I[...] --> J[Different ingester sets<br>for each series]
+```
+
+## Problem
+
+### Limited AZ Failure Tolerance with Replication Factor
+
+While the token ring effectively distributes load across the ingester fleet, the independent hashing and routing of each series creates an amplification effect where a single ingester failure can impact a large number of write requests.
+
+Consider a ring with 30 ingesters, where each series gets distributed to three different ingesters:
+
+```
+Sample 1: {name="http_request_latency",api="/push", status="2xx"}
+  → Ingesters: ing-5, ing-7, ing-9
+Sample 2: {name="http_request_latency",api="/push", status="4xx"}
+  → Ingesters: ing-5, ing-3, ing-10
+Sample 3: {name="http_request_latency",api="/push", status="2xx"}
+  → Ingesters: ing-7, ing-27, ing-28
+...
+```
+
+If ingesters `ing-15` and `ing-18` (in different AZs) are offline, any request containing a series that needs to write to both of these ingesters will fail completely:
+
+```
+Sample 15: {name="http_request_latency",api="/push", status="5xx"}
+  → Ingesters: ing-10, ing-15, ing-18 // Request fails
+```
+
+As requests grow their batch size, the probability of request failure becomes critical in replicated deployments. Given two failed ingesters in different AZs, each individual series has a small chance of requiring both failed ingesters. However, as request batch sizes increase, the probability that at least one series in the batch will hash to both failed ingesters approaches certainty.
+
+**Note**: This problem specifically affects Cortex deployments that use replication. Deployments with a replication factor of 1 are not impacted by this availability amplification issue.
+
+## Proposed Solution
+
+### Partition Ring Architecture
+
+A new Partition Ring is proposed where the ring is divided into partitions, with each partition containing a set of tokens and a group of ingesters. Ingesters are allocated to partitions based on their order in the zonal StatefulSet, ensuring that scaling operations align with the StatefulSet's LIFO behavior. Each partition contains a number of ingesters equal to the replication factor, with exactly one ingester per availability zone.
+
+This approach provides **reduced failure probability**: the chance of two down ingesters landing in the same partition is significantly lower than the chance of random ingester failures affecting multiple series. It also enables **deterministic replication**, where data sent to `ing-az1-1` always replicates to `ing-az2-1` and `ing-az3-1`, making the system behavior more predictable and easier to troubleshoot.
+
+```mermaid
+graph TD
+    subgraph "Partition Ring"
+        subgraph "Partition 3"
+            P1A[ing-az1-3]
+            P1B[ing-az2-3]
+            P1C[ing-az3-3]
+        end
+        subgraph "Partition 2"
+            P2A[ing-az1-2]
+            P2B[ing-az2-2]
+            P2C[ing-az3-2]
+        end
+        subgraph "Partition 1"
+            P3A[ing-az1-1]
+            P3B[ing-az2-1]
+            P3C[ing-az3-1]
+        end
+    end
+
+    T1[Tokens 34] --> P1A
+    T2[Tokens 56] --> P2A
+    T3[Tokens 12] --> P3A
+```
+
+Within each partition, ingesters maintain identical data, acting as true replicas of each other. Distributors maintain similar hashing logic but select a partition instead of individual ingesters. Data is then forwarded to all ingesters within the selected partition, making the replication pattern deterministic.
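+
+The following is a minimal, self-contained sketch of this lookup (the `Partition` and `PartitionRing` types are illustrative only, not the proposed implementation):
+
+```go
+package main
+
+import (
+	"fmt"
+	"hash/fnv"
+	"sort"
+)
+
+// Partition groups one ingester per AZ behind a shared set of ring tokens.
+type Partition struct {
+	Tokens    []uint32
+	Ingesters []string
+}
+
+// PartitionRing indexes every partition token for clockwise lookup.
+type PartitionRing struct {
+	tokens  []uint32
+	byToken map[uint32]*Partition
+}
+
+func NewPartitionRing(parts []*Partition) *PartitionRing {
+	r := &PartitionRing{byToken: map[uint32]*Partition{}}
+	for _, p := range parts {
+		for _, t := range p.Tokens {
+			r.tokens = append(r.tokens, t)
+			r.byToken[t] = p
+		}
+	}
+	sort.Slice(r.tokens, func(i, j int) bool { return r.tokens[i] < r.tokens[j] })
+	return r
+}
+
+// Lookup returns all ingesters of the partition owning the smallest token
+// greater than the series hash (wrapping around the ring), so every replica
+// of a series comes from one fixed group instead of a random ingester set.
+func (r *PartitionRing) Lookup(seriesHash uint32) []string {
+	i := sort.Search(len(r.tokens), func(i int) bool { return r.tokens[i] > seriesHash })
+	if i == len(r.tokens) {
+		i = 0
+	}
+	return r.byToken[r.tokens[i]].Ingesters
+}
+
+func main() {
+	ring := NewPartitionRing([]*Partition{
+		{Tokens: []uint32{12}, Ingesters: []string{"ing-az1-1", "ing-az2-1", "ing-az3-1"}},
+		{Tokens: []uint32{34}, Ingesters: []string{"ing-az1-3", "ing-az2-3", "ing-az3-3"}},
+		{Tokens: []uint32{56}, Ingesters: []string{"ing-az1-2", "ing-az2-2", "ing-az3-2"}},
+	})
+	h := fnv.New32a()
+	h.Write([]byte(`{name="http_request_latency",api="/push",status="2xx"}`))
+	fmt.Println(ring.Lookup(h.Sum32())) // one partition's ingesters, one per AZ
+}
+```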
+
+### Protocol Buffer Definitions
+
+```protobuf
+message PartitionRingDesc {
+  map<int32, PartitionDesc> partitions = 1;
+}
+
+message PartitionDesc {
+  PartitionState state = 1;
+  repeated uint32 tokens = 2;
+  map<string, InstanceDesc> instances = 3;
+  int64 registered_timestamp = 4;
+}
+
+// Unchanged from current implementation
+message InstanceDesc {
+  string addr = 1;
+  int64 timestamp = 2;
+  InstanceState state = 3;
+  string zone = 7;
+  int64 registered_timestamp = 8;
+}
+```
+
+### Partition States
+
+Partitions maintain a simplified state model that provides **clear ownership**, where each series belongs to exactly one partition, but requires **additional state management** for partition states and lifecycle:
+
+```go
+type PartitionState int
+
+const (
+    NON_READY PartitionState = iota // Insufficient ingesters
+    ACTIVE                          // Fully operational
+    READONLY                        // Scale-down in progress
+)
+```
+
+State transitions:
+
+```mermaid
+stateDiagram-v2
+    [*] --> NON_READY
+    NON_READY --> ACTIVE : Required ingesters joined<br>across all AZs
+    ACTIVE --> READONLY : Scale-down initiated
+    ACTIVE --> NON_READY : Ingester removed
+    READONLY --> NON_READY : Ingesters removed
+    NON_READY --> [*] : Partition deleted
+```
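+
+The allowed transitions can be captured in a small lookup table (a sketch building on the enum above; `validTransitions` and `canTransition` are illustrative names, not part of the proposal):
+
+```go
+package ring
+
+// PartitionState mirrors the enum defined above.
+type PartitionState int
+
+const (
+	NON_READY PartitionState = iota
+	ACTIVE
+	READONLY
+)
+
+// validTransitions encodes the state diagram above; illustrative only.
+var validTransitions = map[PartitionState][]PartitionState{
+	NON_READY: {ACTIVE},              // required ingesters joined across all AZs
+	ACTIVE:    {READONLY, NON_READY}, // scale-down initiated, or an ingester removed
+	READONLY:  {NON_READY},           // ingesters removed during scale-down
+}
+
+// canTransition reports whether the state diagram permits a move.
+func canTransition(from, to PartitionState) bool {
+	for _, next := range validTransitions[from] {
+		if next == to {
+			return true
+		}
+	}
+	return false
+}
+```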
+
+### Partition Lifecycle Management
+
+#### Creating Partitions
+
+When a new ingester joins the ring:
+1. Check if a suitable partition exists with available slots
+2. If no partition exists, create a new partition in `NON_READY` state
+3. Add the partition's tokens to the ring
+4. Add the ingester to the partition
+5. Wait for the required number of ingesters across all AZs (one per AZ)
+6. Once all AZs are represented, transition the partition to `ACTIVE`
+
+#### Removing Partitions
+
+The scale-down process follows these steps:
+1. **Mark READONLY**: Partition stops accepting new writes but continues serving reads
+2. **Data Transfer**: Wait for all ingesters in the partition to transfer data and become empty
+3. **Coordinated Removal**: Remove one ingester from each AZ simultaneously
+4. **State Transition**: Partition automatically transitions to `NON_READY` (insufficient replicas)
+5. **Cleanup**: Remove the remaining ingesters and delete the partition from the ring
+
+If not using READONLY mode, removing an ingester will mark the partition as NON_READY. When all ingesters are removed, the last one deletes the partition if the `unregister_on_shutdown` configuration is true.
+
+### Multi-Ring Migration Strategy
+
+To address the migration challenge for production clusters currently running token-based rings, this proposal also introduces a multi-ring infrastructure that allows gradual traffic shifting from token-based to partition-based rings:
+
+```mermaid
+sequenceDiagram
+    participant C as Client
+    participant D as Distributor
+    participant MR as Multi-Ring Router
+    participant TR as Token Ring
+    participant PR as Partition Ring
+
+    C->>D: Write Request (1000 series)
+    D->>MR: Route request
+    MR->>MR: Check percentage config<br>(e.g., 80% token, 20% partition)
+    MR->>TR: Route 800 series to Token Ring
+    MR->>PR: Route 200 series to Partition Ring
+
+    Note over TR,PR: Both rings process their portion
+    TR->>D: Response for 800 series
+    PR->>D: Response for 200 series
+    D->>C: Combined response
+```
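+
+The per-series split can be sketched as a pure function of the series hash (illustrative; `partitionPercent` is a hypothetical knob, not an actual Cortex flag). Keeping the split deterministic per series matters, because reads must later find the data in the same ring that ingested it:
+
+```go
+package distributor
+
+// routeSeries sends a fixed slice of the hash space to the partition ring.
+// With partitionPercent = 20, roughly 200 of the 1000 series in the example
+// request above would be routed to the partition ring.
+func routeSeries(seriesHash, partitionPercent uint32) string {
+	if seriesHash%100 < partitionPercent {
+		return "partition-ring"
+	}
+	return "token-ring"
+}
+```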
+
+Migration phases for production clusters:
+1. **Phase 1**: Deploy partition ring alongside existing token ring (0% traffic)
+2. **Phase 2**: Route 10% traffic to partition ring
+3. **Phase 3**: Gradually increase to 50% traffic
+4. **Phase 4**: Route 90% traffic to partition ring
+5. **Phase 5**: Complete migration (100% partition ring)
+
+This multi-ring approach solves the migration problem for existing production deployments that cannot afford downtime during the transition from token-based to partition-based rings. It provides **zero downtime migration** with **rollback capability** and **incremental validation** at each step. However, it requires **dual ring participation**, where ingesters must participate in both rings during migration, **increased memory usage**, and **migration coordination**, requiring careful percentage management and monitoring.
+
+#### Read Path Considerations
+
+During migration, the read path (queriers and rulers) must have visibility into both rings to ensure all functionality works correctly:
+
+- **Queriers** must check both token and partition rings to locate series data, as data may be distributed across both ring types during migration
+- **Rulers** must evaluate rules against data from both rings to ensure complete rule evaluation
+- **Ring-aware components** (like shuffle sharding) must operate correctly across both ring types
+- **Metadata operations** (like label queries) must aggregate results from both rings
+
+All existing Cortex functionality must continue to work seamlessly during the migration period, requiring components to transparently handle the dual-ring architecture.
diff --git a/docs/roadmap.md b/docs/roadmap.md
index 45815e35f77..d4cfd051f29 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -37,3 +37,15 @@ For more information tracking this, please see [issue #6075](https://github.com/
 This makes queries over long periods more efficient. It can reduce storage space slightly if the full-detail data is discarded.

 For more information tracking this, please see [issue #4322](https://github.com/cortexproject/cortex/issues/4322).
+
+## Changes to this Roadmap
+
+Changes to this roadmap will take the form of pull requests containing the suggested change. All such PRs must be posted to the [#cortex](https://cloud-native.slack.com/archives/CCYDASBLP) Slack channel in
+the [CNCF slack](https://communityinviter.com/apps/cloud-native/cncf) so that they're made visible to all other developers and maintainers.
+
+Significant changes to this document should be discussed in the [monthly meeting](https://github.com/cortexproject/cortex?tab=readme-ov-file#engage-with-our-community)
+before merging, to raise awareness of the change and to provide an opportunity for discussion. A significant change is one which meaningfully alters
+one of the roadmap items, adds a new item, or removes an item.
+
+Insignificant changes include updating links to issues, spelling fixes, or minor rewordings that don't significantly change meaning. These insignificant changes
+don't need to be discussed in a meeting but should still be shared in Slack.
diff --git a/go.mod b/go.mod index ea2dbcc0670..fc488024b01 100644 --- a/go.mod +++ b/go.mod @@ -26,14 +26,14 @@ require ( github.com/gorilla/mux v1.8.1 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/hashicorp/consul/api v1.31.2 + github.com/hashicorp/consul/api v1.32.0 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-sockaddr v1.0.7 github.com/hashicorp/memberlist v0.5.1 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.18.0 github.com/lib/pq v1.10.9 - github.com/minio/minio-go/v7 v7.0.80 + github.com/minio/minio-go/v7 v7.0.93 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing-contrib/go-grpc v0.1.2 @@ -41,18 +41,18 @@ require ( github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus/alertmanager v0.28.1 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.0-rc.1 github.com/prometheus/client_model v0.6.2 - github.com/prometheus/common v0.63.0 + github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 // Prometheus maps version 2.x.y to tags v0.x.y. - github.com/prometheus/prometheus v0.303.1 + github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v1.0.0 github.com/spf13/afero v1.11.0 github.com/stretchr/testify v1.10.0 - github.com/thanos-io/objstore v0.0.0-20250317105316-a0136a6f898d - github.com/thanos-io/promql-engine v0.0.0-20250611170940-015ebeb7b5ff - github.com/thanos-io/thanos v0.39.2 + github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3 + github.com/thanos-io/promql-engine v0.0.0-20250726034445-91e6e32a36a7 + github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 go.etcd.io/etcd/api/v3 v3.5.17 @@ -79,30 +79,31 @@ require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/google/go-cmp v0.7.0 + github.com/google/uuid v1.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/oklog/ulid/v2 v2.1.1 github.com/parquet-go/parquet-go v0.25.1 - github.com/prometheus-community/parquet-common v0.0.0-20250716185251-4cfa597e936c + github.com/prometheus-community/parquet-common v0.0.0-20250807102632-2aeeceacebf0 github.com/prometheus/procfs v0.16.1 github.com/sercand/kuberesolver/v5 v5.1.1 github.com/tjhop/slog-gokit v0.1.4 - go.opentelemetry.io/collector/pdata v1.34.0 + go.opentelemetry.io/collector/pdata v1.35.0 go.uber.org/automaxprocs v1.6.0 google.golang.org/protobuf v1.36.6 ) require ( cel.dev/expr v0.23.1 // indirect - cloud.google.com/go v0.118.1 // indirect - cloud.google.com/go/auth v0.15.1-0.20250317171031-671eed979bfd // indirect + cloud.google.com/go v0.120.0 // indirect + cloud.google.com/go/auth v0.16.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.7.0 // indirect - cloud.google.com/go/iam v1.3.1 // indirect - cloud.google.com/go/monitoring v1.24.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect cloud.google.com/go/storage v1.50.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity 
v1.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect @@ -112,19 +113,19 @@ require ( github.com/alecthomas/kingpin/v2 v2.4.0 // indirect github.com/andybalholm/brotli v1.1.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect + github.com/aws/aws-sdk-go-v2 v1.37.0 // indirect github.com/aws/aws-sdk-go-v2/config v1.29.15 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.68 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 // indirect - github.com/aws/smithy-go v1.22.3 // indirect + github.com/aws/smithy-go v1.22.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/caio/go-tdigest v3.1.0+incompatible // indirect @@ -147,8 +148,8 @@ require ( github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-chi/chi/v5 v5.2.2 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -170,9 +171,8 @@ require ( github.com/google/btree v1.1.3 // indirect github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect - github.com/googleapis/gax-go/v2 v2.14.1 // indirect + github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -204,6 +204,7 @@ require ( github.com/mdlayher/vsock v1.2.1 // indirect github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect github.com/miekg/dns v1.1.66 // indirect + github.com/minio/crc64nvme v1.0.1 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -214,18 +215,21 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mwitkow/go-conntrack 
v0.0.0-20190716064945-2f068394615f // indirect github.com/ncw/swift v1.0.53 // indirect - github.com/oklog/run v1.1.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0 // indirect + github.com/oklog/run v1.2.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus-community/prom-label-proxy v0.11.1 // indirect github.com/prometheus/exporter-toolkit v0.14.0 // indirect - github.com/prometheus/sigv4 v0.1.2 // indirect + github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a // indirect + github.com/prometheus/sigv4 v0.2.0 // indirect github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect + github.com/rantav/go-grpc-channelz v0.0.4 // indirect github.com/redis/rueidis v1.0.61 // indirect github.com/rs/cors v1.11.1 // indirect github.com/rs/xid v1.6.0 // indirect @@ -235,26 +239,25 @@ require ( github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/soheilhy/cmux v0.1.5 // indirect - github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/tinylib/msgp v1.3.0 // indirect github.com/trivago/tgo v1.0.7 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/vimeo/galaxycache v1.3.1 // indirect github.com/weaveworks/promrus v1.2.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect - github.com/zeebo/errs v1.4.0 // indirect go.mongodb.org/mongo-driver v1.17.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/collector/component v1.34.0 // indirect - go.opentelemetry.io/collector/confmap v1.34.0 // indirect - go.opentelemetry.io/collector/confmap/xconfmap v0.128.0 // indirect - go.opentelemetry.io/collector/consumer v1.34.0 // indirect - go.opentelemetry.io/collector/featuregate v1.34.0 // indirect - go.opentelemetry.io/collector/internal/telemetry v0.128.0 // indirect - go.opentelemetry.io/collector/pipeline v0.128.0 // indirect - go.opentelemetry.io/collector/processor v1.34.0 // indirect + go.opentelemetry.io/collector/component v1.35.0 // indirect + go.opentelemetry.io/collector/confmap v1.35.0 // indirect + go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect + go.opentelemetry.io/collector/consumer v1.35.0 // indirect + go.opentelemetry.io/collector/featuregate v1.35.0 // indirect + go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect + go.opentelemetry.io/collector/pipeline v0.129.0 // indirect + go.opentelemetry.io/collector/processor v1.35.0 // indirect 
go.opentelemetry.io/collector/semconv v0.128.0 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect @@ -282,8 +285,8 @@ require ( golang.org/x/text v0.26.0 // indirect golang.org/x/tools v0.34.0 // indirect gonum.org/v1/gonum v0.16.0 // indirect - google.golang.org/api v0.228.0 // indirect - google.golang.org/genproto v0.0.0-20250204164813-702378808489 // indirect + google.golang.org/api v0.239.0 // indirect + google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect gopkg.in/telebot.v3 v3.3.8 // indirect @@ -295,7 +298,7 @@ require ( ) // Using cortex fork of weaveworks/common -replace github.com/weaveworks/common => github.com/cortexproject/weaveworks-common v0.0.0-20241129212437-96019edf21f1 +replace github.com/weaveworks/common => github.com/cortexproject/weaveworks-common v0.0.0-20250806170222-876764c695f2 // Override since git.apache.org is down. The docs say to fetch from github. replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 @@ -321,7 +324,8 @@ replace github.com/google/gnostic => github.com/googleapis/gnostic v0.6.9 // https://github.com/thanos-io/thanos/blob/fdeea3917591fc363a329cbe23af37c6fff0b5f0/go.mod#L265 replace gopkg.in/alecthomas/kingpin.v2 => github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497 -replace github.com/thanos-io/objstore => github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97 +// Wait for fix for https://github.com/grpc/grpc-go/pull/8504. +replace google.golang.org/grpc => google.golang.org/grpc v1.71.2 -// v3.3.1 with https://github.com/prometheus/prometheus/pull/16252. (same as thanos) -replace github.com/prometheus/prometheus => github.com/thanos-io/thanos-prometheus v0.0.0-20250610133519-082594458a88 +// See https://github.com/envoyproxy/go-control-plane/issues/1083 as this version introduces checksum mismatch. 
+exclude github.com/envoyproxy/go-control-plane/envoy v1.32.3 diff --git a/go.sum b/go.sum index 6985e6f181f..8e2c87cd829 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,9 @@ +cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cel.dev/expr v0.23.1 h1:K4KOtPCJQjVggkARsjG9RWXP6O4R73aHeJMa/dmCQQg= cel.dev/expr v0.23.1/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -30,56 +33,757 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.118.1 h1:b8RATMcrK9A4BH0rj8yQupPXp+aP+cJ0l6H7V9osV1E= -cloud.google.com/go v0.118.1/go.mod h1:CFO4UPEPi8oV21xoezZCrd3d81K4fFkDTEJu4R8K+9M= -cloud.google.com/go/auth v0.15.1-0.20250317171031-671eed979bfd h1:0y6Ls7Yg2PYIjBiiY4COpxqhv+hRtoDQfY/u/eXNZuw= -cloud.google.com/go/auth v0.15.1-0.20250317171031-671eed979bfd/go.mod h1:uJW0Bahg/VuSfsCxYjfpcKMblBoti/JuY8OQfnmW4Vk= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= +cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager 
v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod 
h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= +cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= 
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing 
v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod 
h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis 
v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E= +cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataplex v1.8.1/go.mod 
h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod 
h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= 
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= -cloud.google.com/go/iam v1.3.1 h1:KFf8SaT71yYq+sQtRISn90Gyhyf4X8RGgeAVC8XGf3E= -cloud.google.com/go/iam v1.3.1/go.mod h1:3wMtuyT4NcbnYNPLMBzYRFiEfjKfJlLVLrisE7bwm34= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gkemulticloud 
v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod 
h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM= +cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= -cloud.google.com/go/longrunning v0.6.4 h1:3tyw9rO3E2XVXzSApn1gyEEnH2K9SynNQjMlBi3uHLg= -cloud.google.com/go/longrunning v0.6.4/go.mod h1:ttZpLCe6e7EXvn9OxpBRx7kZEB0efv8yBO6YnVMfhJs= -cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM= -cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= +cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod 
h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networkmanagement 
v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= 
+cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= 
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= 
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= 
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod 
h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= -cloud.google.com/go/trace v1.11.4 h1:LKlhVyX6I4+heP31sWvERSKZZ9cPPEZumt7b4SKVK18= -cloud.google.com/go/trace v1.11.4/go.mod h1:lCSHzSPZC1TPwto7zhaRt3KtGYsXFyaErPQ18AUUeUE= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate 
v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= 
+cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 h1:j8BorDEigD8UFOSZQiSqAMOOleyQOOQPnUAwV+Ls1gA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4= @@ -104,6 +808,7 @@ github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ=
@@ -114,13 +819,17 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0/go.mod h1:gdIm9TxRk5soClCwuB0FtdXsbqtw0aqPwBEurK9tPkw=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497/go.mod h1:b6br6/pDFSfMkBgC96TbpOji05q5pa+v5rIlS0Y6XtI=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
@@ -137,9 +846,14 @@ github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
@@ -151,32 +865,36 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
-github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
-github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
+github.com/aws/aws-sdk-go-v2 v1.37.0 h1:YtCOESR/pN4j5oA7cVHSfOwIcuh/KwHC4DOSXFbv5F0=
+github.com/aws/aws-sdk-go-v2 v1.37.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
github.com/aws/aws-sdk-go-v2/config v1.29.15 h1:I5XjesVMpDZXZEZonVfjI12VNMrYa38LtLnw4NtY5Ss=
github.com/aws/aws-sdk-go-v2/config v1.29.15/go.mod h1:tNIp4JIPonlsgaO5hxO372a6gjhN63aSWl2GVl5QoBQ=
github.com/aws/aws-sdk-go-v2/credentials v1.17.68 h1:cFb9yjI02/sWHBSYXAtkamjzCuRymvmeFmt0TC0MbYY=
github.com/aws/aws-sdk-go-v2/credentials v1.17.68/go.mod h1:H6E+jBzyqUu8u0vGaU6POkK3P0NylYEeRZ6ynBpMqIk=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0 h1:H2iZoqW/v2Jnrh1FnU725Bq6KJ0k2uP63yH+DcY+HUI=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.0/go.mod h1:L0FqLbwMXHvNC/7crWV1iIxUlOKYZUE8KuTIA+TozAI=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0 h1:EDped/rNzAhFPhVY0sDGbtD16OKqksfA8OjF/kLEgw8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.0/go.mod h1:uUI335jvzpZRPpjYx6ODc/wg1qH+NnoSTK/FwVeK0C0=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0 h1:XHE2G+yaDQql32FZt19QmQt4WuisqQJIkMUSCxeCUl8=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.237.0/go.mod h1:t11/j/nH9i6bbsPH9xc04BJOsV2nVPUqrB67/TLDsyM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 h1:eRhU3Sh8dGbaniI6B+I48XJMrTPRkK4DKo+vqIxziOU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0/go.mod h1:paNLV18DZ6FnWE/bd06RIKPDIFpjuvCkGKWTG/GDBeM=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0 h1:QiiCqpKy0prxq+92uWfESzcb7/8Y9JAamcMOzVYLEoM=
+github.com/aws/aws-sdk-go-v2/service/lightsail v1.44.0/go.mod h1:ESppxYqXQCpCY+KWl3BdkQjmsQX6zxKP39SnDtRDoU0=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 h1:oIaQ1e17CSKaWmUTu62MtraRWVIosn/iONMuZt0gbqc=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.20/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
-github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
-github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
+github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/baidubce/bce-sdk-go v0.9.230 h1:HzELBKiD7QAgYqZ1qHZexoI2A3Lo/6zYGQFvcUbS5cA=
github.com/baidubce/bce-sdk-go v0.9.230/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
@@ -191,6 +909,8 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf h1:TqhNAT4zKbTdLa62d2HDBFdvgSbIGB3eJE8HqhgiL9I=
github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c=
github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds=
@@ -200,7 +920,8 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -218,20 +939,25 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/coder/quartz v0.1.2 h1:PVhc9sJimTdKd3VbygXtS4826EOCpB1fXoRlLnCrE+s=
github.com/coder/quartz v0.1.2/go.mod h1:vsiCc+AHViMKH2CQpGIpFgdHIEQsxwm8yCscqKmzbRA=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
@@ -239,8 +965,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cortexproject/promqlsmith v0.0.0-20250407233056-90db95b1a4e4 h1:dpo7kQ24uFSV6Zgm9/kB34TIUWjGmadlbKrM6fNfQko=
github.com/cortexproject/promqlsmith v0.0.0-20250407233056-90db95b1a4e4/go.mod h1:jh6POgN18lXU133HBMfwr/1TjvBp8e5kL4ZtRsAPvGY=
-github.com/cortexproject/weaveworks-common v0.0.0-20241129212437-96019edf21f1 h1:UoSixdl0sBUhfEOMpIGxFnJjp3/y/+nkw6Du7su05FE=
-github.com/cortexproject/weaveworks-common v0.0.0-20241129212437-96019edf21f1/go.mod h1:7cl8fS/nivXe2DmBUUmr/3UGTJG2jVU2NRaIayR2Zjs=
+github.com/cortexproject/weaveworks-common v0.0.0-20250806170222-876764c695f2 h1:F9AVQMNf48V02H6cB1hQpgbU6h7CkonGTmie9aMNHUw=
+github.com/cortexproject/weaveworks-common v0.0.0-20250806170222-876764c695f2/go.mod h1:SnIoS7WUpqsW2y1VGA63VS2RNSAMXGireDhqW6ZZWLA=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs=
github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI=
@@ -257,16 +983,17 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dhui/dktest v0.4.3 h1:wquqUxAFdcUgabAVLvSCOKOlag5cIZuaOjYIBOWdsR0=
github.com/dhui/dktest v0.4.3/go.mod h1:zNK8IwktWzQRm6I/l2Wjp7MakiyaFWv4G1hjmodmMTs=
-github.com/digitalocean/godo v1.136.0 h1:DTxugljFJSMBPfEGq4KeXpnKeAHicggNqogcrw/YdZw=
-github.com/digitalocean/godo v1.136.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
+github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o=
+github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0=
-github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ=
+github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
@@ -284,22 +1011,22 @@ github.com/emersion/go-smtp v0.21.3 h1:7uVwagE8iPYE48WhNsng3RRpCUpFvNl39JGNSIyGV
github.com/emersion/go-smtp v0.21.3/go.mod h1:qm27SGYgoIPRot6ubfQ/GpiPy/g3PaZAVRxiO/sDUgQ=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane/envoy v1.32.2/go.mod h1:eR2SOX2IedqlPvmiKjUH7Wu//S602JKI7HPC/L3SRq8=
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
@@ -314,6 +1041,8 @@ github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
@@ -323,24 +1052,32 @@ github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
+github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
-github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -370,14 +1107,16 @@ github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZ
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
-github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E=
-github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
+github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
+github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
@@ -388,6 +1127,7 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA=
@@ -408,14 +1148,17 @@ github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeD
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y=
github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
@@ -441,6 +1184,7 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -451,6 +1195,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -468,6 +1213,7 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
@@ -478,6 +1224,7 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -499,11 +1246,20 @@ github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0Z
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
+github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -513,11 +1269,19 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
-github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
-github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
+github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gophercloud/gophercloud/v2 v2.6.0 h1:XJKQ0in3iHOZHVAFMXq/OhjCuvvG+BKR0unOqRfG1EI=
-github.com/gophercloud/gophercloud/v2 v2.6.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
+github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
+github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
@@ -532,11 +1296,14 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4z
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
-github.com/hashicorp/consul/api v1.31.2 h1:NicObVJHcCmyOIl7Z9iHPvvFrocgTYo9cITSGg0/7pw=
-github.com/hashicorp/consul/api v1.31.2/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
+github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
+github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
@@ -594,12 +1361,14 @@ github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpT
github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.19.1 h1:UU/7h3uc/rdgspM8xkQF7wokmwZXePWDXcLqrQRRzzY=
-github.com/hetznercloud/hcloud-go/v2 v2.19.1/go.mod h1:r5RTzv+qi8IbLcDIskTzxkFIji7Ovc8yNgepQR9M+UA=
+github.com/hetznercloud/hcloud-go/v2 v2.21.1 h1:IH3liW8/cCRjfJ4cyqYvw3s1ek+KWP8dl1roa0lD8JM=
+github.com/hetznercloud/hcloud-go/v2 v2.21.1/go.mod h1:XOaYycZJ3XKMVWzmqQ24/+1V7ormJHmPdck/kxrNnQA=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.4+incompatible h1:yNjwdvn9fwuN6Ouxr0xHM0cVu03YMUWUyFmu2van/Yc=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.4+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
@@ -626,13 +1395,19 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
@@ -649,6 +1424,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -670,8 +1446,13 @@ github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/linode/linodego v1.47.0 h1:6MFNCyzWbr8Rhl4r7d5DwZLwxvFIsM4ARH6W0KS/R0U=
-github.com/linode/linodego v1.47.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
+github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg=
+github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
+github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
+github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -693,8 +1474,12 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -708,10 +1493,14 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
+github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk=
-github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0=
+github.com/minio/minio-go/v7 v7.0.93 h1:lAB4QJp8Nq3vDMOU0eKgMuyBiEGMNlXQ5Glc8qAxqSU=
+github.com/minio/minio-go/v7 v7.0.93/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
@@ -754,8 +1543,8 @@ github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
-github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E=
+github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
@@ -764,14 +1553,14 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0 h1:hZa4FkI2JhYC0tkiwOepnHyyfWzezz3FfCmt88nWJa0=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.128.0/go.mod h1:sLbOuJEFckPdw4li0RtWpoSsMeppcck3s/cmzPyKAgc=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0 h1:+rUULr4xqOJjZK3SokFmRYzsiPq5onoWoSv3He4aaus=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.128.0/go.mod h1:Fh2SXPeFkr4J97w9CV/apFAib8TC9Hi0P08xtiT7Lng=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0 h1:8OWwRSdIhm3DY3PEYJ0PtSEz1a1OjL0fghLXSr14JMk=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.128.0/go.mod h1:32OeaysZe4vkSmD1LJ18Q1DfooryYqpSzFNmz+5A5RU=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0 h1:9wVFaWEhgV8WQD+nP662nHNaQIkmyF57KRhtsqlaWEI=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.128.0/go.mod h1:Yak3vQIvwYQiAO83u+zD9ujdCmpcDL7JSfg2YK+Mwn4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 h1:2pzb6bC/AAfciC9DN+8d7Y8Rsk8ZPCfp/ACTfZu87FQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0/go.mod h1:tIE4dzdxuM7HnFeYA6sj5zfLuUA/JxzQ+UDl1YrHvQw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0 h1:ydkfqpZ5BWZfEJEs7OUhTHW59og5aZspbUYxoGcAEok=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0/go.mod h1:oA+49dkzmhUx0YFC9JXGuPPSBL0TOTp6jkv7qSr2n0Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 h1:AOVxBvCZfTPj0GLGqBVHpAnlC9t9pl1JXUQXymHliiY=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0/go.mod h1:0CAJ32V/bCUBhNTEvnN9wlOG5IsyZ+Bmhe9e3Eri7CU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 h1:yDLSAoIi3jNt4R/5xN4IJ9YAg1rhOShgchlO/ESv8EY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0/go.mod h1:IXQHbTPxqNcuu44FvkyvpYJ6Qy4wh4YsCVkKsp0Flzo=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -786,8 +1575,8 @@ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYr
github.com/oracle/oci-go-sdk/v65 v65.93.1 h1:lIvy/6aQOUenQI+cxXH1wDBJeXFPO9Du3CaomXeYFaY=
github.com/oracle/oci-go-sdk/v65 v65.93.1/go.mod h1:u6XRPsw9tPziBh76K7GrrRXPa8P8W3BQeqJ6ZZt9VLA=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
-github.com/ovh/go-ovh v1.7.0 h1:V14nF7FwDjQrZt9g7jzcvAAQ3HN6DNShRFRMC3jLoPw=
-github.com/ovh/go-ovh v1.7.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
+github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
+github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/parquet-go/parquet-go v0.25.1 h1:l7jJwNM0xrk0cnIIptWMtnSnuxRkwq53S+Po3KG8Xgo=
github.com/parquet-go/parquet-go v0.25.1/go.mod h1:AXBuotO1XiBtcqJb/FKFyjBG4aqa3aQAAWF3ZPzCanY=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -796,14 +1585,22 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
+github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY=
+github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
@@ -814,8 +1611,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prometheus-community/parquet-common v0.0.0-20250716185251-4cfa597e936c h1:yDtT3c2klcWJj6A0osq72qM8rd1ohtl/J3rHD3FHuNw=
-github.com/prometheus-community/parquet-common v0.0.0-20250716185251-4cfa597e936c/go.mod h1:MbAv/yCv9GORLj0XvXgRF913R9Jc04+BvVq4VJpPCi0=
+github.com/prometheus-community/parquet-common v0.0.0-20250807102632-2aeeceacebf0 h1:5mm5mMmEhUdvqf4NsVvEGWry0IeXkeZEfODbZ70c1Ok=
+github.com/prometheus-community/parquet-common v0.0.0-20250807102632-2aeeceacebf0/go.mod h1:MbAv/yCv9GORLj0XvXgRF913R9Jc04+BvVq4VJpPCi0=
github.com/prometheus-community/prom-label-proxy v0.11.1 h1:jX+m+BQCNM0z3/P6V6jVxbiDKgugvk91SaICD6bVhT4=
github.com/prometheus-community/prom-label-proxy v0.11.1/go.mod h1:uTeQW+wZ/VPV1LL3IPfvUE++wR2nPLex+Y4RE38Cpis=
github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA=
@@ -826,22 +1623,27 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_golang v1.23.0-rc.1 h1:Is/nGODd8OsJlNQSybeYBwY/B6aHrN7+QwVUYutHSgw=
+github.com/prometheus/client_golang v1.23.0-rc.1/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
-github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3 h1:R/zO7ombSHCI8bjQusgCMSL+cE669w5/R2upq5WlPD0=
+github.com/prometheus/common v0.65.1-0.20250703115700-7f8b2a0d32d3/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
+github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a h1:r2csuCATbgDz2Nk2PkKo7b6x7ErrF3NMmxwH0fifqN8=
+github.com/prometheus/otlptranslator v0.0.0-20250731173911-a9673827589a/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@@ -849,27 +1651,38 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
-github.com/prometheus/sigv4 v0.1.2 h1:R7570f8AoM5YnTUPFm3mjZH5q2k4D+I/phCWvZ4PXG8=
-github.com/prometheus/sigv4 v0.1.2/go.mod h1:GF9fwrvLgkQwDdQ5BXeV9XUSCH/IPNqzvAoaohfjqMU=
+github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb h1:azXJoaVT+S7PRdbdUwtyivhaGq++ZF5YTkk1XlTaZkw=
+github.com/prometheus/prometheus v0.305.1-0.20250808023455-1e4144a496fb/go.mod h1:nFT/lsJGZPCe1mC6uLIoDuK2bP9JO9DBHIDPQsuZucQ=
+github.com/prometheus/sigv4 v0.2.0 h1:qDFKnHYFswJxdzGeRP63c4HlH3Vbn1Yf/Ao2zabtVXk=
+github.com/prometheus/sigv4 v0.2.0/go.mod h1:D04rqmAaPPEUkjRQxGqjoxdyJuyCh6E0M18fZr0zBiE=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
+github.com/rantav/go-grpc-channelz v0.0.4 h1:8GvqhA6siQVBsZYzal3yHhyJ9YiHEJx7RtSH2Jvm9Co=
+github.com/rantav/go-grpc-channelz v0.0.4/go.mod h1:HodrRmnnH1zXcEEfK7EJrI23YMPMT7uvyAYkq2JUIcI=
github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/redis/rueidis v1.0.61 h1:AkbCMeTyjFSQraGaNYncg3unMCTYGr6Y8WOqGhDOQu4=
github.com/redis/rueidis v1.0.61/go.mod h1:Lkhr2QTgcoYBhxARU7kJRO8SyVlgUuEkcJO1Y8MCluA=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32 h1:4+LP7qmsLSGbmc66m1s5dKRMBwztRppfxFKlYqYte/c=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.32/go.mod h1:kzh+BSAvpoyHHdHBCDhmSWtBc1NbLMZ2lWHqnBoxFks=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM=
@@ -891,8 +1704,11 @@ github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ=
github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
@@ -900,8 +1716,8 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
-github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
-github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
+github.com/stackitcloud/stackit-sdk-go/core v0.17.2 h1:jPyn+i8rkp2hM80+hOg0B/1EVRbMt778Tr5RWyK1m2E=
+github.com/stackitcloud/stackit-sdk-go/core v0.17.2/go.mod h1:8KIw3czdNJ9sdil9QQimxjR6vHjeINFrRv0iZ67wfn0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -919,6 +1735,7 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
@@ -928,14 +1745,14 @@ github.com/tencentyun/cos-go-sdk-v5 v0.7.66 h1:O4O6EsozBoDjxWbltr3iULgkI7WPj/BFN
github.com/tencentyun/cos-go-sdk-v5 v0.7.66/go.mod h1:8+hG+mQMuRP/OIS9d83syAvXvrMj9HhkND6Q1fLghw0=
github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng=
github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM=
-github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97 h1:VjG0mwhN1DkncwDHFvrpd12/2TLfgYNRmEQA48ikp+0=
-github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97/go.mod h1:vyzFrBXgP+fGNG2FopEGWOO/zrIuoy7zt3LpLeezRsw=
-github.com/thanos-io/promql-engine v0.0.0-20250611170940-015ebeb7b5ff h1:obQDLbgnae6rLPngWwQ6q/ifQZeDEmVvxHIJ6arJCDs=
-github.com/thanos-io/promql-engine v0.0.0-20250611170940-015ebeb7b5ff/go.mod h1:IQjuIvDzOOVE2MGDs88Q65GYmmKrpmIsDkMVOqs5reo=
-github.com/thanos-io/thanos v0.39.2 h1:edN03y7giEc6lD17HJhYcv8ELapXxElmhJnFIYJ2GqQ=
-github.com/thanos-io/thanos v0.39.2/go.mod h1:bvUPJNIx2LBXme6yBinRiGqQinxlGikLlK7PGeFQPkQ=
-github.com/thanos-io/thanos-prometheus v0.0.0-20250610133519-082594458a88 h1:5uf08MPb6xrVo4rxmBDh9/1SLthbZGY9zLeF3oMixh8=
-github.com/thanos-io/thanos-prometheus v0.0.0-20250610133519-082594458a88/go.mod h1:WEq2ogBPZoLjj9x5K67VEk7ECR0nRD9XCjaOt1lsYck=
+github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3 h1:P301Anc27aVL7Ls88el92j+qW3PJp8zmiDl+kOUZv3A=
+github.com/thanos-io/objstore
v0.0.0-20250722142242-922b22272ee3/go.mod h1:uDHLkMKOGDAnlN75EAz8VrRzob1+VbgYSuUleatWuF0= +github.com/thanos-io/promql-engine v0.0.0-20250726034445-91e6e32a36a7 h1:lFCGOWLDH50RB4ig/xRnUXX99ECD13xUHQdNOvcAYwc= +github.com/thanos-io/promql-engine v0.0.0-20250726034445-91e6e32a36a7/go.mod h1:MOFN0M1nDMcWZg1t4iF39sOard/K4SWgO/HHSODeDIc= +github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb h1:z/ePbn3lo/D4vdHGH8hpa2kgH9M6iLq0kOFtZwuelKM= +github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb/go.mod h1:gGUG3TDEoRSjTFVs/QO6QnQIILRgNF0P9l7BiiMfmHw= +github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= +github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tjhop/slog-gokit v0.1.4 h1:uj/vbDt3HaF0Py8bHPV4ti/s0utnO0miRbO277FLBKM= github.com/tjhop/slog-gokit v0.1.4/go.mod h1:Bbu5v2748qpAWH7k6gse/kw3076IJf6owJmh7yArmJs= github.com/trivago/tgo v1.0.7 h1:uaWH/XIy9aWYWpjm2CU3RpcqZXmX2ysQ9/Go+d9gyrM= @@ -962,10 +1779,12 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= -github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/api/v3 v3.5.17 h1:cQB8eb8bxwuxOilBpMJAEo8fAONyrdXTHUNcMd8yT1w= go.etcd.io/etcd/api/v3 v3.5.17/go.mod h1:d1hvkRuXkts6PmaYk2Vrgqbv7H4ADfAKhyJqHNLJCB4= @@ -989,44 +1808,45 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/component v1.34.0 h1:YONg7FaZ5zZbj5cLdARvwtMNuZHunuyxw2fWe5fcWqc= -go.opentelemetry.io/collector/component v1.34.0/go.mod h1:GvolsSVZskXuyfQdwYacqeBSZe/1tg4RJ0YK55KSvDA= -go.opentelemetry.io/collector/component/componentstatus v0.128.0 h1:0lEYHgUQEMMkl5FLtMgDH8lue4B3auElQINzGIWUya4= -go.opentelemetry.io/collector/component/componentstatus v0.128.0/go.mod h1:8vVO6JSV+edmiezJsQzW7aKQ7sFLIN6S3JawKBI646o= -go.opentelemetry.io/collector/component/componenttest v0.128.0 h1:MGNh5lQQ0Qmz2SmNwOqLJYaWMDkMLYj/51wjMzTBR34= -go.opentelemetry.io/collector/component/componenttest v0.128.0/go.mod h1:hALNxcacqOaX/Gm/dE7sNOxAEFj41SbRqtvF57Yd6gs= -go.opentelemetry.io/collector/confmap v1.34.0 h1:PG4sYlLxgCMnA5F7daKXZV+NKjU1IzXBzVQeyvcwyh0= -go.opentelemetry.io/collector/confmap v1.34.0/go.mod h1:BbAit8+hAJg5vyFBQoDh9vOXOH8UzCdNu91jCh+b72E= -go.opentelemetry.io/collector/confmap/xconfmap 
v0.128.0 h1:hcVKU45pjC+PLz7xUc8kwSlR5wsN2w8hs9midZ3ez10= -go.opentelemetry.io/collector/confmap/xconfmap v0.128.0/go.mod h1:2928x4NAAu1CysfzLbEJE6MSSDB/gOYVq6YRGWY9LmM= -go.opentelemetry.io/collector/consumer v1.34.0 h1:oBhHH6mgViOGhVDPozE+sUdt7jFBo2Hh32lsSr2L3Tc= -go.opentelemetry.io/collector/consumer v1.34.0/go.mod h1:DVMCb56ZBlPNcmo0lSJKn3rp18oyZQCedRE4GKIMI+Q= -go.opentelemetry.io/collector/consumer/consumertest v0.128.0 h1:x50GB0I/QvU3sQuNCap5z/P2cnq2yHoRJ/8awkiT87w= -go.opentelemetry.io/collector/consumer/consumertest v0.128.0/go.mod h1:Wb3IAbMY/DOIwJPy81PuBiW2GnKoNIz4THE7wfJwovE= -go.opentelemetry.io/collector/consumer/xconsumer v0.128.0 h1:4E+KTdCjkRS3SIw0bsv5kpv9XFXHf8x9YiPEuxBVEHY= -go.opentelemetry.io/collector/consumer/xconsumer v0.128.0/go.mod h1:OmzilL/qbjCzPMHay+WEA7/cPe5xuX7Jbj5WPIpqaMo= -go.opentelemetry.io/collector/featuregate v1.34.0 h1:zqDHpEYy1UeudrfUCvlcJL2t13dXywrC6lwpNZ5DrCU= -go.opentelemetry.io/collector/featuregate v1.34.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= -go.opentelemetry.io/collector/internal/telemetry v0.128.0 h1:ySEYWoY7J8DAYdlw2xlF0w+ODQi3AhYj7TRNflsCbx8= -go.opentelemetry.io/collector/internal/telemetry v0.128.0/go.mod h1:572B/iJqjauv3aT+zcwnlNWBPqM7+KqrYGSUuOAStrM= -go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8= -go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI= -go.opentelemetry.io/collector/pdata/pprofile v0.128.0 h1:6DEtzs/liqv/ukz2EHbC5OMaj2V6K2pzuj/LaRg2YmY= -go.opentelemetry.io/collector/pdata/pprofile v0.128.0/go.mod h1:bVVRpz+zKFf1UCCRUFqy8LvnO3tHlXKkdqW2d+Wi/iA= -go.opentelemetry.io/collector/pdata/testdata v0.128.0 h1:5xcsMtyzvb18AnS2skVtWreQP1nl6G3PiXaylKCZ6pA= -go.opentelemetry.io/collector/pdata/testdata v0.128.0/go.mod h1:9/VYVgzv3JMuIyo19KsT3FwkVyxbh3Eg5QlabQEUczA= -go.opentelemetry.io/collector/pipeline v0.128.0 h1:WgNXdFbyf/QRLy5XbO/jtPQosWrSWX/TEnSYpJq8bgI= -go.opentelemetry.io/collector/pipeline v0.128.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= -go.opentelemetry.io/collector/processor v1.34.0 h1:5pwXIG12XXxdkJ8F68e2cBEjEnFlCIAZhqEYM7vjkqE= -go.opentelemetry.io/collector/processor v1.34.0/go.mod h1:VCl4vYj2tdO4APUcr0q6Eh796mqCCsH9Z/gqaPuzlUs= -go.opentelemetry.io/collector/processor/processortest v0.128.0 h1:xPhOSmGFDGqhC3/nu1BqPSE6EpDPAf1/F+BfaYjDn/8= -go.opentelemetry.io/collector/processor/processortest v0.128.0/go.mod h1:XXXom+mbAQtrkcvq4Ecd6n8RQoVgcfLe1vrUlr6U2gI= -go.opentelemetry.io/collector/processor/xprocessor v0.128.0 h1:ObbtdXab0is6bdt4XabsRJZ+SUTuwQjPVlHTbmScfNg= -go.opentelemetry.io/collector/processor/xprocessor v0.128.0/go.mod h1:/nHXW15nzwSRQ+25Cb+r17he/uMtCEvSOBGqpDbn3Uk= +go.opentelemetry.io/collector/component v1.35.0 h1:JpvBukEcEUvJ/TInF1KYpXtWEP+C7iYkxCHKjI0o7BQ= +go.opentelemetry.io/collector/component v1.35.0/go.mod h1:hU/ieWPxWbMAacODCSqem5ZaN6QH9W5GWiZ3MtXVuwc= +go.opentelemetry.io/collector/component/componentstatus v0.129.0 h1:ejpBAt7hXAAZiQKcSxLvcy8sj8SjY4HOLdoXIlW6ybw= +go.opentelemetry.io/collector/component/componentstatus v0.129.0/go.mod h1:/dLPIxn/tRMWmGi+DPtuFoBsffOLqPpSZ2IpEQzYtwI= +go.opentelemetry.io/collector/component/componenttest v0.129.0 h1:gpKkZGCRPu3Yn0U2co09bMvhs17yLFb59oV8Gl9mmRI= +go.opentelemetry.io/collector/component/componenttest v0.129.0/go.mod h1:JR9k34Qvd/pap6sYkPr5QqdHpTn66A5lYeYwhenKBAM= +go.opentelemetry.io/collector/confmap v1.35.0 h1:U4JDATAl4PrKWe9bGHbZkoQXmJXefWgR2DIkFvw8ULQ= +go.opentelemetry.io/collector/confmap v1.35.0/go.mod 
h1:qX37ExVBa+WU4jWWJCZc7IJ+uBjb58/9oL+/ctF1Bt0= +go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 h1:Q/+pJKrkCaMPSoSAH2BpC3UZCh+5hTiFkh/bdy5yChk= +go.opentelemetry.io/collector/confmap/xconfmap v0.129.0/go.mod h1:RNMnlay2meJDXcKjxiLbST9/YAhKLJlj0kZCrJrLGgw= +go.opentelemetry.io/collector/consumer v1.35.0 h1:mgS42yh1maXBIE65IT4//iOA89BE+7xSUzV8czyevHg= +go.opentelemetry.io/collector/consumer v1.35.0/go.mod h1:9sSPX0hDHaHqzR2uSmfLOuFK9v3e9K3HRQ+fydAjOWs= +go.opentelemetry.io/collector/consumer/consumertest v0.129.0 h1:kRmrAgVvPxH5c/rTaOYAzyy0YrrYhQpBNkuqtDRrgeU= +go.opentelemetry.io/collector/consumer/consumertest v0.129.0/go.mod h1:JgJKms1+v/CuAjkPH+ceTnKeDgUUGTQV4snGu5wTEHY= +go.opentelemetry.io/collector/consumer/xconsumer v0.129.0 h1:bRyJ9TGWwnrUnB5oQGTjPhxpVRbkIVeugmvks22bJ4A= +go.opentelemetry.io/collector/consumer/xconsumer v0.129.0/go.mod h1:pbe5ZyPJrtzdt/RRI0LqfT1GVBiJLbtkDKx3SBRTiTY= +go.opentelemetry.io/collector/featuregate v1.35.0 h1:c/XRtA35odgxVc4VgOF/PTIk7ajw1wYdQ6QI562gzd4= +go.opentelemetry.io/collector/featuregate v1.35.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc= +go.opentelemetry.io/collector/internal/telemetry v0.129.0 h1:jkzRpIyMxMGdAzVOcBe8aRNrbP7eUrMq6cxEHe0sbzA= +go.opentelemetry.io/collector/internal/telemetry v0.129.0/go.mod h1:riAPlR2LZBV7VEx4LicOKebg3N1Ja3izzkv5fl1Lhiw= +go.opentelemetry.io/collector/pdata v1.35.0 h1:ck6WO6hCNjepADY/p9sT9/rLECTLO5ukYTumKzsqB/E= +go.opentelemetry.io/collector/pdata v1.35.0/go.mod h1:pttpb089864qG1k0DMeXLgwwTFLk+o3fAW9I6MF9tzw= +go.opentelemetry.io/collector/pdata/pprofile v0.129.0 h1:DgZTvjOGmyZRx7Or80hz8XbEaGwHPkIh2SX1A5eXttQ= +go.opentelemetry.io/collector/pdata/pprofile v0.129.0/go.mod h1:uUBZxqJNOk6QIMvbx30qom//uD4hXJ1K/l3qysijMLE= +go.opentelemetry.io/collector/pdata/testdata v0.129.0 h1:n1QLnLOtrcAR57oMSVzmtPsQEpCc/nE5Avk1xfuAkjY= +go.opentelemetry.io/collector/pdata/testdata v0.129.0/go.mod h1:RfY5IKpmcvkS2IGVjl9jG9fcT7xpQEBWpg9sQOn/7mY= +go.opentelemetry.io/collector/pipeline v0.129.0 h1:Mp7RuKLizLQJ0381eJqKQ0zpgkFlhTE9cHidpJQIvMU= +go.opentelemetry.io/collector/pipeline v0.129.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4= +go.opentelemetry.io/collector/processor v1.35.0 h1:YOfHemhhodYn4BnPjN7kWYYDhzPVqRkyHCaQ8mAlavs= +go.opentelemetry.io/collector/processor v1.35.0/go.mod h1:cWHDOpmpAaVNCc9K9j2/okZoLIuP/EpGGRNhM4JGmFM= +go.opentelemetry.io/collector/processor/processortest v0.129.0 h1:r5iJHdS7Ffdb2zmMVYx4ahe92PLrce5cas/AJEXivkY= +go.opentelemetry.io/collector/processor/processortest v0.129.0/go.mod h1:gdf8GzyzjGoDTA11+CPwC4jfXphtC+B7MWbWn+LIWXc= +go.opentelemetry.io/collector/processor/xprocessor v0.129.0 h1:V3Zgd+YIeu3Ij3DPlGtzdcTwpqOQIqQVcL5jdHHS7sc= +go.opentelemetry.io/collector/processor/xprocessor v0.129.0/go.mod h1:78T+AP5NO137W/E+SibQhaqOyS67fR+IN697b4JFh00= go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4= go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns= go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw= go.opentelemetry.io/contrib/bridges/otelzap v0.11.0/go.mod h1:pJPCLM8gzX4ASqLlyAXjHBEYxgbOQJ/9bidWxD6PEPQ= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA= go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA= 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= @@ -1045,6 +1865,8 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.36.0 h1:SoCgXYF4ISDtNyfLUzsGDa go.opentelemetry.io/contrib/propagators/jaeger v1.36.0/go.mod h1:VHu48l0YTRKSObdPQ+Sb8xMZvdnJlN7yhHuHoPgNqHM= go.opentelemetry.io/contrib/propagators/ot v1.36.0 h1:UBoZjbx483GslNKYK2YpfvePTJV4BHGeFd8+b7dexiM= go.opentelemetry.io/contrib/propagators/ot v1.36.0/go.mod h1:adDDRry19/n9WoA7mSCMjoVJcmzK/bZYzX9SR+g2+W4= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/bridge/opentracing v1.36.0 h1:GWGmcYhMCu6+K/Yz5KWSETU/esd/mkVGx+77uKtLjpk= @@ -1059,15 +1881,24 @@ go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohX go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989 h1:4JF7oY9CcHrPGfBLijDcXZyCzGckVEyOjuat5ktmQRg= go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989/go.mod h1:NToOxLDCS1tXDSB2dIj44H9xGPOpKr0csIN+gnuihv4= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1094,29 +1925,60 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp 
v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4= golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1139,10 +2001,20 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1182,6 +2054,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1189,6 +2063,30 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1211,9 +2109,19 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1225,10 +2133,20 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1276,6 +2194,7 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1291,10 +2210,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1308,14 +2229,55 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1327,15 +2289,32 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0/go.mod 
h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1349,6 +2328,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1379,6 +2359,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1390,6 +2371,15 @@ golang.org/x/tools v0.1.2/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1398,8 +2388,18 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1437,11 +2437,35 @@ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/S google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod 
h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= -google.golang.org/api v0.228.0 h1:X2DJ/uoWGnY5obVjewbp8icSL5U4FzuCfy9OjbLSnLs= -google.golang.org/api v0.228.0/go.mod h1:wNvRS1Pbe8r4+IfBIniV8fwCpGwTrYa+kMUDiC5z5a4= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= +google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= +google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= +google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo= +google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1449,7 +2473,6 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto 
v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1491,6 +2514,7 @@ google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1521,52 +2545,114 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20250204164813-702378808489 h1:nQcbCCOg2h2CQ0yA8SY3AHqriNKDvsetuq9mE/HFjtc= -google.golang.org/genproto v0.0.0-20250204164813-702378808489/go.mod h1:wkQ2Aj/xvshAUDtO/JHvu9y+AaN9cqs28QuSVSHtZSY= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod 
h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= +google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= +google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod 
h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc 
v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/grpc v1.71.2 h1:KnzCueW4s+8ojAPZ+NnyZAELjsIMJGteKjKejieEC7M= +google.golang.org/grpc v1.71.2/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1582,6 +2668,17 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1625,6 +2722,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= @@ -1637,7 +2735,59 @@ k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUy k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod 
h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= +modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= +modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= +modernc.org/strutil v1.1.1/go.mod 
h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= diff --git a/integration/e2e/images/images.go b/integration/e2e/images/images.go index 7b744526676..1ef0e8bbdec 100644 --- a/integration/e2e/images/images.go +++ b/integration/e2e/images/images.go @@ -11,5 +11,5 @@ var ( Minio = "minio/minio:RELEASE.2024-05-28T17-19-04Z" Consul = "consul:1.8.4" ETCD = "gcr.io/etcd-development/etcd:v3.4.7" - Prometheus = "quay.io/prometheus/prometheus:v3.3.1" + Prometheus = "quay.io/prometheus/prometheus:v3.5.0" ) diff --git a/integration/e2e/util.go b/integration/e2e/util.go index dd10efa1ba0..a7c164fea3b 100644 --- a/integration/e2e/util.go +++ b/integration/e2e/util.go @@ -19,6 +19,7 @@ import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/tsdbutil" @@ -423,3 +424,117 @@ func CreateBlock( return id, nil } + +func GenerateHistogramSeriesV2(name string, ts time.Time, i uint32, floatHistogram bool, additionalLabels ...prompb.Label) (symbols []string, series []writev2.TimeSeries) { + tsMillis := TimeToMilliseconds(ts) + + st := writev2.NewSymbolTable() + lb := labels.NewScratchBuilder(0) + lb.Add("__name__", name) + for _, lbl := range additionalLabels { + lb.Add(lbl.Name, lbl.Value) + } + + var ( + h *histogram.Histogram + fh *histogram.FloatHistogram + ph writev2.Histogram + ) + if floatHistogram { + fh = tsdbutil.GenerateTestFloatHistogram(int64(i)) + ph = writev2.FromFloatHistogram(tsMillis, fh) + } else { + h = tsdbutil.GenerateTestHistogram(int64(i)) + ph = writev2.FromIntHistogram(tsMillis, h) + } + + // Generate the series + series = append(series, writev2.TimeSeries{ + LabelsRefs: st.SymbolizeLabels(lb.Labels(), nil), + Histograms: []writev2.Histogram{ph}, + }) + + symbols = st.Symbols() + + return +} + +func GenerateSeriesV2(name string, ts time.Time, additionalLabels ...prompb.Label) (symbols []string, series []writev2.TimeSeries, vector model.Vector) { + tsMillis := TimeToMilliseconds(ts) + value := rand.Float64() + + st := writev2.NewSymbolTable() + lb := labels.NewScratchBuilder(0) + lb.Add("__name__", name) + + for _, label := range additionalLabels { + lb.Add(label.Name, label.Value) + } + series = append(series, writev2.TimeSeries{ + // Generate the series + LabelsRefs: st.SymbolizeLabels(lb.Labels(), nil), + Samples: []writev2.Sample{ + {Value: value, 
Timestamp: tsMillis},
+		},
+		Metadata: writev2.Metadata{
+			Type: writev2.Metadata_METRIC_TYPE_GAUGE,
+		},
+	})
+	symbols = st.Symbols()
+
+	// Generate the expected vector when querying it
+	metric := model.Metric{}
+	metric[labels.MetricName] = model.LabelValue(name)
+	for _, lbl := range additionalLabels {
+		metric[model.LabelName(lbl.Name)] = model.LabelValue(lbl.Value)
+	}
+
+	vector = append(vector, &model.Sample{
+		Metric:    metric,
+		Value:     model.SampleValue(value),
+		Timestamp: model.Time(tsMillis),
+	})
+
+	return
+}
+
+func GenerateV2SeriesWithSamples(
+	name string,
+	startTime time.Time,
+	scrapeInterval time.Duration,
+	startValue int,
+	numSamples int,
+	additionalLabels ...prompb.Label,
+) (symbols []string, series writev2.TimeSeries) {
+	tsMillis := TimeToMilliseconds(startTime)
+	durMillis := scrapeInterval.Milliseconds()
+
+	st := writev2.NewSymbolTable()
+	lb := labels.NewScratchBuilder(0)
+	lb.Add("__name__", name)
+
+	for _, label := range additionalLabels {
+		lb.Add(label.Name, label.Value)
+	}
+
+	startTMillis := tsMillis
+	samples := make([]writev2.Sample, numSamples)
+	for i := 0; i < numSamples; i++ {
+		scrapeJitter := rand.Int63n(10) + 1 // add jitter to simulate real-world scenarios, refer to: https://github.com/prometheus/prometheus/issues/13213
+		samples[i] = writev2.Sample{
+			Timestamp: startTMillis + scrapeJitter,
+			Value:     float64(i + startValue),
+		}
+		startTMillis += durMillis
+	}
+
+	series = writev2.TimeSeries{
+		LabelsRefs: st.SymbolizeLabels(lb.Labels(), nil),
+		Samples:    samples,
+		Metadata: writev2.Metadata{
+			Type: writev2.Metadata_METRIC_TYPE_GAUGE,
+		},
+	}
+
+	return st.Symbols(), series
+}
diff --git a/integration/e2ecortex/client.go b/integration/e2ecortex/client.go
index 9067b60c078..2b46d2262ec 100644
--- a/integration/e2ecortex/client.go
+++ b/integration/e2ecortex/client.go
@@ -24,6 +24,7 @@ import (
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/rulefmt"
 	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/remote"
 	yaml "gopkg.in/yaml.v3"
@@ -147,6 +148,39 @@ func (c *Client) Push(timeseries []prompb.TimeSeries, metadata ...prompb.MetricM
 	return res, nil
 }
 
+// PushV2 pushes the input timeseries to the remote endpoint using the Remote Write 2.0 protocol
+func (c *Client) PushV2(symbols []string, timeseries []writev2.TimeSeries) (*http.Response, error) {
+	// Create write request
+	data, err := proto.Marshal(&writev2.Request{Symbols: symbols, Timeseries: timeseries})
+	if err != nil {
+		return nil, err
+	}
+
+	// Create HTTP request
+	compressed := snappy.Encode(nil, data)
+	req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/prom/push", c.distributorAddress), bytes.NewReader(compressed))
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Add("Content-Encoding", "snappy")
+	req.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")
+	req.Header.Set("X-Prometheus-Remote-Write-Version", "2.0.0")
+	req.Header.Set("X-Scope-OrgID", c.orgID)
+
+	ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+	defer cancel()
+
+	// Execute HTTP request
+	res, err := c.httpClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, err
+	}
+
+	defer res.Body.Close()
+	return res, nil
+}
+
 func getNameAndAttributes(ts prompb.TimeSeries) (string, map[string]any) {
 	var metricName string
 	attributes := make(map[string]any)
@@ -236,7 +270,7 @@ func
convertTimeseriesToMetrics(timeseries []prompb.TimeSeries, metadata []promp return metrics } -func otlpWriteRequest(name string, labels ...prompb.Label) pmetricotlp.ExportRequest { +func otlpWriteRequest(name, unit string, temporality pmetric.AggregationTemporality, labels ...prompb.Label) pmetricotlp.ExportRequest { d := pmetric.NewMetrics() // Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram @@ -258,10 +292,11 @@ func otlpWriteRequest(name string, labels ...prompb.Label) pmetricotlp.ExportReq // Generate One Counter counterMetric := scopeMetric.Metrics().AppendEmpty() counterMetric.SetName(name) + counterMetric.SetUnit(unit) counterMetric.SetDescription("test-counter-description") counterMetric.SetEmptySum() - counterMetric.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + counterMetric.Sum().SetAggregationTemporality(temporality) counterDataPoint := counterMetric.Sum().DataPoints().AppendEmpty() counterDataPoint.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) @@ -276,8 +311,8 @@ func otlpWriteRequest(name string, labels ...prompb.Label) pmetricotlp.ExportReq return pmetricotlp.NewExportRequestFromMetrics(d) } -func (c *Client) OTLPPushExemplar(name string, labels ...prompb.Label) (*http.Response, error) { - data, err := otlpWriteRequest(name, labels...).MarshalProto() +func (c *Client) OTLPPushExemplar(name, unit string, temporality pmetric.AggregationTemporality, labels ...prompb.Label) (*http.Response, error) { + data, err := otlpWriteRequest(name, unit, temporality, labels...).MarshalProto() if err != nil { return nil, err } diff --git a/integration/otlp_test.go b/integration/otlp_test.go index 7eda34e55ec..fe83c1852fa 100644 --- a/integration/otlp_test.go +++ b/integration/otlp_test.go @@ -18,6 +18,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore/providers/s3" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/cortexproject/cortex/integration/e2e" e2edb "github.com/cortexproject/cortex/integration/e2e/db" @@ -149,7 +150,7 @@ func TestOTLPIngestExemplar(t *testing.T) { c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") require.NoError(t, err) - res, err := c.OTLPPushExemplar("exemplar_1") + res, err := c.OTLPPushExemplar("exemplar_1", "", pmetric.AggregationTemporalityCumulative) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) @@ -241,15 +242,15 @@ func TestOTLPPromoteResourceAttributesPerTenant(t *testing.T) { {Name: "attr3", Value: "value"}, } - res, err := c1.OTLPPushExemplar("series_1", labels...) + res, err := c1.OTLPPushExemplar("series_1", "", pmetric.AggregationTemporalityCumulative, labels...) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) - res, err = c2.OTLPPushExemplar("series_1", labels...) + res, err = c2.OTLPPushExemplar("series_1", "", pmetric.AggregationTemporalityCumulative, labels...) require.NoError(t, err) require.Equal(t, 200, res.StatusCode) - res, err = c3.OTLPPushExemplar("series_1", labels...) + res, err = c3.OTLPPushExemplar("series_1", "", pmetric.AggregationTemporalityCumulative, labels...) 
require.NoError(t, err)
 	require.Equal(t, 200, res.StatusCode)
@@ -265,3 +266,116 @@ func TestOTLPPromoteResourceAttributesPerTenant(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, labelSet3, []string{"__name__", "attr1", "attr2", "attr3", "instance", "job"})
 }
+
+func TestOTLPEnableTypeAndUnitLabels(t *testing.T) {
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	// Start dependencies.
+	minio := e2edb.NewMinio(9000, bucketName)
+	require.NoError(t, s.StartAndWaitReady(minio))
+
+	// Configure the blocks storage to frequently compact TSDB head
+	// and ship blocks to the storage.
+	flags := mergeFlags(BlocksStorageFlags(), map[string]string{
+		"-auth.enabled": "true",
+
+		// OTLP
+		"-distributor.otlp.enable-type-and-unit-labels": "true",
+
+		// alert manager
+		"-alertmanager.web.external-url":   "http://localhost/alertmanager",
+		"-alertmanager-storage.backend":    "local",
+		"-alertmanager-storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"),
+	})
+
+	// make alert manager config dir
+	require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{}))
+
+	require.NoError(t, copyFileToSharedDir(s, "docs/configuration/single-process-config-blocks-local.yaml", cortexConfigFile))
+
+	// start cortex and assert runtime-config is loaded correctly
+	cortex := e2ecortex.NewSingleBinaryWithConfigFile("cortex", cortexConfigFile, flags, "", 9009, 9095)
+	require.NoError(t, s.StartAndWaitReady(cortex))
+
+	c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1")
+	require.NoError(t, err)
+
+	// Push some series to Cortex.
+	now := time.Now()
+
+	labels := []prompb.Label{
+		{Name: "service.name", Value: "test-service"},
+		{Name: "attr1", Value: "value"},
+	}
+
+	res, err := c.OTLPPushExemplar("series_1", "seconds", pmetric.AggregationTemporalityCumulative, labels...)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	value, err := c.Query("series_1_seconds", now)
+	require.NoError(t, err)
+	vector, ok := value.(model.Vector)
+	require.True(t, ok)
+	require.Equal(t, 1, len(vector))
+
+	metric := vector[0].Metric
+	require.Equal(t, model.LabelValue("seconds"), metric["__unit__"])
+	require.Equal(t, model.LabelValue("gauge"), metric["__type__"])
+}
+
+func TestOTLPPushDeltaTemporality(t *testing.T) {
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	// Start dependencies.
+	minio := e2edb.NewMinio(9000, bucketName)
+	require.NoError(t, s.StartAndWaitReady(minio))
+
+	// Configure the blocks storage to frequently compact TSDB head
+	// and ship blocks to the storage.
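+	// -distributor.otlp.allow-delta-temporality (set below) lets the distributor ingest delta-temporality OTLP metrics.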
+ flags := mergeFlags(BlocksStorageFlags(), map[string]string{ + "-auth.enabled": "true", + + // OTLP + "-distributor.otlp.allow-delta-temporality": "true", + + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + "-alertmanager-storage.backend": "local", + "-alertmanager-storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"), + }) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + require.NoError(t, copyFileToSharedDir(s, "docs/configuration/single-process-config-blocks-local.yaml", cortexConfigFile)) + + // start cortex and assert runtime-config is loaded correctly + cortex := e2ecortex.NewSingleBinaryWithConfigFile("cortex", cortexConfigFile, flags, "", 9009, 9095) + require.NoError(t, s.StartAndWaitReady(cortex)) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + // Push some series to Cortex. + now := time.Now() + + labels := []prompb.Label{ + {Name: "service.name", Value: "test-service"}, + {Name: "attr1", Value: "value"}, + } + + res, err := c.OTLPPushExemplar("series_1", "", pmetric.AggregationTemporalityDelta, labels...) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + value, err := c.Query("series_1", now) + require.NoError(t, err) + vector, ok := value.(model.Vector) + require.True(t, ok) + require.Equal(t, 1, len(vector)) +} diff --git a/integration/parquet_querier_test.go b/integration/parquet_querier_test.go index ca31a019c9a..e085cef99d1 100644 --- a/integration/parquet_querier_test.go +++ b/integration/parquet_querier_test.go @@ -63,8 +63,9 @@ func TestParquetFuzz(t *testing.T) { "-store-gateway.sharding-enabled": "false", "--querier.store-gateway-addresses": "nonExistent", // Make sure we do not call Store gateways // alert manager - "-alertmanager.web.external-url": "http://localhost/alertmanager", - "-frontend.query-vertical-shard-size": "1", + "-alertmanager.web.external-url": "http://localhost/alertmanager", + // Enable vertical sharding. 
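+		// A vertical shard size of 3 (up from 1) ensures fuzzed queries are actually split and merged across shards.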
+ "-frontend.query-vertical-shard-size": "3", "-frontend.max-cache-freshness": "1m", // enable experimental promQL funcs "-querier.enable-promql-experimental-functions": "true", @@ -98,19 +99,8 @@ func TestParquetFuzz(t *testing.T) { end := now.Add(-time.Hour) for i := 0; i < numSeries; i++ { - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_a"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 3)}, - {Name: "status_code", Value: statusCodes[i%5]}, - }) - - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_b"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa((i + 1) % 3)}, - {Name: "status_code", Value: statusCodes[(i+1)%5]}, - }) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_a", "job", "test", "series", strconv.Itoa(i%3), "status_code", statusCodes[i%5])) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_b", "job", "test", "series", strconv.Itoa((i+1)%3), "status_code", statusCodes[(i+1)%5])) } id, err := e2e.CreateBlock(ctx, rnd, dir, lbls, numSamples, start.UnixMilli(), end.UnixMilli(), scrapeInterval.Milliseconds(), 10) require.NoError(t, err) @@ -130,16 +120,20 @@ func TestParquetFuzz(t *testing.T) { // Wait until we convert the blocks cortex_testutil.Poll(t, 30*time.Second, true, func() interface{} { found := false + foundBucketIndex := false err := bkt.Iter(context.Background(), "", func(name string) error { fmt.Println(name) if name == fmt.Sprintf("parquet-markers/%v-parquet-converter-mark.json", id.String()) { found = true } + if name == "bucket-index.json.gz" { + foundBucketIndex = true + } return nil }, objstore.WithRecursiveIter()) require.NoError(t, err) - return found + return found && foundBucketIndex }) att, err := bkt.Attributes(context.Background(), "bucket-index.json.gz") @@ -178,7 +172,7 @@ func TestParquetFuzz(t *testing.T) { } ps := promqlsmith.New(rnd, lbls, opts...) - runQueryFuzzTestCases(t, ps, c1, c2, end, start, end, scrapeInterval, 500, false) + runQueryFuzzTestCases(t, ps, c1, c2, end, start, end, scrapeInterval, 1000, false) require.NoError(t, cortex.WaitSumMetricsWithOptions(e2e.Greater(0), []string{"cortex_parquet_queryable_blocks_queried_total"}, e2e.WithLabelMatchers( labels.MustNewMatcher(labels.MatchEqual, "type", "parquet")))) diff --git a/integration/parse_query_api_test.go b/integration/parse_query_api_test.go new file mode 100644 index 00000000000..06db800a922 --- /dev/null +++ b/integration/parse_query_api_test.go @@ -0,0 +1,135 @@ +//go:build requires_docker +// +build requires_docker + +package integration + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cortexproject/cortex/integration/e2e" + e2edb "github.com/cortexproject/cortex/integration/e2e/db" + "github.com/cortexproject/cortex/integration/e2ecortex" +) + +func TestParseQueryAPIQuerier(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
+	consul := e2edb.NewConsul()
+	minio := e2edb.NewMinio(9000, bucketName)
+	require.NoError(t, s.StartAndWaitReady(consul, minio))
+
+	flags := mergeFlags(BlocksStorageFlags(), map[string]string{
+		"-auth.enabled": "true",
+	})
+
+	distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	require.NoError(t, s.StartAndWaitReady(distributor, ingester))
+
+	// Wait until the distributor has updated the ring.
+	require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+
+	querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	require.NoError(t, s.StartAndWaitReady(querier))
+
+	// Wait until the querier has updated the ring.
+	require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+
+	endpoint := fmt.Sprintf("http://%s/api/prom/api/v1/parse_query?query=foo/bar", querier.HTTPEndpoint())
+
+	req, err := http.NewRequest("GET", endpoint, nil)
+	require.NoError(t, err)
+	req.Header.Set("X-Scope-OrgID", "user-1")
+
+	resp, err := http.DefaultClient.Do(req)
+	require.NoError(t, err)
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	require.NoError(t, err)
+
+	require.Equal(t, http.StatusOK, resp.StatusCode)
+
+	var parsed struct {
+		Status string          `json:"status"`
+		Data   json.RawMessage `json:"data"`
+	}
+	require.NoError(t, json.Unmarshal(body, &parsed))
+	require.Equal(t, "success", parsed.Status)
+
+	// check for AST contents.
+	require.Contains(t, string(parsed.Data), "\"op\":\"/\"")
+	require.Contains(t, string(parsed.Data), `"lhs":{"matchers":[{"name":"__name__","type":"=","value":"foo"}]`)
+	require.Contains(t, string(parsed.Data), `"rhs":{"matchers":[{"name":"__name__","type":"=","value":"bar"}]`)
+}
+
+func TestParseQueryAPIQueryFrontend(t *testing.T) {
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	// Start dependencies.
+	consul := e2edb.NewConsul()
+	minio := e2edb.NewMinio(9000, bucketName)
+	require.NoError(t, s.StartAndWaitReady(consul, minio))
+
+	flags := mergeFlags(BlocksStorageFlags(), map[string]string{
+		"-auth.enabled": "true",
+	})
+
+	// Start the query-frontend.
+	queryFrontend := e2ecortex.NewQueryFrontend("query-frontend", flags, "")
+	require.NoError(t, s.Start(queryFrontend))
+
+	distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "")
+	require.NoError(t, s.StartAndWaitReady(distributor, ingester))
+
+	// Wait until the distributor has updated the ring.
+ require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + querier := e2ecortex.NewQuerier("querierWithFrontend", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), mergeFlags(flags, map[string]string{ + "-querier.frontend-address": queryFrontend.NetworkGRPCEndpoint(), + }), "") + + require.NoError(t, s.StartAndWaitReady(querier)) + require.NoError(t, s.WaitReady(queryFrontend)) + + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + endpoint := fmt.Sprintf("http://%s/api/prom/api/v1/parse_query?query=foo/bar", queryFrontend.HTTPEndpoint()) + + req, err := http.NewRequest("GET", endpoint, nil) + require.NoError(t, err) + req.Header.Set("X-Scope-OrgID", "user-1") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, http.StatusOK, resp.StatusCode) + + var parsed struct { + Status string `json:"status"` + Data json.RawMessage `json:"data"` + } + require.NoError(t, json.Unmarshal(body, &parsed)) + require.Equal(t, "success", parsed.Status) + + // check for AST contents. + require.Contains(t, string(parsed.Data), "\"op\":\"/\"") + require.Contains(t, string(parsed.Data), `"lhs":{"matchers":[{"name":"__name__","type":"=","value":"foo"}]`) + require.Contains(t, string(parsed.Data), `"rhs":{"matchers":[{"name":"__name__","type":"=","value":"bar"}]`) +} diff --git a/integration/querier_test.go b/integration/querier_test.go index 7e16b587dbb..27929ba5d86 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -1375,3 +1375,78 @@ func TestQuerierEngineConfigs(t *testing.T) { } } + +func TestQuerierDistributedExecution(t *testing.T) { + // e2e test setup + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // initialize the flags + flags := mergeFlags( + BlocksStorageFlags(), + map[string]string{ + "-blocks-storage.tsdb.block-ranges-period": (5 * time.Second).String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((5 * time.Second * 2) - 1).String(), + "-querier.thanos-engine": "true", + // enable distributed execution (logical plan execution) + "-querier.distributed-exec-enabled": "true", + }, + ) + + minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"]) + consul := e2edb.NewConsul() + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // start services + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + queryScheduler := e2ecortex.NewQueryScheduler("query-scheduler", flags, "") + storeGateway := e2ecortex.NewStoreGateway("store-gateway", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(queryScheduler, distributor, ingester, storeGateway)) + flags = mergeFlags(flags, map[string]string{ + "-querier.store-gateway-addresses": strings.Join([]string{storeGateway.NetworkGRPCEndpoint()}, ","), + }) + + queryFrontend := e2ecortex.NewQueryFrontend("query-frontend", mergeFlags(flags, map[string]string{ + "-frontend.scheduler-address": queryScheduler.NetworkGRPCEndpoint(), + }), "") + require.NoError(t, s.Start(queryFrontend)) + + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), 
mergeFlags(flags, map[string]string{
+		"-querier.scheduler-address": queryScheduler.NetworkGRPCEndpoint(),
+	}), "")
+	require.NoError(t, s.StartAndWaitReady(querier))
+
+	// Wait until the distributor and querier have updated the ring.
+	require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
+	require.NoError(t, querier.WaitSumMetrics(e2e.Equals(2*512), "cortex_ring_tokens_total"))
+
+	c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), queryFrontend.HTTPEndpoint(), "", "", "user-1")
+	require.NoError(t, err)
+
+	series1Timestamp := time.Now()
+	series2Timestamp := series1Timestamp.Add(time.Minute * 1)
+	series1, expectedVector1 := generateSeries("series_1", series1Timestamp, prompb.Label{Name: "series_1", Value: "series_1"})
+	series2, expectedVector2 := generateSeries("series_2", series2Timestamp, prompb.Label{Name: "series_2", Value: "series_2"})
+
+	res, err := c.Push(series1)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	res, err = c.Push(series2)
+	require.NoError(t, err)
+	require.Equal(t, 200, res.StatusCode)
+
+	// main tests
+	// - make sure queries are still executable with distributed execution enabled
+	var val model.Value
+	val, err = c.Query("series_1", series1Timestamp)
+	require.NoError(t, err)
+	require.Equal(t, expectedVector1, val.(model.Vector))
+
+	val, err = c.Query("series_2", series2Timestamp)
+	require.NoError(t, err)
+	require.Equal(t, expectedVector2, val.(model.Vector))
+}
diff --git a/integration/query_frontend_test.go b/integration/query_frontend_test.go
index 6d7b0651d7a..b77bfa64756 100644
--- a/integration/query_frontend_test.go
+++ b/integration/query_frontend_test.go
@@ -216,14 +216,34 @@ func TestQueryFrontendProtobufCodec(t *testing.T) {
 			require.NoError(t, s.StartAndWaitReady(minio))
 
 			flags = mergeFlags(e2e.EmptyFlags(), map[string]string{
-				"-api.querier-default-codec":    "protobuf",
-				"-querier.response-compression": "gzip",
+				"-api.querier-default-codec": "protobuf",
 			})
 			return cortexConfigFile, flags
 		},
 	})
 }
 
+func TestQuerierToQueryFrontendCompression(t *testing.T) {
+	for _, compression := range []string{"gzip", "zstd", "snappy", ""} {
+		runQueryFrontendTest(t, queryFrontendTestConfig{
+			testMissingMetricName: false,
+			querySchedulerEnabled: true,
+			queryStatsEnabled:     true,
+			setup: func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) {
+				require.NoError(t, writeFileToSharedDir(s, cortexConfigFile, []byte(BlocksStorageConfig)))
+
+				minio := e2edb.NewMinio(9000, BlocksStorageFlags()["-blocks-storage.s3.bucket-name"])
+				require.NoError(t, s.StartAndWaitReady(minio))
+
+				flags = mergeFlags(e2e.EmptyFlags(), map[string]string{
+					"-querier.response-compression": compression,
+				})
+				return cortexConfigFile, flags
+			},
+		})
+	}
+}
+
 func TestQueryFrontendRemoteRead(t *testing.T) {
 	runQueryFrontendTest(t, queryFrontendTestConfig{
 		remoteReadEnabled: true,
diff --git a/integration/query_fuzz_test.go b/integration/query_fuzz_test.go
index d4c501737e3..b12560be6f4 100644
--- a/integration/query_fuzz_test.go
+++ b/integration/query_fuzz_test.go
@@ -108,19 +108,8 @@ func TestNativeHistogramFuzz(t *testing.T) {
 	scrapeInterval := time.Minute
 	statusCodes := []string{"200", "400", "404", "500", "502"}
 	for i := 0; i < numSeries; i++ {
-		lbls = append(lbls, labels.Labels{
-			{Name: labels.MetricName, Value: "test_series_a"},
-			{Name: "job", Value: "test"},
-			{Name: "series", Value: strconv.Itoa(i % 3)},
-			{Name: "status_code", Value: statusCodes[i%5]},
-		})
-
-		lbls =
append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_b"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa((i + 1) % 3)}, - {Name: "status_code", Value: statusCodes[(i+1)%5]}, - }) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_a", "job", "test", "series", strconv.Itoa(i%3), "status_code", statusCodes[i%5])) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_b", "job", "test", "series", strconv.Itoa((i+1)%3), "status_code", statusCodes[(i+1)%5])) } ctx := context.Background() @@ -147,7 +136,7 @@ func TestNativeHistogramFuzz(t *testing.T) { err = writeFileToSharedDir(s, "prometheus.yml", []byte("")) require.NoError(t, err) - prom := e2edb.NewPrometheus("quay.io/prometheus/prometheus:v3.3.1", nil) + prom := e2edb.NewPrometheus("", nil) require.NoError(t, s.StartAndWaitReady(prom)) c2, err := e2ecortex.NewPromQueryClient(prom.HTTPEndpoint()) @@ -221,19 +210,8 @@ func TestExperimentalPromQLFuncsWithPrometheus(t *testing.T) { scrapeInterval := time.Minute statusCodes := []string{"200", "400", "404", "500", "502"} for i := 0; i < numSeries; i++ { - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_a"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 3)}, - {Name: "status_code", Value: statusCodes[i%5]}, - }) - - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_b"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa((i + 1) % 3)}, - {Name: "status_code", Value: statusCodes[(i+1)%5]}, - }) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_a", "job", "test", "series", strconv.Itoa(i%3), "status_code", statusCodes[i%5])) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_b", "job", "test", "series", strconv.Itoa((i+1)%3), "status_code", statusCodes[(i+1)%5])) } ctx := context.Background() @@ -799,7 +777,7 @@ func TestVerticalShardingFuzz(t *testing.T) { } ps := promqlsmith.New(rnd, lbls, opts...) 
- runQueryFuzzTestCases(t, ps, c1, c2, now, start, end, scrapeInterval, 1000, false) + runQueryFuzzTestCases(t, ps, c1, c2, end, start, end, scrapeInterval, 1000, false) } func TestProtobufCodecFuzz(t *testing.T) { @@ -1209,13 +1187,7 @@ func TestStoreGatewayLazyExpandedPostingsSeriesFuzz(t *testing.T) { metricName := "http_requests_total" statusCodes := []string{"200", "400", "404", "500", "502"} for i := 0; i < numSeries; i++ { - lbl := labels.Labels{ - {Name: labels.MetricName, Value: metricName}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 200)}, - {Name: "status_code", Value: statusCodes[i%5]}, - } - lbls = append(lbls, lbl) + lbls = append(lbls, labels.FromStrings(labels.MetricName, metricName, "job", "test", "series", strconv.Itoa(i%200), "status_code", statusCodes[i%5])) } ctx := context.Background() rnd := rand.New(rand.NewSource(time.Now().Unix())) @@ -1367,13 +1339,7 @@ func TestStoreGatewayLazyExpandedPostingsSeriesFuzzWithPrometheus(t *testing.T) metricName := "http_requests_total" statusCodes := []string{"200", "400", "404", "500", "502"} for i := 0; i < numSeries; i++ { - lbl := labels.Labels{ - {Name: labels.MetricName, Value: metricName}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 200)}, - {Name: "status_code", Value: statusCodes[i%5]}, - } - lbls = append(lbls, lbl) + lbls = append(lbls, labels.FromStrings(labels.MetricName, metricName, "job", "test", "series", strconv.Itoa(i%200), "status_code", statusCodes[i%5])) } ctx := context.Background() rnd := rand.New(rand.NewSource(time.Now().Unix())) @@ -1673,19 +1639,8 @@ func TestPrometheusCompatibilityQueryFuzz(t *testing.T) { scrapeInterval := time.Minute statusCodes := []string{"200", "400", "404", "500", "502"} for i := 0; i < numSeries; i++ { - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_a"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa(i % 3)}, - {Name: "status_code", Value: statusCodes[i%5]}, - }) - - lbls = append(lbls, labels.Labels{ - {Name: labels.MetricName, Value: "test_series_b"}, - {Name: "job", Value: "test"}, - {Name: "series", Value: strconv.Itoa((i + 1) % 3)}, - {Name: "status_code", Value: statusCodes[(i+1)%5]}, - }) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_a", "job", "test", "series", strconv.Itoa(i%3), "status_code", statusCodes[i%5])) + lbls = append(lbls, labels.FromStrings(labels.MetricName, "test_series_b", "job", "test", "series", strconv.Itoa((i+1)%3), "status_code", statusCodes[(i+1)%5])) } ctx := context.Background() @@ -1838,7 +1793,7 @@ func runQueryFuzzTestCases(t *testing.T, ps *promqlsmith.PromQLSmith, c1, c2 *e2 failures++ } } else if !cmp.Equal(tc.res1, tc.res2, comparer) { - t.Logf("case %d results mismatch.\n%s: %s\nres1: %s\nres2: %s\n", i, qt, tc.query, tc.res1.String(), tc.res2.String()) + t.Logf("case %d results mismatch.\n%s: %s\nres1 len: %d data: %s\nres2 len: %d data: %s\n", i, qt, tc.query, resultLength(tc.res1), tc.res1.String(), resultLength(tc.res2), tc.res2.String()) failures++ } } @@ -1872,3 +1827,17 @@ func isValidQuery(generatedQuery parser.Expr, skipStdAggregations bool) bool { } return isValid } + +func resultLength(x model.Value) int { + vx, xvec := x.(model.Vector) + if xvec { + return vx.Len() + } + + mx, xMatrix := x.(model.Matrix) + if xMatrix { + return mx.Len() + } + // Other type, return 0 + return 0 +} diff --git a/integration/remote_write_v2_test.go b/integration/remote_write_v2_test.go new file mode 
100644
index 00000000000..8ba26447f68
--- /dev/null
+++ b/integration/remote_write_v2_test.go
@@ -0,0 +1,464 @@
+//go:build integration_remote_write_v2
+// +build integration_remote_write_v2
+
+package integration
+
+import (
+	"math/rand"
+	"net/http"
+	"path"
+	"testing"
+	"time"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/prompb"
+	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/cortexproject/cortex/integration/e2e"
+	e2edb "github.com/cortexproject/cortex/integration/e2e/db"
+	"github.com/cortexproject/cortex/integration/e2ecortex"
+	"github.com/cortexproject/cortex/pkg/storage/tsdb"
+)
+
+func TestIngesterRollingUpdate(t *testing.T) {
+	// Test the ingester rolling-update scenario: -distributor.remote-writev2-enabled is true while the ingester still runs the v1.19.0 image.
+	// Expected: the remote write 2.0 push succeeds.
+	const blockRangePeriod = 5 * time.Second
+	ingesterImage := "quay.io/cortexproject/cortex:v1.19.0"
+
+	s, err := e2e.NewScenario(networkName)
+	require.NoError(t, err)
+	defer s.Close()
+
+	// Start dependencies.
+	consul := e2edb.NewConsulWithName("consul")
+	require.NoError(t, s.StartAndWaitReady(consul))
+
+	flags := mergeFlags(
+		AlertmanagerLocalFlags(),
+		map[string]string{
+			"-store.engine":                                     blocksStorageEngine,
+			"-blocks-storage.backend":                           "filesystem",
+			"-blocks-storage.tsdb.head-compaction-interval":     "4m",
+			"-blocks-storage.bucket-store.sync-interval":        "15m",
+			"-blocks-storage.bucket-store.index-cache.backend":  tsdb.IndexCacheBackendInMemory,
+			"-blocks-storage.bucket-store.bucket-index.enabled": "true",
+			"-querier.query-store-for-labels-enabled":           "true",
+			"-blocks-storage.tsdb.block-ranges-period":          blockRangePeriod.String(),
+			"-blocks-storage.tsdb.ship-interval":                "1s",
+			"-blocks-storage.tsdb.retention-period":             ((blockRangePeriod * 2) - 1).String(),
+			"-blocks-storage.tsdb.enable-native-histograms":     "true",
+			// Ingester.
+			"-ring.store":      "consul",
+			"-consul.hostname": consul.NetworkHTTPEndpoint(),
+			// Distributor.
+			"-distributor.replication-factor": "1",
+			// Store-gateway.
+			"-store-gateway.sharding-enabled": "false",
+			// alert manager
+			"-alertmanager.web.external-url": "http://localhost/alertmanager",
+		},
+	)
+
+	distributorFlag := mergeFlags(flags, map[string]string{
+		"-distributor.remote-writev2-enabled": "true",
+	})
+
+	// make alert manager config dir
+	require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{}))
+
+	path := path.Join(s.SharedDir(), "cortex-1")
+
+	flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path})
+	// Start all Cortex services.
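+	// The ingester runs the previous release image while every other service runs the image under test, mimicking a rolling update.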
+ ingester := e2ecortex.NewIngester("ingester", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, ingesterImage) + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), distributorFlag, "") + storeGateway := e2ecortex.NewStoreGateway("store-gateway", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + querier := e2ecortex.NewQuerier("querier", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), mergeFlags(flags, map[string]string{ + "-querier.store-gateway-addresses": storeGateway.NetworkGRPCEndpoint()}), "") + + require.NoError(t, s.StartAndWaitReady(querier, ingester, distributor, storeGateway)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), querier.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + symbols1, series, expectedVector := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"}) + res, err := c.PushV2(symbols1, series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "1", "0", "0") + + // sample + result, err := c.Query("test_series", now) + require.NoError(t, err) + assert.Equal(t, expectedVector, result.(model.Vector)) + + // metadata + metadata, err := c.Metadata("test_series", "") + require.NoError(t, err) + require.Equal(t, 1, len(metadata["test_series"])) + + // histogram + histogramIdx := rand.Uint32() + symbols2, histogramSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "false"}) + res, err = c.PushV2(symbols2, histogramSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, true, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) + res, err = c.PushV2(symbols3, histogramFloatSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + testHistogramTimestamp := now.Add(blockRangePeriod * 2) + expectedHistogram := tsdbutil.GenerateTestHistogram(int64(histogramIdx)) + result, err = c.Query(`test_histogram`, testHistogramTimestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + v := result.(model.Vector) + require.Equal(t, 2, v.Len()) + for _, s := range v { + require.NotNil(t, s.Histogram) + require.Equal(t, float64(expectedHistogram.Count), float64(s.Histogram.Count)) + require.Equal(t, float64(expectedHistogram.Sum), float64(s.Histogram.Sum)) + } +} + +func TestIngest_SenderSendPRW2_DistributorNotAllowPRW2(t *testing.T) { + // Test `-distributor.remote-writev2-enabled=false` but the Sender pushes PRW2 + // Expected: status code is 200, but samples are not written. + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
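The testPushHeader helper called above (and defined at the end of this file) compares the PRW 2.0 write-stats response headers as raw strings. A hedged numeric variant, assuming the headers always carry base-10 counters and treating an absent header as zero; the header names are taken from these tests:

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// writeStats parses the three Remote Write 2.0 write-stats headers the
// tests in this file assert on.
func writeStats(h http.Header) (samples, histograms, exemplars int64, err error) {
	parse := func(name string) (int64, error) {
		v := h.Get(name)
		if v == "" {
			return 0, nil // absent header treated as zero written
		}
		return strconv.ParseInt(v, 10, 64)
	}
	if samples, err = parse("X-Prometheus-Remote-Write-Samples-Written"); err != nil {
		return
	}
	if histograms, err = parse("X-Prometheus-Remote-Write-Histograms-Written"); err != nil {
		return
	}
	exemplars, err = parse("X-Prometheus-Remote-Write-Exemplars-Written")
	return
}

func main() {
	h := http.Header{}
	h.Set("X-Prometheus-Remote-Write-Samples-Written", "20")
	s, hg, ex, _ := writeStats(h)
	fmt.Println(s, hg, ex) // 20 0 0
}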
+ consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. + "-distributor.replication-factor": "1", + "-distributor.remote-writev2-enabled": "false", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + symbols1, series, _ := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"}) + res, err := c.PushV2(symbols1, series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + + // sample + result, err := c.Query("test_series", now) + require.NoError(t, err) + require.Empty(t, result) +} + +func TestIngest(t *testing.T) { + const blockRangePeriod = 5 * time.Second + + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + // Distributor. + "-distributor.replication-factor": "1", + "-distributor.remote-writev2-enabled": "true", + // Store-gateway. 
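TestIngest_SenderSendPRW2_DistributorNotAllowPRW2 above hinges on the distributor's v2 gate: with -distributor.remote-writev2-enabled=false a PRW 2.0 body is acknowledged with a 200 but nothing becomes queryable. One plausible shape for such a gate, hypothetical and not Cortex's actual handler; only the media type (application/x-protobuf;proto=io.prometheus.write.v2.Request) is standard PRW 2.0:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// isV2 recognizes the Remote Write 2.0 content type.
func isV2(r *http.Request) bool {
	return strings.Contains(r.Header.Get("Content-Type"), "proto=io.prometheus.write.v2.Request")
}

// gatedPush routes v2 payloads to the v2 decoder only when the feature
// flag is on; otherwise everything takes the v1 path, where a v2 body
// yields no usable samples.
func gatedPush(remoteWriteV2Enabled bool, v1, v2 http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if remoteWriteV2Enabled && isV2(r) {
			v2(w, r)
			return
		}
		v1(w, r)
	}
}

func main() {
	r, _ := http.NewRequest(http.MethodPost, "/api/v1/push", nil)
	r.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")
	fmt.Println(isV2(r)) // true
}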
+ "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + symbols1, series, expectedVector := e2e.GenerateSeriesV2("test_series", now, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "foo", Value: "bar"}) + res, err := c.PushV2(symbols1, series) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "1", "0", "0") + + // sample + result, err := c.Query("test_series", now) + require.NoError(t, err) + assert.Equal(t, expectedVector, result.(model.Vector)) + + // metadata + metadata, err := c.Metadata("test_series", "") + require.NoError(t, err) + require.Equal(t, 1, len(metadata["test_series"])) + + // histogram + histogramIdx := rand.Uint32() + symbols2, histogramSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, false, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "false"}) + res, err = c.PushV2(symbols2, histogramSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + // float histogram + symbols3, histogramFloatSeries := e2e.GenerateHistogramSeriesV2("test_histogram", now, histogramIdx, true, prompb.Label{Name: "job", Value: "test"}, prompb.Label{Name: "float", Value: "true"}) + res, err = c.PushV2(symbols3, histogramFloatSeries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "0", "1", "0") + + testHistogramTimestamp := now.Add(blockRangePeriod * 2) + expectedHistogram := tsdbutil.GenerateTestHistogram(int64(histogramIdx)) + result, err = c.Query(`test_histogram`, testHistogramTimestamp) + require.NoError(t, err) + require.Equal(t, model.ValVector, result.Type()) + v := result.(model.Vector) + require.Equal(t, 2, v.Len()) + for _, s := range v { + require.NotNil(t, s.Histogram) + require.Equal(t, float64(expectedHistogram.Count), float64(s.Histogram.Count)) + require.Equal(t, float64(expectedHistogram.Sum), float64(s.Histogram.Sum)) + } +} + +func TestExemplar(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
+ consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + "-ingester.max-exemplars": "100", + // Distributor. + "-distributor.replication-factor": "1", + "-distributor.remote-writev2-enabled": "true", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // make alert manager config dir + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs", []byte{})) + + path := path.Join(s.SharedDir(), "cortex-1") + + flags = mergeFlags(flags, map[string]string{"-blocks-storage.filesystem.dir": path}) + // Start Cortex replicas. + cortex := e2ecortex.NewSingleBinary("cortex", flags, "") + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Wait until Cortex replicas have updated the ring state. + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(float64(512)), "cortex_ring_tokens_total")) + + c, err := e2ecortex.NewClient(cortex.HTTPEndpoint(), cortex.HTTPEndpoint(), "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + tsMillis := e2e.TimeToMilliseconds(now) + + symbols := []string{"", "__name__", "test_metric", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} + timeseries := []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries1Metadata.Type. + + HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help. + UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit. + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: tsMillis}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: tsMillis}}, + }, + } + + res, err := c.PushV2(symbols, timeseries) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "1", "0", "1") + + start := time.Now().Add(-time.Minute) + end := now.Add(time.Minute) + + exemplars, err := c.QueryExemplars("test_metric", start, end) + require.NoError(t, err) + require.Equal(t, 1, len(exemplars)) +} + +func Test_WriteStatWithReplication(t *testing.T) { + // Test `X-Prometheus-Remote-Write-Samples-Written` header value + // with the replication. + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. 
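TestExemplar builds its writev2.TimeSeries by hand, which makes the PRW 2.0 symbol-table encoding visible: LabelsRefs holds (name, value) index pairs into the request-level symbols slice, and index 0 is reserved for the empty string. A small decoding sketch, assuming well-formed, even-length refs:

package main

import "fmt"

// resolveLabels decodes writev2-style label references: refs come in
// (nameRef, valueRef) pairs, each indexing into the symbols table.
func resolveLabels(symbols []string, refs []uint32) map[string]string {
	out := make(map[string]string, len(refs)/2)
	for i := 0; i+1 < len(refs); i += 2 {
		out[symbols[refs[i]]] = symbols[refs[i+1]]
	}
	return out
}

func main() {
	symbols := []string{"", "__name__", "test_metric", "b", "c", "baz", "qux", "d", "e", "foo", "bar"}
	fmt.Println(resolveLabels(symbols, []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}))
	// map[__name__:test_metric b:c baz:qux d:e foo:bar]
}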
+ consul := e2edb.NewConsulWithName("consul") + require.NoError(t, s.StartAndWaitReady(consul)) + + flags := mergeFlags( + AlertmanagerLocalFlags(), + map[string]string{ + "-store.engine": blocksStorageEngine, + "-blocks-storage.backend": "filesystem", + "-blocks-storage.tsdb.head-compaction-interval": "4m", + "-blocks-storage.bucket-store.sync-interval": "15m", + "-blocks-storage.bucket-store.index-cache.backend": tsdb.IndexCacheBackendInMemory, + "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-querier.query-store-for-labels-enabled": "true", + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.tsdb.enable-native-histograms": "true", + // Ingester. + "-ring.store": "consul", + "-consul.hostname": consul.NetworkHTTPEndpoint(), + "-ingester.max-exemplars": "100", + // Distributor. + "-distributor.replication-factor": "3", + "-distributor.remote-writev2-enabled": "true", + // Store-gateway. + "-store-gateway.sharding-enabled": "false", + // alert manager + "-alertmanager.web.external-url": "http://localhost/alertmanager", + }, + ) + + // Start Cortex components. + distributor := e2ecortex.NewDistributor("distributor", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester1 := e2ecortex.NewIngester("ingester-1", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester2 := e2ecortex.NewIngester("ingester-2", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + ingester3 := e2ecortex.NewIngester("ingester-3", e2ecortex.RingStoreConsul, consul.NetworkHTTPEndpoint(), flags, "") + require.NoError(t, s.StartAndWaitReady(distributor, ingester1, ingester2, ingester3)) + + // Wait until distributor have updated the ring. + require.NoError(t, distributor.WaitSumMetricsWithOptions(e2e.Equals(3), []string{"cortex_ring_members"}, e2e.WithLabelMatchers( + labels.MustNewMatcher(labels.MatchEqual, "name", "ingester"), + labels.MustNewMatcher(labels.MatchEqual, "state", "ACTIVE")))) + + c, err := e2ecortex.NewClient(distributor.HTTPEndpoint(), "", "", "", "user-1") + require.NoError(t, err) + + now := time.Now() + + // series push + start := now.Add(-time.Minute * 10) + numSamples := 20 + scrapeInterval := 30 * time.Second + symbols, series := e2e.GenerateV2SeriesWithSamples("test_series", start, scrapeInterval, 0, numSamples, prompb.Label{Name: "job", Value: "test"}) + res, err := c.PushV2(symbols, []writev2.TimeSeries{series}) + require.NoError(t, err) + require.Equal(t, 200, res.StatusCode) + testPushHeader(t, res.Header, "20", "0", "0") +} + +func testPushHeader(t *testing.T, header http.Header, expectedSamples, expectedHistogram, expectedExemplars string) { + require.Equal(t, expectedSamples, header.Get("X-Prometheus-Remote-Write-Samples-Written")) + require.Equal(t, expectedHistogram, header.Get("X-Prometheus-Remote-Write-Histograms-Written")) + require.Equal(t, expectedExemplars, header.Get("X-Prometheus-Remote-Write-Exemplars-Written")) +} diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 5a9a2d4261a..48bdaff5514 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -504,14 +504,14 @@ func testRulerAPIWithSharding(t *testing.T, enableRulesBackup bool) { assert.NoError(t, json.Unmarshal(responseJson, ar)) if !ar.LastEvaluation.IsZero() { // Labels will be merged only if groups are loaded to Prometheus rule manager - assert.Equal(t, 5, len(ar.Labels)) + assert.Equal(t, 5, ar.Labels.Len()) } - for _, label := range ar.Labels { - if label.Name == "duplicate_label" { + 
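Test_WriteStatWithReplication pushes 20 samples with replication factor 3 and still expects the Samples-Written header to read "20": the stat evidently counts logical samples, not per-replica writes. A sketch of what e2e.GenerateV2SeriesWithSamples presumably emits for that push; the helper's internals and values are assumptions, only the writev2.Sample shape comes from this file:

package main

import (
	"fmt"
	"time"

	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

// buildSamples emits numSamples samples spaced scrapeInterval apart,
// starting at start (assumed GenerateV2SeriesWithSamples behavior).
func buildSamples(start time.Time, scrapeInterval time.Duration, numSamples int) []writev2.Sample {
	samples := make([]writev2.Sample, 0, numSamples)
	for i := 0; i < numSamples; i++ {
		samples = append(samples, writev2.Sample{
			Value:     float64(i),
			Timestamp: start.Add(time.Duration(i) * scrapeInterval).UnixMilli(),
		})
	}
	return samples
}

func main() {
	fmt.Println(len(buildSamples(time.Now().Add(-10*time.Minute), 30*time.Second, 20))) // 20
}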
ar.Labels.Range(func(l labels.Label) { + if l.Name == "duplicate_label" { // rule label should override group label - assert.Equal(t, ruleLabels["duplicate_label"], label.Value) + assert.Equal(t, ruleLabels["duplicate_label"], l.Value) } - } + }) } }, }, diff --git a/pkg/alertmanager/alertmanager_ring.go b/pkg/alertmanager/alertmanager_ring.go index 90430137b03..33d72daeeb3 100644 --- a/pkg/alertmanager/alertmanager_ring.go +++ b/pkg/alertmanager/alertmanager_ring.go @@ -43,12 +43,13 @@ var SyncRingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE, ring.JOINING}, fun // is used to strip down the config to the minimum, and avoid confusion // to the user. type RingConfig struct { - KVStore kv.Config `yaml:"kvstore" doc:"description=The key-value store used to share the hash ring across multiple instances."` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - ReplicationFactor int `yaml:"replication_factor"` - ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` - TokensFilePath string `yaml:"tokens_file_path"` + KVStore kv.Config `yaml:"kvstore" doc:"description=The key-value store used to share the hash ring across multiple instances."` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + ReplicationFactor int `yaml:"replication_factor"` + ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` + TokensFilePath string `yaml:"tokens_file_path"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` FinalSleep time.Duration `yaml:"final_sleep"` WaitInstanceStateTimeout time.Duration `yaml:"wait_instance_state_timeout"` @@ -88,6 +89,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.ReplicationFactor, rfprefix+"replication-factor", 3, "The replication factor to use when sharding the alertmanager.") f.BoolVar(&cfg.ZoneAwarenessEnabled, rfprefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate alerts across different availability zones.") f.StringVar(&cfg.TokensFilePath, rfprefix+"tokens-file-path", "", "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") + f.BoolVar(&cfg.DetailedMetricsEnabled, rfprefix+"detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.") // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} @@ -134,6 +136,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.HeartbeatTimeout = cfg.HeartbeatTimeout rc.ReplicationFactor = cfg.ReplicationFactor rc.ZoneAwarenessEnabled = cfg.ZoneAwarenessEnabled + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled return rc } diff --git a/pkg/api/api.go b/pkg/api/api.go index ec02f72e760..ebe64440f9c 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -71,6 +71,10 @@ type Config struct { // Allows and is used to configure the addition of HTTP Header fields to logs HTTPRequestHeadersToLog flagext.StringSlice `yaml:"http_request_headers_to_log"` + // HTTP header that can be used as request id. 
It will always be included in logs + // If it's not provided, or this header is empty, then random requestId will be generated + RequestIdHeader string `yaml:"request_id_header"` + // This sets the Origin header value corsRegexString string `yaml:"cors_origin"` @@ -87,6 +91,7 @@ var ( func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ResponseCompression, "api.response-compression-enabled", false, "Use GZIP compression for API responses. Some endpoints serve large YAML or JSON blobs which can benefit from compression.") f.Var(&cfg.HTTPRequestHeadersToLog, "api.http-request-headers-to-log", "Which HTTP Request headers to add to logs") + f.StringVar(&cfg.RequestIdHeader, "api.request-id-header", "", "HTTP header that can be used as request id") f.BoolVar(&cfg.buildInfoEnabled, "api.build-info-enabled", false, "If enabled, build Info API will be served by query frontend or querier.") f.StringVar(&cfg.QuerierDefaultCodec, "api.querier-default-codec", "json", "Choose default codec for querier response serialization. Supports 'json' and 'protobuf'.") cfg.RegisterFlagsWithPrefix("", f) @@ -169,8 +174,9 @@ func New(cfg Config, serverCfg server.Config, s *server.Server, logger log.Logge if cfg.HTTPAuthMiddleware == nil { api.AuthMiddleware = middleware.AuthenticateUser } - if len(cfg.HTTPRequestHeadersToLog) > 0 { - api.HTTPHeaderMiddleware = &HTTPHeaderMiddleware{TargetHeaders: cfg.HTTPRequestHeadersToLog} + api.HTTPHeaderMiddleware = &HTTPHeaderMiddleware{ + TargetHeaders: cfg.HTTPRequestHeadersToLog, + RequestIdHeader: cfg.RequestIdHeader, } return api, nil @@ -277,7 +283,7 @@ func (a *API) RegisterRuntimeConfig(runtimeConfigHandler http.HandlerFunc) { func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distributor.Config, overrides *validation.Overrides) { distributorpb.RegisterDistributorServer(a.server.GRPC, d) - a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") + a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.RemoteWriteV2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.RegisterRoute("/api/v1/otlp/v1/metrics", push.OTLPHandler(pushConfig.OTLPMaxRecvMsgSize, overrides, pushConfig.OTLPConfig, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/ring", "Distributor Ring Status") @@ -289,7 +295,7 @@ func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distrib a.RegisterRoute("/distributor/ha_tracker", d.HATracker, false, "GET") // Legacy Routes - a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/push"), push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") + a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/push"), push.Handler(pushConfig.RemoteWriteV2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.RegisterRoute("/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false, "GET") a.RegisterRoute("/ha-tracker", d.HATracker, false, "GET") } @@ -322,12 +328,12 @@ func (a *API) RegisterIngester(i Ingester, pushConfig distributor.Config) { a.RegisterRoute("/ingester/renewTokens", http.HandlerFunc(i.RenewTokenHandler), false, "GET", "POST") a.RegisterRoute("/ingester/all_user_stats", http.HandlerFunc(i.AllUserStatsHandler), false, "GET") a.RegisterRoute("/ingester/mode", http.HandlerFunc(i.ModeHandler), false, "GET", "POST") - 
a.RegisterRoute("/ingester/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. + a.RegisterRoute("/ingester/push", push.Handler(pushConfig.RemoteWriteV2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. // Legacy Routes a.RegisterRoute("/flush", http.HandlerFunc(i.FlushHandler), false, "GET", "POST") a.RegisterRoute("/shutdown", http.HandlerFunc(i.ShutdownHandler), false, "GET", "POST") - a.RegisterRoute("/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. + a.RegisterRoute("/push", push.Handler(pushConfig.RemoteWriteV2Enabled, pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. } func (a *API) RegisterTenantDeletion(api *purger.TenantDeletionAPI) { @@ -431,6 +437,7 @@ func (a *API) RegisterQueryAPI(handler http.Handler) { a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/query_range"), hf, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/query_exemplars"), hf, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/format_query"), hf, true, "GET", "POST") + a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/parse_query"), hf, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/labels"), hf, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/label/{name}/values"), hf, true, "GET") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/series"), hf, true, "GET", "POST", "DELETE") @@ -442,6 +449,7 @@ func (a *API) RegisterQueryAPI(handler http.Handler) { a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/query_range"), hf, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/query_exemplars"), hf, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/format_query"), hf, true, "GET", "POST") + a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/parse_query"), hf, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/labels"), hf, true, "GET", "POST") a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/label/{name}/values"), hf, true, "GET") a.RegisterRoute(path.Join(a.cfg.LegacyHTTPPrefix, "/api/v1/series"), hf, true, "GET", "POST", "DELETE") diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go index c25ca27234b..df2ec239f03 100644 --- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -89,6 +89,7 @@ func TestNewApiWithHeaderLogging(t *testing.T) { } +// HTTPHeaderMiddleware should be added even if no headers are specified to log because it also handles request ID injection. 
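The request-id behavior this comment refers to is implemented in pkg/api/middlewares.go later in this diff; condensed, the fallback is header-or-UUID. A standalone sketch (the field and header names follow the diff, the isolated shape is illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/google/uuid"
)

// requestID mirrors the fallback in HTTPHeaderMiddleware.injectRequestContext:
// use the configured header when present, otherwise generate a random UUID.
func requestID(r *http.Request, requestIdHeader string) string {
	if id := r.Header.Get(requestIdHeader); id != "" {
		return id
	}
	return uuid.NewString()
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "/test", nil)
	fmt.Println(requestID(r, "X-Request-ID") != "") // true: an ID is generated when the header is missing
}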
func TestNewApiWithoutHeaderLogging(t *testing.T) { cfg := Config{ HTTPRequestHeadersToLog: []string{}, @@ -102,7 +103,8 @@ func TestNewApiWithoutHeaderLogging(t *testing.T) { api, err := New(cfg, serverCfg, server, &FakeLogger{}) require.NoError(t, err) - require.Nil(t, api.HTTPHeaderMiddleware) + require.NotNil(t, api.HTTPHeaderMiddleware) + require.Empty(t, api.HTTPHeaderMiddleware.TargetHeaders) } diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index 9bcc6a6906e..54a55318542 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -19,13 +19,13 @@ import ( "github.com/prometheus/common/route" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/middleware" "github.com/cortexproject/cortex/pkg/api/queryapi" + "github.com/cortexproject/cortex/pkg/engine" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/codec" "github.com/cortexproject/cortex/pkg/querier/stats" @@ -161,9 +161,10 @@ func DefaultConfigHandler(actualCfg interface{}, defaultCfg interface{}) http.Ha // server to fulfill the Prometheus query API. func NewQuerierHandler( cfg Config, + querierCfg querier.Config, queryable storage.SampleAndChunkQueryable, exemplarQueryable storage.ExemplarQueryable, - engine promql.QueryEngine, + engine engine.QueryEngine, metadataQuerier querier.MetadataQuerier, reg prometheus.Registerer, logger log.Logger, @@ -239,6 +240,9 @@ func NewQuerierHandler( false, false, false, + false, + querierCfg.LookbackDelta, + false, ) // Let's clear all codecs to create the instrumented ones api.ClearCodecs() @@ -291,6 +295,7 @@ func NewQuerierHandler( router.Path(path.Join(prefix, "/api/v1/query_range")).Methods("GET", "POST").Handler(queryAPI.Wrap(queryAPI.RangeQueryHandler)) router.Path(path.Join(prefix, "/api/v1/query_exemplars")).Methods("GET", "POST").Handler(promRouter) router.Path(path.Join(prefix, "/api/v1/format_query")).Methods("GET", "POST").Handler(promRouter) + router.Path(path.Join(prefix, "/api/v1/parse_query")).Methods("GET", "POST").Handler(promRouter) router.Path(path.Join(prefix, "/api/v1/labels")).Methods("GET", "POST").Handler(promRouter) router.Path(path.Join(prefix, "/api/v1/label/{name}/values")).Methods("GET").Handler(promRouter) router.Path(path.Join(prefix, "/api/v1/series")).Methods("GET", "POST", "DELETE").Handler(promRouter) @@ -305,6 +310,7 @@ func NewQuerierHandler( router.Path(path.Join(legacyPrefix, "/api/v1/query_range")).Methods("GET", "POST").Handler(queryAPI.Wrap(queryAPI.RangeQueryHandler)) router.Path(path.Join(legacyPrefix, "/api/v1/query_exemplars")).Methods("GET", "POST").Handler(legacyPromRouter) router.Path(path.Join(legacyPrefix, "/api/v1/format_query")).Methods("GET", "POST").Handler(legacyPromRouter) + router.Path(path.Join(legacyPrefix, "/api/v1/parse_query")).Methods("GET", "POST").Handler(legacyPromRouter) router.Path(path.Join(legacyPrefix, "/api/v1/labels")).Methods("GET", "POST").Handler(legacyPromRouter) router.Path(path.Join(legacyPrefix, "/api/v1/label/{name}/values")).Methods("GET").Handler(legacyPromRouter) router.Path(path.Join(legacyPrefix, "/api/v1/series")).Methods("GET", "POST", "DELETE").Handler(legacyPromRouter) diff --git a/pkg/api/handlers_test.go b/pkg/api/handlers_test.go index 32e84d70a97..9b8b7930683 100644 --- a/pkg/api/handlers_test.go +++ 
b/pkg/api/handlers_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/querier" ) func TestIndexHandlerPrefix(t *testing.T) { @@ -229,10 +231,11 @@ func TestBuildInfoAPI(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { cfg := Config{buildInfoEnabled: true} + querierConfig := querier.Config{} version.Version = tc.version version.Branch = tc.branch version.Revision = tc.revision - handler := NewQuerierHandler(cfg, nil, nil, nil, nil, nil, &FakeLogger{}) + handler := NewQuerierHandler(cfg, querierConfig, nil, nil, nil, nil, nil, &FakeLogger{}) writer := httptest.NewRecorder() req := httptest.NewRequest("GET", "/api/v1/status/buildinfo", nil) req = req.WithContext(user.InjectOrgID(req.Context(), "test")) diff --git a/pkg/api/middlewares.go b/pkg/api/middlewares.go index 8ddefaa2c66..dcb9c298169 100644 --- a/pkg/api/middlewares.go +++ b/pkg/api/middlewares.go @@ -1,40 +1,51 @@ package api import ( - "context" "net/http" - util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/google/uuid" + + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) // HTTPHeaderMiddleware adds specified HTTPHeaders to the request context type HTTPHeaderMiddleware struct { - TargetHeaders []string + TargetHeaders []string + RequestIdHeader string } -// InjectTargetHeadersIntoHTTPRequest injects specified HTTPHeaders into the request context -func (h HTTPHeaderMiddleware) InjectTargetHeadersIntoHTTPRequest(r *http.Request) context.Context { - headerMap := make(map[string]string) +// injectRequestContext injects request related metadata into the request context +func (h HTTPHeaderMiddleware) injectRequestContext(r *http.Request) *http.Request { + requestContextMap := make(map[string]string) - // Check to make sure that Headers have not already been injected - checkMapInContext := util_log.HeaderMapFromContext(r.Context()) + // Check to make sure that request context have not already been injected + checkMapInContext := requestmeta.MapFromContext(r.Context()) if checkMapInContext != nil { - return r.Context() + return r } for _, target := range h.TargetHeaders { contents := r.Header.Get(target) if contents != "" { - headerMap[target] = contents + requestContextMap[target] = contents } } - return util_log.ContextWithHeaderMap(r.Context(), headerMap) + requestContextMap[requestmeta.LoggingHeadersKey] = requestmeta.LoggingHeaderKeysToString(h.TargetHeaders) + + reqId := r.Header.Get(h.RequestIdHeader) + if reqId == "" { + reqId = uuid.NewString() + } + requestContextMap[requestmeta.RequestIdKey] = reqId + + ctx := requestmeta.ContextWithRequestMetadataMap(r.Context(), requestContextMap) + return r.WithContext(ctx) } // Wrap implements Middleware func (h HTTPHeaderMiddleware) Wrap(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := h.InjectTargetHeadersIntoHTTPRequest(r) - next.ServeHTTP(w, r.WithContext(ctx)) + r = h.injectRequestContext(r) + next.ServeHTTP(w, r) }) } diff --git a/pkg/api/middlewares_test.go b/pkg/api/middlewares_test.go index dbf8719ad48..691d3b23584 100644 --- a/pkg/api/middlewares_test.go +++ b/pkg/api/middlewares_test.go @@ -7,12 +7,11 @@ import ( "github.com/stretchr/testify/require" - util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) -var HTTPTestMiddleware = HTTPHeaderMiddleware{TargetHeaders: 
[]string{"TestHeader1", "TestHeader2", "Test3"}} - func TestHeaderInjection(t *testing.T) { + middleware := HTTPHeaderMiddleware{TargetHeaders: []string{"TestHeader1", "TestHeader2", "Test3"}} ctx := context.Background() h := http.Header{} contentsMap := make(map[string]string) @@ -32,12 +31,12 @@ func TestHeaderInjection(t *testing.T) { } req = req.WithContext(ctx) - ctx = HTTPTestMiddleware.InjectTargetHeadersIntoHTTPRequest(req) + req = middleware.injectRequestContext(req) - headerMap := util_log.HeaderMapFromContext(ctx) + headerMap := requestmeta.MapFromContext(req.Context()) require.NotNil(t, headerMap) - for _, header := range HTTPTestMiddleware.TargetHeaders { + for _, header := range middleware.TargetHeaders { require.Equal(t, contentsMap[header], headerMap[header]) } for header, contents := range contentsMap { @@ -46,6 +45,7 @@ func TestHeaderInjection(t *testing.T) { } func TestExistingHeaderInContextIsNotOverridden(t *testing.T) { + middleware := HTTPHeaderMiddleware{TargetHeaders: []string{"TestHeader1", "TestHeader2", "Test3"}} ctx := context.Background() h := http.Header{} @@ -58,7 +58,7 @@ func TestExistingHeaderInContextIsNotOverridden(t *testing.T) { h.Add("TestHeader2", "Fail2") h.Add("Test3", "Fail3") - ctx = util_log.ContextWithHeaderMap(ctx, contentsMap) + ctx = requestmeta.ContextWithRequestMetadataMap(ctx, contentsMap) req := &http.Request{ Method: "GET", RequestURI: "/HTTPHeaderTest", @@ -67,8 +67,77 @@ func TestExistingHeaderInContextIsNotOverridden(t *testing.T) { } req = req.WithContext(ctx) - ctx = HTTPTestMiddleware.InjectTargetHeadersIntoHTTPRequest(req) + req = middleware.injectRequestContext(req) + + require.Equal(t, contentsMap, requestmeta.MapFromContext(req.Context())) + +} + +func TestRequestIdInjection(t *testing.T) { + middleware := HTTPHeaderMiddleware{ + RequestIdHeader: "X-Request-ID", + } + + req := &http.Request{ + Method: "GET", + RequestURI: "/test", + Body: http.NoBody, + Header: http.Header{}, + } + req = req.WithContext(context.Background()) + req = middleware.injectRequestContext(req) + + requestID := requestmeta.RequestIdFromContext(req.Context()) + require.NotEmpty(t, requestID, "Request ID should be generated if not provided") +} + +func TestRequestIdFromHeaderIsUsed(t *testing.T) { + const providedID = "my-test-id-123" + + middleware := HTTPHeaderMiddleware{ + RequestIdHeader: "X-Request-ID", + } + + h := http.Header{} + h.Add("X-Request-ID", providedID) - require.Equal(t, contentsMap, util_log.HeaderMapFromContext(ctx)) + req := &http.Request{ + Method: "GET", + RequestURI: "/test", + Body: http.NoBody, + Header: h, + } + req = req.WithContext(context.Background()) + req = middleware.injectRequestContext(req) + + requestID := requestmeta.RequestIdFromContext(req.Context()) + require.Equal(t, providedID, requestID, "Request ID from header should be used") +} + +func TestTargetHeaderAndRequestIdHeaderOverlap(t *testing.T) { + const headerKey = "X-Request-ID" + const providedID = "overlap-id-456" + + middleware := HTTPHeaderMiddleware{ + TargetHeaders: []string{headerKey, "Other-Header"}, + RequestIdHeader: headerKey, + } + + h := http.Header{} + h.Add(headerKey, providedID) + h.Add("Other-Header", "some-value") + + req := &http.Request{ + Method: "GET", + RequestURI: "/test", + Body: http.NoBody, + Header: h, + } + req = req.WithContext(context.Background()) + req = middleware.injectRequestContext(req) + ctxMap := requestmeta.MapFromContext(req.Context()) + requestID := requestmeta.RequestIdFromContext(req.Context()) + require.Equal(t, 
providedID, ctxMap[headerKey], "Header value should be correctly stored") + require.Equal(t, providedID, requestID, "Request ID should come from the overlapping header") } diff --git a/pkg/api/queryapi/compression.go b/pkg/api/queryapi/compression.go new file mode 100644 index 00000000000..7dd6fcbacab --- /dev/null +++ b/pkg/api/queryapi/compression.go @@ -0,0 +1,90 @@ +package queryapi + +import ( + "io" + "net/http" + "strings" + + "github.com/klauspost/compress/gzip" + "github.com/klauspost/compress/snappy" + "github.com/klauspost/compress/zlib" + "github.com/klauspost/compress/zstd" +) + +const ( + acceptEncodingHeader = "Accept-Encoding" + contentEncodingHeader = "Content-Encoding" + gzipEncoding = "gzip" + deflateEncoding = "deflate" + snappyEncoding = "snappy" + zstdEncoding = "zstd" +) + +// Wrapper around http.Handler which adds suitable response compression based +// on the client's Accept-Encoding headers. +type compressedResponseWriter struct { + http.ResponseWriter + writer io.Writer +} + +// Writes HTTP response content data. +func (c *compressedResponseWriter) Write(p []byte) (int, error) { + return c.writer.Write(p) +} + +// Closes the compressedResponseWriter and ensures to flush all data before. +func (c *compressedResponseWriter) Close() { + if zstdWriter, ok := c.writer.(*zstd.Encoder); ok { + zstdWriter.Flush() + } + if snappyWriter, ok := c.writer.(*snappy.Writer); ok { + snappyWriter.Flush() + } + if zlibWriter, ok := c.writer.(*zlib.Writer); ok { + zlibWriter.Flush() + } + if gzipWriter, ok := c.writer.(*gzip.Writer); ok { + gzipWriter.Flush() + } + if closer, ok := c.writer.(io.Closer); ok { + defer closer.Close() + } +} + +// Constructs a new compressedResponseWriter based on client request headers. +func newCompressedResponseWriter(writer http.ResponseWriter, req *http.Request) *compressedResponseWriter { + encodings := strings.Split(req.Header.Get(acceptEncodingHeader), ",") + for _, encoding := range encodings { + switch strings.TrimSpace(encoding) { + case zstdEncoding: + encoder, err := zstd.NewWriter(writer) + if err == nil { + writer.Header().Set(contentEncodingHeader, zstdEncoding) + return &compressedResponseWriter{ResponseWriter: writer, writer: encoder} + } + case snappyEncoding: + writer.Header().Set(contentEncodingHeader, snappyEncoding) + return &compressedResponseWriter{ResponseWriter: writer, writer: snappy.NewBufferedWriter(writer)} + case gzipEncoding: + writer.Header().Set(contentEncodingHeader, gzipEncoding) + return &compressedResponseWriter{ResponseWriter: writer, writer: gzip.NewWriter(writer)} + case deflateEncoding: + writer.Header().Set(contentEncodingHeader, deflateEncoding) + return &compressedResponseWriter{ResponseWriter: writer, writer: zlib.NewWriter(writer)} + } + } + return &compressedResponseWriter{ResponseWriter: writer, writer: writer} +} + +// CompressionHandler is a wrapper around http.Handler which adds suitable +// response compression based on the client's Accept-Encoding headers. +type CompressionHandler struct { + Handler http.Handler +} + +// ServeHTTP adds compression to the original http.Handler's ServeHTTP() method. 
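newCompressedResponseWriter above selects the first client-listed encoding it supports; quality values are not parsed, so a token like gzip;q=0.5 simply fails to match and is skipped. The negotiation step in isolation:

package main

import (
	"fmt"
	"strings"
)

// pickEncoding reproduces the selection loop from
// newCompressedResponseWriter: split Accept-Encoding on commas and take
// the first supported token; empty string means identity (no compression).
func pickEncoding(acceptEncoding string) string {
	supported := map[string]bool{"gzip": true, "deflate": true, "snappy": true, "zstd": true}
	for _, enc := range strings.Split(acceptEncoding, ",") {
		if enc = strings.TrimSpace(enc); supported[enc] {
			return enc
		}
	}
	return ""
}

func main() {
	fmt.Println(pickEncoding("unknown, gzip")) // gzip
	fmt.Println(pickEncoding("snappy, gzip"))  // snappy
	fmt.Println(pickEncoding("br"))            // "" -> identity
}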
+func (c CompressionHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) { + compWriter := newCompressedResponseWriter(writer, req) + c.Handler.ServeHTTP(compWriter, req) + compWriter.Close() +} diff --git a/pkg/api/queryapi/compression_test.go b/pkg/api/queryapi/compression_test.go new file mode 100644 index 00000000000..bcd36a3728c --- /dev/null +++ b/pkg/api/queryapi/compression_test.go @@ -0,0 +1,159 @@ +package queryapi + +import ( + "bytes" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/klauspost/compress/gzip" + "github.com/klauspost/compress/snappy" + "github.com/klauspost/compress/zlib" + "github.com/klauspost/compress/zstd" + "github.com/stretchr/testify/require" +) + +func decompress(t *testing.T, encoding string, b []byte) []byte { + t.Helper() + + switch encoding { + case gzipEncoding: + r, err := gzip.NewReader(bytes.NewReader(b)) + require.NoError(t, err) + defer r.Close() + data, err := io.ReadAll(r) + require.NoError(t, err) + return data + case deflateEncoding: + r, err := zlib.NewReader(bytes.NewReader(b)) + require.NoError(t, err) + defer r.Close() + data, err := io.ReadAll(r) + require.NoError(t, err) + return data + case snappyEncoding: + data, err := io.ReadAll(snappy.NewReader(bytes.NewReader(b))) + require.NoError(t, err) + return data + case zstdEncoding: + r, err := zstd.NewReader(bytes.NewReader(b)) + require.NoError(t, err) + defer r.Close() + data, err := io.ReadAll(r) + require.NoError(t, err) + return data + default: + return b + } +} + +func TestNewCompressedResponseWriter_SupportedEncodings(t *testing.T) { + for _, tc := range []string{gzipEncoding, deflateEncoding, snappyEncoding, zstdEncoding} { + t.Run(tc, func(t *testing.T) { + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set(acceptEncodingHeader, tc) + + cw := newCompressedResponseWriter(rec, req) + payload := []byte("hello world") + _, err := cw.Write(payload) + require.NoError(t, err) + cw.Close() + + require.Equal(t, tc, rec.Header().Get(contentEncodingHeader)) + + decompressed := decompress(t, tc, rec.Body.Bytes()) + require.Equal(t, payload, decompressed) + + switch tc { + case gzipEncoding: + _, ok := cw.writer.(*gzip.Writer) + require.True(t, ok) + case deflateEncoding: + _, ok := cw.writer.(*zlib.Writer) + require.True(t, ok) + case snappyEncoding: + _, ok := cw.writer.(*snappy.Writer) + require.True(t, ok) + case zstdEncoding: + _, ok := cw.writer.(*zstd.Encoder) + require.True(t, ok) + } + }) + } +} + +func TestNewCompressedResponseWriter_UnsupportedEncoding(t *testing.T) { + for _, tc := range []string{"", "br", "unknown"} { + t.Run(tc, func(t *testing.T) { + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/", nil) + if tc != "" { + req.Header.Set(acceptEncodingHeader, tc) + } + + cw := newCompressedResponseWriter(rec, req) + payload := []byte("data") + _, err := cw.Write(payload) + require.NoError(t, err) + cw.Close() + + require.Empty(t, rec.Header().Get(contentEncodingHeader)) + require.Equal(t, payload, rec.Body.Bytes()) + require.Same(t, rec, cw.writer) + }) + } +} + +func TestNewCompressedResponseWriter_MultipleEncodings(t *testing.T) { + tests := []struct { + header string + expectEnc string + expectType interface{} + }{ + {"snappy, gzip", snappyEncoding, &snappy.Writer{}}, + {"unknown, gzip", gzipEncoding, &gzip.Writer{}}, + } + + for _, tc := range tests { + t.Run(tc.header, func(t *testing.T) { + rec := httptest.NewRecorder() + req := 
httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set(acceptEncodingHeader, tc.header) + + cw := newCompressedResponseWriter(rec, req) + _, err := cw.Write([]byte("payload")) + require.NoError(t, err) + cw.Close() + + require.Equal(t, tc.expectEnc, rec.Header().Get(contentEncodingHeader)) + decompressed := decompress(t, tc.expectEnc, rec.Body.Bytes()) + require.Equal(t, []byte("payload"), decompressed) + + switch tc.expectEnc { + case gzipEncoding: + require.IsType(t, &gzip.Writer{}, cw.writer) + case snappyEncoding: + require.IsType(t, &snappy.Writer{}, cw.writer) + } + }) + } +} + +func TestCompressionHandler_ServeHTTP(t *testing.T) { + handler := CompressionHandler{Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte("hello")) + require.NoError(t, err) + })} + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set(acceptEncodingHeader, gzipEncoding) + + handler.ServeHTTP(rec, req) + + require.Equal(t, gzipEncoding, rec.Header().Get(contentEncodingHeader)) + decompressed := decompress(t, gzipEncoding, rec.Body.Bytes()) + require.Equal(t, []byte("hello"), decompressed) +} diff --git a/pkg/api/queryapi/query_api.go b/pkg/api/queryapi/query_api.go index e3793ef5bee..3e4f7bb49ca 100644 --- a/pkg/api/queryapi/query_api.go +++ b/pkg/api/queryapi/query_api.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "strconv" "time" "github.com/go-kit/log" @@ -15,6 +16,7 @@ import ( "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/httputil" v1 "github.com/prometheus/prometheus/web/api/v1" + "github.com/thanos-io/promql-engine/logicalplan" "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/engine" @@ -25,7 +27,7 @@ import ( type QueryAPI struct { queryable storage.SampleAndChunkQueryable - queryEngine promql.QueryEngine + queryEngine engine.QueryEngine now func() time.Time statsRenderer v1.StatsRenderer logger log.Logger @@ -34,7 +36,7 @@ type QueryAPI struct { } func NewQueryAPI( - qe promql.QueryEngine, + qe engine.QueryEngine, q storage.SampleAndChunkQueryable, statsRenderer v1.StatsRenderer, logger log.Logger, @@ -100,10 +102,29 @@ func (q *QueryAPI) RangeQueryHandler(r *http.Request) (result apiFuncResult) { ctx = engine.AddEngineTypeToContext(ctx, r) ctx = querier.AddBlockStoreTypeToContext(ctx, r.Header.Get(querier.BlockStoreTypeHeader)) - qry, err := q.queryEngine.NewRangeQuery(ctx, q.queryable, opts, r.FormValue("query"), convertMsToTime(start), convertMsToTime(end), convertMsToDuration(step)) - if err != nil { - return invalidParamError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), "query") + + var qry promql.Query + startTime := convertMsToTime(start) + endTime := convertMsToTime(end) + stepDuration := convertMsToDuration(step) + + byteLP := []byte(r.PostFormValue("plan")) + if len(byteLP) != 0 { + logicalPlan, err := logicalplan.Unmarshal(byteLP) + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("invalid logical plan: %v", err)}, nil, nil} + } + qry, err = q.queryEngine.MakeRangeQueryFromPlan(ctx, q.queryable, opts, logicalPlan, startTime, endTime, stepDuration, r.FormValue("query")) + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to create range query from logical plan: %v", err)}, nil, nil} + } + } else { // if the logical plan field is empty, fall back to the query string + qry, err = q.queryEngine.NewRangeQuery(ctx, q.queryable, opts,
r.FormValue("query"), startTime, endTime, stepDuration) + if err != nil { + return invalidParamError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), "query") + } } + // From now on, we must only return with a finalizer in the result (to // be called by the caller) or call qry.Close ourselves (which is // required in the case of a panic). @@ -156,9 +177,25 @@ func (q *QueryAPI) InstantQueryHandler(r *http.Request) (result apiFuncResult) { ctx = engine.AddEngineTypeToContext(ctx, r) ctx = querier.AddBlockStoreTypeToContext(ctx, r.Header.Get(querier.BlockStoreTypeHeader)) - qry, err := q.queryEngine.NewInstantQuery(ctx, q.queryable, opts, r.FormValue("query"), convertMsToTime(ts)) - if err != nil { - return invalidParamError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), "query") + + var qry promql.Query + tsTime := convertMsToTime(ts) + + byteLP := []byte(r.PostFormValue("plan")) + if len(byteLP) != 0 { + logicalPlan, err := logicalplan.Unmarshal(byteLP) + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("invalid logical plan: %v", err)}, nil, nil} + } + qry, err = q.queryEngine.MakeInstantQueryFromPlan(ctx, q.queryable, opts, logicalPlan, tsTime, r.FormValue("query")) + if err != nil { + return apiFuncResult{nil, &apiError{errorInternal, fmt.Errorf("failed to create instant query from logical plan: %v", err)}, nil, nil} + } + } else { // if the logical plan field is empty, fall back to the query string + qry, err = q.queryEngine.NewInstantQuery(ctx, q.queryable, opts, r.FormValue("query"), tsTime) + if err != nil { + return invalidParamError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), "query") + } } // From now on, we must only return with a finalizer in the result (to @@ -208,7 +245,7 @@ func (q *QueryAPI) Wrap(f apiFunc) http.HandlerFunc { w.WriteHeader(http.StatusNoContent) } - return httputil.CompressionHandler{ + return CompressionHandler{ Handler: http.HandlerFunc(hf), }.ServeHTTP } @@ -237,6 +274,7 @@ func (q *QueryAPI) respond(w http.ResponseWriter, req *http.Request, data interf } w.Header().Set("Content-Type", codec.ContentType().String()) + w.Header().Set("X-Uncompressed-Length", strconv.Itoa(len(b))) w.WriteHeader(http.StatusOK) if n, err := w.Write(b); err != nil { level.Error(q.logger).Log("error writing response", "url", req.URL, "bytesWritten", n, "err", err) diff --git a/pkg/api/queryapi/query_api_test.go b/pkg/api/queryapi/query_api_test.go index 028184a12b8..2a0ce0cbc99 100644 --- a/pkg/api/queryapi/query_api_test.go +++ b/pkg/api/queryapi/query_api_test.go @@ -7,21 +7,28 @@ import ( "io" "net/http" "net/http/httptest" + "net/url" + "strings" "testing" "time" "github.com/go-kit/log" "github.com/gorilla/mux" "github.com/grafana/regexp" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/annotations" v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/stretchr/testify/require" + "github.com/thanos-io/promql-engine/logicalplan" + "github.com/thanos-io/promql-engine/query" "github.com/weaveworks/common/user" + engine2 "github.com/cortexproject/cortex/pkg/engine" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/series" "github.com/cortexproject/cortex/pkg/querier/stats" @@ -64,10 +71,14 @@ func (mockQuerier) Close()
error { } func Test_CustomAPI(t *testing.T) { - engine := promql.NewEngine(promql.EngineOpts{ - MaxSamples: 100, - Timeout: time.Second * 2, - }) + engine := engine2.New( + promql.EngineOpts{ + MaxSamples: 100, + Timeout: time.Second * 2, + }, + engine2.ThanosEngineConfig{Enabled: false}, + prometheus.NewRegistry()) + mockQueryable := &mockSampleAndChunkQueryable{ queryableFn: func(_, _ int64) (storage.Querier, error) { return mockQuerier{ @@ -175,10 +186,10 @@ func Test_CustomAPI(t *testing.T) { c := NewQueryAPI(engine, mockQueryable, querier.StatsRenderer, log.NewNopLogger(), []v1.Codec{v1.JSONCodec{}}, regexp.MustCompile(".*")) router := mux.NewRouter() - router.Path("/api/v1/query").Methods("GET").Handler(c.Wrap(c.InstantQueryHandler)) - router.Path("/api/v1/query_range").Methods("GET").Handler(c.Wrap(c.RangeQueryHandler)) + router.Path("/api/v1/query").Methods("POST").Handler(c.Wrap(c.InstantQueryHandler)) + router.Path("/api/v1/query_range").Methods("POST").Handler(c.Wrap(c.RangeQueryHandler)) - req := httptest.NewRequest(http.MethodGet, test.path, nil) + req := httptest.NewRequest(http.MethodPost, test.path, nil) ctx := context.Background() _, ctx = stats.ContextWithEmptyStats(ctx) req = req.WithContext(user.InjectOrgID(ctx, "user1")) @@ -209,10 +220,14 @@ func (m *mockCodec) Encode(_ *v1.Response) ([]byte, error) { } func Test_InvalidCodec(t *testing.T) { - engine := promql.NewEngine(promql.EngineOpts{ - MaxSamples: 100, - Timeout: time.Second * 2, - }) + engine := engine2.New( + promql.EngineOpts{ + MaxSamples: 100, + Timeout: time.Second * 2, + }, + engine2.ThanosEngineConfig{Enabled: false}, + prometheus.NewRegistry()) + mockQueryable := &mockSampleAndChunkQueryable{ queryableFn: func(_, _ int64) (storage.Querier, error) { return mockQuerier{ @@ -231,9 +246,9 @@ func Test_InvalidCodec(t *testing.T) { queryAPI := NewQueryAPI(engine, mockQueryable, querier.StatsRenderer, log.NewNopLogger(), []v1.Codec{&mockCodec{}}, regexp.MustCompile(".*")) router := mux.NewRouter() - router.Path("/api/v1/query").Methods("GET").Handler(queryAPI.Wrap(queryAPI.InstantQueryHandler)) + router.Path("/api/v1/query").Methods("POST").Handler(queryAPI.Wrap(queryAPI.InstantQueryHandler)) - req := httptest.NewRequest(http.MethodGet, "/api/v1/query?query=test", nil) + req := httptest.NewRequest(http.MethodPost, "/api/v1/query?query=test", nil) ctx := context.Background() _, ctx = stats.ContextWithEmptyStats(ctx) req = req.WithContext(user.InjectOrgID(ctx, "user1")) @@ -244,10 +259,14 @@ func Test_InvalidCodec(t *testing.T) { } func Test_CustomAPI_StatsRenderer(t *testing.T) { - engine := promql.NewEngine(promql.EngineOpts{ - MaxSamples: 100, - Timeout: time.Second * 2, - }) + engine := engine2.New( + promql.EngineOpts{ + MaxSamples: 100, + Timeout: time.Second * 2, + }, + engine2.ThanosEngineConfig{Enabled: false}, + prometheus.NewRegistry()) + mockQueryable := &mockSampleAndChunkQueryable{ queryableFn: func(_, _ int64) (storage.Querier, error) { return mockQuerier{ @@ -269,9 +288,9 @@ func Test_CustomAPI_StatsRenderer(t *testing.T) { queryAPI := NewQueryAPI(engine, mockQueryable, querier.StatsRenderer, log.NewNopLogger(), []v1.Codec{v1.JSONCodec{}}, regexp.MustCompile(".*")) router := mux.NewRouter() - router.Path("/api/v1/query_range").Methods("GET").Handler(queryAPI.Wrap(queryAPI.RangeQueryHandler)) + router.Path("/api/v1/query_range").Methods("POST").Handler(queryAPI.Wrap(queryAPI.RangeQueryHandler)) - req := httptest.NewRequest(http.MethodGet, 
"/api/v1/query_range?end=1536673680&query=test&start=1536673665&step=5", nil) + req := httptest.NewRequest(http.MethodPost, "/api/v1/query_range?end=1536673680&query=test&start=1536673665&step=5", nil) ctx := context.Background() _, ctx = stats.ContextWithEmptyStats(ctx) req = req.WithContext(user.InjectOrgID(ctx, "user1")) @@ -285,3 +304,202 @@ func Test_CustomAPI_StatsRenderer(t *testing.T) { require.Equal(t, uint64(4), queryStats.LoadPeakSamples()) require.Equal(t, uint64(4), queryStats.LoadScannedSamples()) } + +func Test_Logicalplan_Requests(t *testing.T) { + engine := engine2.New( + promql.EngineOpts{ + MaxSamples: 100, + Timeout: time.Second * 2, + }, + engine2.ThanosEngineConfig{Enabled: true}, + prometheus.NewRegistry(), + ) + + mockMatrix := model.Matrix{ + { + Metric: model.Metric{"__name__": "test", "foo": "bar"}, + Values: []model.SamplePair{ + {Timestamp: 1536673665000, Value: 0}, + {Timestamp: 1536673670000, Value: 1}, + }, + }, + } + + mockQueryable := &mockSampleAndChunkQueryable{ + queryableFn: func(_, _ int64) (storage.Querier, error) { + return mockQuerier{matrix: mockMatrix}, nil + }, + } + + tests := []struct { + name string + path string + start int64 + end int64 + stepDuration int64 + requestBody func(t *testing.T) []byte + expectedCode int + expectedBody string + }{ + { + name: "[Range Query] with valid logical plan and empty query string", + path: "/api/v1/query_range?end=1536673680&query=&start=1536673665&step=5", + start: 1536673665, + end: 1536673680, + stepDuration: 5, + requestBody: func(t *testing.T) []byte { + return createTestLogicalPlan(t, 1536673665, 1536673680, 5) + }, + expectedCode: http.StatusOK, + expectedBody: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"test","foo":"bar"},"values":[[1536673665,"0"],[1536673670,"1"],[1536673675,"1"],[1536673680,"1"]]}]}}`, + }, + { + name: "[Range Query] with corrupted logical plan", // will throw an error from unmarhsal step + path: "/api/v1/query_range?end=1536673680&query=test&start=1536673665&step=5", + start: 1536673665, + end: 1536673680, + stepDuration: 5, + requestBody: func(t *testing.T) []byte { + return append(createTestLogicalPlan(t, 1536673665, 1536673680, 5), []byte("random data")...) 
+ }, + expectedCode: http.StatusInternalServerError, + expectedBody: `{"status":"error","errorType":"server_error","error":"invalid logical plan: invalid character 'r' after top-level value"}`, + }, + { + name: "[Range Query] with empty body and non-empty query string", // fall back to promql query execution + path: "/api/v1/query_range?end=1536673680&query=test&start=1536673665&step=5", + start: 1536673665, + end: 1536673680, + stepDuration: 5, + requestBody: func(t *testing.T) []byte { + return []byte{} + }, + expectedCode: http.StatusOK, + expectedBody: `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"__name__":"test","foo":"bar"},"values":[[1536673665,"0"],[1536673670,"1"],[1536673675,"1"],[1536673680,"1"]]}]}}`, + }, + { + name: "[Range Query] with empty body and empty query string", // fall back to promql query execution, but will have error because of empty query string + path: "/api/v1/query_range?end=1536673680&query=&start=1536673665&step=5", + start: 1536673665, + end: 1536673680, + stepDuration: 5, + requestBody: func(t *testing.T) []byte { + return []byte{} + }, + expectedCode: http.StatusBadRequest, + expectedBody: "{\"status\":\"error\",\"errorType\":\"bad_data\",\"error\":\"invalid parameter \\\"query\\\"; unknown position: parse error: no expression found in input\"}", + }, + { + name: "[Instant Query] with valid logical plan and empty query string", + path: "/api/v1/query?query=test&time=1536673670", + start: 1536673670, + end: 1536673670, + stepDuration: 0, + requestBody: func(t *testing.T) []byte { + return createTestLogicalPlan(t, 1536673670, 1536673670, 0) + }, + expectedCode: http.StatusOK, + expectedBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"test","foo":"bar"},"value":[1536673670,"1"]}]}}`, + }, + { + name: "[Instant Query] with corrupted logical plan", + path: "/api/v1/query?query=test&time=1536673670", + start: 1536673670, + end: 1536673670, + stepDuration: 0, + requestBody: func(t *testing.T) []byte { + return append(createTestLogicalPlan(t, 1536673670, 1536673670, 0), []byte("random data")...) 
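Both "corrupted logical plan" cases assert the same server_error, and the asserted string ("invalid character 'r' after top-level value") is encoding/json's trailing-garbage error, so plans evidently travel as JSON in the plan form field. A dependency-free illustration; the payload below is a stand-in, not promql-engine's real plan encoding:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	valid := []byte(`{"op":"vector_selector"}`) // hypothetical payload, not a real plan
	corrupted := append(valid, []byte("random data")...)
	var v map[string]any
	// Appending bytes after a valid top-level JSON value reproduces the
	// exact error these tests assert.
	fmt.Println(json.Unmarshal(corrupted, &v))
	// invalid character 'r' after top-level value
}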
+ },
+ expectedCode: http.StatusInternalServerError,
+ expectedBody: `{"status":"error","errorType":"server_error","error":"invalid logical plan: invalid character 'r' after top-level value"}`,
+ },
+ {
+ name: "[Instant Query] with empty body and non-empty query string",
+ path: "/api/v1/query?query=test&time=1536673670",
+ start: 1536673670,
+ end: 1536673670,
+ stepDuration: 0,
+ requestBody: func(t *testing.T) []byte {
+ return []byte{}
+ },
+ expectedCode: http.StatusOK,
+ expectedBody: `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"test","foo":"bar"},"value":[1536673670,"1"]}]}}`,
+ },
+ {
+ name: "[Instant Query] with empty body and empty query string",
+ path: "/api/v1/query?query=&time=1536673670",
+ start: 1536673670,
+ end: 1536673670,
+ stepDuration: 0,
+ requestBody: func(t *testing.T) []byte {
+ return []byte{}
+ },
+ expectedCode: http.StatusBadRequest,
+ expectedBody: "{\"status\":\"error\",\"errorType\":\"bad_data\",\"error\":\"invalid parameter \\\"query\\\"; unknown position: parse error: no expression found in input\"}",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := NewQueryAPI(engine, mockQueryable, querier.StatsRenderer, log.NewNopLogger(), []v1.Codec{v1.JSONCodec{}}, regexp.MustCompile(".*"))
+ router := mux.NewRouter()
+ router.Path("/api/v1/query").Methods("POST").Handler(c.Wrap(c.InstantQueryHandler))
+ router.Path("/api/v1/query_range").Methods("POST").Handler(c.Wrap(c.RangeQueryHandler))
+
+ req := createTestRequest(tt.path, tt.requestBody(t))
+ rec := httptest.NewRecorder()
+ router.ServeHTTP(rec, req)
+
+ require.Equal(t, tt.expectedCode, rec.Code)
+ body, err := io.ReadAll(rec.Body)
+ require.NoError(t, err)
+ require.Equal(t, tt.expectedBody, string(body))
+ })
+ }
+}
+
+func createTestRequest(path string, planBytes []byte) *http.Request {
+ form := url.Values{}
+ form.Set("plan", string(planBytes))
+ req := httptest.NewRequest(http.MethodPost, path, io.NopCloser(strings.NewReader(form.Encode())))
+
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ ctx := context.Background()
+ _, ctx = stats.ContextWithEmptyStats(ctx)
+ return req.WithContext(user.InjectOrgID(ctx, "user1"))
+}
+
+func createTestLogicalPlan(t *testing.T, start, end int64, stepDuration int64) []byte {
+ startTime, endTime := convertMsToTime(start), convertMsToTime(end)
+ step := convertMsToDuration(stepDuration)
+
+ qOpts := query.Options{
+ Start: startTime,
+ End: startTime,
+ Step: 0,
+ StepsBatch: 10,
+ LookbackDelta: 0,
+ EnablePerStepStats: false,
+ }
+
+ if step != 0 {
+ qOpts.End = endTime
+ qOpts.Step = step
+ }
+
+ // using a different metric name here so that we can check with a debugger which query (from the query string vs. the HTTP request body)
+ // is being executed by the queriers
+ expr, err := parser.NewParser("up", parser.WithFunctions(parser.Functions)).ParseExpr()
+ require.NoError(t, err)
+
+ planOpts := logicalplan.PlanOptions{
+ DisableDuplicateLabelCheck: false,
+ }
+
+ logicalPlan, err := logicalplan.NewFromAST(expr, &qOpts, planOpts)
+ require.NoError(t, err)
+ byteval, err := logicalplan.Marshal(logicalPlan.Root())
+ require.NoError(t, err)
+
+ return byteval
+}
diff --git a/pkg/chunk/fixtures.go b/pkg/chunk/fixtures.go
index 9227415db08..433cd8c277a 100644
--- a/pkg/chunk/fixtures.go
+++ b/pkg/chunk/fixtures.go
@@ -8,22 +8,22 @@ import (
)

// BenchmarkLabels is a real example from Kubernetes' embedded cAdvisor metrics, lightly obfuscated
-var BenchmarkLabels = labels.Labels{
-
{Name: model.MetricNameLabel, Value: "container_cpu_usage_seconds_total"}, - {Name: "beta_kubernetes_io_arch", Value: "amd64"}, - {Name: "beta_kubernetes_io_instance_type", Value: "c3.somesize"}, - {Name: "beta_kubernetes_io_os", Value: "linux"}, - {Name: "container_name", Value: "some-name"}, - {Name: "cpu", Value: "cpu01"}, - {Name: "failure_domain_beta_kubernetes_io_region", Value: "somewhere-1"}, - {Name: "failure_domain_beta_kubernetes_io_zone", Value: "somewhere-1b"}, - {Name: "id", Value: "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28"}, - {Name: "image", Value: "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506"}, - {Name: "instance", Value: "ip-111-11-1-11.ec2.internal"}, - {Name: "job", Value: "kubernetes-cadvisor"}, - {Name: "kubernetes_io_hostname", Value: "ip-111-11-1-11"}, - {Name: "monitor", Value: "prod"}, - {Name: "name", Value: "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0"}, - {Name: "namespace", Value: "kube-system"}, - {Name: "pod_name", Value: "some-other-name-5j8s8"}, -} +var BenchmarkLabels = labels.FromStrings( + model.MetricNameLabel, "container_cpu_usage_seconds_total", + "beta_kubernetes_io_arch", "amd64", + "beta_kubernetes_io_instance_type", "c3.somesize", + "beta_kubernetes_io_os", "linux", + "container_name", "some-name", + "cpu", "cpu01", + "failure_domain_beta_kubernetes_io_region", "somewhere-1", + "failure_domain_beta_kubernetes_io_zone", "somewhere-1b", + "id", "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28", + "image", "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506", + "instance", "ip-111-11-1-11.ec2.internal", + "job", "kubernetes-cadvisor", + "kubernetes_io_hostname", "ip-111-11-1-11", + "monitor", "prod", + "name", "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0", + "namespace", "kube-system", + "pod_name", "some-other-name-5j8s8", +) diff --git a/pkg/chunk/json_helpers.go b/pkg/chunk/json_helpers.go index 9107f7d8c25..21711149380 100644 --- a/pkg/chunk/json_helpers.go +++ b/pkg/chunk/json_helpers.go @@ -1,7 +1,6 @@ package chunk import ( - "sort" "unsafe" jsoniter "github.com/json-iterator/go" @@ -19,35 +18,40 @@ func init() { // Override Prometheus' labels.Labels decoder which goes via a map func DecodeLabels(ptr unsafe.Pointer, iter *jsoniter.Iterator) { labelsPtr := (*labels.Labels)(ptr) - *labelsPtr = make(labels.Labels, 0, 10) + b := labels.NewBuilder(labels.EmptyLabels()) + iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool { value := iter.ReadString() - *labelsPtr = append(*labelsPtr, labels.Label{Name: key, Value: value}) + b.Set(key, value) return true }) - // Labels are always sorted, but earlier Cortex using a map would - // output in any order so we have to sort on read in - sort.Sort(*labelsPtr) + *labelsPtr = b.Labels() } // Override Prometheus' labels.Labels encoder which goes via a map func EncodeLabels(ptr unsafe.Pointer, stream *jsoniter.Stream) { - labelsPtr := (*labels.Labels)(ptr) + lbls := *(*labels.Labels)(ptr) + stream.WriteObjectStart() - for i, v := range *labelsPtr { - if i != 0 { + first := true + + lbls.Range(func(l labels.Label) { + if !first { stream.WriteMore() } - stream.WriteString(v.Name) + first = false + + stream.WriteString(l.Name) stream.WriteRaw(`:`) - 
stream.WriteString(v.Value)
- }
+ stream.WriteString(l.Value)
+ })
+
stream.WriteObjectEnd()
}

func labelsIsEmpty(ptr unsafe.Pointer) bool {
- labelsPtr := (*labels.Labels)(ptr)
- return len(*labelsPtr) == 0
+ labelsPtr := *(*labels.Labels)(ptr)
+ return labelsPtr.Len() == 0
}

// Decode via jsoniter's float64 routine is faster than getting the string data and decoding as two integers
diff --git a/pkg/compactor/compactor_metrics_test.go b/pkg/compactor/compactor_metrics_test.go
index 75879f2d96a..0288bbe909f 100644
--- a/pkg/compactor/compactor_metrics_test.go
+++ b/pkg/compactor/compactor_metrics_test.go
@@ -49,6 +49,7 @@ func TestCompactorMetrics(t *testing.T) {
cortex_compactor_meta_synced{state="marked-for-deletion"} 0
cortex_compactor_meta_synced{state="marked-for-no-compact"} 0
cortex_compactor_meta_synced{state="no-meta-json"} 0
+ cortex_compactor_meta_synced{state="parquet-migrated"} 0
cortex_compactor_meta_synced{state="time-excluded"} 0
cortex_compactor_meta_synced{state="too-fresh"} 0
# HELP cortex_compactor_meta_syncs_total Total blocks metadata synchronization attempts.
diff --git a/pkg/compactor/compactor_paritioning_test.go b/pkg/compactor/compactor_paritioning_test.go
index 1e5627590b6..bbb875dad37 100644
--- a/pkg/compactor/compactor_paritioning_test.go
+++ b/pkg/compactor/compactor_paritioning_test.go
@@ -18,6 +18,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
+ "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
@@ -1041,7 +1042,7 @@ func TestPartitionCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunning
bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil)
bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil)
bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil)
bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil)
bucketClient.MockIter("user-1/markers/", nil, nil)
bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil)
bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil)
@@ -1507,7 +1508,7 @@ func mockBlockGroup(userID string, ids []string, bkt *bucket.ClientMock) *compac
log.NewNopLogger(),
bkt,
getPartitionedGroupID(userID),
- nil,
+ labels.EmptyLabels(),
0,
true,
true,
diff --git a/pkg/compactor/compactor_ring.go b/pkg/compactor/compactor_ring.go
index c205ee80f55..430f042a7a3 100644
--- a/pkg/compactor/compactor_ring.go
+++ b/pkg/compactor/compactor_ring.go
@@ -18,10 +18,11 @@ import (
// is used to strip down the config to the minimum, and avoid confusion
// to the user.
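// Reviewer note (illustrative, not part of the patch): the DetailedMetricsEnabled
// field added below is also exposed as a CLI flag; detailed ring metrics default
// to on, so an operator would trim metric cardinality with something like:
//
//	cortex -target=compactor -compactor.ring.detailed-metrics-enabled=false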
type RingConfig struct { - KVStore kv.Config `yaml:"kvstore"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - AutoForgetDelay time.Duration `yaml:"auto_forget_delay"` + KVStore kv.Config `yaml:"kvstore"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + AutoForgetDelay time.Duration `yaml:"auto_forget_delay"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` // Wait ring stability. WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"` @@ -55,6 +56,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { cfg.KVStore.RegisterFlagsWithPrefix("compactor.ring.", "collectors/", f) f.DurationVar(&cfg.HeartbeatPeriod, "compactor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") f.DurationVar(&cfg.HeartbeatTimeout, "compactor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring. 0 = never (timeout disabled).") + f.BoolVar(&cfg.DetailedMetricsEnabled, "compactor.ring.detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.") f.DurationVar(&cfg.AutoForgetDelay, "compactor.auto-forget-delay", 2*cfg.HeartbeatTimeout, "Time since last heartbeat before compactor will be removed from ring. 0 to disable") // Wait stability flags. @@ -89,6 +91,7 @@ func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { rc.KVStore = cfg.KVStore rc.HeartbeatTimeout = cfg.HeartbeatTimeout rc.ReplicationFactor = 1 + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled // Configure lifecycler lc.RingConfig = rc diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index a76afa4a206..19bb759f009 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -1362,7 +1362,7 @@ func createTSDBBlock(t *testing.T, bkt objstore.Bucket, userID string, minT, max // Append a sample at the beginning and one at the end of the time range. 
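// Reviewer note (sketch, not part of the patch): labels.Labels is opaque under
// Prometheus' slicelabels/stringlabels representations, so struct literals like
// the one removed below no longer compile everywhere. labels.FromStrings is the
// portable constructor and sorts its name/value pairs:
//
//	lbls := labels.FromStrings("series_id", "1", "__name__", "test")
//	lbls.Range(func(l labels.Label) {
//		fmt.Println(l.Name, l.Value) // "__name__" prints first: pairs are sorted by name
//	})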
for i, ts := range []int64{minT, maxT - 1} { - lbls := labels.Labels{labels.Label{Name: "series_id", Value: strconv.Itoa(i)}} + lbls := labels.FromStrings("series_id", strconv.Itoa(i)) app := db.Appender(context.Background()) _, err := app.Append(0, lbls, ts, float64(i)) diff --git a/pkg/compactor/sharded_compaction_lifecycle_callback_test.go b/pkg/compactor/sharded_compaction_lifecycle_callback_test.go index 0c0b8f0f340..9e598a2edc5 100644 --- a/pkg/compactor/sharded_compaction_lifecycle_callback_test.go +++ b/pkg/compactor/sharded_compaction_lifecycle_callback_test.go @@ -9,6 +9,7 @@ import ( "github.com/go-kit/log" "github.com/oklog/ulid/v2" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/require" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -46,7 +47,7 @@ func TestPreCompactionCallback(t *testing.T) { log.NewNopLogger(), nil, testGroupKey, - nil, + labels.EmptyLabels(), 0, true, true, diff --git a/pkg/compactor/sharded_posting.go b/pkg/compactor/sharded_posting.go index b0c29ca1c98..09115de6841 100644 --- a/pkg/compactor/sharded_posting.go +++ b/pkg/compactor/sharded_posting.go @@ -28,10 +28,10 @@ func NewShardedPosting(ctx context.Context, postings index.Postings, partitionCo if builder.Labels().Hash()%partitionCount == partitionID { posting := postings.At() series = append(series, posting) - for _, label := range builder.Labels() { - symbols[label.Name] = struct{}{} - symbols[label.Value] = struct{}{} - } + builder.Labels().Range(func(l labels.Label) { + symbols[l.Name] = struct{}{} + symbols[l.Value] = struct{}{} + }) } } return index.NewListPostings(series), symbols, nil diff --git a/pkg/compactor/sharded_posting_test.go b/pkg/compactor/sharded_posting_test.go index e65b9b52919..c277922fe0a 100644 --- a/pkg/compactor/sharded_posting_test.go +++ b/pkg/compactor/sharded_posting_test.go @@ -46,15 +46,11 @@ func TestShardPostingAndSymbolBasedOnPartitionID(t *testing.T) { expectedSeriesCount := 10 for i := 0; i < expectedSeriesCount; i++ { labelValue := strconv.Itoa(r.Int()) - series = append(series, labels.Labels{ - metricName, - {Name: ConstLabelName, Value: ConstLabelValue}, - {Name: TestLabelName, Value: labelValue}, - }) + series = append(series, labels.FromStrings(metricName.Name, metricName.Value, ConstLabelName, ConstLabelValue, TestLabelName, labelValue)) expectedSymbols[TestLabelName] = false expectedSymbols[labelValue] = false } - blockID, err := e2eutil.CreateBlock(context.Background(), tmpdir, series, 10, time.Now().Add(-10*time.Minute).UnixMilli(), time.Now().UnixMilli(), nil, 0, metadata.NoneFunc, nil) + blockID, err := e2eutil.CreateBlock(context.Background(), tmpdir, series, 10, time.Now().Add(-10*time.Minute).UnixMilli(), time.Now().UnixMilli(), labels.EmptyLabels(), 0, metadata.NoneFunc, nil) require.NoError(t, err) var closers []io.Closer @@ -82,10 +78,10 @@ func TestShardPostingAndSymbolBasedOnPartitionID(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(partitionID), builder.Labels().Hash()%uint64(partitionCount)) seriesCount++ - for _, label := range builder.Labels() { - expectedShardedSymbols[label.Name] = struct{}{} - expectedShardedSymbols[label.Value] = struct{}{} - } + builder.Labels().Range(func(l labels.Label) { + expectedShardedSymbols[l.Name] = struct{}{} + expectedShardedSymbols[l.Value] = struct{}{} + }) } err = ir.Close() if err == nil { diff --git a/pkg/configs/userconfig/config.go b/pkg/configs/userconfig/config.go index 25e7d39b38b..70b6ed70187 100644 --- 
a/pkg/configs/userconfig/config.go +++ b/pkg/configs/userconfig/config.go @@ -308,7 +308,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { time.Duration(rl.KeepFiringFor), labels.FromMap(rl.Labels), labels.FromMap(rl.Annotations), - nil, + labels.EmptyLabels(), "", true, util_log.GoKitLogToSlog(log.With(util_log.Logger, "alert", rl.Alert)), diff --git a/pkg/configs/userconfig/config_test.go b/pkg/configs/userconfig/config_test.go index 392ca911ca9..d17dae574d0 100644 --- a/pkg/configs/userconfig/config_test.go +++ b/pkg/configs/userconfig/config_test.go @@ -86,13 +86,9 @@ func TestParseLegacyAlerts(t *testing.T) { parsed, 5*time.Minute, 0, - labels.Labels{ - labels.Label{Name: "severity", Value: "critical"}, - }, - labels.Labels{ - labels.Label{Name: "message", Value: "I am a message"}, - }, - nil, + labels.FromStrings("severity", "critical"), + labels.FromStrings("message", "I am a message"), + labels.EmptyLabels(), "", true, util_log.GoKitLogToSlog(log.With(util_log.Logger, "alert", "TestAlert")), diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 379501db0e6..f50fcf26e17 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -14,7 +14,6 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/promql" prom_storage "github.com/prometheus/prometheus/storage" "github.com/weaveworks/common/server" "github.com/weaveworks/common/signals" @@ -35,6 +34,7 @@ import ( "github.com/cortexproject/cortex/pkg/cortex/storage" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/distributor" + "github.com/cortexproject/cortex/pkg/engine" "github.com/cortexproject/cortex/pkg/flusher" "github.com/cortexproject/cortex/pkg/frontend" frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1" @@ -114,7 +114,7 @@ type Config struct { QueryRange queryrange.Config `yaml:"query_range"` BlocksStorage tsdb.BlocksStorageConfig `yaml:"blocks_storage"` Compactor compactor.Config `yaml:"compactor"` - ParquetConverter parquetconverter.Config `yaml:"parquet_converter" doc:"hidden"` + ParquetConverter parquetconverter.Config `yaml:"parquet_converter"` StoreGateway storegateway.Config `yaml:"store_gateway"` TenantFederation tenantfederation.Config `yaml:"tenant_federation"` @@ -322,7 +322,7 @@ type Cortex struct { QuerierQueryable prom_storage.SampleAndChunkQueryable ExemplarQueryable prom_storage.ExemplarQueryable MetadataQuerier querier.MetadataQuerier - QuerierEngine promql.QueryEngine + QuerierEngine engine.QueryEngine QueryFrontendTripperware tripperware.Tripperware ResourceMonitor *resource.Monitor @@ -393,10 +393,8 @@ func (t *Cortex) setupThanosTracing() { // setupGRPCHeaderForwarding appends a gRPC middleware used to enable the propagation of // HTTP Headers through child gRPC calls func (t *Cortex) setupGRPCHeaderForwarding() { - if len(t.Cfg.API.HTTPRequestHeadersToLog) > 0 { - t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, grpcutil.HTTPHeaderPropagationServerInterceptor) - t.Cfg.Server.GRPCStreamMiddleware = append(t.Cfg.Server.GRPCStreamMiddleware, grpcutil.HTTPHeaderPropagationStreamServerInterceptor) - } + t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, grpcutil.HTTPHeaderPropagationServerInterceptor) + t.Cfg.Server.GRPCStreamMiddleware = append(t.Cfg.Server.GRPCStreamMiddleware, grpcutil.HTTPHeaderPropagationStreamServerInterceptor) } func (t *Cortex) setupRequestSigning() { diff --git 
a/pkg/cortex/modules.go b/pkg/cortex/modules.go
index e9a51f2c3c6..a47888b8267 100644
--- a/pkg/cortex/modules.go
+++ b/pkg/cortex/modules.go
@@ -44,6 +44,7 @@ import (
"github.com/cortexproject/cortex/pkg/querier/tripperware/instantquery"
"github.com/cortexproject/cortex/pkg/querier/tripperware/queryrange"
querier_worker "github.com/cortexproject/cortex/pkg/querier/worker"
+ cortexquerysharding "github.com/cortexproject/cortex/pkg/querysharding"
"github.com/cortexproject/cortex/pkg/ring"
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/ring/kv/memberlist"
@@ -364,6 +365,7 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) {
// to a Prometheus API struct instantiated with the Cortex Queryable.
internalQuerierRouter := api.NewQuerierHandler(
t.Cfg.API,
+ t.Cfg.Querier,
t.QuerierQueryable,
t.ExemplarQueryable,
t.QuerierEngine,
@@ -402,9 +404,7 @@
// request context.
internalQuerierRouter = t.API.AuthMiddleware.Wrap(internalQuerierRouter)

- if len(t.Cfg.API.HTTPRequestHeadersToLog) > 0 {
- internalQuerierRouter = t.API.HTTPHeaderMiddleware.Wrap(internalQuerierRouter)
- }
+ internalQuerierRouter = t.API.HTTPHeaderMiddleware.Wrap(internalQuerierRouter)
}

// If neither frontend address or scheduler address is configured, no worker is needed.
@@ -511,7 +511,13 @@ func (t *Cortex) initFlusher() (serv services.Service, err error) {

// initQueryFrontendTripperware instantiates the tripperware used by the query frontend
// to optimize Prometheus query requests.
func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err error) {
- queryAnalyzer := querysharding.NewQueryAnalyzer()
+ var queryAnalyzer querysharding.Analyzer
+ queryAnalyzer = querysharding.NewQueryAnalyzer()
+ if t.Cfg.Querier.EnableParquetQueryable {
+ // Disable vertical sharding for binary expressions that use ignoring(), since the parquet queryable cannot serve them sharded.
+ queryAnalyzer = cortexquerysharding.NewDisableBinaryExpressionAnalyzer(queryAnalyzer)
+ }
+
// PrometheusCodec is a codec to encode and decode Prometheus query range requests and responses.
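// Reviewer note on NewDisableBinaryExpressionAnalyzer above (a sketch under
// assumptions, not the actual implementation): the wrapper decorates thanos'
// querysharding.Analyzer. Assuming an Analyze(string) (QueryAnalysis, error)
// interface and that the zero QueryAnalysis reads as "not shardable", the
// idea is roughly:
//
//	type disableBinaryExprAnalyzer struct{ inner querysharding.Analyzer }
//
//	func (a disableBinaryExprAnalyzer) Analyze(query string) (querysharding.QueryAnalysis, error) {
//		analysis, err := a.inner.Analyze(query)
//		if err != nil || !analysis.IsShardable() {
//			return analysis, err
//		}
//		expr, err := parser.ParseExpr(query)
//		if err != nil {
//			return querysharding.QueryAnalysis{}, err
//		}
//		notShardable := false
//		parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
//			if b, ok := node.(*parser.BinaryExpr); ok && b.VectorMatching != nil &&
//				!b.VectorMatching.On && len(b.VectorMatching.MatchingLabels) > 0 {
//				notShardable = true // vector matching uses ignoring(...)
//			}
//			return nil
//		})
//		if notShardable {
//			return querysharding.QueryAnalysis{}, nil
//		}
//		return analysis, nil
//	}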
prometheusCodec := queryrange.NewPrometheusCodec(false, t.Cfg.Querier.ResponseCompression, t.Cfg.API.QuerierDefaultCodec) // ShardedPrometheusCodec is same as PrometheusCodec but to be used on the sharded queries (it sum up the stats) @@ -534,7 +540,7 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro shardedPrometheusCodec, t.Cfg.Querier.LookbackDelta, t.Cfg.Querier.DefaultEvaluationInterval, - t.Cfg.Frontend.DistributedExecEnabled, + t.Cfg.Querier.DistributedExecEnabled, ) if err != nil { return nil, err @@ -547,7 +553,7 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro queryAnalyzer, t.Cfg.Querier.LookbackDelta, t.Cfg.Querier.DefaultEvaluationInterval, - t.Cfg.Frontend.DistributedExecEnabled) + t.Cfg.Querier.DistributedExecEnabled) if err != nil { return nil, err } @@ -785,6 +791,7 @@ func (t *Cortex) initMemberlistKV() (services.Service, error) { t.Cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.Alertmanager.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV + t.Cfg.ParquetConverter.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV return t.MemberlistKV, nil } diff --git a/pkg/cortexpb/compat.go b/pkg/cortexpb/compat.go index 6de2423d562..83bdbff33d1 100644 --- a/pkg/cortexpb/compat.go +++ b/pkg/cortexpb/compat.go @@ -67,13 +67,13 @@ func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels { // Do NOT use unsafe to convert between data types because this function may // get in input labels whose data structure is reused. func FromLabelAdaptersToLabelsWithCopy(input []LabelAdapter) labels.Labels { - return CopyLabels(FromLabelAdaptersToLabels(input)) + return CopyLabels(input) } // Efficiently copies labels input slice. To be used in cases where input slice // can be reused, but long-term copy is needed. -func CopyLabels(input []labels.Label) labels.Labels { - result := make(labels.Labels, len(input)) +func CopyLabels(input []LabelAdapter) labels.Labels { + builder := labels.NewBuilder(labels.EmptyLabels()) size := 0 for _, l := range input { @@ -84,12 +84,14 @@ func CopyLabels(input []labels.Label) labels.Labels { // Copy all strings into the buffer, and use 'yoloString' to convert buffer // slices to strings. buf := make([]byte, size) + var name, value string - for i, l := range input { - result[i].Name, buf = copyStringToBuffer(l.Name, buf) - result[i].Value, buf = copyStringToBuffer(l.Value, buf) + for _, l := range input { + name, buf = copyStringToBuffer(l.Name, buf) + value, buf = copyStringToBuffer(l.Value, buf) + builder.Set(name, value) } - return result + return builder.Labels() } // Copies string to buffer (which must be big enough), and converts buffer slice containing diff --git a/pkg/cortexpb/compat_test.go b/pkg/cortexpb/compat_test.go index 6fda91a84ee..843aa290d07 100644 --- a/pkg/cortexpb/compat_test.go +++ b/pkg/cortexpb/compat_test.go @@ -104,26 +104,28 @@ func TestMetricMetadataToMetricTypeToMetricType(t *testing.T) { func TestFromLabelAdaptersToLabels(t *testing.T) { input := []LabelAdapter{{Name: "hello", Value: "world"}} - expected := labels.Labels{labels.Label{Name: "hello", Value: "world"}} + expected := labels.FromStrings("hello", "world") actual := FromLabelAdaptersToLabels(input) assert.Equal(t, expected, actual) - // All strings must NOT be copied. 
- assert.Equal(t, uintptr(unsafe.Pointer(&input[0].Name)), uintptr(unsafe.Pointer(&actual[0].Name))) - assert.Equal(t, uintptr(unsafe.Pointer(&input[0].Value)), uintptr(unsafe.Pointer(&actual[0].Value))) + final := FromLabelsToLabelAdapters(actual) + // All strings must not be copied. + assert.Equal(t, uintptr(unsafe.Pointer(&input[0].Name)), uintptr(unsafe.Pointer(&final[0].Name))) + assert.Equal(t, uintptr(unsafe.Pointer(&input[0].Value)), uintptr(unsafe.Pointer(&final[0].Value))) } func TestFromLabelAdaptersToLabelsWithCopy(t *testing.T) { input := []LabelAdapter{{Name: "hello", Value: "world"}} - expected := labels.Labels{labels.Label{Name: "hello", Value: "world"}} + expected := labels.FromStrings("hello", "world") actual := FromLabelAdaptersToLabelsWithCopy(input) assert.Equal(t, expected, actual) + final := FromLabelsToLabelAdapters(actual) // All strings must be copied. - assert.NotEqual(t, uintptr(unsafe.Pointer(&input[0].Name)), uintptr(unsafe.Pointer(&actual[0].Name))) - assert.NotEqual(t, uintptr(unsafe.Pointer(&input[0].Value)), uintptr(unsafe.Pointer(&actual[0].Value))) + assert.NotEqual(t, uintptr(unsafe.Pointer(&input[0].Name)), uintptr(unsafe.Pointer(&final[0].Name))) + assert.NotEqual(t, uintptr(unsafe.Pointer(&input[0].Value)), uintptr(unsafe.Pointer(&final[0].Value))) } func BenchmarkFromLabelAdaptersToLabelsWithCopy(b *testing.B) { diff --git a/pkg/cortexpb/cortex.pb.go b/pkg/cortexpb/cortex.pb.go index 04eab395bc8..e0dac736baf 100644 --- a/pkg/cortexpb/cortex.pb.go +++ b/pkg/cortexpb/cortex.pb.go @@ -263,6 +263,12 @@ func (m *StreamWriteRequest) GetRequest() *WriteRequest { type WriteResponse struct { Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Samples represents X-Prometheus-Remote-Write-Written-Samples + Samples int64 `protobuf:"varint,3,opt,name=Samples,proto3" json:"Samples,omitempty"` + // Histograms represents X-Prometheus-Remote-Write-Written-Histograms + Histograms int64 `protobuf:"varint,4,opt,name=Histograms,proto3" json:"Histograms,omitempty"` + // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars + Exemplars int64 `protobuf:"varint,5,opt,name=Exemplars,proto3" json:"Exemplars,omitempty"` } func (m *WriteResponse) Reset() { *m = WriteResponse{} } @@ -311,6 +317,27 @@ func (m *WriteResponse) GetMessage() string { return "" } +func (m *WriteResponse) GetSamples() int64 { + if m != nil { + return m.Samples + } + return 0 +} + +func (m *WriteResponse) GetHistograms() int64 { + if m != nil { + return m.Histograms + } + return 0 +} + +func (m *WriteResponse) GetExemplars() int64 { + if m != nil { + return m.Exemplars + } + return 0 +} + type TimeSeries struct { Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"` // Sorted by time, oldest sample first. 
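// Reviewer note (sketch; the actual handler wiring lives elsewhere in this PR):
// the three fields added to WriteResponse carry the counts that a remote-write
// v2 receiver echoes back in its response headers. A hypothetical helper,
// setWrittenHeaders, shows the mapping:
//
//	func setWrittenHeaders(w http.ResponseWriter, resp *cortexpb.WriteResponse) {
//		w.Header().Set("X-Prometheus-Remote-Write-Written-Samples", strconv.FormatInt(resp.Samples, 10))
//		w.Header().Set("X-Prometheus-Remote-Write-Written-Histograms", strconv.FormatInt(resp.Histograms, 10))
//		w.Header().Set("X-Prometheus-Remote-Write-Written-Exemplars", strconv.FormatInt(resp.Exemplars, 10))
//	}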
@@ -945,80 +972,81 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1153 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcd, 0x6f, 0x1b, 0xc5, - 0x1b, 0xde, 0xc9, 0xfa, 0xf3, 0xb5, 0xe3, 0x6e, 0xe7, 0x17, 0xf5, 0xb7, 0x04, 0x75, 0x9d, 0x2e, - 0x02, 0x2c, 0x84, 0x02, 0x0a, 0x02, 0xd4, 0xaa, 0x20, 0xd9, 0xad, 0xdb, 0x44, 0xad, 0x9d, 0x68, - 0xec, 0x50, 0x95, 0x8b, 0x35, 0xb5, 0xc7, 0xf6, 0xaa, 0xfb, 0xc5, 0xce, 0xb8, 0x6a, 0x38, 0x71, - 0x01, 0x71, 0xe4, 0xcc, 0x0d, 0x71, 0xe1, 0xca, 0x7f, 0xd1, 0x63, 0x8e, 0x55, 0x0f, 0x11, 0x75, - 0x2f, 0xe5, 0xd6, 0x03, 0x7f, 0x00, 0x9a, 0xd9, 0x2f, 0xa7, 0x69, 0xc5, 0x25, 0xb7, 0x79, 0x9f, - 0xf7, 0x79, 0xdf, 0x79, 0xe6, 0xfd, 0x58, 0x1b, 0xea, 0xe3, 0x20, 0x12, 0xec, 0xf1, 0x76, 0x18, - 0x05, 0x22, 0xc0, 0x95, 0xd8, 0x0a, 0x1f, 0x6c, 0x6e, 0xcc, 0x82, 0x59, 0xa0, 0xc0, 0x4f, 0xe4, - 0x29, 0xf6, 0xdb, 0xef, 0xc0, 0xc5, 0x1e, 0xe3, 0x9c, 0xce, 0xd8, 0x3d, 0x47, 0xcc, 0x3b, 0x8b, - 0x29, 0x61, 0xd3, 0x6b, 0x85, 0x57, 0xbf, 0x35, 0x35, 0xfb, 0x47, 0x1d, 0xea, 0xf7, 0x22, 0x47, - 0x30, 0xc2, 0xbe, 0x5b, 0x30, 0x2e, 0xf0, 0x01, 0x80, 0x70, 0x3c, 0xc6, 0x59, 0xe4, 0x30, 0x6e, - 0xa2, 0x2d, 0xbd, 0x55, 0xdb, 0xd9, 0xd8, 0x4e, 0x2f, 0xd8, 0x1e, 0x3a, 0x1e, 0x1b, 0x28, 0x5f, - 0x67, 0xf3, 0xc9, 0x49, 0x53, 0x7b, 0x76, 0xd2, 0xc4, 0x07, 0x11, 0xa3, 0xae, 0x1b, 0x8c, 0x87, - 0x59, 0x1c, 0x59, 0xc9, 0x81, 0xaf, 0x42, 0x69, 0x10, 0x2c, 0xa2, 0x31, 0x33, 0xd7, 0xb6, 0x50, - 0xab, 0xb1, 0x73, 0x25, 0xcf, 0xb6, 0x7a, 0xf3, 0x76, 0x4c, 0xea, 0xfa, 0x0b, 0x8f, 0x24, 0x01, - 0xf8, 0x1a, 0x54, 0x3c, 0x26, 0xe8, 0x84, 0x0a, 0x6a, 0xea, 0x4a, 0x8a, 0x99, 0x07, 0xf7, 0x98, - 0x88, 0x9c, 0x71, 0x2f, 0xf1, 0x77, 0x0a, 0x4f, 0x4e, 0x9a, 0x88, 0x64, 0x7c, 0x7c, 0x1d, 0x36, - 0xf9, 0x43, 0x27, 0x1c, 0xb9, 0xf4, 0x01, 0x73, 0x47, 0x3e, 0xf5, 0xd8, 0xe8, 0x11, 0x75, 0x9d, - 0x09, 0x15, 0x4e, 0xe0, 0x9b, 0x2f, 0xcb, 0x5b, 0xa8, 0x55, 0x21, 0xff, 0x97, 0x94, 0xbb, 0x92, - 0xd1, 0xa7, 0x1e, 0xfb, 0x26, 0xf3, 0xe3, 0x1e, 0xe8, 0x84, 0x4d, 0xcd, 0xbf, 0x25, 0xad, 0xb6, - 0xf3, 0xee, 0xea, 0xad, 0xaf, 0x15, 0xb2, 0x73, 0x59, 0xd6, 0xe1, 0xf8, 0xa4, 0x89, 0x9e, 0x9d, - 0x34, 0xcf, 0xd6, 0x99, 0xc8, 0x3c, 0x76, 0x13, 0x20, 0x7f, 0x1e, 0x2e, 0x83, 0xde, 0x3e, 0xd8, - 0x33, 0x34, 0x5c, 0x81, 0x02, 0x39, 0xbc, 0xdb, 0x35, 0x90, 0xfd, 0x27, 0x02, 0x3c, 0x10, 0x11, - 0xa3, 0xde, 0xa9, 0x6e, 0x6c, 0x42, 0x65, 0xc8, 0x7c, 0xea, 0x8b, 0xbd, 0x9b, 0x26, 0xda, 0x42, - 0xad, 0x2a, 0xc9, 0x6c, 0xfc, 0x29, 0x94, 0x13, 0x9a, 0x2a, 0x6c, 0x6d, 0xe7, 0xd2, 0x9b, 0x0b, - 0x4b, 0x52, 0x5a, 0xfa, 0xa8, 0x97, 0xe7, 0xf4, 0xa8, 0xaf, 0x60, 0x3d, 0xb9, 0x87, 0x87, 0x81, - 0xcf, 0x19, 0xc6, 0x50, 0x18, 0x07, 0x13, 0xa6, 0x94, 0x16, 0x89, 0x3a, 0x63, 0x13, 0xca, 0x5e, - 0x1c, 0xae, 0x54, 0x56, 0x49, 0x6a, 0xda, 0xff, 0x20, 0x80, 0x7c, 0x9c, 0x70, 0x1b, 0x4a, 0xaa, - 0x55, 0xe9, 0xd0, 0xfd, 0x2f, 0x97, 0xa7, 0x1a, 0x74, 0x40, 0x9d, 0xa8, 0xb3, 0x91, 0xcc, 0x5c, - 0x5d, 0x41, 0xed, 0x09, 0x0d, 0x05, 0x8b, 0x48, 0x12, 0x28, 0x2b, 0xc2, 0xa9, 0x17, 0xba, 0x8c, - 0x9b, 0x6b, 0x2a, 0x87, 0x91, 0xe7, 0x18, 0x28, 0x87, 0x9a, 0x12, 0x8d, 0xa4, 0x34, 0xfc, 0x05, - 0x54, 0xd9, 0x63, 0xe6, 0x85, 0x2e, 0x8d, 0x78, 0x32, 0x61, 0x38, 0x8f, 0xe9, 0x26, 0xae, 0x24, - 0x2a, 0xa7, 0xe2, 0xab, 0x00, 0x73, 0x87, 0x8b, 0x60, 0x16, 0x51, 0x8f, 0x9b, 0x85, 0xd7, 0x05, - 0xef, 0xa6, 0xbe, 0x24, 0x72, 0x85, 0x6c, 0x7f, 0x0e, 0xd5, 0xec, 0x3d, 0xb2, 0x62, 0x72, 0x32, - 0x55, 
0xc5, 0xea, 0x44, 0x9d, 0xf1, 0x06, 0x14, 0x1f, 0x51, 0x77, 0x11, 0xd7, 0xab, 0x4e, 0x62, - 0xc3, 0x6e, 0x43, 0x29, 0x7e, 0x42, 0xee, 0x97, 0x41, 0x28, 0xf1, 0xe3, 0x2b, 0x50, 0x57, 0x3b, - 0x27, 0xa8, 0x17, 0x8e, 0x3c, 0xae, 0x82, 0x75, 0x52, 0xcb, 0xb0, 0x1e, 0xb7, 0x7f, 0x5d, 0x83, - 0xc6, 0xe9, 0xa5, 0xc1, 0x5f, 0x42, 0x41, 0x1c, 0x85, 0x71, 0xaa, 0xc6, 0xce, 0x7b, 0x6f, 0x5b, - 0xae, 0xc4, 0x1c, 0x1e, 0x85, 0x8c, 0xa8, 0x00, 0xfc, 0x31, 0x60, 0x4f, 0x61, 0xa3, 0x29, 0xf5, - 0x1c, 0xf7, 0x48, 0x2d, 0x58, 0xd2, 0x61, 0x23, 0xf6, 0xdc, 0x52, 0x0e, 0xb9, 0x57, 0xf2, 0x99, - 0x73, 0xe6, 0x86, 0x66, 0x41, 0xf9, 0xd5, 0x59, 0x62, 0x0b, 0xdf, 0x11, 0x66, 0x31, 0xc6, 0xe4, - 0xd9, 0x3e, 0x02, 0xc8, 0x6f, 0xc2, 0x35, 0x28, 0x1f, 0xf6, 0xef, 0xf4, 0xf7, 0xef, 0xf5, 0x0d, - 0x4d, 0x1a, 0x37, 0xf6, 0x0f, 0xfb, 0xc3, 0x2e, 0x31, 0x10, 0xae, 0x42, 0xf1, 0x76, 0xfb, 0xf0, - 0x76, 0xd7, 0x58, 0xc3, 0xeb, 0x50, 0xdd, 0xdd, 0x1b, 0x0c, 0xf7, 0x6f, 0x93, 0x76, 0xcf, 0xd0, - 0x31, 0x86, 0x86, 0xf2, 0xe4, 0x58, 0x41, 0x86, 0x0e, 0x0e, 0x7b, 0xbd, 0x36, 0xb9, 0x6f, 0x14, - 0xe5, 0xca, 0xed, 0xf5, 0x6f, 0xed, 0x1b, 0x25, 0x5c, 0x87, 0xca, 0x60, 0xd8, 0x1e, 0x76, 0x07, - 0xdd, 0xa1, 0x51, 0xb6, 0xef, 0x40, 0x29, 0xbe, 0xfa, 0x1c, 0x06, 0xd1, 0xfe, 0x09, 0x41, 0x25, - 0x1d, 0x9e, 0xf3, 0x18, 0xec, 0x53, 0x23, 0xf1, 0xd6, 0x96, 0xeb, 0x67, 0x5b, 0x7e, 0x5c, 0x84, - 0x6a, 0x36, 0x8c, 0xf8, 0x32, 0x54, 0xc7, 0xc1, 0xc2, 0x17, 0x23, 0xc7, 0x17, 0xaa, 0xe5, 0x85, - 0x5d, 0x8d, 0x54, 0x14, 0xb4, 0xe7, 0x0b, 0x7c, 0x05, 0x6a, 0xb1, 0x7b, 0xea, 0x06, 0x34, 0xfe, - 0xa8, 0xa0, 0x5d, 0x8d, 0x80, 0x02, 0x6f, 0x49, 0x0c, 0x1b, 0xa0, 0xf3, 0x85, 0xa7, 0x6e, 0x42, - 0x44, 0x1e, 0xf1, 0x25, 0x28, 0xf1, 0xf1, 0x9c, 0x79, 0x54, 0x35, 0xf7, 0x22, 0x49, 0x2c, 0xfc, - 0x3e, 0x34, 0xbe, 0x67, 0x51, 0x30, 0x12, 0xf3, 0x88, 0xf1, 0x79, 0xe0, 0x4e, 0x54, 0xa3, 0x11, - 0x59, 0x97, 0xe8, 0x30, 0x05, 0xf1, 0x07, 0x09, 0x2d, 0xd7, 0x55, 0x52, 0xba, 0x10, 0xa9, 0x4b, - 0xfc, 0x46, 0xaa, 0xed, 0x23, 0x30, 0x56, 0x78, 0xb1, 0xc0, 0xb2, 0x12, 0x88, 0x48, 0x23, 0x63, - 0xc6, 0x22, 0xdb, 0xd0, 0xf0, 0xd9, 0x8c, 0x0a, 0xe7, 0x11, 0x1b, 0xf1, 0x90, 0xfa, 0xdc, 0xac, - 0xbc, 0xfe, 0x33, 0xd6, 0x59, 0x8c, 0x1f, 0x32, 0x31, 0x08, 0xa9, 0x9f, 0x6c, 0xe8, 0x7a, 0x1a, - 0x21, 0x31, 0x8e, 0x3f, 0x84, 0x0b, 0x59, 0x8a, 0x09, 0x73, 0x05, 0xe5, 0x66, 0x75, 0x4b, 0x6f, - 0x61, 0x92, 0x65, 0xbe, 0xa9, 0xd0, 0x53, 0x44, 0xa5, 0x8d, 0x9b, 0xb0, 0xa5, 0xb7, 0x50, 0x4e, - 0x54, 0xc2, 0xe4, 0xe7, 0xad, 0x11, 0x06, 0xdc, 0x59, 0x11, 0x55, 0xfb, 0x6f, 0x51, 0x69, 0x44, - 0x26, 0x2a, 0x4b, 0x91, 0x88, 0xaa, 0xc7, 0xa2, 0x52, 0x38, 0x17, 0x95, 0x11, 0x13, 0x51, 0xeb, - 0xb1, 0xa8, 0x14, 0x4e, 0x44, 0x5d, 0x07, 0x88, 0x18, 0x67, 0x62, 0x34, 0x97, 0x95, 0x6f, 0xa8, - 0x8f, 0xc0, 0xe5, 0x37, 0x7c, 0xc6, 0xb6, 0x89, 0x64, 0xed, 0x3a, 0xbe, 0x20, 0xd5, 0x28, 0x3d, - 0x9e, 0x99, 0xbf, 0x0b, 0x67, 0xe7, 0xef, 0x1a, 0x54, 0xb3, 0xd0, 0xd3, 0xfb, 0x5c, 0x06, 0xfd, - 0x7e, 0x77, 0x60, 0x20, 0x5c, 0x82, 0xb5, 0xfe, 0xbe, 0xb1, 0x96, 0xef, 0xb4, 0xbe, 0x59, 0xf8, - 0xf9, 0x77, 0x0b, 0x75, 0xca, 0x50, 0x54, 0xe2, 0x3b, 0x75, 0x80, 0xbc, 0xf7, 0xf6, 0x75, 0x80, - 0xbc, 0x50, 0x72, 0xfc, 0x82, 0xe9, 0x94, 0xb3, 0x78, 0x9e, 0x2f, 0x92, 0xc4, 0x92, 0xb8, 0xcb, - 0xfc, 0x99, 0x98, 0xab, 0x31, 0x5e, 0x27, 0x89, 0xd5, 0xf9, 0xfa, 0xf8, 0xb9, 0xa5, 0x3d, 0x7d, - 0x6e, 0x69, 0xaf, 0x9e, 0x5b, 0xe8, 0x87, 0xa5, 0x85, 0xfe, 0x58, 0x5a, 0xe8, 0xc9, 0xd2, 0x42, - 0xc7, 0x4b, 0x0b, 0xfd, 0xb5, 0xb4, 0xd0, 0xcb, 0xa5, 0xa5, 0xbd, 0x5a, 0x5a, 0xe8, 0x97, 0x17, - 0x96, 0x76, 0xfc, 0xc2, 0xd2, 
0x9e, 0xbe, 0xb0, 0xb4, 0x6f, 0xb3, 0x3f, 0x58, 0x0f, 0x4a, 0xea, - 0x1f, 0xd5, 0x67, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x82, 0x66, 0x44, 0xf2, 0x81, 0x09, 0x00, - 0x00, + // 1183 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xbd, 0x8f, 0x1b, 0x45, + 0x14, 0xdf, 0xb9, 0xf5, 0xd7, 0x3e, 0xfb, 0x9c, 0xcd, 0x70, 0x0a, 0xcb, 0x41, 0xd6, 0x8e, 0x11, + 0x60, 0x21, 0x74, 0xa0, 0x43, 0x80, 0x12, 0x45, 0x48, 0x76, 0xe2, 0xe4, 0x4e, 0x89, 0x7d, 0xa7, + 0xb1, 0x8f, 0x28, 0x34, 0xd6, 0xc4, 0x37, 0xb6, 0x57, 0xd9, 0x2f, 0x76, 0xc6, 0x51, 0x8e, 0x8a, + 0x06, 0x44, 0x49, 0x43, 0x43, 0x87, 0x68, 0x68, 0xf9, 0x2f, 0x52, 0x5e, 0x19, 0xa5, 0x38, 0x11, + 0xa7, 0x09, 0x5d, 0x0a, 0xfe, 0x00, 0x34, 0xb3, 0x5f, 0xbe, 0x5c, 0x22, 0x9a, 0x74, 0xf3, 0x7e, + 0xef, 0x63, 0x7e, 0xf3, 0xde, 0xef, 0xad, 0x0d, 0xb5, 0x49, 0x10, 0x09, 0xf6, 0x70, 0x2b, 0x8c, + 0x02, 0x11, 0xe0, 0x4a, 0x6c, 0x85, 0xf7, 0x36, 0x37, 0x66, 0xc1, 0x2c, 0x50, 0xe0, 0xa7, 0xf2, + 0x14, 0xfb, 0x5b, 0xef, 0xc0, 0xf9, 0x3e, 0xe3, 0x9c, 0xce, 0xd8, 0x1d, 0x47, 0xcc, 0xbb, 0x8b, + 0x29, 0x61, 0xd3, 0x2b, 0x85, 0x17, 0xbf, 0x37, 0xb4, 0xd6, 0x8f, 0x3a, 0xd4, 0xee, 0x44, 0x8e, + 0x60, 0x84, 0x7d, 0xb7, 0x60, 0x5c, 0xe0, 0x7d, 0x00, 0xe1, 0x78, 0x8c, 0xb3, 0xc8, 0x61, 0xdc, + 0x42, 0x4d, 0xbd, 0x5d, 0xdd, 0xde, 0xd8, 0x4a, 0x2f, 0xd8, 0x1a, 0x39, 0x1e, 0x1b, 0x2a, 0x5f, + 0x77, 0xf3, 0xd1, 0x49, 0x43, 0x7b, 0x72, 0xd2, 0xc0, 0xfb, 0x11, 0xa3, 0xae, 0x1b, 0x4c, 0x46, + 0x59, 0x1e, 0x59, 0xa9, 0x81, 0x2f, 0x43, 0x69, 0x18, 0x2c, 0xa2, 0x09, 0xb3, 0xd6, 0x9a, 0xa8, + 0x5d, 0xdf, 0xbe, 0x94, 0x57, 0x5b, 0xbd, 0x79, 0x2b, 0x0e, 0xea, 0xf9, 0x0b, 0x8f, 0x24, 0x09, + 0xf8, 0x0a, 0x54, 0x3c, 0x26, 0xe8, 0x21, 0x15, 0xd4, 0xd2, 0x15, 0x15, 0x2b, 0x4f, 0xee, 0x33, + 0x11, 0x39, 0x93, 0x7e, 0xe2, 0xef, 0x16, 0x1e, 0x9d, 0x34, 0x10, 0xc9, 0xe2, 0xf1, 0x55, 0xd8, + 0xe4, 0xf7, 0x9d, 0x70, 0xec, 0xd2, 0x7b, 0xcc, 0x1d, 0xfb, 0xd4, 0x63, 0xe3, 0x07, 0xd4, 0x75, + 0x0e, 0xa9, 0x70, 0x02, 0xdf, 0x7a, 0x5e, 0x6e, 0xa2, 0x76, 0x85, 0xbc, 0x2d, 0x43, 0x6e, 0xcb, + 0x88, 0x01, 0xf5, 0xd8, 0x37, 0x99, 0x1f, 0xf7, 0x41, 0x27, 0x6c, 0x6a, 0xfd, 0x23, 0xc3, 0xaa, + 0xdb, 0xef, 0xae, 0xde, 0xfa, 0x52, 0x23, 0xbb, 0x17, 0x65, 0x1f, 0x8e, 0x4f, 0x1a, 0xe8, 0xc9, + 0x49, 0xe3, 0x6c, 0x9f, 0x89, 0xac, 0xd3, 0x6a, 0x00, 0xe4, 0xcf, 0xc3, 0x65, 0xd0, 0x3b, 0xfb, + 0xbb, 0xa6, 0x86, 0x2b, 0x50, 0x20, 0x07, 0xb7, 0x7b, 0x26, 0x6a, 0xfd, 0x85, 0x00, 0x0f, 0x45, + 0xc4, 0xa8, 0x77, 0x6a, 0x1a, 0x9b, 0x50, 0x19, 0x31, 0x9f, 0xfa, 0x62, 0xf7, 0xba, 0x85, 0x9a, + 0xa8, 0x6d, 0x90, 0xcc, 0xc6, 0x9f, 0x41, 0x39, 0x09, 0x53, 0x8d, 0xad, 0x6e, 0x5f, 0x78, 0x75, + 0x63, 0x49, 0x1a, 0x96, 0x3e, 0xea, 0xf9, 0x1b, 0x7a, 0xd4, 0xaf, 0x08, 0xd6, 0x93, 0x8b, 0x78, + 0x18, 0xf8, 0x9c, 0x61, 0x0c, 0x85, 0x49, 0x70, 0xc8, 0x14, 0xd5, 0x22, 0x51, 0x67, 0x6c, 0x41, + 0xd9, 0x8b, 0xf3, 0x15, 0x4d, 0x83, 0xa4, 0xa6, 0xf4, 0x0c, 0xa9, 0x17, 0xba, 0x8c, 0x5b, 0x7a, + 0x13, 0xb5, 0x75, 0x92, 0x9a, 0xd8, 0x06, 0xd8, 0x71, 0xb8, 0x08, 0x66, 0x11, 0xf5, 0xb8, 0x55, + 0x50, 0xce, 0x15, 0x04, 0xbf, 0x07, 0x46, 0xef, 0x21, 0xf3, 0x42, 0x97, 0x46, 0xdc, 0x2a, 0x2a, + 0x77, 0x0e, 0xb4, 0xfe, 0x45, 0x00, 0xb9, 0x4e, 0x71, 0x07, 0x4a, 0x4a, 0x03, 0xa9, 0x9a, 0xdf, + 0xca, 0xdf, 0xad, 0x26, 0xbf, 0x4f, 0x9d, 0xa8, 0xbb, 0x91, 0x88, 0xb9, 0xa6, 0xa0, 0xce, 0x21, + 0x0d, 0x05, 0x8b, 0x48, 0x92, 0x28, 0x5b, 0xcd, 0x13, 0xa6, 0x6b, 0xaa, 0x86, 0x99, 0xd7, 0x88, + 0x39, 0x2b, 0xf9, 0x69, 0x24, 0x0d, 0xc3, 0x5f, 0x82, 0xc1, 0x32, 0x86, 0xb1, 0x74, 0x71, 0x9e, + 
0x93, 0x72, 0x4d, 0xb2, 0xf2, 0x50, 0x7c, 0x19, 0x60, 0xbe, 0xfa, 0xf2, 0x97, 0x08, 0x67, 0x3d, + 0x48, 0x32, 0x57, 0x82, 0x5b, 0x5f, 0x80, 0x91, 0xbd, 0x47, 0x4e, 0x42, 0x4a, 0x5e, 0x4d, 0xa2, + 0x46, 0xd4, 0x19, 0x6f, 0x40, 0xf1, 0x01, 0x75, 0x17, 0xf1, 0x1c, 0x6a, 0x24, 0x36, 0x5a, 0x1d, + 0x28, 0xc5, 0x4f, 0xc8, 0xfd, 0x32, 0x09, 0x25, 0x7e, 0x7c, 0x09, 0x6a, 0x6a, 0x99, 0x05, 0xf5, + 0xc2, 0xb1, 0xc7, 0x55, 0xb2, 0x4e, 0xaa, 0x19, 0xd6, 0xe7, 0xad, 0xdf, 0xd6, 0xa0, 0x7e, 0x7a, + 0x1b, 0xf1, 0x57, 0x50, 0x10, 0x47, 0x61, 0x5c, 0xaa, 0xbe, 0xfd, 0xfe, 0xeb, 0xb6, 0x36, 0x31, + 0x47, 0x47, 0x21, 0x23, 0x2a, 0x01, 0x7f, 0x02, 0xd8, 0x53, 0xd8, 0x78, 0x4a, 0x3d, 0xc7, 0x3d, + 0x52, 0x9b, 0x9b, 0x28, 0xc7, 0x8c, 0x3d, 0x37, 0x94, 0x43, 0x2e, 0xac, 0x7c, 0xe6, 0x9c, 0xb9, + 0xa1, 0x92, 0x88, 0x41, 0xd4, 0x59, 0x62, 0x0b, 0xdf, 0x11, 0x4a, 0x17, 0x06, 0x51, 0xe7, 0xd6, + 0x11, 0x40, 0x7e, 0x13, 0xae, 0x42, 0xf9, 0x60, 0x70, 0x6b, 0xb0, 0x77, 0x67, 0x60, 0x6a, 0xd2, + 0xb8, 0xb6, 0x77, 0x30, 0x18, 0xf5, 0x88, 0x89, 0xb0, 0x01, 0xc5, 0x9b, 0x9d, 0x83, 0x9b, 0x3d, + 0x73, 0x0d, 0xaf, 0x83, 0xb1, 0xb3, 0x3b, 0x1c, 0xed, 0xdd, 0x24, 0x9d, 0xbe, 0xa9, 0x63, 0x0c, + 0x75, 0xe5, 0xc9, 0xb1, 0x82, 0x4c, 0x1d, 0x1e, 0xf4, 0xfb, 0x1d, 0x72, 0xd7, 0x2c, 0xca, 0x5d, + 0xde, 0x1d, 0xdc, 0xd8, 0x33, 0x4b, 0xb8, 0x06, 0x95, 0xe1, 0xa8, 0x33, 0xea, 0x0d, 0x7b, 0x23, + 0xb3, 0xdc, 0xba, 0x05, 0xa5, 0xf8, 0xea, 0x37, 0x20, 0xc4, 0xd6, 0x4f, 0x08, 0x2a, 0xa9, 0x78, + 0xde, 0x84, 0xb0, 0x4f, 0x49, 0xe2, 0xb5, 0x23, 0xd7, 0xcf, 0x8e, 0xfc, 0xb8, 0x08, 0x46, 0x26, + 0x46, 0x7c, 0x11, 0x8c, 0x49, 0xb0, 0xf0, 0xc5, 0xd8, 0xf1, 0x85, 0x1a, 0x79, 0x61, 0x47, 0x23, + 0x15, 0x05, 0xed, 0xfa, 0x02, 0x5f, 0x82, 0x6a, 0xec, 0x9e, 0xba, 0x01, 0x8d, 0xbf, 0x56, 0x68, + 0x47, 0x23, 0xa0, 0xc0, 0x1b, 0x12, 0xc3, 0x26, 0xe8, 0x7c, 0xe1, 0xa9, 0x9b, 0x10, 0x91, 0x47, + 0x7c, 0x01, 0x4a, 0x7c, 0x32, 0x67, 0x1e, 0x55, 0xc3, 0x3d, 0x4f, 0x12, 0x0b, 0x7f, 0x00, 0xf5, + 0xef, 0x59, 0x14, 0x8c, 0xc5, 0x3c, 0x62, 0x7c, 0x1e, 0xb8, 0x87, 0x6a, 0xd0, 0x88, 0xac, 0x4b, + 0x74, 0x94, 0x82, 0xf8, 0xc3, 0x24, 0x2c, 0xe7, 0x55, 0x52, 0xbc, 0x10, 0xa9, 0x49, 0xfc, 0x5a, + 0xca, 0xed, 0x63, 0x30, 0x57, 0xe2, 0x62, 0x82, 0x65, 0x45, 0x10, 0x91, 0x7a, 0x16, 0x19, 0x93, + 0xec, 0x40, 0xdd, 0x67, 0x33, 0x2a, 0x9c, 0x07, 0x6c, 0xcc, 0x43, 0xea, 0x73, 0xab, 0xf2, 0xf2, + 0xef, 0x63, 0x77, 0x31, 0xb9, 0xcf, 0xc4, 0x30, 0xa4, 0x7e, 0xb2, 0xa1, 0xeb, 0x69, 0x86, 0xc4, + 0x38, 0xfe, 0x08, 0xce, 0x65, 0x25, 0x0e, 0x99, 0x2b, 0x28, 0xb7, 0x8c, 0xa6, 0xde, 0xc6, 0x24, + 0xab, 0x7c, 0x5d, 0xa1, 0xa7, 0x02, 0x15, 0x37, 0x6e, 0x41, 0x53, 0x6f, 0xa3, 0x3c, 0x50, 0x11, + 0x93, 0x9f, 0xb7, 0x7a, 0x18, 0x70, 0x67, 0x85, 0x54, 0xf5, 0xff, 0x49, 0xa5, 0x19, 0x19, 0xa9, + 0xac, 0x44, 0x42, 0xaa, 0x16, 0x93, 0x4a, 0xe1, 0x9c, 0x54, 0x16, 0x98, 0x90, 0x5a, 0x8f, 0x49, + 0xa5, 0x70, 0x42, 0xea, 0x2a, 0x40, 0xc4, 0x38, 0x13, 0xe3, 0xb9, 0xec, 0x7c, 0x5d, 0x7d, 0x04, + 0x2e, 0xbe, 0xe2, 0x33, 0xb6, 0x45, 0x64, 0xd4, 0x8e, 0xe3, 0x0b, 0x62, 0x44, 0xe9, 0xf1, 0x8c, + 0xfe, 0xce, 0x9d, 0xd5, 0xdf, 0x15, 0x30, 0xb2, 0xd4, 0xd3, 0xfb, 0x5c, 0x06, 0xfd, 0x6e, 0x6f, + 0x68, 0x22, 0x5c, 0x82, 0xb5, 0xc1, 0x9e, 0xb9, 0x96, 0xef, 0xb4, 0xbe, 0x59, 0xf8, 0xf9, 0x0f, + 0x1b, 0x75, 0xcb, 0x50, 0x54, 0xe4, 0xbb, 0x35, 0x80, 0x7c, 0xf6, 0xad, 0xab, 0x00, 0x79, 0xa3, + 0xa4, 0xfc, 0x82, 0xe9, 0x94, 0xb3, 0x58, 0xcf, 0xe7, 0x49, 0x62, 0x49, 0xdc, 0x65, 0xfe, 0x4c, + 0xcc, 0x95, 0x8c, 0xd7, 0x49, 0x62, 0x75, 0xbf, 0x3e, 0x7e, 0x6a, 0x6b, 0x8f, 0x9f, 0xda, 0xda, + 0x8b, 0xa7, 0x36, 0xfa, 
0x61, 0x69, 0xa3, 0x3f, 0x97, 0x36, 0x7a, 0xb4, 0xb4, 0xd1, 0xf1, 0xd2, + 0x46, 0x7f, 0x2f, 0x6d, 0xf4, 0x7c, 0x69, 0x6b, 0x2f, 0x96, 0x36, 0xfa, 0xe5, 0x99, 0xad, 0x1d, + 0x3f, 0xb3, 0xb5, 0xc7, 0xcf, 0x6c, 0xed, 0xdb, 0xec, 0x9f, 0xdb, 0xbd, 0x92, 0xfa, 0xab, 0xf6, + 0xf9, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x10, 0xfc, 0x83, 0xda, 0x09, 0x00, 0x00, } func (x WriteRequest_SourceEnum) String() string { @@ -1164,6 +1192,15 @@ func (this *WriteResponse) Equal(that interface{}) bool { if this.Message != that1.Message { return false } + if this.Samples != that1.Samples { + return false + } + if this.Histograms != that1.Histograms { + return false + } + if this.Exemplars != that1.Exemplars { + return false + } return true } func (this *TimeSeries) Equal(that interface{}) bool { @@ -1638,10 +1675,13 @@ func (this *WriteResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 9) s = append(s, "&cortexpb.WriteResponse{") s = append(s, "Code: "+fmt.Sprintf("%#v", this.Code)+",\n") s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") + s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") + s = append(s, "Histograms: "+fmt.Sprintf("%#v", this.Histograms)+",\n") + s = append(s, "Exemplars: "+fmt.Sprintf("%#v", this.Exemplars)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1999,6 +2039,21 @@ func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Exemplars != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Exemplars)) + i-- + dAtA[i] = 0x28 + } + if m.Histograms != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Histograms)) + i-- + dAtA[i] = 0x20 + } + if m.Samples != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Samples)) + i-- + dAtA[i] = 0x18 + } if len(m.Message) > 0 { i -= len(m.Message) copy(dAtA[i:], m.Message) @@ -2612,6 +2667,15 @@ func (m *WriteResponse) Size() (n int) { if l > 0 { n += 1 + l + sovCortex(uint64(l)) } + if m.Samples != 0 { + n += 1 + sovCortex(uint64(m.Samples)) + } + if m.Histograms != 0 { + n += 1 + sovCortex(uint64(m.Histograms)) + } + if m.Exemplars != 0 { + n += 1 + sovCortex(uint64(m.Exemplars)) + } return n } @@ -2906,6 +2970,9 @@ func (this *WriteResponse) String() string { s := strings.Join([]string{`&WriteResponse{`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Samples:` + fmt.Sprintf("%v", this.Samples) + `,`, + `Histograms:` + fmt.Sprintf("%v", this.Histograms) + `,`, + `Exemplars:` + fmt.Sprintf("%v", this.Exemplars) + `,`, `}`, }, "") return s @@ -3566,6 +3633,63 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + m.Samples = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Samples |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + m.Histograms = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Histograms |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + m.Exemplars = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Exemplars |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipCortex(dAtA[iNdEx:]) diff --git a/pkg/cortexpb/cortex.proto b/pkg/cortexpb/cortex.proto index f2995afbf22..fa2caf287c2 100644 --- a/pkg/cortexpb/cortex.proto +++ b/pkg/cortexpb/cortex.proto @@ -36,6 +36,12 @@ message StreamWriteRequest { message WriteResponse { int32 code = 1; string message = 2; + // Samples represents X-Prometheus-Remote-Write-Written-Samples + int64 Samples = 3; + // Histograms represents X-Prometheus-Remote-Write-Written-Histograms + int64 Histograms = 4; + // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars + int64 Exemplars = 5; } message TimeSeries { diff --git a/pkg/cortexpb/histograms.go b/pkg/cortexpb/histograms.go index 60e7207a19a..d05dbaa7727 100644 --- a/pkg/cortexpb/histograms.go +++ b/pkg/cortexpb/histograms.go @@ -16,6 +16,7 @@ package cortexpb import ( "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" ) func (h Histogram) IsFloatHistogram() bool { @@ -23,6 +24,30 @@ func (h Histogram) IsFloatHistogram() bool { return ok } +func HistogramWriteV2ProtoToHistogramProto(h writev2.Histogram) Histogram { + ph := Histogram{ + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + NegativeSpans: spansWriteV2ProtoToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeDeltas, + NegativeCounts: h.NegativeCounts, + PositiveSpans: spansWriteV2ProtoToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveDeltas, + PositiveCounts: h.PositiveCounts, + ResetHint: Histogram_ResetHint(h.ResetHint), + TimestampMs: h.Timestamp, + } + if h.IsFloatHistogram() { + ph.Count = &Histogram_CountFloat{CountFloat: h.GetCountFloat()} + ph.ZeroCount = &Histogram_ZeroCountFloat{ZeroCountFloat: h.GetZeroCountFloat()} + } else { + ph.Count = &Histogram_CountInt{CountInt: h.GetCountInt()} + ph.ZeroCount = &Histogram_ZeroCountInt{ZeroCountInt: h.GetZeroCountInt()} + } + return ph +} + // HistogramPromProtoToHistogramProto converts a prometheus protobuf Histogram to cortex protobuf Histogram. 
func HistogramPromProtoToHistogramProto(h prompb.Histogram) Histogram { ph := Histogram{ @@ -155,3 +180,12 @@ func spansPromProtoToSpansProto(s []prompb.BucketSpan) []BucketSpan { return spans } + +func spansWriteV2ProtoToSpansProto(s []writev2.BucketSpan) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} diff --git a/pkg/cortexpb/signature.go b/pkg/cortexpb/signature.go index 42343e6f4c1..a11c5bcd025 100644 --- a/pkg/cortexpb/signature.go +++ b/pkg/cortexpb/signature.go @@ -9,7 +9,7 @@ import ( // Ref: https://github.com/prometheus/common/blob/main/model/fnv.go func LabelsToFingerprint(lset labels.Labels) model.Fingerprint { - if len(lset) == 0 { + if lset.Len() == 0 { return model.Fingerprint(hashNew()) } diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 0fc11c19d19..80f4ec9025d 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -40,6 +40,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" util_math "github.com/cortexproject/cortex/pkg/util/math" + "github.com/cortexproject/cortex/pkg/util/requestmeta" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -153,6 +154,7 @@ type Config struct { ExtendWrites bool `yaml:"extend_writes"` SignWriteRequestsEnabled bool `yaml:"sign_write_requests"` UseStreamPush bool `yaml:"use_stream_push"` + RemoteWriteV2Enabled bool `yaml:"remote_writev2_enabled"` // Distributors ring DistributorRing RingConfig `yaml:"ring"` @@ -191,8 +193,10 @@ type InstanceLimits struct { } type OTLPConfig struct { - ConvertAllAttributes bool `yaml:"convert_all_attributes"` - DisableTargetInfo bool `yaml:"disable_target_info"` + ConvertAllAttributes bool `yaml:"convert_all_attributes"` + DisableTargetInfo bool `yaml:"disable_target_info"` + AllowDeltaTemporality bool `yaml:"allow_delta_temporality"` + EnableTypeAndUnitLabels bool `yaml:"enable_type_and_unit_labels"` } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -212,6 +216,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ExtendWrites, "distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.") f.BoolVar(&cfg.ZoneResultsQuorumMetadata, "distributor.zone-results-quorum-metadata", false, "Experimental, this flag may change in the future. If zone awareness and this both enabled, when querying metadata APIs (labels names and values for now), only results from quorum number of zones will be included.") f.IntVar(&cfg.NumPushWorkers, "distributor.num-push-workers", 0, "EXPERIMENTAL: Number of go routines to handle push calls from distributors to ingesters. When no workers are available, a new goroutine will be spawned automatically. 
If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.")
+ f.BoolVar(&cfg.RemoteWriteV2Enabled, "distributor.remote-writev2-enabled", false, "EXPERIMENTAL: If true, accept Prometheus remote write v2 protocol push requests.")
f.Float64Var(&cfg.InstanceLimits.MaxIngestionRate, "distributor.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.")
f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequests, "distributor.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.")
@@ -219,6 +224,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.OTLPConfig.ConvertAllAttributes, "distributor.otlp.convert-all-attributes", false, "If true, all resource attributes are converted to labels.")
f.BoolVar(&cfg.OTLPConfig.DisableTargetInfo, "distributor.otlp.disable-target-info", false, "If true, a target_info metric is not ingested. (refer to: https://github.com/prometheus/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems)")
+ f.BoolVar(&cfg.OTLPConfig.AllowDeltaTemporality, "distributor.otlp.allow-delta-temporality", false, "EXPERIMENTAL: If true, allow delta temporality OTLP metrics to be ingested.")
+ f.BoolVar(&cfg.OTLPConfig.EnableTypeAndUnitLabels, "distributor.otlp.enable-type-and-unit-labels", false, "EXPERIMENTAL: If true, the '__type__' and '__unit__' labels are added for OTLP metrics.")
}

// Validate config and returns error on failure
@@ -820,7 +827,17 @@ func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*co
return nil, err
}

- return &cortexpb.WriteResponse{}, firstPartialErr
+ resp := &cortexpb.WriteResponse{}
+ if d.cfg.RemoteWriteV2Enabled {
+ // We simply expose the validated sample, histogram, and exemplar
+ // counts in the response headers. This should be improved to expose
+ // the values actually written by the ingesters.
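// Reviewer note: the counters below are pre-replication validation counts, so
// a partially failed write can still report full numbers; each maps 1:1 to an
// X-Prometheus-Remote-Write-Written-* response header, per the WriteResponse
// proto comments.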
+ resp.Samples = int64(validatedFloatSamples) + resp.Histograms = int64(validatedHistogramSamples) + resp.Exemplars = int64(validatedExemplars) + } + + return resp, firstPartialErr } func (d *Distributor) updateLabelSetMetrics() { @@ -892,9 +909,9 @@ func (d *Distributor) doBatch(ctx context.Context, req *cortexpb.WriteRequest, s if sp := opentracing.SpanFromContext(ctx); sp != nil { localCtx = opentracing.ContextWithSpan(localCtx, sp) } - // Get any HTTP headers that are supposed to be added to logs and add to localCtx for later use - if headerMap := util_log.HeaderMapFromContext(ctx); headerMap != nil { - localCtx = util_log.ContextWithHeaderMap(localCtx, headerMap) + // Get any HTTP request metadata that are supposed to be added to logs and add to localCtx for later use + if requestContextMap := requestmeta.MapFromContext(ctx); requestContextMap != nil { + localCtx = requestmeta.ContextWithRequestMetadataMap(localCtx, requestContextMap) } // Get clientIP(s) from Context and add it to localCtx source := util.GetSourceIPsFromOutgoingCtx(ctx) @@ -1017,7 +1034,7 @@ func (d *Distributor) prepareSeriesKeys(ctx context.Context, req *cortexpb.Write if mrc := limits.MetricRelabelConfigs; len(mrc) > 0 { l, _ := relabel.Process(cortexpb.FromLabelAdaptersToLabels(ts.Labels), mrc...) - if len(l) == 0 { + if l.Len() == 0 { // all labels are gone, samples will be discarded d.validateMetrics.DiscardedSamples.WithLabelValues( validation.DroppedByRelabelConfiguration, diff --git a/pkg/distributor/distributor_ring.go b/pkg/distributor/distributor_ring.go index f1b0fa2fb3d..5a49fa7a716 100644 --- a/pkg/distributor/distributor_ring.go +++ b/pkg/distributor/distributor_ring.go @@ -18,9 +18,10 @@ import ( // is used to strip down the config to the minimum, and avoid confusion // to the user. type RingConfig struct { - KVStore kv.Config `yaml:"kvstore"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + KVStore kv.Config `yaml:"kvstore"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` // Instance details InstanceID string `yaml:"instance_id" doc:"hidden"` @@ -44,6 +45,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { cfg.KVStore.RegisterFlagsWithPrefix("distributor.ring.", "collectors/", f) f.DurationVar(&cfg.HeartbeatPeriod, "distributor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring. 0 = disabled.") f.DurationVar(&cfg.HeartbeatTimeout, "distributor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which distributors are considered unhealthy within the ring. 0 = never (timeout disabled).") + f.BoolVar(&cfg.DetailedMetricsEnabled, "distributor.ring.detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. 
Disabling them can significantly decrease the number of metrics emitted.") // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} @@ -94,6 +96,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.KVStore = cfg.KVStore rc.HeartbeatTimeout = cfg.HeartbeatTimeout rc.ReplicationFactor = 1 + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled return rc } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 5ad019c4bf9..c9f931199a2 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -1778,53 +1778,56 @@ func TestDistributor_Push_LabelRemoval(t *testing.T) { { removeReplica: true, removeLabels: []string{"cluster"}, - inputSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "cluster", Value: "one"}, - {Name: "__replica__", Value: "two"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - }, + inputSeries: labels.FromStrings( + "__name__", "some_metric", + "cluster", "one", + "__replica__", "two", + ), + expectedSeries: labels.FromStrings( + "__name__", "some_metric", + ), }, + // Remove multiple labels and replica. { removeReplica: true, removeLabels: []string{"foo", "some"}, - inputSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "cluster", Value: "one"}, - {Name: "__replica__", Value: "two"}, - {Name: "foo", Value: "bar"}, - {Name: "some", Value: "thing"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "cluster", Value: "one"}, - }, + inputSeries: labels.FromStrings( + "__name__", "some_metric", + "cluster", "one", + "__replica__", "two", + "foo", "bar", + "some", "thing", + ), + expectedSeries: labels.FromStrings( + "__name__", "some_metric", + "cluster", "one", + ), }, + // Don't remove any labels. { removeReplica: false, - inputSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "__replica__", Value: "two"}, - {Name: "cluster", Value: "one"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "__replica__", Value: "two"}, - {Name: "cluster", Value: "one"}, - }, + inputSeries: labels.FromStrings( + "__name__", "some_metric", + "__replica__", "two", + "cluster", "one", + ), + expectedSeries: labels.FromStrings( + "__name__", "some_metric", + "__replica__", "two", + "cluster", "one", + ), }, + // No labels left. 
{ removeReplica: true, removeLabels: []string{"cluster"}, - inputSeries: labels.Labels{ - {Name: "cluster", Value: "one"}, - {Name: "__replica__", Value: "two"}, - }, + inputSeries: labels.FromStrings( + "cluster", "one", + "__replica__", "two", + ), expectedSeries: labels.Labels{}, exemplars: []cortexpb.Exemplar{ {Labels: cortexpb.FromLabelsToLabelAdapters(labels.FromStrings("test", "a")), Value: 1, TimestampMs: 0}, @@ -1897,13 +1900,9 @@ func TestDistributor_Push_LabelRemoval_RemovingNameLabelWillError(t *testing.T) } tc := testcase{ - removeReplica: true, - removeLabels: []string{"__name__"}, - inputSeries: labels.Labels{ - {Name: "__name__", Value: "some_metric"}, - {Name: "cluster", Value: "one"}, - {Name: "__replica__", Value: "two"}, - }, + removeReplica: true, + removeLabels: []string{"__name__"}, + inputSeries: labels.FromStrings("__name__", "some_metric", "cluster", "one", "__replica__", "two"), expectedSeries: labels.Labels{}, } @@ -1937,66 +1936,70 @@ func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t * expectedToken uint32 }{ "metric_1 with value_1": { - inputSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - }, + inputSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + ), + expectedSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + ), expectedToken: 0xec0a2e9d, }, + "metric_1 with value_1 and dropped label due to config": { - inputSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - {Name: "dropped", Value: "unused"}, // will be dropped, doesn't need to be in correct order - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - }, + inputSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + "dropped", "unused", + ), + expectedSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + ), expectedToken: 0xec0a2e9d, }, + "metric_1 with value_1 and dropped HA replica label": { - inputSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - {Name: "__replica__", Value: "replica_1"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "cluster", Value: "cluster_1"}, - {Name: "key", Value: "value_1"}, - }, + inputSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + "__replica__", "replica_1", + ), + expectedSeries: labels.FromStrings( + "__name__", "metric_1", + "cluster", "cluster_1", + "key", "value_1", + ), expectedToken: 0xec0a2e9d, }, + "metric_2 with value_1": { - inputSeries: labels.Labels{ - {Name: "__name__", Value: "metric_2"}, - {Name: "key", Value: "value_1"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "metric_2"}, - {Name: "key", Value: "value_1"}, - }, + inputSeries: labels.FromStrings( + "__name__", "metric_2", + "key", "value_1", + ), + expectedSeries: labels.FromStrings( + "__name__", "metric_2", + "key", "value_1", + ), 
expectedToken: 0xa60906f2, }, + "metric_1 with value_2": { - inputSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "key", Value: "value_2"}, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "metric_1"}, - {Name: "key", Value: "value_2"}, - }, + inputSeries: labels.FromStrings( + "__name__", "metric_1", + "key", "value_2", + ), + expectedSeries: labels.FromStrings( + "__name__", "metric_1", + "key", "value_2", + ), expectedToken: 0x18abc8a2, }, } @@ -2039,10 +2042,7 @@ func TestDistributor_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime(t * func TestDistributor_Push_LabelNameValidation(t *testing.T) { t.Parallel() - inputLabels := labels.Labels{ - {Name: model.MetricNameLabel, Value: "foo"}, - {Name: "999.illegal", Value: "baz"}, - } + inputLabels := labels.FromStrings(model.MetricNameLabel, "foo", "999.illegal", "baz") ctx := user.InjectOrgID(context.Background(), "user") tests := map[string]struct { @@ -2235,8 +2235,8 @@ func BenchmarkDistributor_Push(b *testing.B) { metrics := make([]labels.Labels, numSeriesPerRequest) samples := make([]cortexpb.Sample, numSeriesPerRequest) + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) for i := 0; i < numSeriesPerRequest; i++ { - lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) for i := 0; i < 10; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2262,7 +2262,7 @@ func BenchmarkDistributor_Push(b *testing.B) { samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { - lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) for i := 0; i < 10; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2287,7 +2287,7 @@ func BenchmarkDistributor_Push(b *testing.B) { samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { - lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) for i := 1; i < 31; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2312,7 +2312,7 @@ func BenchmarkDistributor_Push(b *testing.B) { samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { - lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) for i := 0; i < 10; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2340,7 +2340,7 @@ func BenchmarkDistributor_Push(b *testing.B) { samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { - lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) for i := 0; i < 10; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2368,7 +2368,7 @@ func BenchmarkDistributor_Push(b *testing.B) { samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { - lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) for i := 0; i < 10; i++ { lbls.Set(fmt.Sprintf("name_%d", i), 
fmt.Sprintf("value_%d", i)) } @@ -2397,7 +2397,7 @@ func BenchmarkDistributor_Push(b *testing.B) { samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { - lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) for i := 0; i < 10; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2422,7 +2422,7 @@ func BenchmarkDistributor_Push(b *testing.B) { samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { - lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, "foo")) for i := 0; i < 10; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -2571,7 +2571,8 @@ func TestDistributor_MetricsForLabelMatchers_SingleSlowIngester(t *testing.T) { now := model.Now() for i := 0; i < 100; i++ { - req := mockWriteRequest([]labels.Labels{{{Name: labels.MetricName, Value: "test"}, {Name: "app", Value: "m"}, {Name: "uniq8", Value: strconv.Itoa(i)}}}, 1, now.Unix(), histogram) + + req := mockWriteRequest([]labels.Labels{labels.FromStrings(labels.MetricName, "test", "app", "m", "uniq8", strconv.Itoa(i))}, 1, now.Unix(), histogram) _, err := ds[0].Push(ctx, req) require.NoError(t, err) } @@ -2592,12 +2593,32 @@ func TestDistributor_MetricsForLabelMatchers(t *testing.T) { value int64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + { + lbls: labels.FromStrings(labels.MetricName, "test_1", "status", "200"), + value: 1, + timestamp: 100000, + }, + { + lbls: labels.FromStrings(labels.MetricName, "test_1", "status", "500"), + value: 1, + timestamp: 110000, + }, + { + lbls: labels.FromStrings(labels.MetricName, "test_2"), + value: 2, + timestamp: 200000, + }, // The two following series have the same FastFingerprint=e002a3a451262627 - {labels.Labels{{Name: labels.MetricName, Value: "fast_fingerprint_collision"}, {Name: "app", Value: "l"}, {Name: "uniq0", Value: "0"}, {Name: "uniq1", Value: "1"}}, 1, 300000}, - {labels.Labels{{Name: labels.MetricName, Value: "fast_fingerprint_collision"}, {Name: "app", Value: "m"}, {Name: "uniq0", Value: "1"}, {Name: "uniq1", Value: "1"}}, 1, 300000}, + { + lbls: labels.FromStrings(labels.MetricName, "fast_fingerprint_collision", "app", "l", "uniq0", "0", "uniq1", "1"), + value: 1, + timestamp: 300000, + }, + { + lbls: labels.FromStrings(labels.MetricName, "fast_fingerprint_collision", "app", "m", "uniq0", "1", "uniq1", "1"), + value: 1, + timestamp: 300000, + }, } tests := map[string]struct { @@ -2800,7 +2821,7 @@ func BenchmarkDistributor_MetricsForLabelMatchers(b *testing.B) { samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { - lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: fmt.Sprintf("foo_%d", i)}}) + lbls := labels.NewBuilder(labels.FromStrings(model.MetricNameLabel, fmt.Sprintf("foo_%d", i))) for i := 0; i < 10; i++ { lbls.Set(fmt.Sprintf("name_%d", i), fmt.Sprintf("value_%d", i)) } @@ -3789,7 +3810,9 @@ func TestDistributorValidation(t *testing.T) { // Test validation passes. 
{ metadata: []*cortexpb.MetricMetadata{{MetricFamilyName: "testmetric", Help: "a test metric.", Unit: "", Type: cortexpb.COUNTER}}, - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(now), Value: 1, @@ -3800,7 +3823,9 @@ func TestDistributorValidation(t *testing.T) { }, // Test validation fails for very old samples. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(past), Value: 2, @@ -3809,7 +3834,9 @@ func TestDistributorValidation(t *testing.T) { }, // Test validation fails for samples from the future. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(future), Value: 4, @@ -3819,7 +3846,9 @@ func TestDistributorValidation(t *testing.T) { // Test maximum labels names per series. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}, {Name: "foo2", Value: "bar2"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar", "foo2", "bar2"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(now), Value: 2, @@ -3829,8 +3858,8 @@ func TestDistributorValidation(t *testing.T) { // Test multiple validation fails return the first one. { labels: []labels.Labels{ - {{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}, {Name: "foo2", Value: "bar2"}}, - {{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}, + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar", "foo2", "bar2"), + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), }, samples: []cortexpb.Sample{ {TimestampMs: int64(now), Value: 2}, @@ -3841,7 +3870,9 @@ func TestDistributorValidation(t *testing.T) { // Test metadata validation fails { metadata: []*cortexpb.MetricMetadata{{MetricFamilyName: "", Help: "a test metric.", Unit: "", Type: cortexpb.COUNTER}}, - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, samples: []cortexpb.Sample{{ TimestampMs: int64(now), Value: 1, @@ -3850,7 +3881,9 @@ func TestDistributorValidation(t *testing.T) { }, // Test maximum labels names per series for histogram samples. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}, {Name: "foo2", Value: "bar2"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar", "foo2", "bar2"), + }, histograms: []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(int64(now), testHistogram), }, @@ -3858,7 +3891,9 @@ func TestDistributorValidation(t *testing.T) { }, // Test validation fails for very old histogram samples. 
{ - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, histograms: []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(int64(past), testHistogram), }, @@ -3866,7 +3901,9 @@ func TestDistributorValidation(t *testing.T) { }, // Test validation fails for histogram samples from the future. { - labels: []labels.Labels{{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}}}, + labels: []labels.Labels{ + labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar"), + }, histograms: []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(int64(future), testFloatHistogram), }, @@ -4004,28 +4041,16 @@ func TestDistributor_Push_Relabel(t *testing.T) { { name: "with no relabel config", inputSeries: []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, + labels.FromStrings("__name__", "foo", "cluster", "one"), }, + expectedSeries: labels.FromStrings("__name__", "foo", "cluster", "one"), }, { name: "with hardcoded replace", inputSeries: []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "two"}, + labels.FromStrings("__name__", "foo", "cluster", "one"), }, + expectedSeries: labels.FromStrings("__name__", "foo", "cluster", "two"), metricRelabelConfigs: []*relabel.Config{ { SourceLabels: []model.LabelName{"cluster"}, @@ -4039,19 +4064,10 @@ func TestDistributor_Push_Relabel(t *testing.T) { { name: "with drop action", inputSeries: []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - { - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "two"}, - }, - }, - expectedSeries: labels.Labels{ - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "two"}, + labels.FromStrings("__name__", "foo", "cluster", "one"), + labels.FromStrings("__name__", "bar", "cluster", "two"), }, + expectedSeries: labels.FromStrings("__name__", "bar", "cluster", "two"), metricRelabelConfigs: []*relabel.Config{ { SourceLabels: []model.LabelName{"__name__"}, @@ -4113,19 +4129,10 @@ func TestDistributor_Push_EmptyLabel(t *testing.T) { { name: "with empty label", inputSeries: []labels.Labels{ - { //Token 1106054332 without filtering - {Name: "__name__", Value: "foo"}, - {Name: "empty", Value: ""}, - }, - { //Token 3827924124 without filtering - {Name: "__name__", Value: "foo"}, - {Name: "changHash", Value: ""}, - }, - }, - expectedSeries: labels.Labels{ - //Token 1797290973 - {Name: "__name__", Value: "foo"}, + labels.FromStrings("__name__", "foo", "empty", ""), + labels.FromStrings("__name__", "foo", "changHash", ""), }, + expectedSeries: labels.FromStrings("__name__", "foo"), }, } @@ -4191,14 +4198,8 @@ func TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing } inputSeries := []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - { - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "two"}, - }, + labels.FromStrings("__name__", "foo", "cluster", "one"), + labels.FromStrings("__name__", "bar", "cluster", "two"), } var err error @@ -4248,22 +4249,10 @@ func 
TestDistributor_Push_RelabelDropWillExportMetricOfDroppedSamples(t *testing func TestDistributor_PushLabelSetMetrics(t *testing.T) { t.Parallel() inputSeries := []labels.Labels{ - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "one"}, - }, - { - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "one"}, - }, - { - {Name: "__name__", Value: "bar"}, - {Name: "cluster", Value: "two"}, - }, - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "three"}, - }, + labels.FromStrings("__name__", "foo", "cluster", "one"), + labels.FromStrings("__name__", "bar", "cluster", "one"), + labels.FromStrings("__name__", "bar", "cluster", "two"), + labels.FromStrings("__name__", "foo", "cluster", "three"), } var err error @@ -4301,14 +4290,8 @@ func TestDistributor_PushLabelSetMetrics(t *testing.T) { // Push more series. inputSeries = []labels.Labels{ - { - {Name: "__name__", Value: "baz"}, - {Name: "cluster", Value: "two"}, - }, - { - {Name: "__name__", Value: "foo"}, - {Name: "cluster", Value: "four"}, - }, + labels.FromStrings("__name__", "baz", "cluster", "two"), + labels.FromStrings("__name__", "foo", "cluster", "four"), } // Write the same request twice for different users. req = mockWriteRequest(inputSeries, 1, 1, false) diff --git a/pkg/engine/engine.go b/pkg/engine/engine.go index be22e4573ad..8b02607a094 100644 --- a/pkg/engine/engine.go +++ b/pkg/engine/engine.go @@ -10,6 +10,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" thanosengine "github.com/thanos-io/promql-engine/engine" + "github.com/thanos-io/promql-engine/logicalplan" ) type engineKeyType struct{} @@ -43,6 +44,12 @@ func GetEngineType(ctx context.Context) Type { return None } +type QueryEngine interface { + promql.QueryEngine + MakeInstantQueryFromPlan(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, root logicalplan.Node, ts time.Time, qs string) (promql.Query, error) + MakeRangeQueryFromPlan(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, root logicalplan.Node, start time.Time, end time.Time, interval time.Duration, qs string) (promql.Query, error) +} + type Engine struct { prometheusEngine *promql.Engine thanosEngine *thanosengine.Engine @@ -127,6 +134,53 @@ prom: return qf.prometheusEngine.NewRangeQuery(ctx, q, opts, qs, start, end, interval) } +func (qf *Engine) MakeInstantQueryFromPlan(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, root logicalplan.Node, ts time.Time, qs string) (promql.Query, error) { + if engineType := GetEngineType(ctx); engineType == Prometheus { + qf.engineSwitchQueriesTotal.WithLabelValues(string(Prometheus)).Inc() + } else if engineType == Thanos { + qf.engineSwitchQueriesTotal.WithLabelValues(string(Thanos)).Inc() + } + + if qf.thanosEngine != nil { + res, err := qf.thanosEngine.MakeInstantQueryFromPlan(ctx, q, fromPromQLOpts(opts), root, ts) + if err != nil { + if thanosengine.IsUnimplemented(err) { + // fall back to the Prometheus engine + qf.fallbackQueriesTotal.Inc() + goto prom + } + return nil, err + } + return res, nil + } + +prom: + return qf.prometheusEngine.NewInstantQuery(ctx, q, opts, qs, ts) +} + +func (qf *Engine) MakeRangeQueryFromPlan(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, root logicalplan.Node, start time.Time, end time.Time, interval time.Duration, qs string) (promql.Query, error) { + if engineType := GetEngineType(ctx); engineType == Prometheus { + qf.engineSwitchQueriesTotal.WithLabelValues(string(Prometheus)).Inc() + }
else if engineType == Thanos { + qf.engineSwitchQueriesTotal.WithLabelValues(string(Thanos)).Inc() + } + if qf.thanosEngine != nil { + res, err := qf.thanosEngine.MakeRangeQueryFromPlan(ctx, q, fromPromQLOpts(opts), root, start, end, interval) + if err != nil { + if thanosengine.IsUnimplemented(err) { + // fall back to the Prometheus engine + qf.fallbackQueriesTotal.Inc() + goto prom + } + return nil, err + } + return res, nil + } + +prom: + return qf.prometheusEngine.NewRangeQuery(ctx, q, opts, qs, start, end, interval) +} + func fromPromQLOpts(opts promql.QueryOpts) *thanosengine.QueryOpts { if opts == nil { return &thanosengine.QueryOpts{} diff --git a/pkg/engine/engine_test.go b/pkg/engine/engine_test.go index 7b270e6604a..db00be12673 100644 --- a/pkg/engine/engine_test.go +++ b/pkg/engine/engine_test.go @@ -14,8 +14,11 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/promqltest" - "github.com/stretchr/testify/require" "github.com/thanos-io/promql-engine/execution/parse" + "github.com/thanos-io/promql-engine/logicalplan" + "github.com/thanos-io/promql-engine/query" + + "github.com/stretchr/testify/require" utillog "github.com/cortexproject/cortex/pkg/util/log" ) @@ -123,3 +126,98 @@ func TestEngine_XFunctions(t *testing.T) { }) } } + +func TestEngine_With_Logical_Plan(t *testing.T) { + ctx := context.Background() + reg := prometheus.NewRegistry() + + now := time.Now() + start := time.Now().Add(-time.Minute * 5) + step := time.Minute + queryable := promqltest.LoadedStorage(t, "") + opts := promql.EngineOpts{ + Logger: utillog.GoKitLogToSlog(log.NewNopLogger()), + Reg: reg, + } + queryEngine := New(opts, ThanosEngineConfig{Enabled: true}, reg) + + range_lp := createTestLogicalPlan(t, start, now, step, "up") + instant_lp := createTestLogicalPlan(t, now, now, 0, "up") + + r := &http.Request{Header: http.Header{}} + r.Header.Set(TypeHeader, string(Thanos)) + ctx = AddEngineTypeToContext(ctx, r) + + // Case 1: Executing logical plan with thanos engine + _, _ = queryEngine.MakeInstantQueryFromPlan(ctx, queryable, nil, instant_lp.Root(), now, "up") + require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(` + # HELP cortex_thanos_engine_fallback_queries_total Total number of fallback queries due to not implementation in thanos engine + # TYPE cortex_thanos_engine_fallback_queries_total counter + cortex_thanos_engine_fallback_queries_total 0 + `), "cortex_thanos_engine_fallback_queries_total")) + + _, _ = queryEngine.MakeRangeQueryFromPlan(ctx, queryable, nil, range_lp.Root(), start, now, step, "up") + require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(` + # HELP cortex_thanos_engine_fallback_queries_total Total number of fallback queries due to not implementation in thanos engine + # TYPE cortex_thanos_engine_fallback_queries_total counter + cortex_thanos_engine_fallback_queries_total 0 + `), "cortex_thanos_engine_fallback_queries_total")) + + // Case 2: Logical plan that thanos engine cannot execute (so it will fall back to prometheus engine) + err_range_lp := createTestLogicalPlan(t, start, now, step, "up[10]") + _, _ = queryEngine.MakeRangeQueryFromPlan(ctx, queryable, nil, err_range_lp.Root(), start, now, step, "up") + require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(` + # HELP cortex_thanos_engine_fallback_queries_total Total number of fallback queries due to not implementation in thanos engine + # TYPE
cortex_thanos_engine_fallback_queries_total counter + cortex_thanos_engine_fallback_queries_total 1 + `), "cortex_thanos_engine_fallback_queries_total")) + + // Case 3: executing with prometheus engine + r.Header.Set(TypeHeader, string(Prometheus)) + ctx = AddEngineTypeToContext(ctx, r) + + _, _ = queryEngine.MakeInstantQueryFromPlan(ctx, queryable, nil, instant_lp.Root(), now, "up") + require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(` + # HELP cortex_engine_switch_queries_total Total number of queries where engine_type is set explicitly + # TYPE cortex_engine_switch_queries_total counter + cortex_engine_switch_queries_total{engine_type="prometheus"} 1 + cortex_engine_switch_queries_total{engine_type="thanos"} 3 + `), "cortex_engine_switch_queries_total")) + + _, _ = queryEngine.MakeRangeQueryFromPlan(ctx, queryable, nil, range_lp.Root(), start, now, step, "up") + require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(` + # HELP cortex_engine_switch_queries_total Total number of queries where engine_type is set explicitly + # TYPE cortex_engine_switch_queries_total counter + cortex_engine_switch_queries_total{engine_type="prometheus"} 2 + cortex_engine_switch_queries_total{engine_type="thanos"} 3 + `), "cortex_engine_switch_queries_total")) +} + +func createTestLogicalPlan(t *testing.T, startTime time.Time, endTime time.Time, step time.Duration, q string) logicalplan.Plan { + + qOpts := query.Options{ + Start: startTime, + End: startTime, + Step: 0, + StepsBatch: 10, + LookbackDelta: 0, + EnablePerStepStats: false, + } + + if step != 0 { + qOpts.End = endTime + qOpts.Step = step + } + + expr, err := parser.NewParser(q, parser.WithFunctions(parser.Functions)).ParseExpr() + require.NoError(t, err) + + planOpts := logicalplan.PlanOptions{ + DisableDuplicateLabelCheck: false, + } + + logicalPlan, err := logicalplan.NewFromAST(expr, &qOpts, planOpts) + require.NoError(t, err) + + return logicalPlan +} diff --git a/pkg/frontend/config.go b/pkg/frontend/config.go index a1109f213ad..03dff13980e 100644 --- a/pkg/frontend/config.go +++ b/pkg/frontend/config.go @@ -20,8 +20,7 @@ type CombinedFrontendConfig struct { FrontendV1 v1.Config `yaml:",inline"` FrontendV2 v2.Config `yaml:",inline"` - DownstreamURL string `yaml:"downstream_url"` - DistributedExecEnabled bool `yaml:"distributed_exec_enabled" doc:"hidden"` + DownstreamURL string `yaml:"downstream_url"` } func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) { @@ -30,7 +29,6 @@ func (cfg *CombinedFrontendConfig) RegisterFlags(f *flag.FlagSet) { cfg.FrontendV2.RegisterFlags(f) f.StringVar(&cfg.DownstreamURL, "frontend.downstream-url", "", "URL of downstream Prometheus.") - f.BoolVar(&cfg.DistributedExecEnabled, "frontend.distributed-exec-enabled", false, "Experimental: Enables distributed execution of queries by passing logical query plan fragments to downstream components.") } // InitFrontend initializes frontend (either V1 -- without scheduler, or V2 -- with scheduler) or no frontend at diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index 9001560b524..bcdd3628a0d 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -22,6 +22,8 @@ import ( "github.com/weaveworks/common/httpgrpc" "google.golang.org/grpc/status" + "github.com/cortexproject/cortex/pkg/engine" + "github.com/cortexproject/cortex/pkg/querier" querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" 
"github.com/cortexproject/cortex/pkg/querier/tenantfederation" "github.com/cortexproject/cortex/pkg/querier/tripperware" @@ -75,8 +77,6 @@ const ( limitBytesStoreGateway = `exceeded bytes limit` ) -var noopResponseSizeLimiter = limiter.NewResponseSizeLimiter(0) - // Config for a Handler. type HandlerConfig struct { LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"` @@ -332,7 +332,7 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // If the response status code is not 2xx, try to get the // error message from response body. if resp.StatusCode/100 != 2 { - body, err2 := tripperware.BodyBytes(resp, noopResponseSizeLimiter, f.log) + body, err2 := tripperware.BodyBytes(resp, f.log) if err2 == nil { err = httpgrpc.Errorf(resp.StatusCode, "%s", string(body)) } @@ -399,6 +399,14 @@ func (f *Handler) logQueryRequest(r *http.Request, queryString url.Values, sourc logMessage = append(logMessage, "user_agent", ua) } + if engineType := r.Header.Get(engine.TypeHeader); len(engineType) > 0 { + logMessage = append(logMessage, "engine_type", engineType) + } + + if blockStoreType := r.Header.Get(querier.BlockStoreTypeHeader); len(blockStoreType) > 0 { + logMessage = append(logMessage, "block_store_type", blockStoreType) + } + if acceptEncoding := r.Header.Get("Accept-Encoding"); len(acceptEncoding) > 0 { logMessage = append(logMessage, "accept_encoding", acceptEncoding) } @@ -511,6 +519,12 @@ func (f *Handler) reportQueryStats(r *http.Request, source, userID string, query if ua := r.Header.Get("User-Agent"); len(ua) > 0 { logMessage = append(logMessage, "user_agent", ua) } + if engineType := r.Header.Get(engine.TypeHeader); len(engineType) > 0 { + logMessage = append(logMessage, "engine_type", engineType) + } + if blockStoreType := r.Header.Get(querier.BlockStoreTypeHeader); len(blockStoreType) > 0 { + logMessage = append(logMessage, "block_store_type", blockStoreType) + } if priority, ok := stats.LoadPriority(); ok { logMessage = append(logMessage, "priority", priority) } diff --git a/pkg/frontend/transport/handler_test.go b/pkg/frontend/transport/handler_test.go index aa863230295..e507fbaed26 100644 --- a/pkg/frontend/transport/handler_test.go +++ b/pkg/frontend/transport/handler_test.go @@ -24,6 +24,8 @@ import ( "github.com/weaveworks/common/user" "google.golang.org/grpc/codes" + "github.com/cortexproject/cortex/pkg/engine" + "github.com/cortexproject/cortex/pkg/querier" querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/querier/tenantfederation" "github.com/cortexproject/cortex/pkg/querier/tripperware" @@ -501,6 +503,16 @@ func TestReportQueryStatsFormat(t *testing.T) { expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 user_agent=Grafana`, source: tripperware.SourceAPI, }, + "should include engine type": { + header: http.Header{http.CanonicalHeaderKey(engine.TypeHeader): []string{string(engine.Thanos)}}, + expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 
status_code=200 response_size=1000 samples_scanned=0 engine_type=thanos`, + source: tripperware.SourceAPI, + }, + "should include block store type": { + header: http.Header{http.CanonicalHeaderKey(querier.BlockStoreTypeHeader): []string{"parquet"}}, + expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 block_store_type=parquet`, + source: tripperware.SourceAPI, + }, "should include response error": { responseErr: errors.New("foo_err"), expectedLog: `level=error msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 response_series_count=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 samples_scanned=0 error=foo_err`, diff --git a/pkg/ingester/active_series_test.go b/pkg/ingester/active_series_test.go index 3d84d7570cc..fe7840f2576 100644 --- a/pkg/ingester/active_series_test.go +++ b/pkg/ingester/active_series_test.go @@ -29,15 +29,15 @@ func TestActiveSeries_UpdateSeries(t *testing.T) { assert.Equal(t, 0, c.ActiveNativeHistogram()) labels1Hash := fromLabelToLabels(ls1).Hash() labels2Hash := fromLabelToLabels(ls2).Hash() - c.UpdateSeries(ls1, labels1Hash, time.Now(), true, copyFn) + c.UpdateSeries(fromLabelToLabels(ls1), labels1Hash, time.Now(), true, copyFn) assert.Equal(t, 1, c.Active()) assert.Equal(t, 1, c.ActiveNativeHistogram()) - c.UpdateSeries(ls1, labels1Hash, time.Now(), true, copyFn) + c.UpdateSeries(fromLabelToLabels(ls1), labels1Hash, time.Now(), true, copyFn) assert.Equal(t, 1, c.Active()) assert.Equal(t, 1, c.ActiveNativeHistogram()) - c.UpdateSeries(ls2, labels2Hash, time.Now(), true, copyFn) + c.UpdateSeries(fromLabelToLabels(ls2), labels2Hash, time.Now(), true, copyFn) assert.Equal(t, 2, c.Active()) assert.Equal(t, 2, c.ActiveNativeHistogram()) } @@ -56,7 +56,7 @@ func TestActiveSeries_Purge(t *testing.T) { c := NewActiveSeries() for i := 0; i < len(series); i++ { - c.UpdateSeries(series[i], fromLabelToLabels(series[i]).Hash(), time.Unix(int64(i), 0), true, copyFn) + c.UpdateSeries(fromLabelToLabels(series[i]), fromLabelToLabels(series[i]).Hash(), time.Unix(int64(i), 0), true, copyFn) } c.Purge(time.Unix(int64(ttl+1), 0)) @@ -109,9 +109,7 @@ func BenchmarkActiveSeriesTest_single_series(b *testing.B) { } func benchmarkActiveSeriesConcurrencySingleSeries(b *testing.B, goroutines int) { - series := labels.Labels{ - {Name: "a", Value: "a"}, - } + series := labels.FromStrings("a", "a") c := NewActiveSeries() @@ -152,7 +150,7 @@ func BenchmarkActiveSeries_UpdateSeries(b *testing.B) { series := make([]labels.Labels, b.N) labelhash := make([]uint64, b.N) for s := 0; s < b.N; s++ { - series[s] = labels.Labels{{Name: name, Value: name + strconv.Itoa(s)}} + series[s] = labels.FromStrings(name, name+strconv.Itoa(s)) labelhash[s] = series[s].Hash() } @@ -182,7 +180,7 @@ func benchmarkPurge(b *testing.B, twice bool) { series := [numSeries]labels.Labels{} labelhash := [numSeries]uint64{} for s := 0; s < numSeries; s++ { - series[s] = labels.Labels{{Name: "a", Value: strconv.Itoa(s)}} + series[s] = labels.FromStrings("a", strconv.Itoa(s)) labelhash[s] = series[s].Hash() } diff --git 
a/pkg/ingester/errors.go b/pkg/ingester/errors.go index b982f6ce09d..7da2f51b73b 100644 --- a/pkg/ingester/errors.go +++ b/pkg/ingester/errors.go @@ -35,7 +35,7 @@ func (e *validationError) Error() string { if e.err == nil { return e.errorType } - if e.labels == nil { + if e.labels.IsEmpty() { return e.err.Error() } return fmt.Sprintf("%s for series %s", e.err.Error(), e.labels.String()) diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index dd2dc4f1666..c2dab4a54ec 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -33,7 +33,7 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/wlog" + "github.com/prometheus/prometheus/util/compression" "github.com/prometheus/prometheus/util/zeropool" "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -1147,15 +1147,17 @@ type extendedAppender interface { storage.GetRef } -func (i *Ingester) isLabelSetOutOfOrder(labels labels.Labels) bool { +func (i *Ingester) isLabelSetOutOfOrder(lbls labels.Labels) bool { last := "" - for _, l := range labels { + ooo := false + lbls.Range(func(l labels.Label) { if strings.Compare(last, l.Name) > 0 { - return true + ooo = true } last = l.Name - } - return false + }) + + return ooo } // Push adds metrics to a block @@ -1312,9 +1314,6 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte case errors.Is(cause, histogram.ErrHistogramCountMismatch): updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) - case errors.Is(cause, storage.ErrOOONativeHistogramsDisabled): - updateFirstPartial(func() error { return wrappedTSDBIngestErr(err, model.Time(timestampMs), lbls) }) - default: rollback = true } @@ -1461,7 +1460,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte Labels: cortexpb.FromLabelAdaptersToLabelsWithCopy(ex.Labels), } - if _, err = app.AppendExemplar(ref, nil, e); err == nil { + if _, err = app.AppendExemplar(ref, labels.EmptyLabels(), e); err == nil { succeededExemplarsCount++ continue } @@ -2518,9 +2517,9 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { } oooTimeWindow := i.limits.OutOfOrderTimeWindow(userID) - walCompressType := wlog.CompressionNone + walCompressType := compression.None if i.cfg.BlocksStorageConfig.TSDB.WALCompressionType != "" { - walCompressType = wlog.CompressionType(i.cfg.BlocksStorageConfig.TSDB.WALCompressionType) + walCompressType = i.cfg.BlocksStorageConfig.TSDB.WALCompressionType } // Create a new user database @@ -2542,7 +2541,6 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { EnableMemorySnapshotOnShutdown: i.cfg.BlocksStorageConfig.TSDB.MemorySnapshotOnShutdown, OutOfOrderTimeWindow: time.Duration(oooTimeWindow).Milliseconds(), OutOfOrderCapMax: i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMax, - EnableOOONativeHistograms: true, EnableOverlappingCompaction: false, // Always let compactors handle overlapped blocks, e.g. OOO blocks. EnableNativeHistograms: true, // Always enable Native Histograms. Gate keeping is done though a per-tenant limit at ingestion. BlockChunkQuerierFunc: i.blockChunkQuerierFunc(userID), @@ -2578,15 +2576,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { // Thanos shipper requires at least 1 external label to be set. 
For this reason, // we set the tenant ID as external label and we'll filter it out when reading // the series from the storage. - l := labels.Labels{ - { - Name: cortex_tsdb.TenantIDExternalLabel, - Value: userID, - }, { - Name: cortex_tsdb.IngesterIDExternalLabel, - Value: i.TSDBState.shipperIngesterID, - }, - } + l := labels.FromStrings(cortex_tsdb.TenantIDExternalLabel, userID, cortex_tsdb.IngesterIDExternalLabel, i.TSDBState.shipperIngesterID) // Create a new shipper for this database if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index c9948f9ec66..c59879a1d84 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -305,9 +305,9 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { // Create first series within the limits for _, set := range limits.LimitsPerLabelSet { lbls := []string{labels.MetricName, "metric_name"} - for _, lbl := range set.LabelSet { - lbls = append(lbls, lbl.Name, lbl.Value) - } + set.LabelSet.Range(func(l labels.Label) { + lbls = append(lbls, l.Name, l.Value) + }) for i := 0; i < set.Limits.MaxSeries; i++ { _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) @@ -330,9 +330,9 @@ func TestIngesterPerLabelsetLimitExceeded(t *testing.T) { // Should impose limits for _, set := range limits.LimitsPerLabelSet { lbls := []string{labels.MetricName, "metric_name"} - for _, lbl := range set.LabelSet { - lbls = append(lbls, lbl.Name, lbl.Value) - } + set.LabelSet.Range(func(l labels.Label) { + lbls = append(lbls, l.Name, l.Value) + }) _, err = ing.Push(ctx, cortexpb.ToWriteRequest( []labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpb.API)) httpResp, ok := httpgrpc.HTTPResponseFromError(err) @@ -759,7 +759,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) { userID := "1" // Series - labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}} + labels1 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar") sample1 := cortexpb.Sample{ TimestampMs: 0, Value: 1, @@ -768,7 +768,7 @@ func TestIngesterUserLimitExceeded(t *testing.T) { TimestampMs: 1, Value: 2, } - labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "biz"}} + labels3 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "biz") sample3 := cortexpb.Sample{ TimestampMs: 1, Value: 3, @@ -878,8 +878,8 @@ func TestIngesterUserLimitExceededForNativeHistogram(t *testing.T) { userID := "1" // Series - labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}} - labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "biz"}} + labels1 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar") + labels3 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "biz") sampleNativeHistogram1 := cortexpb.HistogramToHistogramProto(0, tsdbutil.GenerateTestHistogram(1)) sampleNativeHistogram2 := cortexpb.HistogramToHistogramProto(1, tsdbutil.GenerateTestHistogram(2)) sampleNativeHistogram3 := cortexpb.HistogramToHistogramProto(0, tsdbutil.GenerateTestHistogram(3)) @@ -958,13 +958,19 @@ func TestIngesterUserLimitExceededForNativeHistogram(t *testing.T) { func benchmarkData(nSeries int) (allLabels []labels.Labels, allSamples 
[]cortexpb.Sample) { for j := 0; j < nSeries; j++ { - labels := chunk.BenchmarkLabels.Copy() - for i := range labels { - if labels[i].Name == "cpu" { - labels[i].Value = fmt.Sprintf("cpu%02d", j) + lbls := chunk.BenchmarkLabels.Copy() + + builder := labels.NewBuilder(labels.EmptyLabels()) + lbls.Range(func(l labels.Label) { + val := l.Value + if l.Name == "cpu" { + val = fmt.Sprintf("cpu%02d", j) } - } - allLabels = append(allLabels, labels) + + builder.Set(l.Name, val) + }) + + allLabels = append(allLabels, builder.Labels()) allSamples = append(allSamples, cortexpb.Sample{TimestampMs: 0, Value: float64(j)}) } return @@ -978,7 +984,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) { limits.MaxLocalMetadataPerMetric = 1 userID := "1" - labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}} + labels1 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "bar") sample1 := cortexpb.Sample{ TimestampMs: 0, Value: 1, @@ -987,7 +993,7 @@ func TestIngesterMetricLimitExceeded(t *testing.T) { TimestampMs: 1, Value: 2, } - labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "biz"}} + labels3 := labels.FromStrings(labels.MetricName, "testmetric", "foo", "biz") sample3 := cortexpb.Sample{ TimestampMs: 1, Value: 3, @@ -2472,13 +2478,13 @@ func TestIngester_Push_OutOfOrderLabels(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "test-user") - outOfOrderLabels := labels.Labels{ + outOfOrderLabels := []cortexpb.LabelAdapter{ {Name: labels.MetricName, Value: "test_metric"}, {Name: "c", Value: "3"}, - {Name: "a", Value: "1"}, // Out of order (a comes before c) + {Name: "a", Value: "1"}, } - req, _ := mockWriteRequest(t, outOfOrderLabels, 1, 2) + req, _ := mockWriteRequest(t, cortexpb.FromLabelAdaptersToLabels(outOfOrderLabels), 1, 2) _, err = i.Push(ctx, req) require.Error(t, err) require.Contains(t, err.Error(), "out-of-order label set found") @@ -2599,7 +2605,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // Push a single time series to set the TSDB min time. currTimeReq := cortexpb.ToWriteRequest( - []labels.Labels{{{Name: labels.MetricName, Value: metricName}}}, + []labels.Labels{labels.FromStrings(labels.MetricName, metricName)}, []cortexpb.Sample{{Value: 1, TimestampMs: util.TimeToMillis(time.Now())}}, nil, nil, @@ -2624,7 +2630,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { // For each series, push a single sample with a timestamp greater than next pushes. 
for i := 0; i < numSeriesPerRequest; i++ { currTimeReq := cortexpb.ToWriteRequest( - []labels.Labels{{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}}}, + []labels.Labels{labels.FromStrings(labels.MetricName, metricName, "cardinality", strconv.Itoa(i))}, []cortexpb.Sample{{Value: 1, TimestampMs: sampleTimestamp + 1}}, nil, nil, @@ -2821,7 +2827,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { metrics := make([]labels.Labels, 0, scenario.numSeriesPerRequest) samples := make([]cortexpb.Sample, 0, scenario.numSeriesPerRequest) for i := 0; i < scenario.numSeriesPerRequest; i++ { - metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}}) + metrics = append(metrics, labels.FromStrings(labels.MetricName, metricName, "cardinality", strconv.Itoa(i))) samples = append(samples, cortexpb.Sample{Value: float64(i), TimestampMs: sampleTimestamp}) } @@ -2857,9 +2863,9 @@ func Test_Ingester_LabelNames(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, } expected := []string{"__name__", "route", "status"} @@ -2913,9 +2919,9 @@ func Test_Ingester_LabelValues(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, } expected := map[string][]string{ @@ -2991,7 +2997,7 @@ func Test_Ingester_LabelValue_MaxInflightQueryRequest(t *testing.T) { // Mock request ctx := user.InjectOrgID(context.Background(), "test") - wreq, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000) + wreq, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000) _, err = i.Push(ctx, wreq) require.NoError(t, err) @@ -3007,9 +3013,9 @@ func Test_Ingester_Query(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, + 
{labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, } tests := map[string]struct { @@ -3150,7 +3156,7 @@ func Test_Ingester_Query_MaxInflightQueryRequest(t *testing.T) { // Mock request ctx := user.InjectOrgID(context.Background(), "test") - wreq, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000) + wreq, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000) _, err = i.Push(ctx, wreq) require.NoError(t, err) @@ -3191,7 +3197,7 @@ func Test_Ingester_Query_ResourceThresholdBreached(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, } i, err := prepareIngesterWithBlocksStorage(t, defaultIngesterTestConfig(t), prometheus.NewRegistry()) @@ -3361,12 +3367,12 @@ func Test_Ingester_MetricsForLabelMatchers(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + {labels.FromStrings("__name__", "test_1", "status", "200"), 1, 100000}, + {labels.FromStrings("__name__", "test_1", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, // The two following series have the same FastFingerprint=e002a3a451262627 - {labels.Labels{{Name: labels.MetricName, Value: "collision"}, {Name: "app", Value: "l"}, {Name: "uniq0", Value: "0"}, {Name: "uniq1", Value: "1"}}, 1, 300000}, - {labels.Labels{{Name: labels.MetricName, Value: "collision"}, {Name: "app", Value: "m"}, {Name: "uniq0", Value: "1"}, {Name: "uniq1", Value: "1"}}, 1, 300000}, + {labels.FromStrings("__name__", "collision", "app", "l", "uniq0", "0", "uniq1", "1"), 1, 300000}, + {labels.FromStrings("__name__", "collision", "app", "m", "uniq0", "1", "uniq1", "1"), 1, 300000}, } tests := map[string]struct { @@ -3639,10 +3645,7 @@ func createIngesterWithSeries(t testing.TB, userID string, numSeries, numSamples samples := make([]cortexpb.Sample, 0, batchSize) for s := 0; s < batchSize; s++ { - metrics = append(metrics, labels.Labels{ - {Name: labels.MetricName, Value: fmt.Sprintf("test_%d", o+s)}, - }) - + metrics = append(metrics, labels.FromStrings("__name__", fmt.Sprintf("test_%d", o+s))) samples = append(samples, cortexpb.Sample{ TimestampMs: ts, Value: 1, @@ -3677,7 +3680,7 @@ func TestIngester_QueryStream(t *testing.T) { // Push series. 
ctx := user.InjectOrgID(context.Background(), userID) - lbls := labels.Labels{{Name: labels.MetricName, Value: "foo"}} + lbls := labels.FromStrings(labels.MetricName, "foo") var ( req *cortexpb.WriteRequest expectedResponseChunks *client.QueryStreamResponse @@ -3773,15 +3776,15 @@ func TestIngester_QueryStreamManySamplesChunks(t *testing.T) { } // 100k samples in chunks use about 154 KiB, - _, err = i.Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "1"}}, samples[0:100000])) + _, err = i.Push(ctx, writeRequestSingleSeries(labels.FromStrings("__name__", "foo", "l", "1"), samples[0:100000])) require.NoError(t, err) // 1M samples in chunks use about 1.51 MiB, - _, err = i.Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "2"}}, samples)) + _, err = i.Push(ctx, writeRequestSingleSeries(labels.FromStrings("__name__", "foo", "l", "2"), samples)) require.NoError(t, err) // 500k samples in chunks need 775 KiB, - _, err = i.Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: "3"}}, samples[0:500000])) + _, err = i.Push(ctx, writeRequestSingleSeries(labels.FromStrings("__name__", "foo", "l", "3"), samples[0:500000])) require.NoError(t, err) // Create a GRPC server used to query back the data. @@ -3969,7 +3972,7 @@ func benchmarkQueryStream(b *testing.B, samplesCount, seriesCount int) { } for s := 0; s < seriesCount; s++ { - _, err = i.Push(ctx, writeRequestSingleSeries(labels.Labels{{Name: labels.MetricName, Value: "foo"}, {Name: "l", Value: strconv.Itoa(s)}}, samples)) + _, err = i.Push(ctx, writeRequestSingleSeries(labels.FromStrings("__name__", "foo", "l", strconv.Itoa(s)), samples)) require.NoError(b, err) } @@ -4717,7 +4720,7 @@ func TestIngester_invalidSamplesDontChangeLastUpdateTime(t *testing.T) { sampleTimestamp := int64(model.Now()) { - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, sampleTimestamp) + req, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test"), 0, sampleTimestamp) _, err = i.Push(ctx, req) require.NoError(t, err) } @@ -4733,7 +4736,7 @@ func TestIngester_invalidSamplesDontChangeLastUpdateTime(t *testing.T) { // Push another sample to the same metric and timestamp, with different value. We expect to get error. 
{ - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 1, sampleTimestamp) + req, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test"), 1, sampleTimestamp) _, err = i.Push(ctx, req) require.Error(t, err) } @@ -5031,9 +5034,10 @@ func Test_Ingester_UserStats(t *testing.T) { value float64 timestamp int64 }{ - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {labels.Labels{{Name: labels.MetricName, Value: "test_2"}}, 2, 200000}, + + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000}, + {labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "500"), 1, 110000}, + {labels.FromStrings("__name__", "test_2"), 2, 200000}, } // Create ingester @@ -5077,11 +5081,11 @@ func Test_Ingester_AllUserStats(t *testing.T) { value float64 timestamp int64 }{ - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_2"}}, 2, 200000}, - {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_1"}}, 2, 200000}, - {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_2"}}, 2, 200000}, + {"user-1", labels.FromStrings("__name__", "test_1_1", "route", "get_user", "status", "200"), 1, 100000}, + {"user-1", labels.FromStrings("__name__", "test_1_1", "route", "get_user", "status", "500"), 1, 110000}, + {"user-1", labels.FromStrings("__name__", "test_1_2"), 2, 200000}, + {"user-2", labels.FromStrings("__name__", "test_2_1"), 2, 200000}, + {"user-2", labels.FromStrings("__name__", "test_2_2"), 2, 200000}, } // Create ingester @@ -5145,11 +5149,11 @@ func Test_Ingester_AllUserStatsHandler(t *testing.T) { value float64 timestamp int64 }{ - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000}, - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "500"}}, 1, 110000}, - {"user-1", labels.Labels{{Name: labels.MetricName, Value: "test_1_2"}}, 2, 200000}, - {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_1"}}, 2, 200000}, - {"user-2", labels.Labels{{Name: labels.MetricName, Value: "test_2_2"}}, 2, 200000}, + {"user-1", labels.FromStrings("__name__", "test_1_1", "route", "get_user", "status", "200"), 1, 100000}, + {"user-1", labels.FromStrings("__name__", "test_1_1", "route", "get_user", "status", "500"), 1, 110000}, + {"user-1", labels.FromStrings("__name__", "test_1_2"), 2, 200000}, + {"user-2", labels.FromStrings("__name__", "test_2_1"), 2, 200000}, + {"user-2", labels.FromStrings("__name__", "test_2_2"), 2, 200000}, } // Create ingester @@ -5424,7 +5428,7 @@ func verifyCompactedHead(t *testing.T, i *Ingester, expected bool) { func pushSingleSampleWithMetadata(t *testing.T, i *Ingester) { ctx := user.InjectOrgID(context.Background(), userID) - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, util.TimeToMillis(time.Now())) + req, _ 
:= mockWriteRequest(t, labels.FromStrings("__name__", "test"), 0, util.TimeToMillis(time.Now())) req.Metadata = append(req.Metadata, &cortexpb.MetricMetadata{MetricFamilyName: "test", Help: "a help for metric", Unit: "", Type: cortexpb.COUNTER}) _, err := i.Push(ctx, req) require.NoError(t, err) @@ -5432,7 +5436,7 @@ func pushSingleSampleWithMetadata(t *testing.T, i *Ingester) { func pushSingleSampleAtTime(t *testing.T, i *Ingester, ts int64) { ctx := user.InjectOrgID(context.Background(), userID) - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, ts) + req, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test"), 0, ts) _, err := i.Push(ctx, req) require.NoError(t, err) } @@ -5461,7 +5465,7 @@ func TestHeadCompactionOnStartup(t *testing.T) { db.DisableCompactions() head := db.Head() - l := labels.Labels{{Name: "n", Value: "v"}} + l := labels.FromStrings("n", "v") for i := 0; i < numFullChunks; i++ { // Not using db.Appender() as it checks for compaction. app := head.Appender(context.Background()) @@ -5571,7 +5575,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { // Push some data to create 3 blocks. ctx := user.InjectOrgID(context.Background(), userID) for j := int64(0); j < 5; j++ { - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, j*chunkRangeMilliSec) + req, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, j*chunkRangeMilliSec) _, err := i.Push(ctx, req) require.NoError(t, err) } @@ -5599,7 +5603,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { // Add more samples that could trigger another compaction and hence reload of blocks. for j := int64(5); j < 6; j++ { - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, j*chunkRangeMilliSec) + req, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, j*chunkRangeMilliSec) _, err := i.Push(ctx, req) require.NoError(t, err) } @@ -5627,7 +5631,7 @@ func TestIngesterNotDeleteUnshippedBlocks(t *testing.T) { // Add more samples that could trigger another compaction and hence reload of blocks. for j := int64(6); j < 7; j++ { - req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, j*chunkRangeMilliSec) + req, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, j*chunkRangeMilliSec) _, err := i.Push(ctx, req) require.NoError(t, err) } @@ -5674,7 +5678,7 @@ func TestIngesterPushErrorDuringForcedCompaction(t *testing.T) { require.True(t, db.casState(active, forceCompacting)) // Ingestion should fail with a 503. 
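A note on the recurring rewrite in these test hunks: recent Prometheus releases default to a string-backed labels.Labels (the slice-of-structs form is only available behind a build tag), so composite literals such as labels.Labels{{Name: ..., Value: ...}} no longer compile portably and every construction goes through a constructor. A minimal sketch of the equivalence, assuming only the Prometheus model/labels package:

	// Old, slice-only form (breaks with string-backed labels):
	//   l := labels.Labels{{Name: labels.MetricName, Value: "test"}, {Name: "route", Value: "get_user"}}
	// Portable form: variadic name/value pairs, sorted by name; panics on an odd argument count.
	l := labels.FromStrings(labels.MetricName, "test", "route", "get_user")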
- req, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test"}}, 0, util.TimeToMillis(time.Now())) + req, _ := mockWriteRequest(t, labels.FromStrings(labels.MetricName, "test"), 0, util.TimeToMillis(time.Now())) ctx := user.InjectOrgID(context.Background(), userID) _, err = i.Push(ctx, req) require.Equal(t, httpgrpc.Errorf(http.StatusServiceUnavailable, "%s", wrapWithUser(errors.New("forced compaction in progress"), userID).Error()), err) @@ -6608,7 +6612,7 @@ func Test_Ingester_QueryExemplar_MaxInflightQueryRequest(t *testing.T) { // Mock request ctx := user.InjectOrgID(context.Background(), "test") - wreq, _ := mockWriteRequest(t, labels.Labels{{Name: labels.MetricName, Value: "test_1"}, {Name: "route", Value: "get_user"}, {Name: "status", Value: "200"}}, 1, 100000) + wreq, _ := mockWriteRequest(t, labels.FromStrings("__name__", "test_1", "route", "get_user", "status", "200"), 1, 100000) _, err = i.Push(ctx, wreq) require.NoError(t, err) @@ -7149,7 +7153,7 @@ func CreateBlock(t *testing.T, ctx context.Context, dir string, mint, maxt int64 var ref storage.SeriesRef start := (maxt-mint)/2 + mint - _, err = app.Append(ref, labels.Labels{labels.Label{Name: "test_label", Value: "test_value"}}, start, float64(1)) + _, err = app.Append(ref, labels.FromStrings("test_label", "test_value"), start, float64(1)) require.NoError(t, err) err = app.Commit() require.NoError(t, err) diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go index fc05b9764bb..8160216f2a1 100644 --- a/pkg/ingester/metrics.go +++ b/pkg/ingester/metrics.go @@ -20,6 +20,13 @@ const ( const ( sampleMetricTypeFloat = "float" sampleMetricTypeHistogram = "histogram" + + typeSeries = "series" + typeSamples = "samples" + typeExemplars = "exemplars" + typeHistograms = "histograms" + typeMetadata = "metadata" + typeTombstones = "tombstones" ) type ingesterMetrics struct { @@ -330,6 +337,8 @@ type tsdbMetrics struct { tsdbWALTruncateTotal *prometheus.Desc tsdbWALTruncateDuration *prometheus.Desc tsdbWALCorruptionsTotal *prometheus.Desc + tsdbWALReplayUnknownRefsTotal *prometheus.Desc + tsdbWBLReplayUnknownRefsTotal *prometheus.Desc tsdbWALWritesFailed *prometheus.Desc tsdbHeadTruncateFail *prometheus.Desc tsdbHeadTruncateTotal *prometheus.Desc @@ -437,6 +446,14 @@ func newTSDBMetrics(r prometheus.Registerer) *tsdbMetrics { "cortex_ingester_tsdb_wal_corruptions_total", "Total number of TSDB WAL corruptions.", nil, nil), + tsdbWALReplayUnknownRefsTotal: prometheus.NewDesc( + "cortex_ingester_tsdb_wal_replay_unknown_refs_total", + "Total number of unknown series references encountered during TSDB WAL replay.", + []string{"type"}, nil), + tsdbWBLReplayUnknownRefsTotal: prometheus.NewDesc( + "cortex_ingester_tsdb_wbl_replay_unknown_refs_total", + "Total number of unknown series references encountered during TSDB WBL replay.", + []string{"type"}, nil), tsdbWALWritesFailed: prometheus.NewDesc( "cortex_ingester_tsdb_wal_writes_failed_total", "Total number of TSDB WAL writes that failed.", @@ -601,6 +618,8 @@ func (sm *tsdbMetrics) Describe(out chan<- *prometheus.Desc) { out <- sm.tsdbWALTruncateTotal out <- sm.tsdbWALTruncateDuration out <- sm.tsdbWALCorruptionsTotal + out <- sm.tsdbWALReplayUnknownRefsTotal + out <- sm.tsdbWBLReplayUnknownRefsTotal out <- sm.tsdbWALWritesFailed out <- sm.tsdbHeadTruncateFail out <- sm.tsdbHeadTruncateTotal @@ -659,6 +678,8 @@ func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfCounters(out, sm.tsdbWALTruncateTotal, "prometheus_tsdb_wal_truncations_total") 
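// The two aggregations added just below differ from their neighbours: the underlying
// Prometheus counters carry a "type" label (see the typeSeries ... typeTombstones
// constants above), so they are summed per label value with SendSumOfCountersWithLabels
// rather than collapsed into a single series by SendSumOfCounters. A PromQL query like
//   sum by (type) (rate(cortex_ingester_tsdb_wal_replay_unknown_refs_total[5m]))
// can then show which record type produced unknown series references during replay.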
data.SendSumOfSummaries(out, sm.tsdbWALTruncateDuration, "prometheus_tsdb_wal_truncate_duration_seconds") data.SendSumOfCounters(out, sm.tsdbWALCorruptionsTotal, "prometheus_tsdb_wal_corruptions_total") + data.SendSumOfCountersWithLabels(out, sm.tsdbWALReplayUnknownRefsTotal, "prometheus_tsdb_wal_replay_unknown_refs_total", "type") + data.SendSumOfCountersWithLabels(out, sm.tsdbWBLReplayUnknownRefsTotal, "prometheus_tsdb_wbl_replay_unknown_refs_total", "type") data.SendSumOfCounters(out, sm.tsdbWALWritesFailed, "prometheus_tsdb_wal_writes_failed_total") data.SendSumOfCounters(out, sm.tsdbHeadTruncateFail, "prometheus_tsdb_head_truncations_failed_total") data.SendSumOfCounters(out, sm.tsdbHeadTruncateTotal, "prometheus_tsdb_head_truncations_total") diff --git a/pkg/ingester/metrics_test.go b/pkg/ingester/metrics_test.go index b08b0ca8141..9c7d316b964 100644 --- a/pkg/ingester/metrics_test.go +++ b/pkg/ingester/metrics_test.go @@ -240,6 +240,18 @@ func TestTSDBMetrics(t *testing.T) { # TYPE cortex_ingester_tsdb_wal_corruptions_total counter cortex_ingester_tsdb_wal_corruptions_total 2.676537e+06 + # HELP cortex_ingester_tsdb_wal_replay_unknown_refs_total Total number of unknown series references encountered during TSDB WAL replay. + # TYPE cortex_ingester_tsdb_wal_replay_unknown_refs_total counter + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="series"} 300 + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="samples"} 303 + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="metadata"} 306 + + # HELP cortex_ingester_tsdb_wbl_replay_unknown_refs_total Total number of unknown series references encountered during TSDB WBL replay. + # TYPE cortex_ingester_tsdb_wbl_replay_unknown_refs_total counter + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="exemplars"} 300 + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="histograms"} 303 + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="tombstones"} 306 + # HELP cortex_ingester_tsdb_wal_writes_failed_total Total number of TSDB WAL writes that failed. # TYPE cortex_ingester_tsdb_wal_writes_failed_total counter cortex_ingester_tsdb_wal_writes_failed_total 1486965 @@ -505,6 +517,18 @@ func TestTSDBMetricsWithRemoval(t *testing.T) { # TYPE cortex_ingester_tsdb_wal_corruptions_total counter cortex_ingester_tsdb_wal_corruptions_total 2.676537e+06 + # HELP cortex_ingester_tsdb_wal_replay_unknown_refs_total Total number of unknown series references encountered during TSDB WAL replay. + # TYPE cortex_ingester_tsdb_wal_replay_unknown_refs_total counter + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="series"} 300 + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="samples"} 303 + cortex_ingester_tsdb_wal_replay_unknown_refs_total{type="metadata"} 306 + + # HELP cortex_ingester_tsdb_wbl_replay_unknown_refs_total Total number of unknown series references encountered during TSDB WBL replay. + # TYPE cortex_ingester_tsdb_wbl_replay_unknown_refs_total counter + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="exemplars"} 300 + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="histograms"} 303 + cortex_ingester_tsdb_wbl_replay_unknown_refs_total{type="tombstones"} 306 + # HELP cortex_ingester_tsdb_wal_writes_failed_total Total number of TSDB WAL writes that failed. 
# TYPE cortex_ingester_tsdb_wal_writes_failed_total counter cortex_ingester_tsdb_wal_writes_failed_total 1486965 @@ -883,6 +907,22 @@ func populateTSDBMetrics(base float64) *prometheus.Registry { }) snapshotReplayErrorTotal.Add(103) + walReplayUnknownRefsTotal := promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_replay_unknown_refs_total", + Help: "Total number of unknown series references encountered during WAL replay.", + }, []string{"type"}) + walReplayUnknownRefsTotal.WithLabelValues(typeSeries).Add(100) + walReplayUnknownRefsTotal.WithLabelValues(typeSamples).Add(101) + walReplayUnknownRefsTotal.WithLabelValues(typeMetadata).Add(102) + + wblReplayUnknownRefsTotal := promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wbl_replay_unknown_refs_total", + Help: "Total number of unknown series references encountered during WBL replay.", + }, []string{"type"}) + wblReplayUnknownRefsTotal.WithLabelValues(typeExemplars).Add(100) + wblReplayUnknownRefsTotal.WithLabelValues(typeHistograms).Add(101) + wblReplayUnknownRefsTotal.WithLabelValues(typeTombstones).Add(102) + oooHistogram := promauto.With(r).NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_sample_ooo_delta", Help: "Delta in seconds by which a sample is considered out of order (reported regardless of OOO time window and whether sample is accepted or not).", diff --git a/pkg/ingester/user_state.go b/pkg/ingester/user_state.go index 062f4d5e1bd..032c6907d8c 100644 --- a/pkg/ingester/user_state.go +++ b/pkg/ingester/user_state.go @@ -191,9 +191,9 @@ func getCardinalityForLimitsPerLabelSet(ctx context.Context, numSeries uint64, i } func getPostingForLabels(ctx context.Context, ir tsdb.IndexReader, lbls labels.Labels) (index.Postings, error) { - postings := make([]index.Postings, 0, len(lbls)) - for _, lbl := range lbls { - p, err := ir.Postings(ctx, lbl.Name, lbl.Value) + postings := make([]index.Postings, 0, lbls.Len()) + for name, value := range lbls.Map() { + p, err := ir.Postings(ctx, name, value) if err != nil { return nil, err } diff --git a/pkg/ingester/user_state_test.go b/pkg/ingester/user_state_test.go index a75b7e3e3e5..38be322854d 100644 --- a/pkg/ingester/user_state_test.go +++ b/pkg/ingester/user_state_test.go @@ -343,11 +343,11 @@ func (ir *mockIndexReader) Postings(ctx context.Context, name string, values ... 
func (ir *mockIndexReader) Symbols() index.StringIter { return nil } -func (ir *mockIndexReader) SortedLabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *mockIndexReader) SortedLabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { return nil, nil } -func (ir *mockIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { +func (ir *mockIndexReader) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, error) { return nil, nil } diff --git a/pkg/parquetconverter/converter.go b/pkg/parquetconverter/converter.go index 4eca20ac0a5..ccfcdd0da24 100644 --- a/pkg/parquetconverter/converter.go +++ b/pkg/parquetconverter/converter.go @@ -104,11 +104,11 @@ type Converter struct { func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.Ring.RegisterFlags(f) - f.StringVar(&cfg.DataDir, "parquet-converter.data-dir", "./data", "Data directory in which to cache blocks and process conversions.") - f.IntVar(&cfg.MetaSyncConcurrency, "parquet-converter.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from the long term storage.") - f.IntVar(&cfg.MaxRowsPerRowGroup, "parquet-converter.max-rows-per-row-group", 1e6, "Max number of rows per parquet row group.") - f.DurationVar(&cfg.ConversionInterval, "parquet-converter.conversion-interval", time.Minute, "The frequency at which the conversion job runs.") - f.BoolVar(&cfg.FileBufferEnabled, "parquet-converter.file-buffer-enabled", true, "Whether to enable buffering the writes in disk to reduce memory utilization.") + f.StringVar(&cfg.DataDir, "parquet-converter.data-dir", "./data", "Local directory path for caching TSDB blocks during parquet conversion.") + f.IntVar(&cfg.MetaSyncConcurrency, "parquet-converter.meta-sync-concurrency", 20, "Maximum concurrent goroutines for downloading block metadata from object storage.") + f.IntVar(&cfg.MaxRowsPerRowGroup, "parquet-converter.max-rows-per-row-group", 1e6, "Maximum number of time series per parquet row group. 
Larger values improve compression but may reduce performance during reads.") + f.DurationVar(&cfg.ConversionInterval, "parquet-converter.conversion-interval", time.Minute, "How often to check for new TSDB blocks to convert to parquet format.") + f.BoolVar(&cfg.FileBufferEnabled, "parquet-converter.file-buffer-enabled", true, "Enable disk-based write buffering to reduce memory consumption during parquet file generation.") } func NewConverter(cfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, blockRanges []int64, logger log.Logger, registerer prometheus.Registerer, limits *validation.Overrides) (*Converter, error) { diff --git a/pkg/parquetconverter/converter_test.go b/pkg/parquetconverter/converter_test.go index fc8f6e99805..70b6469a7ba 100644 --- a/pkg/parquetconverter/converter_test.go +++ b/pkg/parquetconverter/converter_test.go @@ -63,10 +63,7 @@ func TestConverter(t *testing.T) { ctx := context.Background() - lbls := labels.Labels{labels.Label{ - Name: "__name__", - Value: "test", - }} + lbls := labels.FromStrings("__name__", "test") blocks := []ulid.ULID{} // Create blocks @@ -254,10 +251,7 @@ func TestConverter_BlockConversionFailure(t *testing.T) { require.NoError(t, err) // Create test labels - lbls := labels.Labels{labels.Label{ - Name: "__name__", - Value: "test", - }} + lbls := labels.FromStrings("__name__", "test") // Create a real TSDB block dir := t.TempDir() @@ -312,10 +306,7 @@ func TestConverter_ShouldNotFailOnAccessDenyError(t *testing.T) { require.NoError(t, err) // Create test labels - lbls := labels.Labels{labels.Label{ - Name: "__name__", - Value: "test", - }} + lbls := labels.FromStrings("__name__", "test") // Create a real TSDB block dir := t.TempDir() @@ -366,11 +357,11 @@ type mockBucket struct { getFailure error } -func (m *mockBucket) Upload(ctx context.Context, name string, r io.Reader) error { +func (m *mockBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { if m.uploadFailure != nil { return m.uploadFailure } - return m.Bucket.Upload(ctx, name, r) + return m.Bucket.Upload(ctx, name, r, opts...) 
} func (m *mockBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { diff --git a/pkg/parquetconverter/metrics.go b/pkg/parquetconverter/metrics.go index 57ff4c065ee..2b3e80b0cfd 100644 --- a/pkg/parquetconverter/metrics.go +++ b/pkg/parquetconverter/metrics.go @@ -30,7 +30,7 @@ func newMetrics(reg prometheus.Registerer) *metrics { convertParquetBlockDelay: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ Name: "cortex_parquet_converter_convert_block_delay_minutes", Help: "Delay in minutes of Parquet block to be converted from the TSDB block being uploaded to object store", - Buckets: []float64{5, 10, 15, 20, 30, 45, 60, 80, 100, 120}, + Buckets: []float64{5, 10, 15, 20, 30, 45, 60, 80, 100, 120, 150, 180, 210, 240, 270, 300}, }), ownedUsers: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Name: "cortex_parquet_converter_users_owned", diff --git a/pkg/querier/blocks_finder_bucket_index.go b/pkg/querier/blocks_finder_bucket_index.go index 60f05d722e6..0f0977bf9ce 100644 --- a/pkg/querier/blocks_finder_bucket_index.go +++ b/pkg/querier/blocks_finder_bucket_index.go @@ -8,6 +8,7 @@ import ( "github.com/oklog/ulid/v2" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/labels" "github.com/thanos-io/objstore" "github.com/cortexproject/cortex/pkg/util/validation" @@ -49,7 +50,7 @@ func NewBucketIndexBlocksFinder(cfg BucketIndexBlocksFinderConfig, bkt objstore. } // GetBlocks implements BlocksFinder. -func (f *BucketIndexBlocksFinder) GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { +func (f *BucketIndexBlocksFinder) GetBlocks(ctx context.Context, userID string, minT, maxT int64, _ []*labels.Matcher) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { if f.State() != services.Running { return nil, nil, errBucketIndexBlocksFinderNotRunning } diff --git a/pkg/querier/blocks_finder_bucket_index_test.go b/pkg/querier/blocks_finder_bucket_index_test.go index 280939c16cb..99675d4748f 100644 --- a/pkg/querier/blocks_finder_bucket_index_test.go +++ b/pkg/querier/blocks_finder_bucket_index_test.go @@ -125,7 +125,7 @@ func TestBucketIndexBlocksFinder_GetBlocks(t *testing.T) { t.Run(testName, func(t *testing.T) { t.Parallel() - blocks, deletionMarks, err := finder.GetBlocks(ctx, userID, testData.minT, testData.maxT) + blocks, deletionMarks, err := finder.GetBlocks(ctx, userID, testData.minT, testData.maxT, nil) require.NoError(t, err) require.ElementsMatch(t, testData.expectedBlocks, blocks) require.Equal(t, testData.expectedMarks, deletionMarks) @@ -165,7 +165,7 @@ func BenchmarkBucketIndexBlocksFinder_GetBlocks(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - blocks, marks, err := finder.GetBlocks(ctx, userID, 100, 200) + blocks, marks, err := finder.GetBlocks(ctx, userID, 100, 200, nil) if err != nil || len(blocks) != 11 || len(marks) != 11 { b.Fail() } @@ -181,7 +181,7 @@ func TestBucketIndexBlocksFinder_GetBlocks_BucketIndexDoesNotExist(t *testing.T) bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) finder := prepareBucketIndexBlocksFinder(t, bkt) - blocks, deletionMarks, err := finder.GetBlocks(ctx, userID, 10, 20) + blocks, deletionMarks, err := finder.GetBlocks(ctx, userID, 10, 20, nil) require.NoError(t, err) assert.Empty(t, blocks) assert.Empty(t, deletionMarks) @@ -199,7 +199,7 @@ func TestBucketIndexBlocksFinder_GetBlocks_BucketIndexIsCorrupted(t *testing.T) // Upload a corrupted 
bucket index. require.NoError(t, bkt.Upload(ctx, path.Join(userID, bucketindex.IndexCompressedFilename), strings.NewReader("invalid}!"))) - _, _, err := finder.GetBlocks(ctx, userID, 10, 20) + _, _, err := finder.GetBlocks(ctx, userID, 10, 20, nil) require.Equal(t, bucketindex.ErrIndexCorrupted, err) } @@ -219,7 +219,7 @@ func TestBucketIndexBlocksFinder_GetBlocks_BucketIndexIsTooOld(t *testing.T) { UpdatedAt: time.Now().Add(-2 * time.Hour).Unix(), })) - _, _, err := finder.GetBlocks(ctx, userID, 10, 20) + _, _, err := finder.GetBlocks(ctx, userID, 10, 20, nil) require.Equal(t, errBucketIndexTooOld, err) } @@ -270,10 +270,10 @@ func TestBucketIndexBlocksFinder_GetBlocks_BucketIndexIsTooOldWithCustomerKeyErr t.Run(name, func(t *testing.T) { bucketindex.WriteSyncStatus(ctx, bkt, userID, tc.ss, log.NewNopLogger()) finder := prepareBucketIndexBlocksFinder(t, bkt) - _, _, err := finder.GetBlocks(ctx, userID, 10, 20) + _, _, err := finder.GetBlocks(ctx, userID, 10, 20, nil) require.Equal(t, tc.err, err) // Doing 2 times to return from the cache - _, _, err = finder.GetBlocks(ctx, userID, 10, 20) + _, _, err = finder.GetBlocks(ctx, userID, 10, 20, nil) require.Equal(t, tc.err, err) }) } @@ -315,7 +315,7 @@ func TestBucketIndexBlocksFinder_GetBlocks_KeyPermissionDenied(t *testing.T) { finder := prepareBucketIndexBlocksFinder(t, bkt) - _, _, err := finder.GetBlocks(context.Background(), userID, 0, 100) + _, _, err := finder.GetBlocks(context.Background(), userID, 0, 100, nil) expected := validation.AccessDeniedError("error") require.IsType(t, expected, err) } diff --git a/pkg/querier/blocks_finder_bucket_scan.go b/pkg/querier/blocks_finder_bucket_scan.go index 949ab5f6350..d047fd1421f 100644 --- a/pkg/querier/blocks_finder_bucket_scan.go +++ b/pkg/querier/blocks_finder_bucket_scan.go @@ -15,6 +15,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/model/labels" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" @@ -111,7 +112,7 @@ func NewBucketScanBlocksFinder(cfg BucketScanBlocksFinderConfig, usersScanner us // GetBlocks returns known blocks for userID containing samples within the range minT // and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending. -func (d *BucketScanBlocksFinder) GetBlocks(_ context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { +func (d *BucketScanBlocksFinder) GetBlocks(_ context.Context, userID string, minT, maxT int64, _ []*labels.Matcher) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { // We need to ensure the initial full bucket scan succeeded. 
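// Note: like the bucket-index finder, this scanner takes the new matchers argument only
// to satisfy the widened BlocksFinder interface and ignores it (hence the blank
// identifier in the signature above); presumably the parameter exists so a finder could
// pre-filter blocks by matchers, but neither implementation does so in this change.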
if d.State() != services.Running { return nil, nil, errBucketScanBlocksFinderNotRunning diff --git a/pkg/querier/blocks_finder_bucket_scan_test.go b/pkg/querier/blocks_finder_bucket_scan_test.go index 8393e4b12c6..b81f6d7f910 100644 --- a/pkg/querier/blocks_finder_bucket_scan_test.go +++ b/pkg/querier/blocks_finder_bucket_scan_test.go @@ -39,7 +39,7 @@ func TestBucketScanBlocksFinder_InitialScan(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, user1Block2.ULID, blocks[0].ID) @@ -48,7 +48,7 @@ func TestBucketScanBlocksFinder_InitialScan(t *testing.T) { assert.WithinDuration(t, time.Now(), blocks[1].GetUploadedAt(), 5*time.Second) assert.Empty(t, deletionMarks) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-2", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-2", 0, 30, nil) require.NoError(t, err) require.Equal(t, 1, len(blocks)) assert.Equal(t, user2Block1.ULID, blocks[0].ID) @@ -110,7 +110,7 @@ func TestBucketScanBlocksFinder_InitialScanFailure(t *testing.T) { require.NoError(t, s.StartAsync(ctx)) require.Error(t, s.AwaitRunning(ctx)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) assert.Equal(t, errBucketScanBlocksFinderNotRunning, err) assert.Nil(t, blocks) assert.Nil(t, deletionMarks) @@ -233,7 +233,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsNewUser(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 0, len(blocks)) assert.Empty(t, deletionMarks) @@ -245,7 +245,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsNewUser(t *testing.T) { // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -266,7 +266,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsNewBlock(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 1, len(blocks)) assert.Equal(t, block1.ULID, blocks[0].ID) @@ -278,7 +278,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsNewBlock(t *testing.T) { // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -298,7 +298,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsBlockMarkedForDeletion(t *testi require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -310,7 +310,7 @@ func 
TestBucketScanBlocksFinder_PeriodicScanFindsBlockMarkedForDeletion(t *testi // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -330,7 +330,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsDeletedBlock(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -342,7 +342,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsDeletedBlock(t *testing.T) { // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 1, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -359,7 +359,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsDeletedUser(t *testing.T) { require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -371,7 +371,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsDeletedUser(t *testing.T) { // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 30, nil) require.NoError(t, err) require.Equal(t, 0, len(blocks)) assert.Empty(t, deletionMarks) @@ -387,7 +387,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsUserWhichWasPreviouslyDeleted(t require.NoError(t, services.StartAndAwaitRunning(ctx, s)) - blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 40) + blocks, deletionMarks, err := s.GetBlocks(ctx, "user-1", 0, 40, nil) require.NoError(t, err) require.Equal(t, 2, len(blocks)) assert.Equal(t, block2.ULID, blocks[0].ID) @@ -399,7 +399,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsUserWhichWasPreviouslyDeleted(t // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 40) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 40, nil) require.NoError(t, err) require.Equal(t, 0, len(blocks)) assert.Empty(t, deletionMarks) @@ -409,7 +409,7 @@ func TestBucketScanBlocksFinder_PeriodicScanFindsUserWhichWasPreviouslyDeleted(t // Trigger a periodic sync require.NoError(t, s.scan(ctx)) - blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 40) + blocks, deletionMarks, err = s.GetBlocks(ctx, "user-1", 0, 40, nil) require.NoError(t, err) require.Equal(t, 1, len(blocks)) assert.Equal(t, block3.ULID, blocks[0].ID) @@ -506,7 +506,7 @@ func TestBucketScanBlocksFinder_GetBlocks(t *testing.T) { t.Run(testName, func(t *testing.T) { t.Parallel() - metas, deletionMarks, err := s.GetBlocks(ctx, "user-1", testData.minT, testData.maxT) + metas, deletionMarks, err := s.GetBlocks(ctx, "user-1", testData.minT, testData.maxT, nil) require.NoError(t, err) require.Equal(t, len(testData.expectedMetas), len(metas)) require.Equal(t, testData.expectedMarks, deletionMarks) diff 
--git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 98b58a83361..41cf8201634 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -86,7 +86,7 @@ type BlocksFinder interface { // GetBlocks returns known blocks for userID containing samples within the range minT // and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending. - GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) + GetBlocks(ctx context.Context, userID string, minT, maxT int64, matchers []*labels.Matcher) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) } // BlocksStoreClient is the interface that should be implemented by any client used @@ -373,7 +373,7 @@ func (q *blocksStoreQuerier) LabelNames(ctx context.Context, hints *storage.Labe return queriedBlocks, nil, retryableError } - if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, userID, queryFunc); err != nil { + if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, matchers, userID, queryFunc); err != nil { return nil, nil, err } @@ -416,7 +416,7 @@ func (q *blocksStoreQuerier) LabelValues(ctx context.Context, name string, hints return queriedBlocks, nil, retryableError } - if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, userID, queryFunc); err != nil { + if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, matchers, userID, queryFunc); err != nil { return nil, nil, err } @@ -472,7 +472,7 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec return queriedBlocks, nil, retryableError } - if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, userID, queryFunc); err != nil { + if err := q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, matchers, userID, queryFunc); err != nil { return storage.ErrSeriesSet(err) } @@ -485,8 +485,8 @@ func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.Selec resWarnings) } -func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64, userID string, - queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error)) error { +func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logger log.Logger, minT, maxT int64, matchers []*labels.Matcher, + userID string, queryFunc func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error, error)) error { // If queryStoreAfter is enabled, we do manipulate the query maxt to query samples up until // now - queryStoreAfter, because the most recent time range is covered by ingesters. This // optimization is particularly important for the blocks storage because can be used to skip @@ -508,7 +508,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg } // Find the list of blocks we need to query given the time range. 
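// With this change the matchers received by Select/LabelNames/LabelValues are forwarded
// through queryWithConsistencyCheck to the finder call below, alongside the (possibly
// queryStoreAfter-adjusted) time range.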
- knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, userID, minT, maxT) + knownBlocks, knownDeletionMarks, err := q.finder.GetBlocks(ctx, userID, minT, maxT, matchers) // if blocks were already discovered, we should use then if b, ok := ExtractBlocksFromContext(ctx); ok { diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index da0a5df2679..4070e0b7383 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -129,7 +129,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -155,7 +155,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -187,7 +187,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -218,8 +218,8 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 3, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -250,7 +250,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -258,7 +258,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, nil, ), mockSeriesResponse( - 
labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram3), @@ -294,7 +294,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -302,7 +302,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, ), mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram3), @@ -337,11 +337,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, }, @@ -367,7 +367,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -377,7 +377,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), @@ -408,7 +408,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -418,7 +418,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, 
[]cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), @@ -448,11 +448,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, }, @@ -478,7 +478,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), @@ -488,7 +488,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -520,7 +520,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), @@ -530,7 +530,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -561,16 +561,16 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), 
[]cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3}, }, @@ -631,16 +631,16 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3}, }, @@ -666,14 +666,14 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ 
cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), }, nil, ), mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -683,7 +683,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -694,7 +694,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block2}, &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), @@ -732,14 +732,14 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), }, ), mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -749,7 +749,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series1Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -760,7 +760,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }}: {block2}, &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponse( - labels.Labels{metricNameLabel, series2Label}, + labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), @@ -798,7 +798,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. 
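// Each map[BlocksStoreClient][]ulid.ULID entry in storeSetResponses models one client
// resolution attempt: the querier compares the block IDs in the gateways' hints against
// the blocks reported by the finder and retries with the next set of clients until every
// expected block has been queried, which is the consistency check these cases exercise.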
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -820,11 +820,11 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1), }}: {block1}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block2), }}: {block2}, }, @@ -846,25 +846,25 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1, block3}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block2), }}: {block2, block4}, }, // Second attempt returns 1 missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3, block4}, }, // Third attempt returns the last missing block. 
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block4), }}: {block4}, }, @@ -924,7 +924,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -949,7 +949,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -966,7 +966,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), }, nil), @@ -986,7 +986,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), }), @@ -1006,7 +1006,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + 
mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -1023,7 +1023,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), }, nil), @@ -1043,7 +1043,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), }), @@ -1066,25 +1066,25 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1, block3}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block2), }}: {block2, block4}, }, // Second attempt returns 1 missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3, block4}, }, // Third attempt returns the last missing block. 
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block4), }}: {block4}, }, @@ -1104,25 +1104,25 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // First attempt returns a client whose response does not include all expected blocks. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1, block3}, &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block2), }}: {block2, block4}, }, // Second attempt returns 1 missing block. map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block3), }}: {block3, block4}, }, // Third attempt returns the last missing block. 
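The recurring change in these test hunks is the move from labels.Labels{...} slice literals to labels.FromStrings, which takes name/value pairs and returns a sorted label set that works with any of Prometheus' internal label representations. A minimal standalone sketch of the two idioms (illustrative only, not part of the patch):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// labels.FromStrings accepts name/value pairs and sorts them, unlike the
	// old labels.Labels{{Name: ..., Value: ...}} slice literals, which only
	// compile when labels are backed by a plain slice.
	lbls := labels.FromStrings("__name__", "test_metric", "series", "1")

	// Direct indexing (lbls[i]) is not portable across representations;
	// Range and Len are the portable equivalents.
	lbls.Range(func(l labels.Label) {
		fmt.Printf("%s=%q\n", l.Name, l.Value)
	})
	fmt.Println("len:", lbls.Len()) // len: 2
}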
map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 3, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block4), }}: {block4}, }, @@ -1139,8 +1139,8 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -1157,12 +1157,12 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), }, nil, ), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, nil, + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT+1, testHistogram2), }, nil, @@ -1183,12 +1183,12 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), }, ), - mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, nil, nil, + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series2Label.Name, series2Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT+1, testFloatHistogram2), }, @@ -1209,7 +1209,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + 
mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -1226,7 +1226,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), }, nil), mockHintsResponse(block1, block2), @@ -1245,7 +1245,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), }), mockHintsResponse(block1, block2), @@ -1264,7 +1264,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }}: {block1, block2}, }, @@ -1281,7 +1281,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, []cortexpb.Histogram{ cortexpb.HistogramToHistogramProto(minT, testHistogram1), }, nil), mockHintsResponse(block1, block2), @@ -1300,7 +1300,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { storeSetResponses: []interface{}{ map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, nil, nil, []cortexpb.Histogram{ + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), nil, nil, []cortexpb.Histogram{ cortexpb.FloatHistogramToHistogramProto(minT, testFloatHistogram1), }), mockHintsResponse(block1, block2), @@ -1324,7 +1324,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - 
mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1353,7 +1353,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1382,7 +1382,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1408,7 +1408,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &storeGatewayClientMock{ remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }, mockedSeriesStreamErr: status.Error(codes.PermissionDenied, "PermissionDenied"), @@ -1418,7 +1418,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { &storeGatewayClientMock{ remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }, mockedSeriesStreamErr: status.Error(codes.PermissionDenied, "PermissionDenied"), @@ -1446,13 +1446,13 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { remoteAddr: "1.1.1.1", mockedSeriesStreamErr: status.Error(codes.Unavailable, "unavailable"), mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + 
mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1481,7 +1481,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1510,7 +1510,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1531,7 +1531,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), + mockSeriesResponse(labels.FromStrings(metricNameLabel.Name, metricNameLabel.Value, series1Label.Name, series1Label.Value), []cortexpb.Sample{{Value: 2, TimestampMs: minT}}, nil, nil), mockHintsResponse(block1), }}: {block1}, }, @@ -1568,7 +1568,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { reg := prometheus.NewPedanticRegistry() stores := &blocksStoreSetMock{mockedResponses: testData.storeSetResponses} finder := &blocksFinderMock{} - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) q := &blocksStoreQuerier{ minT: minT, @@ -1664,7 +1664,7 @@ func TestOverrideBlockDiscovery(t *testing.T) { } finder := &blocksFinderMock{} // return block 1 and 2 on finder but only query block 1 - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -2213,7 +2213,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { reg := prometheus.NewPedanticRegistry() stores := &blocksStoreSetMock{mockedResponses: testData.storeSetResponses} finder := &blocksFinderMock{} - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), 
testData.finderErr) q := &blocksStoreQuerier{ minT: minT, @@ -2321,7 +2321,7 @@ func TestBlocksStoreQuerier_SelectSortedShouldHonorQueryStoreAfter(t *testing.T) ctx := user.InjectOrgID(context.Background(), "user-1") finder := &blocksFinderMock{} - finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks(nil), map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) + finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything, mock.Anything).Return(bucketindex.Blocks(nil), map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) q := &blocksStoreQuerier{ minT: testData.queryMinT, @@ -2385,7 +2385,7 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { finder := &blocksFinderMock{ Service: services.NewIdleService(nil, nil), } - finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) @@ -2535,8 +2535,8 @@ type blocksFinderMock struct { mock.Mock } -func (m *blocksFinderMock) GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { - args := m.Called(ctx, userID, minT, maxT) +func (m *blocksFinderMock) GetBlocks(ctx context.Context, userID string, minT, maxT int64, matchers []*labels.Matcher) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { + args := m.Called(ctx, userID, minT, maxT, matchers) return args.Get(0).(bucketindex.Blocks), args.Get(1).(map[ulid.ULID]*bucketindex.BlockDeletionMark), args.Error(2) } @@ -2736,9 +2736,9 @@ func mockValuesHints(ids ...ulid.ULID) *types.Any { func namesFromSeries(series ...labels.Labels) []string { namesMap := map[string]struct{}{} for _, s := range series { - for _, l := range s { + s.Range(func(l labels.Label) { namesMap[l.Name] = struct{}{} - } + }) } names := []string{} @@ -2753,11 +2753,11 @@ func namesFromSeries(series ...labels.Labels) []string { func valuesFromSeries(name string, series ...labels.Labels) []string { valuesMap := map[string]struct{}{} for _, s := range series { - for _, l := range s { + s.Range(func(l labels.Label) { if l.Name == name { valuesMap[l.Value] = struct{}{} } - } + }) } values := []string{} diff --git a/pkg/querier/codec/protobuf_codec.go b/pkg/querier/codec/protobuf_codec.go index 64bfa2e3945..733e61c79bd 100644 --- a/pkg/querier/codec/protobuf_codec.go +++ b/pkg/querier/codec/protobuf_codec.go @@ -5,6 +5,7 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/util/stats" v1 "github.com/prometheus/prometheus/web/api/v1" @@ -101,16 +102,18 @@ func getMatrixSampleStreams(data *v1.QueryData) *[]tripperware.SampleStream { for i := 0; i < sampleStreamsLen; i++ { sampleStream := data.Result.(promql.Matrix)[i] - labelsLen := len(sampleStream.Metric) - var labels []cortexpb.LabelAdapter + labelsLen := sampleStream.Metric.Len() + var lbls []cortexpb.LabelAdapter if labelsLen > 0 { - labels = make([]cortexpb.LabelAdapter, labelsLen) - for j := 0; j < labelsLen; j++ { - labels[j] = cortexpb.LabelAdapter{ - Name: 
sampleStream.Metric[j].Name, - Value: sampleStream.Metric[j].Value, + lbls = make([]cortexpb.LabelAdapter, labelsLen) + j := 0 + sampleStream.Metric.Range(func(l labels.Label) { + lbls[j] = cortexpb.LabelAdapter{ + Name: l.Name, + Value: l.Value, } - } + j++ + }) } samplesLen := len(sampleStream.Floats) @@ -145,7 +148,7 @@ func getMatrixSampleStreams(data *v1.QueryData) *[]tripperware.SampleStream { } } } - sampleStreams[i] = tripperware.SampleStream{Labels: labels, Samples: samples, Histograms: histograms} + sampleStreams[i] = tripperware.SampleStream{Labels: lbls, Samples: samples, Histograms: histograms} } return &sampleStreams } @@ -156,18 +159,20 @@ func getVectorSamples(data *v1.QueryData, cortexInternal bool) *[]tripperware.Sa for i := 0; i < vectorSamplesLen; i++ { sample := data.Result.(promql.Vector)[i] - labelsLen := len(sample.Metric) - var labels []cortexpb.LabelAdapter + labelsLen := sample.Metric.Len() + var lbls []cortexpb.LabelAdapter if labelsLen > 0 { - labels = make([]cortexpb.LabelAdapter, labelsLen) - for j := 0; j < labelsLen; j++ { - labels[j] = cortexpb.LabelAdapter{ - Name: sample.Metric[j].Name, - Value: sample.Metric[j].Value, + lbls = make([]cortexpb.LabelAdapter, labelsLen) + j := 0 + sample.Metric.Range(func(l labels.Label) { + lbls[j] = cortexpb.LabelAdapter{ + Name: l.Name, + Value: l.Value, } - } + j++ + }) } - vectorSamples[i].Labels = labels + vectorSamples[i].Labels = lbls // Float samples only. if sample.H == nil { diff --git a/pkg/querier/codec/protobuf_codec_test.go b/pkg/querier/codec/protobuf_codec_test.go index c7fee0ecba5..44ebf6f1732 100644 --- a/pkg/querier/codec/protobuf_codec_test.go +++ b/pkg/querier/codec/protobuf_codec_test.go @@ -170,10 +170,7 @@ func TestProtobufCodec_Encode(t *testing.T) { ResultType: parser.ValueTypeMatrix, Result: promql.Matrix{ promql.Series{ - Metric: labels.Labels{ - {Name: "__name__", Value: "foo"}, - {Name: "__job__", Value: "bar"}, - }, + Metric: labels.FromStrings("__name__", "foo", "__job__", "bar"), Floats: []promql.FPoint{ {F: 0.14, T: 18555000}, {F: 2.9, T: 18556000}, @@ -192,8 +189,8 @@ func TestProtobufCodec_Encode(t *testing.T) { SampleStreams: []tripperware.SampleStream{ { Labels: []cortexpb.LabelAdapter{ - {Name: "__name__", Value: "foo"}, {Name: "__job__", Value: "bar"}, + {Name: "__name__", Value: "foo"}, }, Samples: []cortexpb.Sample{ {Value: 0.14, TimestampMs: 18555000}, diff --git a/pkg/querier/distributor_queryable_test.go b/pkg/querier/distributor_queryable_test.go index bb7e20b7ba9..d7313bdf396 100644 --- a/pkg/querier/distributor_queryable_test.go +++ b/pkg/querier/distributor_queryable_test.go @@ -191,13 +191,13 @@ func TestIngesterStreaming(t *testing.T) { require.True(t, seriesSet.Next()) series := seriesSet.At() - require.Equal(t, labels.Labels{{Name: "bar", Value: "baz"}}, series.Labels()) + require.Equal(t, labels.FromStrings("bar", "baz"), series.Labels()) chkIter := series.Iterator(nil) require.Equal(t, enc.ChunkValueType(), chkIter.Next()) require.True(t, seriesSet.Next()) series = seriesSet.At() - require.Equal(t, labels.Labels{{Name: "foo", Value: "bar"}}, series.Labels()) + require.Equal(t, labels.FromStrings("foo", "bar"), series.Labels()) chkIter = series.Iterator(chkIter) require.Equal(t, enc.ChunkValueType(), chkIter.Next()) diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index b1b34149096..dbbdbfc9b59 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ 
-176,6 +176,9 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable, engine promql.QueryE false, false, false, + false, + 5*time.Minute, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/pkg/querier/parquet_queryable.go b/pkg/querier/parquet_queryable.go index 8d7fe7152ed..e5bab841604 100644 --- a/pkg/querier/parquet_queryable.go +++ b/pkg/querier/parquet_queryable.go @@ -3,16 +3,17 @@ package querier import ( "context" "fmt" + "strings" "time" "github.com/go-kit/log" - "github.com/go-kit/log/level" lru "github.com/hashicorp/golang-lru/v2" "github.com/opentracing/opentracing-go" "github.com/parquet-go/parquet-go" "github.com/pkg/errors" "github.com/prometheus-community/parquet-common/queryable" "github.com/prometheus-community/parquet-common/schema" + "github.com/prometheus-community/parquet-common/search" parquet_storage "github.com/prometheus-community/parquet-common/storage" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -20,17 +21,18 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" + "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/thanos-io/thanos/pkg/strutil" "golang.org/x/sync/errgroup" "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/querysharding" "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" - util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/multierror" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" @@ -49,7 +51,9 @@ const ( parquetBlockStore blockStoreType = "parquet" ) -var validBlockStoreTypes = []blockStoreType{tsdbBlockStore, parquetBlockStore} +var ( + validBlockStoreTypes = []blockStoreType{tsdbBlockStore, parquetBlockStore} +) // AddBlockStoreTypeToContext checks HTTP header and set block store key to context if // relevant header is set. 
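The shard-matcher plumbing added further down in this file follows the standard Go context-key pattern: an unexported key type, an inject helper wrapping context.WithValue, and an extract helper that type-asserts the value back out. A self-contained sketch of the pattern (the identifiers here are illustrative, not the patch's):

package main

import (
	"context"
	"fmt"
)

// ctxKey is unexported so keys defined by other packages cannot collide.
type ctxKey int

const blockStoreTypeCtxKey ctxKey = 0

func injectBlockStoreType(ctx context.Context, t string) context.Context {
	return context.WithValue(ctx, blockStoreTypeCtxKey, t)
}

func extractBlockStoreType(ctx context.Context) (string, bool) {
	if v := ctx.Value(blockStoreTypeCtxKey); v != nil {
		return v.(string), true
	}
	return "", false
}

func main() {
	ctx := injectBlockStoreType(context.Background(), "parquet")
	if t, ok := extractBlockStoreType(ctx); ok {
		fmt.Println("block store type:", t) // block store type: parquet
	}
}

The patch applies the same shape to a Thanos *storepb.ShardMatcher, which materializedLabelsFilterCallback later pulls back out of the context.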
@@ -90,6 +94,7 @@ func newParquetQueryableFallbackMetrics(reg prometheus.Registerer) *parquetQuery type parquetQueryableWithFallback struct { services.Service + fallbackDisabled bool queryStoreAfter time.Duration parquetQueryable storage.Queryable blockStorageQueryable *BlocksStoreQueryable @@ -153,6 +158,7 @@ func NewParquetQueryable( userID, _ := tenant.TenantID(ctx) return int64(limits.ParquetMaxFetchedDataBytes(userID)) }), + queryable.WithMaterializedLabelsFilterCallback(materializedLabelsFilterCallback), queryable.WithMaterializedSeriesCallback(func(ctx context.Context, cs []storage.ChunkSeries) error { queryLimiter := limiter.QueryLimiterFromContextWithFallback(ctx) lbls := make([][]cortexpb.LabelAdapter, 0, len(cs)) @@ -253,6 +259,7 @@ func NewParquetQueryable( limits: limits, logger: logger, defaultBlockStoreType: blockStoreType(config.ParquetQueryableDefaultBlockStore), + fallbackDisabled: config.ParquetQueryableFallbackDisabled, } p.Service = services.NewBasicService(p.starting, p.running, p.stopping) @@ -305,6 +312,7 @@ func (p *parquetQueryableWithFallback) Querier(mint, maxt int64) (storage.Querie limits: p.limits, logger: p.logger, defaultBlockStoreType: p.defaultBlockStoreType, + fallbackDisabled: p.fallbackDisabled, }, nil } @@ -327,13 +335,15 @@ type parquetQuerierWithFallback struct { logger log.Logger defaultBlockStoreType blockStoreType + + fallbackDisabled bool } func (q *parquetQuerierWithFallback) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "parquetQuerierWithFallback.LabelValues") defer span.Finish() - remaining, parquet, err := q.getBlocks(ctx, q.minT, q.maxT) + remaining, parquet, err := q.getBlocks(ctx, q.minT, q.maxT, matchers) defer q.incrementOpsMetric("LabelValues", remaining, parquet) if err != nil { return nil, nil, err @@ -349,6 +359,10 @@ func (q *parquetQuerierWithFallback) LabelValues(ctx context.Context, name strin rAnnotations annotations.Annotations ) + if len(remaining) > 0 && q.fallbackDisabled { + return nil, nil, parquetConsistencyCheckError(remaining) + } + if len(parquet) > 0 { res, ann, qErr := q.parquetQuerier.LabelValues(InjectBlocksIntoContext(ctx, parquet...), name, hints, matchers...) if qErr != nil { @@ -382,7 +396,7 @@ func (q *parquetQuerierWithFallback) LabelNames(ctx context.Context, hints *stor span, ctx := opentracing.StartSpanFromContext(ctx, "parquetQuerierWithFallback.LabelNames") defer span.Finish() - remaining, parquet, err := q.getBlocks(ctx, q.minT, q.maxT) + remaining, parquet, err := q.getBlocks(ctx, q.minT, q.maxT, matchers) defer q.incrementOpsMetric("LabelNames", remaining, parquet) if err != nil { return nil, nil, err @@ -399,6 +413,10 @@ func (q *parquetQuerierWithFallback) LabelNames(ctx context.Context, hints *stor rAnnotations annotations.Annotations ) + if len(remaining) > 0 && q.fallbackDisabled { + return nil, nil, parquetConsistencyCheckError(remaining) + } + if len(parquet) > 0 { res, ann, qErr := q.parquetQuerier.LabelNames(InjectBlocksIntoContext(ctx, parquet...), hints, matchers...) 
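With fallback disabled, any block in range that lacks a parquet conversion marker now fails the query with an error naming the missing blocks, instead of silently querying the store gateway. A rough standalone sketch of that decision, with block and splitBlocks as simplified stand-ins for bucketindex.Block and the getBlocks split:

package main

import (
	"fmt"
	"strings"
)

// block is a simplified stand-in for bucketindex.Block.
type block struct {
	ID         string
	HasParquet bool // true when a parquet conversion marker exists
}

// splitBlocks mirrors the getBlocks split: blocks with a parquet marker go to
// the parquet querier; the rest would normally fall back to the store gateway.
func splitBlocks(blocks []*block) (parquet, remaining []*block) {
	for _, b := range blocks {
		if b.HasParquet {
			parquet = append(parquet, b)
		} else {
			remaining = append(remaining, b)
		}
	}
	return parquet, remaining
}

func main() {
	blocks := []*block{
		{ID: "01BLOCK1", HasParquet: true},
		{ID: "01BLOCK2", HasParquet: false},
	}
	parquet, remaining := splitBlocks(blocks)

	fallbackDisabled := true
	if len(remaining) > 0 && fallbackDisabled {
		ids := make([]string, len(remaining))
		for i, b := range remaining {
			ids[i] = b.ID
		}
		// Same shape as parquetConsistencyCheckError in the patch.
		fmt.Printf("consistency check failed because some blocks were not available as parquet files: %s\n", strings.Join(ids, " "))
		return
	}
	fmt.Println("querying", len(parquet), "parquet blocks")
}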
if qErr != nil { @@ -432,17 +450,11 @@ func (q *parquetQuerierWithFallback) Select(ctx context.Context, sortSeries bool span, ctx := opentracing.StartSpanFromContext(ctx, "parquetQuerierWithFallback.Select") defer span.Finish() - userID, err := tenant.TenantID(ctx) + newMatchers, shardMatcher, err := querysharding.ExtractShardingMatchers(matchers) if err != nil { return storage.ErrSeriesSet(err) } - - if q.limits.QueryVerticalShardSize(userID) > 1 { - uLogger := util_log.WithUserID(userID, q.logger) - level.Warn(uLogger).Log("msg", "parquet queryable enabled but vertical sharding > 1. Falling back to the block storage") - - return q.blocksStoreQuerier.Select(ctx, sortSeries, h, matchers...) - } + defer shardMatcher.Close() hints := storage.SelectHints{ Start: q.minT, @@ -463,13 +475,18 @@ func (q *parquetQuerierWithFallback) Select(ctx context.Context, sortSeries bool return storage.EmptySeriesSet() } - remaining, parquet, err := q.getBlocks(ctx, mint, maxt) + remaining, parquet, err := q.getBlocks(ctx, mint, maxt, matchers) defer q.incrementOpsMetric("Select", remaining, parquet) if err != nil { return storage.ErrSeriesSet(err) } + if len(remaining) > 0 && q.fallbackDisabled { + err = parquetConsistencyCheckError(remaining) + return storage.ErrSeriesSet(err) + } + // Lets sort the series to merge if len(parquet) > 0 && len(remaining) > 0 { sortSeries = true @@ -483,7 +500,11 @@ func (q *parquetQuerierWithFallback) Select(ctx context.Context, sortSeries bool go func() { span, _ := opentracing.StartSpanFromContext(ctx, "parquetQuerier.Select") defer span.Finish() - p <- q.parquetQuerier.Select(InjectBlocksIntoContext(ctx, parquet...), sortSeries, &hints, matchers...) + parquetCtx := InjectBlocksIntoContext(ctx, parquet...) + if shardMatcher != nil { + parquetCtx = injectShardMatcherIntoContext(parquetCtx, shardMatcher) + } + p <- q.parquetQuerier.Select(parquetCtx, sortSeries, &hints, newMatchers...) 
}() } @@ -526,7 +547,7 @@ func (q *parquetQuerierWithFallback) Close() error { return mErr.Err() } -func (q *parquetQuerierWithFallback) getBlocks(ctx context.Context, minT, maxT int64) ([]*bucketindex.Block, []*bucketindex.Block, error) { +func (q *parquetQuerierWithFallback) getBlocks(ctx context.Context, minT, maxT int64, matchers []*labels.Matcher) ([]*bucketindex.Block, []*bucketindex.Block, error) { userID, err := tenant.TenantID(ctx) if err != nil { return nil, nil, err @@ -538,7 +559,7 @@ func (q *parquetQuerierWithFallback) getBlocks(ctx context.Context, minT, maxT i return nil, nil, nil } - blocks, _, err := q.finder.GetBlocks(ctx, userID, minT, maxT) + blocks, _, err := q.finder.GetBlocks(ctx, userID, minT, maxT, matchers) if err != nil { return nil, nil, err } @@ -570,6 +591,26 @@ func (q *parquetQuerierWithFallback) incrementOpsMetric(method string, remaining } } +type shardMatcherLabelsFilter struct { + shardMatcher *storepb.ShardMatcher +} + +func (f *shardMatcherLabelsFilter) Filter(lbls labels.Labels) bool { + return f.shardMatcher.MatchesLabels(lbls) +} + +func (f *shardMatcherLabelsFilter) Close() { + f.shardMatcher.Close() +} + +func materializedLabelsFilterCallback(ctx context.Context, _ *storage.SelectHints) (search.MaterializedLabelsFilter, bool) { + shardMatcher, exists := extractShardMatcherFromContext(ctx) + if !exists || !shardMatcher.IsSharded() { + return nil, false + } + return &shardMatcherLabelsFilter{shardMatcher: shardMatcher}, true +} + type cacheInterface[T any] interface { Get(path string) T Set(path string, reader T) @@ -655,3 +696,31 @@ func (n noopCache[T]) Get(_ string) (r T) { func (n noopCache[T]) Set(_ string, _ T) { } + +var ( + shardMatcherCtxKey contextKey = 1 +) + +func injectShardMatcherIntoContext(ctx context.Context, sm *storepb.ShardMatcher) context.Context { + return context.WithValue(ctx, shardMatcherCtxKey, sm) +} + +func extractShardMatcherFromContext(ctx context.Context) (*storepb.ShardMatcher, bool) { + if sm := ctx.Value(shardMatcherCtxKey); sm != nil { + return sm.(*storepb.ShardMatcher), true + } + + return nil, false +} + +func parquetConsistencyCheckError(blocks []*bucketindex.Block) error { + return fmt.Errorf("consistency check failed because some blocks were not available as parquet files: %s", strings.Join(convertBlockULIDToString(blocks), " ")) +} + +func convertBlockULIDToString(blocks []*bucketindex.Block) []string { + res := make([]string, len(blocks)) + for idx, b := range blocks { + res[idx] = b.ID.String() + } + return res +} diff --git a/pkg/querier/parquet_queryable_test.go b/pkg/querier/parquet_queryable_test.go index 13cdde6cd57..70baa010b66 100644 --- a/pkg/querier/parquet_queryable_test.go +++ b/pkg/querier/parquet_queryable_test.go @@ -5,6 +5,8 @@ import ( "fmt" "math/rand" "path/filepath" + "strconv" + "sync" "testing" "time" @@ -52,7 +54,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ - mockSeriesResponse(labels.Labels{{Name: labels.MetricName, Value: "fromSg"}}, []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockSeriesResponse(labels.FromStrings(labels.MetricName, "fromSg"), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), mockHintsResponse(block1, block2), }, mockedLabelNamesResponse: &storepb.LabelNamesResponse{ @@ -75,49 +77,6 @@ func 
TestParquetQueryableFallbackLogic(t *testing.T) { } ctx := user.InjectOrgID(context.Background(), "user-1") - t.Run("should fallback when vertical sharding is enabled", func(t *testing.T) { - finder := &blocksFinderMock{} - stores := createStore() - - q := &blocksStoreQuerier{ - minT: minT, - maxT: maxT, - finder: finder, - stores: stores, - consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), - logger: log.NewNopLogger(), - metrics: newBlocksStoreQueryableMetrics(prometheus.NewPedanticRegistry()), - limits: &blocksStoreLimitsMock{}, - - storeGatewayConsistencyCheckMaxAttempts: 3, - } - - mParquetQuerier := &mockParquetQuerier{} - pq := &parquetQuerierWithFallback{ - minT: minT, - maxT: maxT, - finder: finder, - blocksStoreQuerier: q, - parquetQuerier: mParquetQuerier, - metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), - limits: defaultOverrides(t, 4), - logger: log.NewNopLogger(), - defaultBlockStoreType: parquetBlockStore, - } - - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ - &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, - &bucketindex.Block{ID: block2, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, - }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) - - t.Run("select", func(t *testing.T) { - ss := pq.Select(ctx, true, nil, matchers...) - require.NoError(t, ss.Err()) - require.Len(t, stores.queriedBlocks, 2) - require.Len(t, mParquetQuerier.queriedBlocks, 0) - }) - }) - t.Run("should fallback all blocks", func(t *testing.T) { finder := &blocksFinderMock{} stores := createStore() @@ -149,7 +108,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { defaultBlockStoreType: parquetBlockStore, } - finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1}, &bucketindex.Block{ID: block2}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -210,7 +169,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { defaultBlockStoreType: parquetBlockStore, } - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, &bucketindex.Block{ID: block2}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -279,7 +238,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { defaultBlockStoreType: parquetBlockStore, } - finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, &bucketindex.Block{ID: block2, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -353,7 +312,7 @@ func TestParquetQueryableFallbackLogic(t *testing.T) { defaultBlockStoreType: tsdbBlockStore, } - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, &bucketindex.Block{ID: block2, 
Parquet: &parquet.ConverterMarkMeta{Version: 1}}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -457,10 +416,7 @@ func TestParquetQueryable_Limits(t *testing.T) { seriesCount := 100 lbls := make([]labels.Labels, seriesCount) for i := 0; i < seriesCount; i++ { - lbls[i] = labels.Labels{ - {Name: labels.MetricName, Value: metricName}, - {Name: "series", Value: fmt.Sprintf("%d", i)}, - } + lbls[i] = labels.FromStrings(labels.MetricName, metricName, "series", strconv.Itoa(i)) } rnd := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -477,7 +433,7 @@ func TestParquetQueryable_Limits(t *testing.T) { // Create a mocked bucket index blocks finder finder := &blocksFinderMock{} - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(bucketindex.Blocks{ + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT, mock.Anything).Return(bucketindex.Blocks{ &bucketindex.Block{ID: blockID, Parquet: &parquet.ConverterMarkMeta{Version: parquet.CurrentVersion}}, }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) @@ -541,7 +497,7 @@ func TestParquetQueryable_Limits(t *testing.T) { return validation.NewOverrides(limits, nil) }(), queryLimiter: limiter.NewQueryLimiter(0, 0, 0, 1), - expectedErr: fmt.Errorf("error materializing labels: materializer failed to materialize columns: would fetch too many data bytes: resource exhausted (used 1)"), + expectedErr: fmt.Errorf("error materializing labels: failed to get column indexes: failed to materialize column indexes: would fetch too many data bytes: resource exhausted (used 1)"), }, "limits within bounds - should succeed": { limits: func() *validation.Overrides { @@ -671,3 +627,244 @@ func (m *mockParquetQuerier) Reset() { func (mockParquetQuerier) Close() error { return nil } + +func TestMaterializedLabelsFilterCallback(t *testing.T) { + tests := []struct { + name string + setupContext func() context.Context + expectedFilterReturned bool + expectedCallbackReturned bool + }{ + { + name: "no shard matcher in context", + setupContext: func() context.Context { + return context.Background() + }, + expectedFilterReturned: false, + expectedCallbackReturned: false, + }, + { + name: "shard matcher exists but is not sharded", + setupContext: func() context.Context { + // Create a ShardInfo with TotalShards = 0 (not sharded) + shardInfo := &storepb.ShardInfo{ + ShardIndex: 0, + TotalShards: 0, // Not sharded + By: true, + Labels: []string{"__name__"}, + } + + buffers := &sync.Pool{New: func() interface{} { + b := make([]byte, 0, 100) + return &b + }} + shardMatcher := shardInfo.Matcher(buffers) + + return injectShardMatcherIntoContext(context.Background(), shardMatcher) + }, + expectedFilterReturned: false, + expectedCallbackReturned: false, + }, + { + name: "shard matcher exists and is sharded", + setupContext: func() context.Context { + // Create a ShardInfo with TotalShards > 0 (sharded) + shardInfo := &storepb.ShardInfo{ + ShardIndex: 0, + TotalShards: 2, // Sharded + By: true, + Labels: []string{"__name__"}, + } + + buffers := &sync.Pool{New: func() interface{} { + b := make([]byte, 0, 100) + return &b + }} + shardMatcher := shardInfo.Matcher(buffers) + + return injectShardMatcherIntoContext(context.Background(), shardMatcher) + }, + expectedFilterReturned: true, + expectedCallbackReturned: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := tt.setupContext() + + filter, exists := materializedLabelsFilterCallback(ctx, nil) + + require.Equal(t, tt.expectedCallbackReturned, exists) + + if 
tt.expectedFilterReturned { + require.NotNil(t, filter) + + // Test that the filter can be used + testLabels := labels.FromStrings("__name__", "test_metric", "label1", "value1") + // We can't easily test the actual filtering logic without knowing the internal + // shard matching implementation, but we can at least verify the filter interface works + _ = filter.Filter(testLabels) + + // Cleanup + filter.Close() + } else { + require.Nil(t, filter) + } + }) + } +} + +func TestParquetQueryableFallbackDisabled(t *testing.T) { + block1 := ulid.MustNew(1, nil) + block2 := ulid.MustNew(2, nil) + minT := int64(10) + maxT := util.TimeToMillis(time.Now()) + + createStore := func() *blocksStoreSetMock { + return &blocksStoreSetMock{mockedResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", + mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponse(labels.FromStrings(labels.MetricName, "fromSg"), []cortexpb.Sample{{Value: 1, TimestampMs: minT}, {Value: 2, TimestampMs: minT + 1}}, nil, nil), + mockHintsResponse(block1, block2), + }, + mockedLabelNamesResponse: &storepb.LabelNamesResponse{ + Names: namesFromSeries(labels.FromMap(map[string]string{labels.MetricName: "fromSg", "fromSg": "fromSg"})), + Warnings: []string{}, + Hints: mockNamesHints(block1, block2), + }, + mockedLabelValuesResponse: &storepb.LabelValuesResponse{ + Values: valuesFromSeries(labels.MetricName, labels.FromMap(map[string]string{labels.MetricName: "fromSg", "fromSg": "fromSg"})), + Warnings: []string{}, + Hints: mockValuesHints(block1, block2), + }, + }: {block1, block2}}, + }, + } + } + + matchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, "fromSg"), + } + ctx := user.InjectOrgID(context.Background(), "user-1") + + t.Run("should return consistency check errors when fallback disabled and some blocks not available as parquet", func(t *testing.T) { + finder := &blocksFinderMock{} + stores := createStore() + + q := &blocksStoreQuerier{ + minT: minT, + maxT: maxT, + finder: finder, + stores: stores, + consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), + logger: log.NewNopLogger(), + metrics: newBlocksStoreQueryableMetrics(prometheus.NewPedanticRegistry()), + limits: &blocksStoreLimitsMock{}, + + storeGatewayConsistencyCheckMaxAttempts: 3, + } + + mParquetQuerier := &mockParquetQuerier{} + pq := &parquetQuerierWithFallback{ + minT: minT, + maxT: maxT, + finder: finder, + blocksStoreQuerier: q, + parquetQuerier: mParquetQuerier, + queryStoreAfter: time.Hour, + metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), + limits: defaultOverrides(t, 0), + logger: log.NewNopLogger(), + defaultBlockStoreType: parquetBlockStore, + fallbackDisabled: true, // Disable fallback + } + + // Set up blocks where block1 has parquet metadata but block2 doesn't + finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ + &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, // Available as parquet + &bucketindex.Block{ID: block2}, // Not available as parquet + }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) + + expectedError := fmt.Sprintf("consistency check failed because some blocks were not available as parquet files: %s", block2.String()) + + t.Run("select should return consistency check error", func(t *testing.T) { + ss := pq.Select(ctx, true, nil, matchers...) 
+ require.Error(t, ss.Err()) + require.Contains(t, ss.Err().Error(), expectedError) + }) + + t.Run("labelNames should return consistency check error", func(t *testing.T) { + _, _, err := pq.LabelNames(ctx, nil, matchers...) + require.Error(t, err) + require.Contains(t, err.Error(), expectedError) + }) + + t.Run("labelValues should return consistency check error", func(t *testing.T) { + _, _, err := pq.LabelValues(ctx, labels.MetricName, nil, matchers...) + require.Error(t, err) + require.Contains(t, err.Error(), expectedError) + }) + }) + + t.Run("should work normally when all blocks are available as parquet and fallback disabled", func(t *testing.T) { + finder := &blocksFinderMock{} + stores := createStore() + + q := &blocksStoreQuerier{ + minT: minT, + maxT: maxT, + finder: finder, + stores: stores, + consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), + logger: log.NewNopLogger(), + metrics: newBlocksStoreQueryableMetrics(prometheus.NewPedanticRegistry()), + limits: &blocksStoreLimitsMock{}, + + storeGatewayConsistencyCheckMaxAttempts: 3, + } + + mParquetQuerier := &mockParquetQuerier{} + pq := &parquetQuerierWithFallback{ + minT: minT, + maxT: maxT, + finder: finder, + blocksStoreQuerier: q, + parquetQuerier: mParquetQuerier, + queryStoreAfter: time.Hour, + metrics: newParquetQueryableFallbackMetrics(prometheus.NewRegistry()), + limits: defaultOverrides(t, 0), + logger: log.NewNopLogger(), + defaultBlockStoreType: parquetBlockStore, + fallbackDisabled: true, // Disable fallback + } + + // Set up blocks where both blocks have parquet metadata + finder.On("GetBlocks", mock.Anything, "user-1", minT, mock.Anything, mock.Anything).Return(bucketindex.Blocks{ + &bucketindex.Block{ID: block1, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, // Available as parquet + &bucketindex.Block{ID: block2, Parquet: &parquet.ConverterMarkMeta{Version: 1}}, // Available as parquet + }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), nil) + + t.Run("select should work without error", func(t *testing.T) { + mParquetQuerier.Reset() + ss := pq.Select(ctx, true, nil, matchers...) + require.NoError(t, ss.Err()) + require.Len(t, mParquetQuerier.queriedBlocks, 2) + }) + + t.Run("labelNames should work without error", func(t *testing.T) { + mParquetQuerier.Reset() + _, _, err := pq.LabelNames(ctx, nil, matchers...) + require.NoError(t, err) + require.Len(t, mParquetQuerier.queriedBlocks, 2) + }) + + t.Run("labelValues should work without error", func(t *testing.T) { + mParquetQuerier.Reset() + _, _, err := pq.LabelValues(ctx, labels.MetricName, nil, matchers...) 
+ require.NoError(t, err) + require.Len(t, mParquetQuerier.queriedBlocks, 2) + }) + }) +} diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index ffe6c2e0b50..9160f1c4112 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -92,16 +92,18 @@ type Config struct { EnablePromQLExperimentalFunctions bool `yaml:"enable_promql_experimental_functions"` // Query Parquet files if available - EnableParquetQueryable bool `yaml:"enable_parquet_queryable" doc:"hidden"` - ParquetQueryableShardCacheSize int `yaml:"parquet_queryable_shard_cache_size" doc:"hidden"` - ParquetQueryableDefaultBlockStore string `yaml:"parquet_queryable_default_block_store" doc:"hidden"` + EnableParquetQueryable bool `yaml:"enable_parquet_queryable"` + ParquetQueryableShardCacheSize int `yaml:"parquet_queryable_shard_cache_size"` + ParquetQueryableDefaultBlockStore string `yaml:"parquet_queryable_default_block_store"` + ParquetQueryableFallbackDisabled bool `yaml:"parquet_queryable_fallback_disabled"` + DistributedExecEnabled bool `yaml:"distributed_exec_enabled" doc:"hidden"` } var ( errBadLookbackConfigs = errors.New("bad settings, query_store_after >= query_ingesters_within which can result in queries not being sent") errShuffleShardingLookbackLessThanQueryStoreAfter = errors.New("the shuffle-sharding lookback period should be greater or equal than the configured 'query store after'") errEmptyTimeRange = errors.New("empty time range") - errUnsupportedResponseCompression = errors.New("unsupported response compression. Supported compression 'gzip' and '' (disable compression)") + errUnsupportedResponseCompression = errors.New("unsupported response compression. Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression)") errInvalidConsistencyCheckAttempts = errors.New("store gateway consistency check max attempts should be greater or equal than 1") errInvalidIngesterQueryMaxAttempts = errors.New("ingester query max attempts should be greater or equal than 1") errInvalidParquetQueryableDefaultBlockStore = errors.New("unsupported parquet queryable default block store. Supported options are tsdb and parquet") @@ -128,7 +130,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") f.BoolVar(&cfg.EnablePerStepStats, "querier.per-step-stats-enabled", false, "Enable returning samples stats per steps in query response.") - f.StringVar(&cfg.ResponseCompression, "querier.response-compression", "gzip", "Use compression for metrics query API or instant and range query APIs. Supports 'gzip' and '' (disable compression)") + f.StringVar(&cfg.ResponseCompression, "querier.response-compression", "gzip", "Use compression for metrics query API or instant and range query APIs. Supported compression 'gzip', 'snappy', 'zstd' and '' (disable compression)") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 
0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") @@ -143,8 +145,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.IgnoreMaxQueryLength, "querier.ignore-max-query-length", false, "If enabled, ignore max query length check at Querier select method. Users can choose to ignore it since the validation can be done before Querier evaluation like at Query Frontend or Ruler.") f.BoolVar(&cfg.EnablePromQLExperimentalFunctions, "querier.enable-promql-experimental-functions", false, "[Experimental] If true, experimental promQL functions are enabled.") f.BoolVar(&cfg.EnableParquetQueryable, "querier.enable-parquet-queryable", false, "[Experimental] If true, querier will try to query the parquet files if available.") - f.IntVar(&cfg.ParquetQueryableShardCacheSize, "querier.parquet-queryable-shard-cache-size", 512, "[Experimental] [Experimental] Maximum size of the Parquet queryable shard cache. 0 to disable.") - f.StringVar(&cfg.ParquetQueryableDefaultBlockStore, "querier.parquet-queryable-default-block-store", string(parquetBlockStore), "Parquet queryable's default block store to query. Valid options are tsdb and parquet. If it is set to tsdb, parquet queryable always fallback to store gateway.") + f.IntVar(&cfg.ParquetQueryableShardCacheSize, "querier.parquet-queryable-shard-cache-size", 512, "[Experimental] Maximum size of the Parquet queryable shard cache. 0 to disable.") + f.StringVar(&cfg.ParquetQueryableDefaultBlockStore, "querier.parquet-queryable-default-block-store", string(parquetBlockStore), "[Experimental] Parquet queryable's default block store to query. Valid options are tsdb and parquet. If it is set to tsdb, parquet queryable always fallback to store gateway.") + f.BoolVar(&cfg.DistributedExecEnabled, "querier.distributed-exec-enabled", false, "Experimental: Enables distributed execution of queries by passing logical query plan fragments to downstream components.") + f.BoolVar(&cfg.ParquetQueryableFallbackDisabled, "querier.parquet-queryable-fallback-disabled", false, "[Experimental] Disable Parquet queryable to fallback queries to Store Gateway if the block is not available as Parquet files but available in TSDB. Setting this to true will disable the fallback and users can remove Store Gateway. But need to make sure Parquet files are created before it is queryable.") } // Validate the config @@ -156,7 +160,7 @@ func (cfg *Config) Validate() error { } } - if cfg.ResponseCompression != "" && cfg.ResponseCompression != "gzip" { + if cfg.ResponseCompression != "" && cfg.ResponseCompression != "gzip" && cfg.ResponseCompression != "snappy" && cfg.ResponseCompression != "zstd" { return errUnsupportedResponseCompression } @@ -200,7 +204,7 @@ func getChunksIteratorFunction(_ Config) chunkIteratorFunc { } // New builds a queryable and promql engine. 
-func New(cfg Config, limits *validation.Overrides, distributor Distributor, stores []QueryableWithFilter, reg prometheus.Registerer, logger log.Logger, isPartialDataEnabled partialdata.IsCfgEnabledFunc) (storage.SampleAndChunkQueryable, storage.ExemplarQueryable, promql.QueryEngine) { +func New(cfg Config, limits *validation.Overrides, distributor Distributor, stores []QueryableWithFilter, reg prometheus.Registerer, logger log.Logger, isPartialDataEnabled partialdata.IsCfgEnabledFunc) (storage.SampleAndChunkQueryable, storage.ExemplarQueryable, engine.QueryEngine) { iteratorFunc := getChunksIteratorFunction(cfg) distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterMetadataStreaming, cfg.IngesterLabelNamesWithMatchers, iteratorFunc, cfg.QueryIngestersWithin, isPartialDataEnabled, cfg.IngesterQueryMaxAttempts) diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 06f44039a11..3c48c0ab7d5 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -120,11 +120,9 @@ var ( // Very simple single-point gets, with low step. Performance should be // similar to above. { - query: "foo", - step: sampleRate * 4, - labels: labels.Labels{ - labels.Label{Name: model.MetricNameLabel, Value: "foo"}, - }, + query: "foo", + step: sampleRate * 4, + labels: labels.FromStrings(labels.MetricName, "foo"), samples: func(from, through time.Time, step time.Duration) int { return int(through.Sub(from)/step) + 1 }, @@ -182,11 +180,9 @@ var ( // Single points gets with large step; excersise Seek performance. { - query: "foo", - step: sampleRate * 4 * 10, - labels: labels.Labels{ - labels.Label{Name: model.MetricNameLabel, Value: "foo"}, - }, + query: "foo", + step: sampleRate * 4 * 10, + labels: labels.FromStrings(labels.MetricName, "foo"), samples: func(from, through time.Time, step time.Duration) int { return int(through.Sub(from)/step) + 1 }, diff --git a/pkg/querier/series/series_set.go b/pkg/querier/series/series_set.go index 53a3ca4a1b1..4aaf6f89305 100644 --- a/pkg/querier/series/series_set.go +++ b/pkg/querier/series/series_set.go @@ -195,17 +195,12 @@ func MetricsToSeriesSet(ctx context.Context, sortSeries bool, ms []model.Metric) } func metricToLabels(m model.Metric) labels.Labels { - ls := make(labels.Labels, 0, len(m)) + builder := labels.NewBuilder(labels.EmptyLabels()) for k, v := range m { - ls = append(ls, labels.Label{ - Name: string(k), - Value: string(v), - }) + builder.Set(string(k), string(v)) + } - // PromQL expects all labels to be sorted! In general, anyone constructing - // a labels.Labels list is responsible for sorting it during construction time. 
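This hunk's metricToLabels rewrite leans on labels.NewBuilder, whose Labels() method returns the set already sorted, which is why the explicit sort.Sort is dropped. The rewritten function in isolation, runnable as-is:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
)

// metricToLabels converts a model.Metric (an unsorted map) into labels.Labels.
// labels.NewBuilder sorts on Labels(), so no explicit sort is needed.
func metricToLabels(m model.Metric) labels.Labels {
	builder := labels.NewBuilder(labels.EmptyLabels())
	for k, v := range m {
		builder.Set(string(k), string(v))
	}
	return builder.Labels()
}

func main() {
	m := model.Metric{"job": "node", "__name__": "up"}
	fmt.Println(metricToLabels(m)) // {__name__="up", job="node"}
}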
- sort.Sort(ls) - return ls + return builder.Labels() } type byLabels []storage.Series diff --git a/pkg/querier/series/series_set_test.go b/pkg/querier/series/series_set_test.go index 7e243a14449..cf82cb61fec 100644 --- a/pkg/querier/series/series_set_test.go +++ b/pkg/querier/series/series_set_test.go @@ -46,11 +46,5 @@ func TestMatrixToSeriesSetSortsMetricLabels(t *testing.T) { require.NoError(t, ss.Err()) l := ss.At().Labels() - require.Equal(t, labels.Labels{ - {Name: string(model.MetricNameLabel), Value: "testmetric"}, - {Name: "a", Value: "b"}, - {Name: "c", Value: "d"}, - {Name: "e", Value: "f"}, - {Name: "g", Value: "h"}, - }, l) + require.Equal(t, labels.FromStrings(labels.MetricName, "testmetric", "a", "b", "c", "d", "e", "f", "g", "h"), l) } diff --git a/pkg/querier/stats_renderer_test.go b/pkg/querier/stats_renderer_test.go index 6f197b01657..0b8d591c2a4 100644 --- a/pkg/querier/stats_renderer_test.go +++ b/pkg/querier/stats_renderer_test.go @@ -90,6 +90,9 @@ func Test_StatsRenderer(t *testing.T) { false, false, false, + false, + 5*time.Minute, + false, ) promRouter := route.New().WithPrefix("/api/v1") diff --git a/pkg/querier/tenantfederation/exemplar_merge_queryable.go b/pkg/querier/tenantfederation/exemplar_merge_queryable.go index a5f40ca59dc..c6b24caeb03 100644 --- a/pkg/querier/tenantfederation/exemplar_merge_queryable.go +++ b/pkg/querier/tenantfederation/exemplar_merge_queryable.go @@ -175,10 +175,7 @@ func (m mergeExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Mat // append __tenant__ label to `seriesLabels` to identify each tenants for i, e := range res { - e.SeriesLabels = setLabelsRetainExisting(e.SeriesLabels, labels.Label{ - Name: m.idLabelName, - Value: job.id, - }) + e.SeriesLabels = setLabelsRetainExisting(e.SeriesLabels, labels.FromStrings(m.idLabelName, job.id)) res[i] = e } diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index 71bf0e2531e..58cdb7625f2 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -364,12 +364,7 @@ func (m *mergeQuerier) Select(ctx context.Context, sortSeries bool, hints *stora newCtx := user.InjectOrgID(parentCtx, job.id) seriesSets[job.pos] = &addLabelsSeriesSet{ upstream: job.querier.Select(newCtx, sortSeries, hints, filteredMatchers...), - labels: labels.Labels{ - { - Name: m.idLabelName, - Value: job.id, - }, - }, + labels: labels.FromStrings(m.idLabelName, job.id), } return nil } @@ -442,7 +437,7 @@ func (m *addLabelsSeriesSet) At() storage.Series { upstream := m.upstream.At() m.currSeries = &addLabelsSeries{ upstream: upstream, - labels: setLabelsRetainExisting(upstream.Labels(), m.labels...), + labels: setLabelsRetainExisting(upstream.Labels(), m.labels), } } return m.currSeries @@ -471,11 +466,11 @@ func rewriteLabelName(s string) string { } // this outputs a more readable error format -func labelsToString(labels labels.Labels) string { - parts := make([]string, len(labels)) - for pos, l := range labels { - parts[pos] = rewriteLabelName(l.Name) + " " + l.Value - } +func labelsToString(lbls labels.Labels) string { + parts := make([]string, 0, lbls.Len()) + lbls.Range(func(l labels.Label) { + parts = append(parts, rewriteLabelName(l.Name)+" "+l.Value) + }) return strings.Join(parts, ", ") } @@ -496,17 +491,17 @@ func (a *addLabelsSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { // this sets a label and preserves an existing value a new label prefixed with // original_. 
It doesn't do this recursively. -func setLabelsRetainExisting(src labels.Labels, additionalLabels ...labels.Label) labels.Labels { +func setLabelsRetainExisting(src labels.Labels, additionalLabels labels.Labels) labels.Labels { lb := labels.NewBuilder(src) - for _, additionalL := range additionalLabels { - if oldValue := src.Get(additionalL.Name); oldValue != "" { + for name, value := range additionalLabels.Map() { + if oldValue := src.Get(name); oldValue != "" { lb.Set( - retainExistingPrefix+additionalL.Name, + retainExistingPrefix+name, oldValue, ) } - lb.Set(additionalL.Name, additionalL.Value) + lb.Set(name, value) } return lb.Labels() diff --git a/pkg/querier/tenantfederation/merge_queryable_test.go b/pkg/querier/tenantfederation/merge_queryable_test.go index 8015ca21951..5be2f70a764 100644 --- a/pkg/querier/tenantfederation/merge_queryable_test.go +++ b/pkg/querier/tenantfederation/merge_queryable_test.go @@ -492,24 +492,24 @@ func TestMergeQueryable_Select(t *testing.T) { matchers: []*labels.Matcher{{Name: defaultTenantLabel, Value: "team-b", Type: labels.MatchNotEqual}}, expectedSeriesCount: 4, expectedLabels: []labels.Labels{ - { - {Name: "__tenant_id__", Value: "team-a"}, - {Name: "instance", Value: "host1"}, - {Name: "tenant-team-a", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-a"}, - {Name: "instance", Value: "host2.team-a"}, - }, - { - {Name: "__tenant_id__", Value: "team-c"}, - {Name: "instance", Value: "host1"}, - {Name: "tenant-team-c", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-c"}, - {Name: "instance", Value: "host2.team-c"}, - }, + labels.FromStrings( + "__tenant_id__", "team-a", + "instance", "host1", + "tenant-team-a", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-a", + "instance", "host2.team-a", + ), + labels.FromStrings( + "__tenant_id__", "team-c", + "instance", "host1", + "tenant-team-c", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-c", + "instance", "host2.team-c", + ), }, expectedMetrics: expectedThreeTenantsMetrics, }, @@ -518,15 +518,15 @@ func TestMergeQueryable_Select(t *testing.T) { matchers: []*labels.Matcher{{Name: defaultTenantLabel, Value: "team-b", Type: labels.MatchEqual}}, expectedSeriesCount: 2, expectedLabels: []labels.Labels{ - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host1"}, - {Name: "tenant-team-b", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host2.team-b"}, - }, + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host1", + "tenant-team-b", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host2.team-b", + ), }, expectedMetrics: expectedThreeTenantsMetrics, }, @@ -545,39 +545,39 @@ func TestMergeQueryable_Select(t *testing.T) { name: "should return all series when no matchers are provided", expectedSeriesCount: 6, expectedLabels: []labels.Labels{ - { - {Name: "__tenant_id__", Value: "team-a"}, - {Name: "instance", Value: "host1"}, - {Name: "original___tenant_id__", Value: "original-value"}, - {Name: "tenant-team-a", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-a"}, - {Name: "instance", Value: "host2.team-a"}, - {Name: "original___tenant_id__", Value: "original-value"}, - }, - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host1"}, - {Name: "original___tenant_id__", Value: "original-value"}, - {Name: "tenant-team-b", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-b"}, - 
{Name: "instance", Value: "host2.team-b"}, - {Name: "original___tenant_id__", Value: "original-value"}, - }, - { - {Name: "__tenant_id__", Value: "team-c"}, - {Name: "instance", Value: "host1"}, - {Name: "original___tenant_id__", Value: "original-value"}, - {Name: "tenant-team-c", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-c"}, - {Name: "instance", Value: "host2.team-c"}, - {Name: "original___tenant_id__", Value: "original-value"}, - }, + labels.FromStrings( + "__tenant_id__", "team-a", + "instance", "host1", + "original___tenant_id__", "original-value", + "tenant-team-a", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-a", + "instance", "host2.team-a", + "original___tenant_id__", "original-value", + ), + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host1", + "original___tenant_id__", "original-value", + "tenant-team-b", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host2.team-b", + "original___tenant_id__", "original-value", + ), + labels.FromStrings( + "__tenant_id__", "team-c", + "instance", "host1", + "original___tenant_id__", "original-value", + "tenant-team-c", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-c", + "instance", "host2.team-c", + "original___tenant_id__", "original-value", + ), }, expectedMetrics: expectedThreeTenantsMetrics, }, @@ -599,17 +599,17 @@ func TestMergeQueryable_Select(t *testing.T) { matchers: []*labels.Matcher{{Name: defaultTenantLabel, Value: "team-b", Type: labels.MatchEqual}}, expectedSeriesCount: 2, expectedLabels: []labels.Labels{ - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host1"}, - {Name: "original___tenant_id__", Value: "original-value"}, - {Name: "tenant-team-b", Value: "static"}, - }, - { - {Name: "__tenant_id__", Value: "team-b"}, - {Name: "instance", Value: "host2.team-b"}, - {Name: "original___tenant_id__", Value: "original-value"}, - }, + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host1", + "original___tenant_id__", "original-value", + "tenant-team-b", "static", + ), + labels.FromStrings( + "__tenant_id__", "team-b", + "instance", "host2.team-b", + "original___tenant_id__", "original-value", + ), }, expectedMetrics: expectedThreeTenantsMetrics, }, @@ -1178,33 +1178,33 @@ func TestSetLabelsRetainExisting(t *testing.T) { }{ // Test adding labels at the end. { - labels: labels.Labels{{Name: "a", Value: "b"}}, - additionalLabels: labels.Labels{{Name: "c", Value: "d"}}, - expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + labels: labels.FromStrings("a", "b"), + additionalLabels: labels.FromStrings("c", "d"), + expected: labels.FromStrings("a", "b", "c", "d"), }, // Test adding labels at the beginning. { - labels: labels.Labels{{Name: "c", Value: "d"}}, - additionalLabels: labels.Labels{{Name: "a", Value: "b"}}, - expected: labels.Labels{{Name: "a", Value: "b"}, {Name: "c", Value: "d"}}, + labels: labels.FromStrings("c", "d"), + additionalLabels: labels.FromStrings("a", "b"), + expected: labels.FromStrings("a", "b", "c", "d"), }, // Test we do override existing labels and expose the original value. 
{ - labels: labels.Labels{{Name: "a", Value: "b"}}, - additionalLabels: labels.Labels{{Name: "a", Value: "c"}}, - expected: labels.Labels{{Name: "a", Value: "c"}, {Name: "original_a", Value: "b"}}, + labels: labels.FromStrings("a", "b"), + additionalLabels: labels.FromStrings("a", "c"), + expected: labels.FromStrings("a", "c", "original_a", "b"), }, // Test we do override existing labels but don't do it recursively. { - labels: labels.Labels{{Name: "a", Value: "b"}, {Name: "original_a", Value: "i am lost"}}, - additionalLabels: labels.Labels{{Name: "a", Value: "d"}}, - expected: labels.Labels{{Name: "a", Value: "d"}, {Name: "original_a", Value: "b"}}, + labels: labels.FromStrings("a", "b", "original_a", "i am lost"), + additionalLabels: labels.FromStrings("a", "d"), + expected: labels.FromStrings("a", "d", "original_a", "b"), }, } { - assert.Equal(t, tc.expected, setLabelsRetainExisting(tc.labels, tc.additionalLabels...)) + assert.Equal(t, tc.expected, setLabelsRetainExisting(tc.labels, tc.additionalLabels)) } } diff --git a/pkg/querier/testutils.go b/pkg/querier/testutils.go index a032e545ddc..4ac69988bfa 100644 --- a/pkg/querier/testutils.go +++ b/pkg/querier/testutils.go @@ -142,7 +142,7 @@ func ConvertToChunks(t *testing.T, samples []cortexpb.Sample, histograms []*cort } } - c := chunk.NewChunk(nil, chk, model.Time(samples[0].TimestampMs), model.Time(samples[len(samples)-1].TimestampMs)) + c := chunk.NewChunk(labels.EmptyLabels(), chk, model.Time(samples[0].TimestampMs), model.Time(samples[len(samples)-1].TimestampMs)) clientChunks, err := chunkcompat.ToChunks([]chunk.Chunk{c}) require.NoError(t, err) diff --git a/pkg/querier/tripperware/distributed_query.go b/pkg/querier/tripperware/distributed_query.go index 02a0692153d..5439a3dc697 100644 --- a/pkg/querier/tripperware/distributed_query.go +++ b/pkg/querier/tripperware/distributed_query.go @@ -64,7 +64,10 @@ func (d distributedQueryMiddleware) newLogicalPlan(qs string, start time.Time, e DisableDuplicateLabelCheck: false, } - logicalPlan := logicalplan.NewFromAST(expr, &qOpts, planOpts) + logicalPlan, err := logicalplan.NewFromAST(expr, &qOpts, planOpts) + if err != nil { + return nil, err + } optimizedPlan, _ := logicalPlan.Optimize(logicalplan.DefaultOptimizers) return &optimizedPlan, nil diff --git a/pkg/querier/tripperware/instantquery/instant_query.go b/pkg/querier/tripperware/instantquery/instant_query.go index a3977207199..0af6ab9c618 100644 --- a/pkg/querier/tripperware/instantquery/instant_query.go +++ b/pkg/querier/tripperware/instantquery/instant_query.go @@ -47,8 +47,15 @@ type instantQueryCodec struct { func NewInstantQueryCodec(compressionStr string, defaultCodecTypeStr string) instantQueryCodec { compression := tripperware.NonCompression // default - if compressionStr == string(tripperware.GzipCompression) { + switch compressionStr { + case string(tripperware.GzipCompression): compression = tripperware.GzipCompression + + case string(tripperware.SnappyCompression): + compression = tripperware.SnappyCompression + + case string(tripperware.ZstdCompression): + compression = tripperware.ZstdCompression } defaultCodecType := tripperware.JsonCodecType // default @@ -102,13 +109,31 @@ func (c instantQueryCodec) DecodeResponse(ctx context.Context, r *http.Response, return nil, err } + responseSizeHeader := r.Header.Get("X-Uncompressed-Length") responseSizeLimiter := limiter.ResponseSizeLimiterFromContextWithFallback(ctx) - body, err := tripperware.BodyBytes(r, responseSizeLimiter, log) + responseSize, hasSizeHeader, err := 
tripperware.ParseResponseSizeHeader(responseSizeHeader) + if err != nil { + log.Error(err) + return nil, err + } + if hasSizeHeader { + if err := responseSizeLimiter.AddResponseBytes(responseSize); err != nil { + return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + } + } + + body, err := tripperware.BodyBytes(r, log) if err != nil { log.Error(err) return nil, err } + if !hasSizeHeader { + if err := responseSizeLimiter.AddResponseBytes(len(body)); err != nil { + return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + } + } + if r.StatusCode/100 != 2 { return nil, httpgrpc.Errorf(r.StatusCode, "%s", string(body)) } @@ -183,7 +208,7 @@ func (c instantQueryCodec) EncodeRequest(ctx context.Context, r tripperware.Requ } } - h.Add("Content-Type", "application/json") + h.Add("Content-Type", "application/x-www-form-urlencoded") isSourceRuler := strings.Contains(h.Get("User-Agent"), tripperware.RulerUserAgent) if !isSourceRuler { @@ -191,16 +216,19 @@ func (c instantQueryCodec) EncodeRequest(ctx context.Context, r tripperware.Requ tripperware.SetRequestHeaders(h, c.defaultCodecType, c.compression) } - byteBody, err := c.getSerializedBody(promReq) + bodyBytes, err := c.getSerializedBody(promReq) if err != nil { return nil, err } + form := url.Values{} + form.Set("plan", string(bodyBytes)) + formEncoded := form.Encode() req := &http.Request{ Method: "POST", RequestURI: u.String(), // This is what the httpgrpc code looks at. URL: u, - Body: io.NopCloser(bytes.NewReader(byteBody)), + Body: io.NopCloser(strings.NewReader(formEncoded)), Header: h, } diff --git a/pkg/querier/tripperware/instantquery/instant_query_middlewares_test.go b/pkg/querier/tripperware/instantquery/instant_query_middlewares_test.go index 122f0645623..0b1de391f8e 100644 --- a/pkg/querier/tripperware/instantquery/instant_query_middlewares_test.go +++ b/pkg/querier/tripperware/instantquery/instant_query_middlewares_test.go @@ -212,8 +212,7 @@ func TestRoundTripWithAndWithoutDistributedExec(t *testing.T) { require.NoError(t, err) // check request body - body, err := io.ReadAll(req.Body) - require.NoError(t, err) + body := []byte(req.PostFormValue("plan")) if tc.expectEmptyBody { require.Empty(t, body) } else { diff --git a/pkg/querier/tripperware/query.go b/pkg/querier/tripperware/query.go index 42e2d9eebf0..180ce1c27d0 100644 --- a/pkg/querier/tripperware/query.go +++ b/pkg/querier/tripperware/query.go @@ -4,7 +4,6 @@ import ( "bytes" "compress/gzip" "context" - "encoding/binary" "fmt" "io" "net/http" @@ -16,6 +15,8 @@ import ( "github.com/go-kit/log" "github.com/gogo/protobuf/proto" jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/snappy" + "github.com/klauspost/compress/zstd" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" "github.com/pkg/errors" @@ -27,7 +28,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/runutil" "github.com/thanos-io/promql-engine/logicalplan" @@ -46,6 +46,8 @@ type Compression string const ( GzipCompression Compression = "gzip" + ZstdCompression Compression = "zstd" + SnappyCompression Compression = "snappy" NonCompression Compression = "" JsonCodecType CodecType = "json" ProtobufCodecType CodecType = "protobuf" @@ -446,7 +448,7 @@ type Buffer interface { Bytes() []byte } -func BodyBytes(res *http.Response, responseSizeLimiter 
*limiter.ResponseSizeLimiter, logger log.Logger) ([]byte, error) { +func BodyBytes(res *http.Response, logger log.Logger) ([]byte, error) { var buf *bytes.Buffer // Attempt to cast the response body to a Buffer and use it if possible. @@ -464,13 +466,26 @@ func BodyBytes(res *http.Response, responseSizeLimiter *limiter.ResponseSizeLimi } } - responseSize := getResponseSize(res, buf) - if err := responseSizeLimiter.AddResponseBytes(responseSize); err != nil { - return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + // Handle decoding response if it was compressed + encoding := res.Header.Get("Content-Encoding") + return decode(buf, encoding, logger) +} + +func BodyBytesFromHTTPGRPCResponse(res *httpgrpc.HTTPResponse, logger log.Logger) ([]byte, error) { + headers := http.Header{} + for _, h := range res.Headers { + headers[h.Key] = h.Values } + // Handle decoding response if it was compressed + encoding := headers.Get("Content-Encoding") + buf := bytes.NewBuffer(res.Body) + return decode(buf, encoding, logger) +} + +func decode(buf *bytes.Buffer, encoding string, logger log.Logger) ([]byte, error) { // if the response is gzipped, lets unzip it here - if strings.EqualFold(res.Header.Get("Content-Encoding"), "gzip") { + if strings.EqualFold(encoding, "gzip") { gReader, err := gzip.NewReader(buf) if err != nil { return nil, err @@ -480,35 +495,24 @@ func BodyBytes(res *http.Response, responseSizeLimiter *limiter.ResponseSizeLimi return io.ReadAll(gReader) } - return buf.Bytes(), nil -} - -func BodyBytesFromHTTPGRPCResponse(res *httpgrpc.HTTPResponse, logger log.Logger) ([]byte, error) { - // if the response is gzipped, lets unzip it here - headers := http.Header{} - for _, h := range res.Headers { - headers[h.Key] = h.Values + // if the response is snappy compressed, decode it here + if strings.EqualFold(encoding, "snappy") { + sReader := snappy.NewReader(buf) + return io.ReadAll(sReader) } - if strings.EqualFold(headers.Get("Content-Encoding"), "gzip") { - gReader, err := gzip.NewReader(bytes.NewBuffer(res.Body)) + + // if the response is zstd compressed, decode it here + if strings.EqualFold(encoding, "zstd") { + zReader, err := zstd.NewReader(buf) if err != nil { return nil, err } - defer runutil.CloseWithLogOnErr(logger, gReader, "close gzip reader") + defer runutil.CloseWithLogOnErr(logger, zReader.IOReadCloser(), "close zstd decoder") - return io.ReadAll(gReader) + return io.ReadAll(zReader) } - return res.Body, nil -} - -func getResponseSize(res *http.Response, buf *bytes.Buffer) int { - if strings.EqualFold(res.Header.Get("Content-Encoding"), "gzip") && len(buf.Bytes()) >= 4 { - // GZIP body contains the size of the original (uncompressed) input data - // modulo 2^32 in the last 4 bytes (https://www.ietf.org/rfc/rfc1952.txt). - return int(binary.LittleEndian.Uint32(buf.Bytes()[len(buf.Bytes())-4:])) - } - return len(buf.Bytes()) + return buf.Bytes(), nil } // UnmarshalJSON implements json.Unmarshaler. 
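For illustration, here is a minimal standalone sketch of the Content-Encoding dispatch that the decode helper above implements: gzip via the standard library, snappy and zstd via github.com/klauspost/compress, with the raw bytes returned when no known encoding is set. The decodeBody name and the main function are hypothetical, not part of this PR.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/compress/snappy"
	"github.com/klauspost/compress/zstd"
)

// decodeBody mirrors the decode helper above: it dispatches on the
// Content-Encoding value and returns the uncompressed bytes.
func decodeBody(buf *bytes.Buffer, encoding string) ([]byte, error) {
	switch {
	case strings.EqualFold(encoding, "gzip"):
		gr, err := gzip.NewReader(buf)
		if err != nil {
			return nil, err
		}
		defer gr.Close()
		return io.ReadAll(gr)
	case strings.EqualFold(encoding, "snappy"):
		return io.ReadAll(snappy.NewReader(buf))
	case strings.EqualFold(encoding, "zstd"):
		zr, err := zstd.NewReader(buf)
		if err != nil {
			return nil, err
		}
		defer zr.Close()
		return io.ReadAll(zr)
	}
	// No (known) encoding: return the body as-is.
	return buf.Bytes(), nil
}

func main() {
	var compressed bytes.Buffer
	gw := gzip.NewWriter(&compressed)
	_, _ = gw.Write([]byte(`{"status":"success"}`))
	gw.Close()

	body, err := decodeBody(&compressed, "gzip")
	fmt.Println(string(body), err)
}

Note that this is also why the removed getResponseSize is no longer needed: once the body is decoded before the size limit is applied, the limiter can count the decoded bytes directly, or trust the X-Uncompressed-Length header when the downstream provides it.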
@@ -767,9 +771,17 @@ func (s *PrometheusResponseStats) MarshalJSON() ([]byte, error) { } func SetRequestHeaders(h http.Header, defaultCodecType CodecType, compression Compression) { - if compression == GzipCompression { + switch compression { + case GzipCompression: h.Set("Accept-Encoding", string(GzipCompression)) + + case SnappyCompression: + h.Set("Accept-Encoding", string(SnappyCompression)) + + case ZstdCompression: + h.Set("Accept-Encoding", string(ZstdCompression)) } + if defaultCodecType == ProtobufCodecType { h.Set("Accept", ApplicationProtobuf+", "+ApplicationJson) } else { @@ -777,6 +789,17 @@ func SetRequestHeaders(h http.Header, defaultCodecType CodecType, compression Co } } +func ParseResponseSizeHeader(header string) (int, bool, error) { + if header == "" { + return 0, false, nil + } + size, err := strconv.Atoi(header) + if err != nil { + return 0, false, err + } + return size, true, nil +} + func UnmarshalResponse(r *http.Response, buf []byte, resp *PrometheusResponse) error { if r.Header == nil { return json.Unmarshal(buf, resp) diff --git a/pkg/querier/tripperware/query_test.go b/pkg/querier/tripperware/query_test.go index 04606df99e6..08f149f43b0 100644 --- a/pkg/querier/tripperware/query_test.go +++ b/pkg/querier/tripperware/query_test.go @@ -1,10 +1,7 @@ package tripperware import ( - "bytes" - "compress/gzip" "math" - "net/http" "strconv" "testing" "time" @@ -196,50 +193,3 @@ func generateData(timeseries, datapoints int) (floatMatrix, histogramMatrix []*S } return } - -func Test_getResponseSize(t *testing.T) { - tests := []struct { - body []byte - useGzip bool - }{ - { - body: []byte(`foo`), - useGzip: false, - }, - { - body: []byte(`foo`), - useGzip: true, - }, - { - body: []byte(`{"status":"success","data":{"resultType":"vector","result":[]}}`), - useGzip: false, - }, - { - body: []byte(`{"status":"success","data":{"resultType":"vector","result":[]}}`), - useGzip: true, - }, - } - - for i, test := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - expectedBodyLength := len(test.body) - buf := &bytes.Buffer{} - response := &http.Response{} - - if test.useGzip { - response = &http.Response{ - Header: http.Header{"Content-Encoding": []string{"gzip"}}, - } - w := gzip.NewWriter(buf) - _, err := w.Write(test.body) - require.NoError(t, err) - w.Close() - } else { - buf = bytes.NewBuffer(test.body) - } - - bodyLength := getResponseSize(response, buf) - require.Equal(t, expectedBodyLength, bodyLength) - }) - } -} diff --git a/pkg/querier/tripperware/queryrange/query_range.go b/pkg/querier/tripperware/queryrange/query_range.go index df721146f66..786676846bc 100644 --- a/pkg/querier/tripperware/queryrange/query_range.go +++ b/pkg/querier/tripperware/queryrange/query_range.go @@ -63,8 +63,15 @@ type prometheusCodec struct { func NewPrometheusCodec(sharded bool, compressionStr string, defaultCodecTypeStr string) *prometheusCodec { //nolint:revive compression := tripperware.NonCompression // default - if compressionStr == string(tripperware.GzipCompression) { + switch compressionStr { + case string(tripperware.GzipCompression): compression = tripperware.GzipCompression + + case string(tripperware.SnappyCompression): + compression = tripperware.SnappyCompression + + case string(tripperware.ZstdCompression): + compression = tripperware.ZstdCompression } defaultCodecType := tripperware.JsonCodecType // default @@ -189,8 +196,7 @@ func (c prometheusCodec) EncodeRequest(ctx context.Context, r tripperware.Reques h.Add(n, v) } } - - h.Add("Content-Type", "application/json") 
+ h.Add("Content-Type", "application/x-www-form-urlencoded") tripperware.SetRequestHeaders(h, c.defaultCodecType, c.compression) @@ -199,11 +205,15 @@ func (c prometheusCodec) EncodeRequest(ctx context.Context, r tripperware.Reques return nil, err } + form := url.Values{} + form.Set("plan", string(bodyBytes)) + formEncoded := form.Encode() + req := &http.Request{ Method: "POST", RequestURI: u.String(), // This is what the httpgrpc code looks at. URL: u, - Body: io.NopCloser(bytes.NewReader(bodyBytes)), + Body: io.NopCloser(strings.NewReader(formEncoded)), Header: h, } @@ -218,13 +228,31 @@ func (c prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ return nil, err } + responseSizeHeader := r.Header.Get("X-Uncompressed-Length") responseSizeLimiter := limiter.ResponseSizeLimiterFromContextWithFallback(ctx) - body, err := tripperware.BodyBytes(r, responseSizeLimiter, log) + responseSize, hasSizeHeader, err := tripperware.ParseResponseSizeHeader(responseSizeHeader) + if err != nil { + log.Error(err) + return nil, err + } + if hasSizeHeader { + if err := responseSizeLimiter.AddResponseBytes(responseSize); err != nil { + return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + } + } + + body, err := tripperware.BodyBytes(r, log) if err != nil { log.Error(err) return nil, err } + if !hasSizeHeader { + if err := responseSizeLimiter.AddResponseBytes(len(body)); err != nil { + return nil, httpgrpc.Errorf(http.StatusUnprocessableEntity, "%s", err.Error()) + } + } + if r.StatusCode/100 != 2 { return nil, httpgrpc.Errorf(r.StatusCode, "%s", string(body)) } diff --git a/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go b/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go index f21eae986df..acf66698c16 100644 --- a/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go +++ b/pkg/querier/tripperware/queryrange/query_range_middlewares_test.go @@ -231,8 +231,7 @@ func TestRoundTripWithAndWithoutDistributedExec(t *testing.T) { require.NoError(t, err) // check request body - body, err := io.ReadAll(req.Body) - require.NoError(t, err) + body := []byte(req.PostFormValue("plan")) if tc.expectEmptyBody { require.Empty(t, body) } else { diff --git a/pkg/querier/tripperware/queryrange/results_cache.go b/pkg/querier/tripperware/queryrange/results_cache.go index db6d2f284f5..6378a82fbef 100644 --- a/pkg/querier/tripperware/queryrange/results_cache.go +++ b/pkg/querier/tripperware/queryrange/results_cache.go @@ -335,7 +335,12 @@ func (s resultsCache) isAtModifierCachable(ctx context.Context, r tripperware.Re } // This resolves the start() and end() used with the @ modifier. - expr = promql.PreprocessExpr(expr, timestamp.Time(r.GetStart()), timestamp.Time(r.GetEnd())) + expr, err = promql.PreprocessExpr(expr, timestamp.Time(r.GetStart()), timestamp.Time(r.GetEnd()), time.Duration(r.GetStep())*time.Millisecond) + if err != nil { + // We are being pessimistic in such cases. 
+ level.Warn(util_log.WithContext(ctx, s.logger)).Log("msg", "failed to preprocess expr", "query", query, "err", err) + return false + } end := r.GetEnd() atModCachable := true diff --git a/pkg/querier/tripperware/queryrange/test_utils.go b/pkg/querier/tripperware/queryrange/test_utils.go index 6e198baebbc..a48ae956131 100644 --- a/pkg/querier/tripperware/queryrange/test_utils.go +++ b/pkg/querier/tripperware/queryrange/test_utils.go @@ -24,13 +24,12 @@ func genLabels( Value: fmt.Sprintf("%d", i), } if len(rest) == 0 { - set := labels.Labels{x} - result = append(result, set) + result = append(result, labels.FromStrings(x.Name, x.Value)) continue } for _, others := range rest { - set := append(others, x) - result = append(result, set) + builder := labels.NewBuilder(others).Set(x.Name, x.Value) + result = append(result, builder.Labels()) } } return result diff --git a/pkg/querier/tripperware/queryrange/test_utils_test.go b/pkg/querier/tripperware/queryrange/test_utils_test.go index 7e0d8268ea5..8bdf75b3dd2 100644 --- a/pkg/querier/tripperware/queryrange/test_utils_test.go +++ b/pkg/querier/tripperware/queryrange/test_utils_test.go @@ -2,7 +2,6 @@ package queryrange import ( "math" - "sort" "testing" "github.com/prometheus/prometheus/model/labels" @@ -12,51 +11,13 @@ import ( func TestGenLabelsCorrectness(t *testing.T) { t.Parallel() ls := genLabels([]string{"a", "b"}, 2) - for _, set := range ls { - sort.Sort(set) - } expected := []labels.Labels{ - { - labels.Label{ - Name: "a", - Value: "0", - }, - labels.Label{ - Name: "b", - Value: "0", - }, - }, - { - labels.Label{ - Name: "a", - Value: "0", - }, - labels.Label{ - Name: "b", - Value: "1", - }, - }, - { - labels.Label{ - Name: "a", - Value: "1", - }, - labels.Label{ - Name: "b", - Value: "0", - }, - }, - { - labels.Label{ - Name: "a", - Value: "1", - }, - labels.Label{ - Name: "b", - Value: "1", - }, - }, + labels.FromStrings("a", "0", "b", "0"), + labels.FromStrings("a", "0", "b", "1"), + labels.FromStrings("a", "1", "b", "0"), + labels.FromStrings("a", "1", "b", "1"), } + require.Equal(t, expected, ls) } diff --git a/pkg/querier/tripperware/queryrange/value.go b/pkg/querier/tripperware/queryrange/value.go index efa8569a9d5..e13bb54fc65 100644 --- a/pkg/querier/tripperware/queryrange/value.go +++ b/pkg/querier/tripperware/queryrange/value.go @@ -58,10 +58,10 @@ func FromResult(res *promql.Result) ([]tripperware.SampleStream, error) { } func mapLabels(ls labels.Labels) []cortexpb.LabelAdapter { - result := make([]cortexpb.LabelAdapter, 0, len(ls)) - for _, l := range ls { + result := make([]cortexpb.LabelAdapter, 0, ls.Len()) + ls.Range(func(l labels.Label) { result = append(result, cortexpb.LabelAdapter(l)) - } + }) return result } diff --git a/pkg/querier/tripperware/queryrange/value_test.go b/pkg/querier/tripperware/queryrange/value_test.go index e82eadfa737..b31230b4ae5 100644 --- a/pkg/querier/tripperware/queryrange/value_test.go +++ b/pkg/querier/tripperware/queryrange/value_test.go @@ -48,20 +48,14 @@ func TestFromValue(t *testing.T) { input: &promql.Result{ Value: promql.Vector{ promql.Sample{ - T: 1, - F: 1, - Metric: labels.Labels{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, + T: 1, + F: 1, + Metric: labels.FromStrings("a", "a1", "b", "b1"), }, promql.Sample{ - T: 2, - F: 2, - Metric: labels.Labels{ - {Name: "a", Value: "a2"}, - {Name: "b", Value: "b2"}, - }, + T: 2, + F: 2, + Metric: labels.FromStrings("a", "a2", "b", "b2"), }, }, }, @@ -98,20 +92,14 @@ func TestFromValue(t *testing.T) { input: &promql.Result{ 
Value: promql.Matrix{ { - Metric: labels.Labels{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, + Metric: labels.FromStrings("a", "a1", "b", "b1"), Floats: []promql.FPoint{ {T: 1, F: 1}, {T: 2, F: 2}, }, }, { - Metric: labels.Labels{ - {Name: "a", Value: "a2"}, - {Name: "b", Value: "b2"}, - }, + Metric: labels.FromStrings("a", "a2", "b", "b2"), Floats: []promql.FPoint{ {T: 1, F: 8}, {T: 2, F: 9}, diff --git a/pkg/querier/tripperware/roundtrip.go b/pkg/querier/tripperware/roundtrip.go index 144bb04da36..b7759b8b45b 100644 --- a/pkg/querier/tripperware/roundtrip.go +++ b/pkg/querier/tripperware/roundtrip.go @@ -34,7 +34,7 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" - util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) const ( @@ -46,6 +46,7 @@ const ( opTypeLabelValues = "label_values" opTypeMetadata = "metadata" opTypeQueryExemplars = "query_exemplars" + opTypeFormatQuery = "format_query" ) // HandlerFunc is like http.HandlerFunc, but for Handler. @@ -152,6 +153,7 @@ func NewQueryTripperware( isLabelValues := strings.HasSuffix(r.URL.Path, "/values") isMetadata := strings.HasSuffix(r.URL.Path, "/metadata") isQueryExemplars := strings.HasSuffix(r.URL.Path, "/query_exemplars") + isFormatQuery := strings.HasSuffix(r.URL.Path, "/format_query") op := opTypeQuery switch { @@ -169,6 +171,8 @@ func NewQueryTripperware( op = opTypeMetadata case isQueryExemplars: op = opTypeQueryExemplars + case isFormatQuery: + op = opTypeFormatQuery } tenantIDs, err := tenant.TenantIDs(r.Context()) @@ -255,8 +259,8 @@ func (q roundTripper) Do(ctx context.Context, r Request) (Response, error) { return nil, err } - if headerMap := util_log.HeaderMapFromContext(ctx); headerMap != nil { - util_log.InjectHeadersIntoHTTPRequest(headerMap, request) + if requestMetadataMap := requestmeta.MapFromContext(ctx); requestMetadataMap != nil { + requestmeta.InjectMetadataIntoHTTPRequestHeaders(requestMetadataMap, request) } if err := user.InjectOrgIDIntoHTTPRequest(ctx, request); err != nil { diff --git a/pkg/querier/tripperware/roundtrip_test.go b/pkg/querier/tripperware/roundtrip_test.go index ceb4510d479..ff0ed99d8e0 100644 --- a/pkg/querier/tripperware/roundtrip_test.go +++ b/pkg/querier/tripperware/roundtrip_test.go @@ -38,6 +38,7 @@ const ( labelNamesQuery = "/api/v1/labels" labelValuesQuery = "/api/v1/label/label/values" metadataQuery = "/api/v1/metadata" + formatQuery = "/api/v1/format_query?query=foo/bar" responseBody = `{"status":"success","data":{"resultType":"matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}]}}` instantResponseBody = `{"status":"success","data":{"resultType":"vector","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}]}}` @@ -229,6 +230,18 @@ cortex_query_frontend_queries_total{op="remote_read", source="api", user="1"} 1 # HELP cortex_query_frontend_queries_total Total queries sent per tenant. # TYPE cortex_query_frontend_queries_total counter cortex_query_frontend_queries_total{op="query_range", source="api", user="1"} 1 +`, + }, + { + path: formatQuery, + expectedBody: "bar", + limits: defaultOverrides, + maxSubQuerySteps: 11000, + userAgent: "dummyUserAgent/1.2", + expectedMetric: ` +# HELP cortex_query_frontend_queries_total Total queries sent per tenant. 
+# TYPE cortex_query_frontend_queries_total counter +cortex_query_frontend_queries_total{op="format_query", source="api", user="1"} 1 `, }, { diff --git a/pkg/querier/worker/frontend_processor.go b/pkg/querier/worker/frontend_processor.go index 17bd031acfb..88f7f311393 100644 --- a/pkg/querier/worker/frontend_processor.go +++ b/pkg/querier/worker/frontend_processor.go @@ -17,6 +17,7 @@ import ( querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/util/backoff" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) var ( @@ -129,18 +130,12 @@ func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.H for _, h := range request.Headers { headers[h.Key] = h.Values[0] } - headerMap := make(map[string]string, 0) - // Remove non-existent header. - for _, header := range fp.targetHeaders { - if v, ok := headers[textproto.CanonicalMIMEHeaderKey(header)]; ok { - headerMap[header] = v - } - } + ctx = requestmeta.ContextWithRequestMetadataMapFromHeaders(ctx, headers, fp.targetHeaders) + orgID, ok := headers[textproto.CanonicalMIMEHeaderKey(user.OrgIDHeaderName)] if ok { ctx = user.InjectOrgID(ctx, orgID) } - ctx = util_log.ContextWithHeaderMap(ctx, headerMap) logger := util_log.WithContext(ctx, fp.log) if statsEnabled { level.Info(logger).Log("msg", "started running request") diff --git a/pkg/querier/worker/scheduler_processor.go b/pkg/querier/worker/scheduler_processor.go index 0d149210284..10fd96ab230 100644 --- a/pkg/querier/worker/scheduler_processor.go +++ b/pkg/querier/worker/scheduler_processor.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "net/http" - "net/textproto" "time" "github.com/go-kit/log" @@ -28,6 +27,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" util_log "github.com/cortexproject/cortex/pkg/util/log" cortexmiddleware "github.com/cortexproject/cortex/pkg/util/middleware" + "github.com/cortexproject/cortex/pkg/util/requestmeta" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -141,14 +141,7 @@ func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_Quer for _, h := range request.HttpRequest.Headers { headers[h.Key] = h.Values[0] } - headerMap := make(map[string]string, 0) - // Remove non-existent header. - for _, header := range sp.targetHeaders { - if v, ok := headers[textproto.CanonicalMIMEHeaderKey(header)]; ok { - headerMap[header] = v - } - } - ctx = util_log.ContextWithHeaderMap(ctx, headerMap) + ctx = requestmeta.ContextWithRequestMetadataMapFromHeaders(ctx, headers, sp.targetHeaders) tracer := opentracing.GlobalTracer() // Ignore errors here. If we cannot get parent span, we just don't create new one. 
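For illustration, a minimal sketch of the header-to-context propagation that requestmeta.ContextWithRequestMetadataMapFromHeaders now centralizes for both processors above. The contextWithMetadataFromHeaders helper and ctxKey type are hypothetical stand-ins mirroring the removed per-processor filtering loops, not the real requestmeta API.

package main

import (
	"context"
	"fmt"
	"net/textproto"
)

// ctxKey is a hypothetical private context key; the real package keeps its own.
type ctxKey struct{}

// contextWithMetadataFromHeaders mirrors the removed loops: only the
// configured target headers survive into the request context, looked up
// by their canonical MIME form.
func contextWithMetadataFromHeaders(ctx context.Context, headers map[string]string, targetHeaders []string) context.Context {
	meta := make(map[string]string, len(targetHeaders))
	for _, h := range targetHeaders {
		if v, ok := headers[textproto.CanonicalMIMEHeaderKey(h)]; ok {
			meta[h] = v
		}
	}
	return context.WithValue(ctx, ctxKey{}, meta)
}

func main() {
	headers := map[string]string{
		"X-Request-Id": "abc123",
		"Accept":       "application/json", // not a target header, dropped
	}
	ctx := contextWithMetadataFromHeaders(context.Background(), headers, []string{"X-Request-Id"})
	fmt.Println(ctx.Value(ctxKey{})) // map[X-Request-Id:abc123]
}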
diff --git a/pkg/querysharding/util.go b/pkg/querysharding/util.go index 2b438ce275e..eafc3a71b4f 100644 --- a/pkg/querysharding/util.go +++ b/pkg/querysharding/util.go @@ -4,8 +4,10 @@ import ( "encoding/base64" "sync" + "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" + "github.com/thanos-io/thanos/pkg/querysharding" "github.com/thanos-io/thanos/pkg/store/storepb" cortexparser "github.com/cortexproject/cortex/pkg/parser" @@ -20,6 +22,8 @@ var ( b := make([]byte, 0, 100) return &b }} + + stop = errors.New("stop") ) func InjectShardingInfo(query string, shardInfo *storepb.ShardInfo) (string, error) { @@ -77,3 +81,43 @@ func ExtractShardingMatchers(matchers []*labels.Matcher) ([]*labels.Matcher, *st return r, shardInfo.Matcher(&buffers), nil } + +type disableBinaryExpressionAnalyzer struct { + analyzer querysharding.Analyzer +} + +// NewDisableBinaryExpressionAnalyzer is a wrapper around the analyzer that disables sharding for binary expressions not using on() matching. +func NewDisableBinaryExpressionAnalyzer(analyzer querysharding.Analyzer) *disableBinaryExpressionAnalyzer { + return &disableBinaryExpressionAnalyzer{analyzer: analyzer} +} + +func (d *disableBinaryExpressionAnalyzer) Analyze(query string) (querysharding.QueryAnalysis, error) { + analysis, err := d.analyzer.Analyze(query) + if err != nil || !analysis.IsShardable() { + return analysis, err + } + + expr, _ := cortexparser.ParseExpr(query) + isShardable := true + parser.Inspect(expr, func(node parser.Node, nodes []parser.Node) error { + switch n := node.(type) { + case *parser.BinaryExpr: + // No vector matching means one operand is not a vector. Skip it. + if n.VectorMatching == nil { + return nil + } + // Vector matching with 'ignoring' will add MetricNameLabel as a sharding label. + // Mark this type of query as not shardable. + if !n.VectorMatching.On { + isShardable = false + return stop + } + } + return nil + }) + if !isShardable { + // Mark as not shardable. + return querysharding.QueryAnalysis{}, nil + } + return analysis, nil +} diff --git a/pkg/querysharding/util_test.go b/pkg/querysharding/util_test.go new file mode 100644 index 00000000000..cba23190723 --- /dev/null +++ b/pkg/querysharding/util_test.go @@ -0,0 +1,145 @@ +package querysharding + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/querysharding" +) + +func TestDisableBinaryExpressionAnalyzer_Analyze(t *testing.T) { + tests := []struct { + name string + query string + expectShardable bool + expectError bool + description string + }{ + { + name: "binary expression with vector matching on", + query: `up{job="prometheus"} + on(instance) rate(cpu_usage[5m])`, + expectShardable: true, + expectError: false, + description: "Binary expression with 'on' matching should remain shardable", + }, + { + name: "binary expression without explicit vector matching", + query: `up{job="prometheus"} + rate(cpu_usage[5m])`, + expectShardable: false, + expectError: false, + description: "No explicit vector matching behaves like 'ignoring'.
Not shardable.", + }, + { + name: "binary expression with vector matching ignoring", + query: `up{job="prometheus"} + ignoring(instance) rate(cpu_usage[5m])`, + expectShardable: false, + expectError: false, + description: "Binary expression with 'ignoring' matching should not be shardable", + }, + { + name: "complex expression with binary expr using on", + query: `sum(rate(http_requests_total[5m])) by (job) + on(job) avg(cpu_usage) by (job)`, + expectShardable: true, + expectError: false, + description: "Complex expression with 'on' matching should remain shardable", + }, + { + name: "complex expression with binary expr using ignoring", + query: `sum(rate(http_requests_total[5m])) by (job) + ignoring(instance) avg(cpu_usage) by (job)`, + expectShardable: false, + expectError: false, + description: "Complex expression with 'ignoring' matching should not be shardable", + }, + { + name: "nested binary expressions with one ignoring", + query: `(up + on(job) rate(cpu[5m])) * ignoring(instance) memory_usage`, + expectShardable: false, + expectError: false, + description: "Nested expressions with any 'ignoring' should not be shardable", + }, + { + name: "aggregation", + query: `sum(rate(http_requests_total[5m])) by (job)`, + expectShardable: true, + expectError: false, + description: "Aggregations should remain shardable", + }, + { + name: "aggregation with binary expression and scalar", + query: `sum(rate(http_requests_total[5m])) by (job) * 100`, + expectShardable: true, + expectError: false, + description: "Aggregations should remain shardable", + }, + { + name: "invalid query", + query: "invalid{query", + expectShardable: false, + expectError: true, + description: "Invalid queries should return error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create the actual thanos analyzer + thanosAnalyzer := querysharding.NewQueryAnalyzer() + + // Wrap it with our disable binary expression analyzer + analyzer := NewDisableBinaryExpressionAnalyzer(thanosAnalyzer) + + // Test the wrapped analyzer + result, err := analyzer.Analyze(tt.query) + + if tt.expectError { + require.Error(t, err, tt.description) + return + } + + require.NoError(t, err, tt.description) + assert.Equal(t, tt.expectShardable, result.IsShardable(), tt.description) + }) + } +} + +func TestDisableBinaryExpressionAnalyzer_ComparedToOriginal(t *testing.T) { + // Test cases that verify the wrapper correctly modifies behavior + testCases := []struct { + name string + query string + }{ + { + name: "ignoring expression should be disabled", + query: `up + ignoring(instance) rate(cpu[5m])`, + }, + { + name: "nested ignoring expression should be disabled", + query: `(sum(rate(http_requests_total[5m])) by (job)) + ignoring(instance) avg(cpu_usage) by (job)`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Test with original analyzer + originalAnalyzer := querysharding.NewQueryAnalyzer() + originalResult, err := originalAnalyzer.Analyze(tc.query) + require.NoError(t, err) + + // Test with wrapped analyzer + wrappedAnalyzer := NewDisableBinaryExpressionAnalyzer(originalAnalyzer) + wrappedResult, err := wrappedAnalyzer.Analyze(tc.query) + require.NoError(t, err) + + // The wrapped analyzer should make previously shardable queries non-shardable + // if they contain binary expressions with ignoring + if originalResult.IsShardable() { + assert.False(t, wrappedResult.IsShardable(), + "Wrapped analyzer should disable sharding for queries with ignoring vector matching") + } else 
{ + // If original wasn't shardable, wrapped shouldn't be either + assert.False(t, wrappedResult.IsShardable()) + } + }) + } +} diff --git a/pkg/ring/client/pool_test.go b/pkg/ring/client/pool_test.go index 5ab735966bc..948d49d3a01 100644 --- a/pkg/ring/client/pool_test.go +++ b/pkg/ring/client/pool_test.go @@ -21,13 +21,6 @@ type mockClient struct { status grpc_health_v1.HealthCheckResponse_ServingStatus } -func (i mockClient) List(ctx context.Context, in *grpc_health_v1.HealthListRequest, opts ...grpc.CallOption) (*grpc_health_v1.HealthListResponse, error) { - if !i.happy { - return nil, fmt.Errorf("Fail") - } - return &grpc_health_v1.HealthListResponse{}, nil -} - func (i mockClient) Check(ctx context.Context, in *grpc_health_v1.HealthCheckRequest, opts ...grpc.CallOption) (*grpc_health_v1.HealthCheckResponse, error) { if !i.happy { return nil, fmt.Errorf("Fail") diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index c8d8302e27a..68c45a5bdcf 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -4,10 +4,12 @@ import ( "context" "errors" "fmt" + "time" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" @@ -27,6 +29,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/client" util_log "github.com/cortexproject/cortex/pkg/util/log" promql_util "github.com/cortexproject/cortex/pkg/util/promql" + "github.com/cortexproject/cortex/pkg/util/requestmeta" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -183,6 +186,9 @@ func EngineQueryFunc(engine promql.QueryEngine, frontendClient *frontendClient, } } + // Add request ID to the context so that it can be used in logs and metrics for split queries. 
+ ctx = requestmeta.ContextWithRequestId(ctx, uuid.NewString()) + if frontendClient != nil { v, err := frontendClient.InstantQuery(ctx, qs, t) if err != nil { diff --git a/pkg/ruler/external_labels.go b/pkg/ruler/external_labels.go index 886fc4d0ed8..b0f2e4306b5 100644 --- a/pkg/ruler/external_labels.go +++ b/pkg/ruler/external_labels.go @@ -20,7 +20,7 @@ func newUserExternalLabels(global labels.Labels, limits RulesLimits) *userExtern return &userExternalLabels{ global: global, limits: limits, - builder: labels.NewBuilder(nil), + builder: labels.NewBuilder(labels.EmptyLabels()), mtx: sync.Mutex{}, users: map[string]labels.Labels{}, @@ -41,9 +41,9 @@ func (e *userExternalLabels) update(userID string) (labels.Labels, bool) { defer e.mtx.Unlock() e.builder.Reset(e.global) - for _, l := range lset { + lset.Range(func(l labels.Label) { e.builder.Set(l.Name, l.Value) - } + }) lset = e.builder.Labels() if !labels.Equal(e.users[userID], lset) { diff --git a/pkg/ruler/external_labels_test.go b/pkg/ruler/external_labels_test.go index 45ff1507c83..1bc13a65831 100644 --- a/pkg/ruler/external_labels_test.go +++ b/pkg/ruler/external_labels_test.go @@ -22,7 +22,7 @@ func TestUserExternalLabels(t *testing.T) { name: "global labels only", removeBeforeTest: false, exists: false, - userExternalLabels: nil, + userExternalLabels: labels.EmptyLabels(), expectedExternalLabels: labels.FromStrings("from", "cortex"), }, { diff --git a/pkg/ruler/frontend_decoder.go b/pkg/ruler/frontend_decoder.go index 92a6b1a3f6e..4086dceffb7 100644 --- a/pkg/ruler/frontend_decoder.go +++ b/pkg/ruler/frontend_decoder.go @@ -5,7 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "sort" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -76,20 +75,14 @@ func (j JsonDecoder) Decode(body []byte) (promql.Vector, Warnings, error) { func (j JsonDecoder) vectorToPromQLVector(vector model.Vector) promql.Vector { v := make([]promql.Sample, 0, len(vector)) for _, sample := range vector { - metric := make([]labels.Label, 0, len(sample.Metric)) + builder := labels.NewBuilder(labels.EmptyLabels()) for k, v := range sample.Metric { - metric = append(metric, labels.Label{ - Name: string(k), - Value: string(v), - }) + builder.Set(string(k), string(v)) } - sort.Slice(metric, func(i, j int) bool { - return metric[i].Name < metric[j].Name - }) v = append(v, promql.Sample{ T: int64(sample.Timestamp), F: float64(sample.Value), - Metric: metric, + Metric: builder.Labels(), }) } return v diff --git a/pkg/ruler/notifier_test.go b/pkg/ruler/notifier_test.go index 8d3c6ba2af7..e27e3527ed7 100644 --- a/pkg/ruler/notifier_test.go +++ b/pkg/ruler/notifier_test.go @@ -225,9 +225,7 @@ func TestBuildNotifierConfig(t *testing.T) { name: "with external labels", cfg: &Config{ AlertmanagerURL: "http://alertmanager.default.svc.cluster.local/alertmanager", - ExternalLabels: []labels.Label{ - {Name: "region", Value: "us-east-1"}, - }, + ExternalLabels: labels.FromStrings("region", "us-east-1"), }, ncfg: &config.Config{ AlertingConfig: config.AlertingConfig{ @@ -247,9 +245,7 @@ func TestBuildNotifierConfig(t *testing.T) { }, }, GlobalConfig: config.GlobalConfig{ - ExternalLabels: []labels.Label{ - {Name: "region", Value: "us-east-1"}, - }, + ExternalLabels: labels.FromStrings("region", "us-east-1"), }, }, }, diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 70c07233f41..7c38c8ab6e5 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -693,13 +693,21 @@ func (r *Ruler) run(ctx context.Context) error { ringTickerChan = 
ringTicker.C } - r.syncRules(ctx, rulerSyncReasonInitial) + syncRuleErrMsg := func(syncRulesErr error) { + level.Error(r.logger).Log("msg", "failed to sync rules", "err", syncRulesErr) + } + + initialSyncErr := r.syncRules(ctx, rulerSyncReasonInitial) + if initialSyncErr != nil { + syncRuleErrMsg(initialSyncErr) + } for { + var syncRulesErr error select { case <-ctx.Done(): return nil case <-tick.C: - r.syncRules(ctx, rulerSyncReasonPeriodic) + syncRulesErr = r.syncRules(ctx, rulerSyncReasonPeriodic) case <-ringTickerChan: // We ignore the error because in case of error it will return an empty // replication set which we use to compare with the previous state. @@ -707,15 +715,18 @@ func (r *Ruler) run(ctx context.Context) error { if ring.HasReplicationSetChanged(ringLastState, currRingState) { ringLastState = currRingState - r.syncRules(ctx, rulerSyncReasonRingChange) + syncRulesErr = r.syncRules(ctx, rulerSyncReasonRingChange) } case err := <-r.subservicesWatcher.Chan(): return errors.Wrap(err, "ruler subservice failed") } + if syncRulesErr != nil { + syncRuleErrMsg(syncRulesErr) + } } } -func (r *Ruler) syncRules(ctx context.Context, reason string) { +func (r *Ruler) syncRules(ctx context.Context, reason string) error { level.Info(r.logger).Log("msg", "syncing rules", "reason", reason) r.rulerSync.WithLabelValues(reason).Inc() timer := prometheus.NewTimer(nil) @@ -727,12 +738,12 @@ func (r *Ruler) syncRules(ctx context.Context, reason string) { loadedConfigs, backupConfigs, err := r.loadRuleGroups(ctx) if err != nil { - return + return err } if ctx.Err() != nil { level.Info(r.logger).Log("msg", "context is canceled. not syncing rules") - return + return err } // This will also delete local group files for users that are no longer in 'configs' map. r.manager.SyncRuleGroups(ctx, loadedConfigs) @@ -740,6 +751,8 @@ func (r *Ruler) syncRules(ctx context.Context, reason string) { if r.cfg.RulesBackupEnabled() { r.manager.BackUpRuleGroups(ctx, backupConfigs) } + + return nil } func (r *Ruler) loadRuleGroups(ctx context.Context) (map[string]rulespb.RuleGroupList, map[string]rulespb.RuleGroupList, error) { diff --git a/pkg/ruler/ruler_ring.go b/pkg/ruler/ruler_ring.go index 215a711f022..da87bede3ff 100644 --- a/pkg/ruler/ruler_ring.go +++ b/pkg/ruler/ruler_ring.go @@ -38,12 +38,13 @@ var ListRuleRingOp = ring.NewOp([]ring.InstanceState{ring.ACTIVE, ring.LEAVING}, // is used to strip down the config to the minimum, and avoid confusion // to the user. 
type RingConfig struct { - KVStore kv.Config `yaml:"kvstore"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` - ReplicationFactor int `yaml:"replication_factor"` - ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` - TokensFilePath string `yaml:"tokens_file_path"` + KVStore kv.Config `yaml:"kvstore"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + ReplicationFactor int `yaml:"replication_factor"` + ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` + TokensFilePath string `yaml:"tokens_file_path"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` // Instance details InstanceID string `yaml:"instance_id" doc:"hidden"` @@ -77,6 +78,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.ReplicationFactor, "ruler.ring.replication-factor", 1, "EXPERIMENTAL: The replication factor to use when loading rule groups for API HA.") f.BoolVar(&cfg.ZoneAwarenessEnabled, "ruler.ring.zone-awareness-enabled", false, "EXPERIMENTAL: True to enable zone-awareness and load rule groups across different availability zones for API HA.") f.StringVar(&cfg.TokensFilePath, "ruler.ring.tokens-file-path", "", "EXPERIMENTAL: File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") + f.BoolVar(&cfg.DetailedMetricsEnabled, "ruler.ring.detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.") // Instance flags cfg.InstanceInterfaceNames = []string{"eth0", "en0"} @@ -119,6 +121,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.HeartbeatTimeout = cfg.HeartbeatTimeout rc.SubringCacheDisabled = true rc.ZoneAwarenessEnabled = cfg.ZoneAwarenessEnabled + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled // Each rule group is evaluated by *exactly* one ruler, but it can be loaded by multiple rulers for API HA rc.ReplicationFactor = cfg.ReplicationFactor diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index 538d7a0ac2f..c6a6b833b19 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -414,7 +414,7 @@ func TestNotifierSendsUserIDHeader(t *testing.T) { time.Sleep(10 * time.Millisecond) } n.Send(¬ifier.Alert{ - Labels: labels.Labels{labels.Label{Name: "alertname", Value: "testalert"}}, + Labels: labels.FromStrings("alertname", "testalert"), }) wg.Wait() @@ -450,7 +450,7 @@ func TestNotifierSendExternalLabels(t *testing.T) { cfg := defaultRulerConfig(t) cfg.AlertmanagerURL = ts.URL cfg.AlertmanagerDiscovery = false - cfg.ExternalLabels = []labels.Label{{Name: "region", Value: "us-east-1"}} + cfg.ExternalLabels = labels.FromStrings("region", "us-east-1") limits := &ruleLimits{} engine, queryable, pusher, logger, _, reg := testSetup(t, nil) metrics := NewRuleEvalMetrics(cfg, nil) @@ -481,12 +481,12 @@ func TestNotifierSendExternalLabels(t *testing.T) { }, { name: "local labels without overriding", - userExternalLabels: labels.FromStrings("mylabel", "local"), + userExternalLabels: []labels.Label{{Name: "mylabel", Value: "local"}}, expectedExternalLabels: []labels.Label{{Name: "region", Value: "us-east-1"}, {Name: "mylabel", Value: "local"}}, }, { name: "local labels that override globals", - userExternalLabels: labels.FromStrings("region", "cloud", "mylabel", "local"), + 
userExternalLabels: []labels.Label{{Name: "region", Value: "cloud"}, {Name: "mylabel", Value: "local"}}, expectedExternalLabels: []labels.Label{{Name: "region", Value: "cloud"}, {Name: "mylabel", Value: "local"}}, }, } @@ -494,7 +494,7 @@ func TestNotifierSendExternalLabels(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { - limits.setRulerExternalLabels(test.userExternalLabels) + limits.setRulerExternalLabels(labels.New(test.userExternalLabels...)) manager.SyncRuleGroups(context.Background(), map[string]rulespb.RuleGroupList{ userID: {&rulespb.RuleGroupDesc{Name: "group", Namespace: "ns", Interval: time.Minute, User: userID}}, }) @@ -506,7 +506,7 @@ func TestNotifierSendExternalLabels(t *testing.T) { }, 10*time.Second, 10*time.Millisecond) n.notifier.Send(¬ifier.Alert{ - Labels: labels.Labels{labels.Label{Name: "alertname", Value: "testalert"}}, + Labels: labels.FromStrings("alertname", "testalert"), }) select { case <-time.After(5 * time.Second): @@ -1342,7 +1342,8 @@ func TestGetRules(t *testing.T) { // Sync Rules forEachRuler(func(_ string, r *Ruler) { - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) }) if tc.sharding { @@ -1572,7 +1573,8 @@ func TestGetRulesFromBackup(t *testing.T) { // Sync Rules forEachRuler(func(_ string, r *Ruler) { - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) }) // update the State of the rulers in the ring based on tc.rulerStateMap @@ -1788,7 +1790,8 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) { // Sync Rules forEachRuler(func(_ string, r *Ruler) { - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) }) // update the State of the rulers in the ring based on tc.rulerStateMap @@ -1811,8 +1814,10 @@ func getRulesHATest(replicationFactor int) func(t *testing.T) { t.Errorf("ruler %s was not terminated with error %s", "ruler1", err.Error()) } - rulerAddrMap["ruler2"].syncRules(context.Background(), rulerSyncReasonPeriodic) - rulerAddrMap["ruler3"].syncRules(context.Background(), rulerSyncReasonPeriodic) + err = rulerAddrMap["ruler2"].syncRules(context.Background(), rulerSyncReasonPeriodic) + require.NoError(t, err) + err = rulerAddrMap["ruler3"].syncRules(context.Background(), rulerSyncReasonPeriodic) + require.NoError(t, err) requireGroupStateEqual := func(a *GroupStateDesc, b *GroupStateDesc) { require.Equal(t, a.Group.Interval, b.Group.Interval) @@ -2680,8 +2685,8 @@ func TestSendAlerts(t *testing.T) { { in: []*promRules.Alert{ { - Labels: []labels.Label{{Name: "l1", Value: "v1"}}, - Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + Labels: labels.FromStrings("l1", "v1"), + Annotations: labels.FromStrings("a2", "v2"), ActiveAt: time.Unix(1, 0), FiredAt: time.Unix(2, 0), ValidUntil: time.Unix(3, 0), @@ -2689,8 +2694,8 @@ func TestSendAlerts(t *testing.T) { }, exp: []*notifier.Alert{ { - Labels: []labels.Label{{Name: "l1", Value: "v1"}}, - Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + Labels: labels.FromStrings("l1", "v1"), + Annotations: labels.FromStrings("a2", "v2"), StartsAt: time.Unix(2, 0), EndsAt: time.Unix(3, 0), GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1", @@ -2700,8 +2705,8 @@ func TestSendAlerts(t *testing.T) { { in: []*promRules.Alert{ { - Labels: []labels.Label{{Name: 
"l1", Value: "v1"}}, - Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + Labels: labels.FromStrings("l1", "v1"), + Annotations: labels.FromStrings("a2", "v2"), ActiveAt: time.Unix(1, 0), FiredAt: time.Unix(2, 0), ResolvedAt: time.Unix(4, 0), @@ -2709,8 +2714,8 @@ func TestSendAlerts(t *testing.T) { }, exp: []*notifier.Alert{ { - Labels: []labels.Label{{Name: "l1", Value: "v1"}}, - Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + Labels: labels.FromStrings("l1", "v1"), + Annotations: labels.FromStrings("a2", "v2"), StartsAt: time.Unix(2, 0), EndsAt: time.Unix(4, 0), GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1", @@ -2800,7 +2805,8 @@ func TestRecoverAlertsPostOutage(t *testing.T) { evalFunc := func(ctx context.Context, g *promRules.Group, evalTimestamp time.Time) {} r, _ := buildRulerWithIterFunc(t, rulerCfg, &querier.TestConfig{Cfg: querierConfig, Distributor: d, Stores: queryables}, store, nil, evalFunc) - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) // assert initial state of rule group ruleGroup := r.manager.GetRules("user1")[0] @@ -3265,7 +3271,8 @@ func TestGetShardSizeForUser(t *testing.T) { // Sync Rules forEachRuler(func(_ string, r *Ruler) { - r.syncRules(context.Background(), rulerSyncReasonInitial) + err := r.syncRules(context.Background(), rulerSyncReasonInitial) + require.NoError(t, err) }) result := testRuler.getShardSizeForUser(tc.userID) diff --git a/pkg/storage/bucket/client_mock.go b/pkg/storage/bucket/client_mock.go index f323000db27..d641067ae05 100644 --- a/pkg/storage/bucket/client_mock.go +++ b/pkg/storage/bucket/client_mock.go @@ -5,6 +5,7 @@ import ( "context" "errors" "io" + "strings" "sync" "time" @@ -23,6 +24,10 @@ type ClientMock struct { uploaded sync.Map } +func (m *ClientMock) Provider() objstore.ObjProvider { + return objstore.FILESYSTEM +} + func (m *ClientMock) WithExpectedErrs(objstore.IsOpFailureExpectedFunc) objstore.Bucket { return m } @@ -32,16 +37,21 @@ func (m *ClientMock) ReaderWithExpectedErrs(objstore.IsOpFailureExpectedFunc) ob } // Upload mocks objstore.Bucket.Upload() -func (m *ClientMock) Upload(ctx context.Context, name string, r io.Reader) error { +func (m *ClientMock) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { if _, ok := m.uploaded.Load(name); ok { m.uploaded.Store(name, true) } - args := m.Called(ctx, name, r) - return args.Error(0) + if len(opts) > 0 { + args := m.Called(ctx, name, r, opts) + return args.Error(0) + } else { + args := m.Called(ctx, name, r) + return args.Error(0) + } } func (m *ClientMock) MockUpload(name string, err error) { - m.On("Upload", mock.Anything, name, mock.Anything).Return(err) + m.On("Upload", mock.Anything, name, mock.Anything, mock.Anything).Return(err) } // Delete mocks objstore.Bucket.Delete() @@ -73,6 +83,42 @@ func (m *ClientMock) Iter(ctx context.Context, dir string, f func(string) error, return args.Error(0) } +func (m *ClientMock) MockIterWithAttributes(prefix string, objects []string, err error, cb func()) { + m.On("IterWithAttributes", mock.Anything, prefix, mock.Anything, mock.Anything).Return(err).Run(func(args mock.Arguments) { + f := args.Get(2).(func(attrs objstore.IterObjectAttributes) error) + opts := args.Get(3).([]objstore.IterOption) + + // Determine if recursive flag is passed + params := objstore.ApplyIterOptions(opts...) 
+ recursive := params.Recursive + + for _, o := range objects { + // Check if object is under current prefix + if !strings.HasPrefix(o, prefix) { + continue + } + + // Extract the remaining path after prefix + suffix := strings.TrimPrefix(o, prefix) + + // If not recursive and there's a slash in the remaining path, skip it + if !recursive && strings.Contains(suffix, "/") { + continue + } + + attrs := objstore.IterObjectAttributes{ + Name: o, + } + if cb != nil { + cb() + } + if err := f(attrs); err != nil { + break + } + } + }) +} + // MockIter is a convenient method to mock Iter() func (m *ClientMock) MockIter(prefix string, objects []string, err error) { m.MockIterWithCallback(prefix, objects, err, nil) @@ -81,6 +127,7 @@ func (m *ClientMock) MockIter(prefix string, objects []string, err error) { // MockIterWithCallback is a convenient method to mock Iter() and get a callback called when the Iter // API is called. func (m *ClientMock) MockIterWithCallback(prefix string, objects []string, err error, cb func()) { + m.MockIterWithAttributes(prefix, objects, err, cb) m.On("Iter", mock.Anything, prefix, mock.Anything, mock.Anything).Return(err).Run(func(args mock.Arguments) { if cb != nil { cb() diff --git a/pkg/storage/bucket/prefixed_bucket_client.go b/pkg/storage/bucket/prefixed_bucket_client.go index ac3ca06ce30..1f979df3121 100644 --- a/pkg/storage/bucket/prefixed_bucket_client.go +++ b/pkg/storage/bucket/prefixed_bucket_client.go @@ -31,8 +31,8 @@ func (b *PrefixedBucketClient) Close() error { } // Upload the contents of the reader as an object into the bucket. -func (b *PrefixedBucketClient) Upload(ctx context.Context, name string, r io.Reader) (err error) { - err = b.bucket.Upload(ctx, b.fullName(name), r) +func (b *PrefixedBucketClient) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) (err error) { + err = b.bucket.Upload(ctx, b.fullName(name), r, opts...) return } @@ -44,9 +44,14 @@ func (b *PrefixedBucketClient) Delete(ctx context.Context, name string) error { } // Name returns the bucket name for the provider. func (b *PrefixedBucketClient) Name() string { return b.bucket.Name() } -// TODO(Sungjin1212): Implement if needed +// IterWithAttributes calls f for each entry in the given directory (not recursive). The argument to f is the object attributes +// including the prefix of the inspected directory. The configured prefix will be stripped +// before the supplied function is applied. func (b *PrefixedBucketClient) IterWithAttributes(ctx context.Context, dir string, f func(attrs objstore.IterObjectAttributes) error, options ...objstore.IterOption) error { - return b.bucket.IterWithAttributes(ctx, dir, f, options...) + return b.bucket.IterWithAttributes(ctx, b.fullName(dir), func(attrs objstore.IterObjectAttributes) error { + attrs.Name = strings.TrimPrefix(attrs.Name, b.prefix+objstore.DirDelim) + return f(attrs) + }, options...)
} func (b *PrefixedBucketClient) SupportedIterOptions() []objstore.IterOptionType { @@ -109,3 +114,7 @@ func (b *PrefixedBucketClient) WithExpectedErrs(fn objstore.IsOpFailureExpectedF } return b } + +func (b *PrefixedBucketClient) Provider() objstore.ObjProvider { + return b.bucket.Provider() +} diff --git a/pkg/storage/bucket/s3/bucket_client.go b/pkg/storage/bucket/s3/bucket_client.go index 220afb90256..8d3ed4a6367 100644 --- a/pkg/storage/bucket/s3/bucket_client.go +++ b/pkg/storage/bucket/s3/bucket_client.go @@ -119,6 +119,10 @@ type BucketWithRetries struct { retryMaxBackoff time.Duration } +func (b *BucketWithRetries) Provider() objstore.ObjProvider { + return b.bucket.Provider() +} + func (b *BucketWithRetries) retry(ctx context.Context, f func() error, operationInfo string) error { var lastErr error retries := backoff.New(ctx, backoff.Config{ @@ -191,12 +195,12 @@ func (b *BucketWithRetries) Exists(ctx context.Context, name string) (exists boo return } -func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader) error { +func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader, uploadOpts ...objstore.ObjectUploadOption) error { rs, ok := r.(io.ReadSeeker) if !ok { // Skip retry if incoming Reader is not seekable to avoid // loading entire content into memory - err := b.bucket.Upload(ctx, name, r) + err := b.bucket.Upload(ctx, name, r, uploadOpts...) if err != nil { level.Warn(b.logger).Log("msg", "skip upload retry as reader is not seekable", "file", name, "err", err) } @@ -206,7 +210,7 @@ func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader if _, err := rs.Seek(0, io.SeekStart); err != nil { return err } - return b.bucket.Upload(ctx, name, rs) + return b.bucket.Upload(ctx, name, rs, uploadOpts...) }, fmt.Sprintf("Upload %s", name)) } diff --git a/pkg/storage/bucket/s3/bucket_client_test.go b/pkg/storage/bucket/s3/bucket_client_test.go index ec757100a0b..50653d32665 100644 --- a/pkg/storage/bucket/s3/bucket_client_test.go +++ b/pkg/storage/bucket/s3/bucket_client_test.go @@ -184,8 +184,12 @@ type mockBucket struct { calledCount int } +func (m *mockBucket) Provider() objstore.ObjProvider { + return objstore.FILESYSTEM +} + // Upload mocks objstore.Bucket.Upload() -func (m *mockBucket) Upload(ctx context.Context, name string, r io.Reader) error { +func (m *mockBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { var buf bytes.Buffer if _, err := buf.ReadFrom(r); err != nil { return err diff --git a/pkg/storage/bucket/sse_bucket_client.go b/pkg/storage/bucket/sse_bucket_client.go index 873b74e74a8..1f645ab6577 100644 --- a/pkg/storage/bucket/sse_bucket_client.go +++ b/pkg/storage/bucket/sse_bucket_client.go @@ -51,7 +51,7 @@ func (b *SSEBucketClient) Close() error { } // Upload the contents of the reader as an object into the bucket. -func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader) error { +func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { if sse, err := b.getCustomS3SSEConfig(); err != nil { return err } else if sse != nil { @@ -60,7 +60,11 @@ func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader) ctx = s3.ContextWithSSEConfig(ctx, sse) } - return b.bucket.Upload(ctx, name, r) + return b.bucket.Upload(ctx, name, r, opts...) 
+} + +func (b *SSEBucketClient) Provider() objstore.ObjProvider { + return b.bucket.Provider() } // Delete implements objstore.Bucket. diff --git a/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go b/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go index 51a333c60c1..f942b7009a9 100644 --- a/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go +++ b/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go @@ -33,20 +33,20 @@ func NewBlockLister(logger log.Logger, bkt objstore.Bucket, userID string, cfgPr } } -func (f *BlockLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- ulid.ULID) (partialBlocks map[ulid.ULID]bool, err error) { +func (f *BlockLister) GetActiveAndPartialBlockIDs(ctx context.Context, activeBlocks chan<- block.ActiveBlockFetchData) (partialBlocks map[ulid.ULID]bool, err error) { // Fetch the bucket index. idx, err := ReadIndex(ctx, f.bkt, f.userID, f.cfgProvider, f.logger) if errors.Is(err, ErrIndexNotFound) { // This is a legit case happening when the first blocks of a tenant have recently been uploaded by ingesters // and their bucket index has not been created yet. // Fallback to BaseBlockIDsFetcher. - return f.baseLister.GetActiveAndPartialBlockIDs(ctx, ch) + return f.baseLister.GetActiveAndPartialBlockIDs(ctx, activeBlocks) } if errors.Is(err, ErrIndexCorrupted) { // In case a single tenant bucket index is corrupted, we want to return empty active blocks and partial blocks, skipping this compaction cycle level.Error(f.logger).Log("msg", "corrupted bucket index found", "user", f.userID, "err", err) // Fallback to BaseBlockIDsFetcher. - return f.baseLister.GetActiveAndPartialBlockIDs(ctx, ch) + return f.baseLister.GetActiveAndPartialBlockIDs(ctx, activeBlocks) } if errors.Is(err, bucket.ErrCustomerManagedKeyAccessDenied) { @@ -73,7 +73,7 @@ func (f *BlockLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- select { case <-ctx.Done(): return nil, ctx.Err() - case ch <- b.ID: + case activeBlocks <- block.ActiveBlockFetchData{ULID: b.ID}: } } return nil, nil diff --git a/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go b/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go index c3673d287ee..04c807f6d9d 100644 --- a/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go +++ b/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go @@ -13,6 +13,7 @@ import ( "github.com/go-kit/log" "github.com/oklog/ulid/v2" "github.com/stretchr/testify/require" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" @@ -44,14 +45,14 @@ func TestBlockIDsFetcher_Fetch(t *testing.T) { })) blockIdsFetcher := NewBlockLister(logger, bkt, userID, nil) - ch := make(chan ulid.ULID) + ch := make(chan block.ActiveBlockFetchData) var wg sync.WaitGroup var blockIds []ulid.ULID wg.Add(1) go func() { defer wg.Done() for id := range ch { - blockIds = append(blockIds, id) + blockIds = append(blockIds, id.ULID) } }() _, err := blockIdsFetcher.GetActiveAndPartialBlockIDs(ctx, ch) @@ -96,14 +97,14 @@ func TestBlockIDsFetcherFetcher_Fetch_NoBucketIndex(t *testing.T) { require.NoError(t, bkt.Upload(ctx, path.Join(userID, mark.ID.String(), metadata.DeletionMarkFilename), &buf)) } blockIdsFetcher := NewBlockLister(logger, bkt, userID, nil) - ch := make(chan ulid.ULID) + ch := make(chan block.ActiveBlockFetchData) var wg sync.WaitGroup var blockIds []ulid.ULID wg.Add(1) go func() { defer wg.Done() for id := range ch { - blockIds = append(blockIds, id) + blockIds =
append(blockIds, id.ULID) } }() _, err := blockIdsFetcher.GetActiveAndPartialBlockIDs(ctx, ch) diff --git a/pkg/storage/tsdb/bucketindex/markers_bucket_client.go b/pkg/storage/tsdb/bucketindex/markers_bucket_client.go index e2271cc3939..1773db2a680 100644 --- a/pkg/storage/tsdb/bucketindex/markers_bucket_client.go +++ b/pkg/storage/tsdb/bucketindex/markers_bucket_client.go @@ -24,11 +24,15 @@ func BucketWithGlobalMarkers(b objstore.InstrumentedBucket) objstore.Instrumente } } +func (b *globalMarkersBucket) Provider() objstore.ObjProvider { + return b.parent.Provider() +} + // Upload implements objstore.Bucket. -func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Reader) error { +func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { globalMarkPath, ok := b.isMark(name) if !ok { - return b.parent.Upload(ctx, name, r) + return b.parent.Upload(ctx, name, r, opts...) } // Read the marker. @@ -38,12 +42,12 @@ func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Read } // Upload it to the global marker's location. - if err := b.parent.Upload(ctx, globalMarkPath, bytes.NewReader(body)); err != nil { + if err := b.parent.Upload(ctx, globalMarkPath, bytes.NewReader(body), opts...); err != nil { return err } // Upload it to the original location too. - return b.parent.Upload(ctx, name, bytes.NewReader(body)) + return b.parent.Upload(ctx, name, bytes.NewReader(body), opts...) } // Delete implements objstore.Bucket. diff --git a/pkg/storage/tsdb/cached_chunks_querier.go b/pkg/storage/tsdb/cached_chunks_querier.go index e5b230e64be..ab3b11c4fd0 100644 --- a/pkg/storage/tsdb/cached_chunks_querier.go +++ b/pkg/storage/tsdb/cached_chunks_querier.go @@ -61,7 +61,7 @@ func newBlockBaseQuerier(b prom_tsdb.BlockReader, mint, maxt int64) (*blockBaseQ } func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - res, err := q.index.SortedLabelValues(ctx, name, matchers...) + res, err := q.index.SortedLabelValues(ctx, name, hints, matchers...) 
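+	// The hints (e.g. a caller-set storage.LabelHints.Limit) are now forwarded to
+	// the index reader instead of being silently ignored, letting it bound how
+	// many values it materialises before the result is returned below.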
return res, nil, err } diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index 7b92fe0d887..cc1be08b13b 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -285,7 +285,7 @@ type BucketStoreConfig struct { IndexCache IndexCacheConfig `yaml:"index_cache"` ChunksCache ChunksCacheConfig `yaml:"chunks_cache"` MetadataCache MetadataCacheConfig `yaml:"metadata_cache"` - ParquetLabelsCache ParquetLabelsCacheConfig `yaml:"parquet_labels_cache" doc:"hidden"` + ParquetLabelsCache ParquetLabelsCacheConfig `yaml:"parquet_labels_cache"` MatchersCacheMaxItems int `yaml:"matchers_cache_max_items"` IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"` IgnoreBlocksWithin time.Duration `yaml:"ignore_blocks_within"` diff --git a/pkg/storage/tsdb/testutil/objstore.go b/pkg/storage/tsdb/testutil/objstore.go index d879ab2bb42..c2ad987f5c7 100644 --- a/pkg/storage/tsdb/testutil/objstore.go +++ b/pkg/storage/tsdb/testutil/objstore.go @@ -79,7 +79,7 @@ func (m *MockBucketFailure) Get(ctx context.Context, name string) (io.ReadCloser return m.Bucket.Get(ctx, name) } -func (m *MockBucketFailure) Upload(ctx context.Context, name string, r io.Reader) error { +func (m *MockBucketFailure) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error { m.UploadCalls.Add(1) for prefix, err := range m.UploadFailures { if strings.HasPrefix(name, prefix) { @@ -90,7 +90,7 @@ func (m *MockBucketFailure) Upload(ctx context.Context, name string, r io.Reader return e } - return m.Bucket.Upload(ctx, name, r) + return m.Bucket.Upload(ctx, name, r, opts...) } func (m *MockBucketFailure) WithExpectedErrs(expectedFunc objstore.IsOpFailureExpectedFunc) objstore.Bucket { diff --git a/pkg/storegateway/bucket_index_metadata_fetcher_test.go b/pkg/storegateway/bucket_index_metadata_fetcher_test.go index 9a7f7dd562a..8bd23eaa44a 100644 --- a/pkg/storegateway/bucket_index_metadata_fetcher_test.go +++ b/pkg/storegateway/bucket_index_metadata_fetcher_test.go @@ -86,6 +86,7 @@ func TestBucketIndexMetadataFetcher_Fetch(t *testing.T) { blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 @@ -134,6 +135,7 @@ func TestBucketIndexMetadataFetcher_Fetch_KeyPermissionDenied(t *testing.T) { blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 # HELP blocks_meta_syncs_total Total blocks metadata synchronization attempts @@ -185,6 +187,7 @@ func TestBucketIndexMetadataFetcher_Fetch_NoBucketIndex(t *testing.T) { blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 1 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 @@ -240,6 +243,7 @@ func TestBucketIndexMetadataFetcher_Fetch_CorruptedBucketIndex(t *testing.T) { blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 
blocks_meta_synced{state="too-fresh"} 0 @@ -287,6 +291,7 @@ func TestBucketIndexMetadataFetcher_Fetch_ShouldResetGaugeMetrics(t *testing.T) blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 `), "blocks_meta_synced")) @@ -311,6 +316,7 @@ func TestBucketIndexMetadataFetcher_Fetch_ShouldResetGaugeMetrics(t *testing.T) blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 1 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 `), "blocks_meta_synced")) @@ -343,6 +349,7 @@ func TestBucketIndexMetadataFetcher_Fetch_ShouldResetGaugeMetrics(t *testing.T) blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 `), "blocks_meta_synced")) @@ -369,6 +376,7 @@ func TestBucketIndexMetadataFetcher_Fetch_ShouldResetGaugeMetrics(t *testing.T) blocks_meta_synced{state="marked-for-no-compact"} 0 blocks_meta_synced{state="no-bucket-index"} 0 blocks_meta_synced{state="no-meta-json"} 0 + blocks_meta_synced{state="parquet-migrated"} 0 blocks_meta_synced{state="time-excluded"} 0 blocks_meta_synced{state="too-fresh"} 0 `), "blocks_meta_synced")) diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 69c018ccfa4..674a2bae27b 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -659,6 +659,7 @@ func TestBucketStores_SyncBlocksWithIgnoreBlocksBefore(t *testing.T) { cortex_blocks_meta_synced{state="marked-for-deletion"} 0 cortex_blocks_meta_synced{state="marked-for-no-compact"} 0 cortex_blocks_meta_synced{state="no-meta-json"} 0 + cortex_blocks_meta_synced{state="parquet-migrated"} 0 cortex_blocks_meta_synced{state="time-excluded"} 1 cortex_blocks_meta_synced{state="too-fresh"} 0 # HELP cortex_blocks_meta_syncs_total Total blocks metadata synchronization attempts @@ -701,7 +702,7 @@ func generateStorageBlock(t *testing.T, storageDir, userID string, metricName st require.NoError(t, db.Close()) }() - series := labels.Labels{labels.Label{Name: labels.MetricName, Value: metricName}} + series := labels.FromStrings(labels.MetricName, metricName) app := db.Appender(context.Background()) for ts := minT; ts < maxT; ts += int64(step) { diff --git a/pkg/storegateway/gateway_ring.go b/pkg/storegateway/gateway_ring.go index fc39f80b42e..798d1221a2c 100644 --- a/pkg/storegateway/gateway_ring.go +++ b/pkg/storegateway/gateway_ring.go @@ -68,6 +68,7 @@ type RingConfig struct { ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` KeepInstanceInTheRingOnShutdown bool `yaml:"keep_instance_in_the_ring_on_shutdown"` ZoneStableShuffleSharding bool `yaml:"zone_stable_shuffle_sharding" doc:"hidden"` + DetailedMetricsEnabled bool `yaml:"detailed_metrics_enabled"` // Wait ring stability. 
WaitStabilityMinDuration time.Duration `yaml:"wait_stability_min_duration"` @@ -107,6 +108,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ZoneAwarenessEnabled, ringFlagsPrefix+"zone-awareness-enabled", false, "True to enable zone-awareness and replicate blocks across different availability zones.") f.BoolVar(&cfg.KeepInstanceInTheRingOnShutdown, ringFlagsPrefix+"keep-instance-in-the-ring-on-shutdown", false, "True to keep the store gateway instance in the ring when it shuts down. The instance will then be auto-forgotten from the ring after 10*heartbeat_timeout.") f.BoolVar(&cfg.ZoneStableShuffleSharding, ringFlagsPrefix+"zone-stable-shuffle-sharding", true, "If true, use zone stable shuffle sharding algorithm. Otherwise, use the default shuffle sharding algorithm.") + f.BoolVar(&cfg.DetailedMetricsEnabled, ringFlagsPrefix+"detailed-metrics-enabled", true, "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.") // Wait stability flags. f.DurationVar(&cfg.WaitStabilityMinDuration, ringFlagsPrefix+"wait-stability-min-duration", time.Minute, "Minimum time to wait for ring stability at startup. 0 to disable.") @@ -138,6 +140,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.ReplicationFactor = cfg.ReplicationFactor rc.ZoneAwarenessEnabled = cfg.ZoneAwarenessEnabled rc.SubringCacheDisabled = true + rc.DetailedMetricsEnabled = cfg.DetailedMetricsEnabled return rc } diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index 57bccae5fe3..b9070c236e7 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -1299,7 +1299,7 @@ func mockTSDB(t *testing.T, dir string, numSeries, numBlocks int, minT, maxT int step := (maxT - minT) / int64(numSeries) ctx := context.Background() addSample := func(i int) { - lbls := labels.Labels{labels.Label{Name: "series_id", Value: strconv.Itoa(i)}} + lbls := labels.FromStrings("series_id", strconv.Itoa(i)) app := db.Appender(ctx) _, err := app.Append(0, lbls, minT+(step*int64(i)), float64(i)) diff --git a/pkg/util/grpcutil/grpc_interceptors_test.go b/pkg/util/grpcutil/grpc_interceptors_test.go index 6a0011c9a90..81788d22d7d 100644 --- a/pkg/util/grpcutil/grpc_interceptors_test.go +++ b/pkg/util/grpcutil/grpc_interceptors_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc/metadata" - util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) func TestHTTPHeaderPropagationClientInterceptor(t *testing.T) { @@ -18,14 +18,14 @@ func TestHTTPHeaderPropagationClientInterceptor(t *testing.T) { contentsMap["TestHeader1"] = "RequestID" contentsMap["TestHeader2"] = "ContentsOfTestHeader2" contentsMap["Test3"] = "SomeInformation" - ctx = util_log.ContextWithHeaderMap(ctx, contentsMap) + ctx = requestmeta.ContextWithRequestMetadataMap(ctx, contentsMap) - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = injectForwardedRequestMetadata(ctx) md, ok := metadata.FromOutgoingContext(ctx) require.True(t, ok) - headers := md[util_log.HeaderPropagationStringForRequestLogging] + headers := md[requestmeta.PropagationStringForRequestMetadata] assert.Equal(t, 6, len(headers)) assert.Contains(t, headers, "TestHeader1") assert.Contains(t, headers, "TestHeader2") @@ -37,20 +37,20 @@ func TestHTTPHeaderPropagationClientInterceptor(t *testing.T) { 
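+// The header-forwarding helpers moved from pkg/util/log into the dedicated
+// pkg/util/requestmeta package; the assertions below exercise the renamed
+// context-map and gRPC-metadata round-trip functions.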
func TestExistingValuesInMetadataForHTTPPropagationClientInterceptor(t *testing.T) { ctx := context.Background() - ctx = metadata.AppendToOutgoingContext(ctx, util_log.HeaderPropagationStringForRequestLogging, "testabc123") + ctx = metadata.AppendToOutgoingContext(ctx, requestmeta.PropagationStringForRequestMetadata, "testabc123") contentsMap := make(map[string]string) contentsMap["TestHeader1"] = "RequestID" contentsMap["TestHeader2"] = "ContentsOfTestHeader2" contentsMap["Test3"] = "SomeInformation" - ctx = util_log.ContextWithHeaderMap(ctx, contentsMap) + ctx = requestmeta.ContextWithRequestMetadataMap(ctx, contentsMap) - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = injectForwardedRequestMetadata(ctx) md, ok := metadata.FromOutgoingContext(ctx) require.True(t, ok) - contents := md[util_log.HeaderPropagationStringForRequestLogging] + contents := md[requestmeta.PropagationStringForRequestMetadata] assert.Contains(t, contents, "testabc123") assert.Equal(t, 1, len(contents)) } @@ -63,14 +63,14 @@ func TestGRPCHeaderInjectionForHTTPPropagationServerInterceptor(t *testing.T) { testMap["TestHeader2"] = "Results2" ctx = metadata.NewOutgoingContext(ctx, nil) - ctx = util_log.ContextWithHeaderMap(ctx, testMap) - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = requestmeta.ContextWithRequestMetadataMap(ctx, testMap) + ctx = injectForwardedRequestMetadata(ctx) md, ok := metadata.FromOutgoingContext(ctx) require.True(t, ok) - ctx = util_log.ContextWithHeaderMapFromMetadata(ctx, md) + ctx = requestmeta.ContextWithRequestMetadataMapFromMetadata(ctx, md) - headersMap := util_log.HeaderMapFromContext(ctx) + headersMap := requestmeta.MapFromContext(ctx) require.NotNil(t, headersMap) assert.Equal(t, 2, len(headersMap)) @@ -82,11 +82,11 @@ func TestGRPCHeaderInjectionForHTTPPropagationServerInterceptor(t *testing.T) { func TestGRPCHeaderDifferentLengthsForHTTPPropagationServerInterceptor(t *testing.T) { ctx := context.Background() - ctx = metadata.AppendToOutgoingContext(ctx, util_log.HeaderPropagationStringForRequestLogging, "Test123") - ctx = metadata.AppendToOutgoingContext(ctx, util_log.HeaderPropagationStringForRequestLogging, "Results") - ctx = metadata.AppendToOutgoingContext(ctx, util_log.HeaderPropagationStringForRequestLogging, "Results2") + ctx = metadata.AppendToOutgoingContext(ctx, requestmeta.PropagationStringForRequestMetadata, "Test123") + ctx = metadata.AppendToOutgoingContext(ctx, requestmeta.PropagationStringForRequestMetadata, "Results") + ctx = metadata.AppendToOutgoingContext(ctx, requestmeta.PropagationStringForRequestMetadata, "Results2") - ctx = extractForwardedHeadersFromMetadata(ctx) + ctx = extractForwardedRequestMetadataFromMetadata(ctx) - assert.Nil(t, util_log.HeaderMapFromContext(ctx)) + assert.Nil(t, requestmeta.MapFromContext(ctx)) } diff --git a/pkg/util/grpcutil/health_check.go b/pkg/util/grpcutil/health_check.go index e6883447fb6..b37ee5dd854 100644 --- a/pkg/util/grpcutil/health_check.go +++ b/pkg/util/grpcutil/health_check.go @@ -32,19 +32,6 @@ func (h *HealthCheck) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequ return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil } -func (h *HealthCheck) List(ctx context.Context, request *grpc_health_v1.HealthListRequest) (*grpc_health_v1.HealthListResponse, error) { - checkResp, err := h.Check(ctx, nil) - if err != nil { - return &grpc_health_v1.HealthListResponse{}, err - } - - return &grpc_health_v1.HealthListResponse{ - Statuses: 
map[string]*grpc_health_v1.HealthCheckResponse{ - "server": checkResp, - }, - }, nil -} - // Watch implements the grpc healthcheck. func (h *HealthCheck) Watch(_ *grpc_health_v1.HealthCheckRequest, _ grpc_health_v1.Health_WatchServer) error { return status.Error(codes.Unimplemented, "Watching is not supported") diff --git a/pkg/util/grpcutil/util.go b/pkg/util/grpcutil/util.go index 8da1c6916e7..41ab05a350b 100644 --- a/pkg/util/grpcutil/util.go +++ b/pkg/util/grpcutil/util.go @@ -8,7 +8,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" - util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/requestmeta" ) type wrappedServerStream struct { @@ -34,49 +34,50 @@ func IsGRPCContextCanceled(err error) bool { // HTTPHeaderPropagationServerInterceptor allows for propagation of HTTP Request headers across gRPC calls - works // alongside HTTPHeaderPropagationClientInterceptor func HTTPHeaderPropagationServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - ctx = extractForwardedHeadersFromMetadata(ctx) + ctx = extractForwardedRequestMetadataFromMetadata(ctx) h, err := handler(ctx, req) return h, err } // HTTPHeaderPropagationStreamServerInterceptor does the same as HTTPHeaderPropagationServerInterceptor but for streams func HTTPHeaderPropagationStreamServerInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + ctx := extractForwardedRequestMetadataFromMetadata(ss.Context()) return handler(srv, wrappedServerStream{ - ctx: extractForwardedHeadersFromMetadata(ss.Context()), + ctx: ctx, ServerStream: ss, }) } -// extractForwardedHeadersFromMetadata implements HTTPHeaderPropagationServerInterceptor by placing forwarded +// extractForwardedRequestMetadataFromMetadata implements HTTPHeaderPropagationServerInterceptor by placing forwarded // headers into incoming context -func extractForwardedHeadersFromMetadata(ctx context.Context) context.Context { +func extractForwardedRequestMetadataFromMetadata(ctx context.Context) context.Context { md, ok := metadata.FromIncomingContext(ctx) if !ok { return ctx } - return util_log.ContextWithHeaderMapFromMetadata(ctx, md) + return requestmeta.ContextWithRequestMetadataMapFromMetadata(ctx, md) } // HTTPHeaderPropagationClientInterceptor allows for propagation of HTTP Request headers across gRPC calls - works // alongside HTTPHeaderPropagationServerInterceptor func HTTPHeaderPropagationClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = injectForwardedRequestMetadata(ctx) return invoker(ctx, method, req, reply, cc, opts...) } // HTTPHeaderPropagationStreamClientInterceptor does the same as HTTPHeaderPropagationClientInterceptor but for streams func HTTPHeaderPropagationStreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - ctx = injectForwardedHeadersIntoMetadata(ctx) + ctx = injectForwardedRequestMetadata(ctx) return streamer(ctx, desc, cc, method, opts...) 
} -// injectForwardedHeadersIntoMetadata implements HTTPHeaderPropagationClientInterceptor and HTTPHeaderPropagationStreamClientInterceptor +// injectForwardedRequestMetadata implements HTTPHeaderPropagationClientInterceptor and HTTPHeaderPropagationStreamClientInterceptor // by inserting headers that are supposed to be forwarded into metadata of the request -func injectForwardedHeadersIntoMetadata(ctx context.Context) context.Context { - headerMap := util_log.HeaderMapFromContext(ctx) - if headerMap == nil { +func injectForwardedRequestMetadata(ctx context.Context) context.Context { + requestMetadataMap := requestmeta.MapFromContext(ctx) + if requestMetadataMap == nil { return ctx } md, ok := metadata.FromOutgoingContext(ctx) @@ -85,13 +86,13 @@ func injectForwardedHeadersIntoMetadata(ctx context.Context) context.Context { } newCtx := ctx - if _, ok := md[util_log.HeaderPropagationStringForRequestLogging]; !ok { + if _, ok := md[requestmeta.PropagationStringForRequestMetadata]; !ok { var mdContent []string - for header, content := range headerMap { - mdContent = append(mdContent, header, content) + for requestMetadata, content := range requestMetadataMap { + mdContent = append(mdContent, requestMetadata, content) } md = md.Copy() - md[util_log.HeaderPropagationStringForRequestLogging] = mdContent + md[requestmeta.PropagationStringForRequestMetadata] = mdContent newCtx = metadata.NewOutgoingContext(ctx, md) } return newCtx diff --git a/pkg/util/labels.go b/pkg/util/labels.go index c1bc12653f7..2e78a0aa905 100644 --- a/pkg/util/labels.go +++ b/pkg/util/labels.go @@ -10,10 +10,10 @@ import ( // LabelsToMetric converts a Labels to Metric // Don't do this on any performance sensitive paths. func LabelsToMetric(ls labels.Labels) model.Metric { - m := make(model.Metric, len(ls)) - for _, l := range ls { + m := make(model.Metric, ls.Len()) + ls.Range(func(l labels.Label) { m[model.LabelName(l.Name)] = model.LabelValue(l.Value) - } + }) return m } diff --git a/pkg/util/log/log.go b/pkg/util/log/log.go index 1db95b0b074..79b93b3c576 100644 --- a/pkg/util/log/log.go +++ b/pkg/util/log/log.go @@ -1,9 +1,7 @@ package log import ( - "context" "fmt" - "net/http" "os" "github.com/go-kit/log" @@ -12,15 +10,6 @@ import ( "github.com/prometheus/common/promslog" "github.com/weaveworks/common/logging" "github.com/weaveworks/common/server" - "google.golang.org/grpc/metadata" -) - -type contextKey int - -const ( - headerMapContextKey contextKey = 0 - - HeaderPropagationStringForRequestLogging string = "x-http-header-forwarding-logging" ) var ( @@ -126,36 +115,3 @@ func CheckFatal(location string, err error) { os.Exit(1) } } - -func HeaderMapFromContext(ctx context.Context) map[string]string { - headerMap, ok := ctx.Value(headerMapContextKey).(map[string]string) - if !ok { - return nil - } - return headerMap -} - -func ContextWithHeaderMap(ctx context.Context, headerMap map[string]string) context.Context { - return context.WithValue(ctx, headerMapContextKey, headerMap) -} - -// InjectHeadersIntoHTTPRequest injects the logging header map from the context into the request headers. 
-func InjectHeadersIntoHTTPRequest(headerMap map[string]string, request *http.Request) { - for header, contents := range headerMap { - request.Header.Add(header, contents) - } -} - -func ContextWithHeaderMapFromMetadata(ctx context.Context, md metadata.MD) context.Context { - headersSlice, ok := md[HeaderPropagationStringForRequestLogging] - if !ok || len(headersSlice)%2 == 1 { - return ctx - } - - headerMap := make(map[string]string) - for i := 0; i < len(headersSlice); i += 2 { - headerMap[headersSlice[i]] = headersSlice[i+1] - } - - return ContextWithHeaderMap(ctx, headerMap) -} diff --git a/pkg/util/log/log_test.go b/pkg/util/log/log_test.go index 0401d4ce086..cb4700afac8 100644 --- a/pkg/util/log/log_test.go +++ b/pkg/util/log/log_test.go @@ -1,73 +1,15 @@ package log import ( - "context" "io" - "net/http" "os" "testing" "github.com/go-kit/log/level" "github.com/stretchr/testify/require" "github.com/weaveworks/common/server" - "google.golang.org/grpc/metadata" ) -func TestHeaderMapFromMetadata(t *testing.T) { - md := metadata.New(nil) - md.Append(HeaderPropagationStringForRequestLogging, "TestHeader1", "SomeInformation", "TestHeader2", "ContentsOfTestHeader2") - - ctx := context.Background() - - ctx = ContextWithHeaderMapFromMetadata(ctx, md) - - headerMap := HeaderMapFromContext(ctx) - - require.Contains(t, headerMap, "TestHeader1") - require.Contains(t, headerMap, "TestHeader2") - require.Equal(t, "SomeInformation", headerMap["TestHeader1"]) - require.Equal(t, "ContentsOfTestHeader2", headerMap["TestHeader2"]) -} - -func TestHeaderMapFromMetadataWithImproperLength(t *testing.T) { - md := metadata.New(nil) - md.Append(HeaderPropagationStringForRequestLogging, "TestHeader1", "SomeInformation", "TestHeader2", "ContentsOfTestHeader2", "Test3") - - ctx := context.Background() - - ctx = ContextWithHeaderMapFromMetadata(ctx, md) - - headerMap := HeaderMapFromContext(ctx) - require.Nil(t, headerMap) -} - -func TestInjectHeadersIntoHTTPRequest(t *testing.T) { - contentsMap := make(map[string]string) - contentsMap["TestHeader1"] = "RequestID" - contentsMap["TestHeader2"] = "ContentsOfTestHeader2" - - h := http.Header{} - req := &http.Request{ - Method: "GET", - RequestURI: "/HTTPHeaderTest", - Body: http.NoBody, - Header: h, - } - InjectHeadersIntoHTTPRequest(contentsMap, req) - - header1 := req.Header.Values("TestHeader1") - header2 := req.Header.Values("TestHeader2") - - require.NotNil(t, header1) - require.NotNil(t, header2) - require.Equal(t, 1, len(header1)) - require.Equal(t, 1, len(header2)) - - require.Equal(t, "RequestID", header1[0]) - require.Equal(t, "ContentsOfTestHeader2", header2[0]) - -} - func TestInitLogger(t *testing.T) { stderr := os.Stderr r, w, err := os.Pipe() @@ -85,8 +27,8 @@ func TestInitLogger(t *testing.T) { require.NoError(t, w.Close()) logs, err := io.ReadAll(r) require.NoError(t, err) - require.Contains(t, string(logs), "caller=log_test.go:82 level=debug hello=world") - require.Contains(t, string(logs), "caller=log_test.go:83 level=debug msg=\"hello world\"") + require.Contains(t, string(logs), "caller=log_test.go:24 level=debug hello=world") + require.Contains(t, string(logs), "caller=log_test.go:25 level=debug msg=\"hello world\"") } func BenchmarkDisallowedLogLevels(b *testing.B) { diff --git a/pkg/util/log/wrappers.go b/pkg/util/log/wrappers.go index 1394b7b0b7b..9a706a570e5 100644 --- a/pkg/util/log/wrappers.go +++ b/pkg/util/log/wrappers.go @@ -9,6 +9,7 @@ import ( "go.opentelemetry.io/otel/trace" "github.com/cortexproject/cortex/pkg/tenant" + 
"github.com/cortexproject/cortex/pkg/util/requestmeta" ) // WithUserID returns a Logger that has information about the current user in @@ -64,7 +65,7 @@ func WithSourceIPs(sourceIPs string, l log.Logger) log.Logger { // HeadersFromContext enables the logging of specified HTTP Headers that have been added to a context func HeadersFromContext(ctx context.Context, l log.Logger) log.Logger { - headerContentsMap := HeaderMapFromContext(ctx) + headerContentsMap := requestmeta.LoggingHeadersAndRequestIdFromContext(ctx) for header, contents := range headerContentsMap { l = log.With(l, header, contents) } diff --git a/pkg/util/metrics_helper.go b/pkg/util/metrics_helper.go index 0a823920fdc..e5f9e7fb76b 100644 --- a/pkg/util/metrics_helper.go +++ b/pkg/util/metrics_helper.go @@ -723,7 +723,7 @@ func (r *UserRegistries) BuildMetricFamiliesPerUser() MetricFamiliesPerUser { // FromLabelPairsToLabels converts dto.LabelPair into labels.Labels. func FromLabelPairsToLabels(pairs []*dto.LabelPair) labels.Labels { - builder := labels.NewBuilder(nil) + builder := labels.NewBuilder(labels.EmptyLabels()) for _, pair := range pairs { builder.Set(pair.GetName(), pair.GetValue()) } @@ -770,7 +770,7 @@ func GetLabels(c prometheus.Collector, filter map[string]string) ([]labels.Label errs := tsdb_errors.NewMulti() var result []labels.Labels dtoMetric := &dto.Metric{} - lbls := labels.NewBuilder(nil) + lbls := labels.NewBuilder(labels.EmptyLabels()) nextMetric: for m := range ch { @@ -781,7 +781,7 @@ nextMetric: continue } - lbls.Reset(nil) + lbls.Reset(labels.EmptyLabels()) for _, lp := range dtoMetric.Label { n := lp.GetName() v := lp.GetValue() diff --git a/pkg/util/push/otlp.go b/pkg/util/push/otlp.go index e328f1ae712..cd0d3059ab2 100644 --- a/pkg/util/push/otlp.go +++ b/pkg/util/push/otlp.go @@ -10,6 +10,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite" @@ -66,7 +67,7 @@ func OTLPHandler(maxRecvMsgSize int, overrides *validation.Overrides, cfg distri // otlp to prompb TimeSeries promTsList, promMetadata, err := convertToPromTS(r.Context(), req.Metrics(), cfg, overrides, userID, logger) - if err != nil { + if err != nil && len(promTsList) == 0 { http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -177,8 +178,10 @@ func decodeOTLPWriteRequest(ctx context.Context, r *http.Request, maxSize int) ( func convertToPromTS(ctx context.Context, pmetrics pmetric.Metrics, cfg distributor.OTLPConfig, overrides *validation.Overrides, userID string, logger log.Logger) ([]prompb.TimeSeries, []prompb.MetricMetadata, error) { promConverter := prometheusremotewrite.NewPrometheusConverter() settings := prometheusremotewrite.Settings{ - AddMetricSuffixes: true, - DisableTargetInfo: cfg.DisableTargetInfo, + AddMetricSuffixes: true, + DisableTargetInfo: cfg.DisableTargetInfo, + AllowDeltaTemporality: cfg.AllowDeltaTemporality, + EnableTypeAndUnitLabels: cfg.EnableTypeAndUnitLabels, } var annots annotations.Annotations @@ -187,7 +190,9 @@ func convertToPromTS(ctx context.Context, pmetrics pmetric.Metrics, cfg distribu if cfg.ConvertAllAttributes { annots, err = promConverter.FromMetrics(ctx, convertToMetricsAttributes(pmetrics), settings) } else { - settings.PromoteResourceAttributes = overrides.PromoteResourceAttributes(userID) + settings.PromoteResourceAttributes = 
prometheusremotewrite.NewPromoteResourceAttributes(config.OTLPConfig{ + PromoteResourceAttributes: overrides.PromoteResourceAttributes(userID), + }) annots, err = promConverter.FromMetrics(ctx, pmetrics, settings) } @@ -197,19 +202,18 @@ func convertToPromTS(ctx context.Context, pmetrics pmetric.Metrics, cfg distribu } if err != nil { - level.Error(logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) - return nil, nil, err + level.Warn(logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) } - return promConverter.TimeSeries(), promConverter.Metadata(), nil + return promConverter.TimeSeries(), promConverter.Metadata(), err } func makeLabels(in []prompb.Label) []cortexpb.LabelAdapter { - out := make(labels.Labels, 0, len(in)) + builder := labels.NewBuilder(labels.EmptyLabels()) for _, l := range in { - out = append(out, labels.Label{Name: l.Name, Value: l.Value}) + builder.Set(l.Name, l.Value) } - return cortexpb.FromLabelsToLabelAdapters(out) + return cortexpb.FromLabelsToLabelAdapters(builder.Labels()) } func makeSamples(in []prompb.Sample) []cortexpb.Sample { diff --git a/pkg/util/push/otlp_test.go b/pkg/util/push/otlp_test.go index de3b780e095..c02b98d68ef 100644 --- a/pkg/util/push/otlp_test.go +++ b/pkg/util/push/otlp_test.go @@ -4,13 +4,16 @@ import ( "bytes" "compress/gzip" "context" + "fmt" "io" "net/http" "net/http/httptest" + "sort" "testing" "time" "github.com/go-kit/log" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -25,6 +28,325 @@ import ( "github.com/cortexproject/cortex/pkg/util/validation" ) +func TestOTLP_EnableTypeAndUnitLabels(t *testing.T) { + logger := log.NewNopLogger() + ctx := context.Background() + ts := time.Now() + + tests := []struct { + description string + enableTypeAndUnitLabels bool + allowDeltaTemporality bool + otlpSeries pmetric.Metric + expectedLabels labels.Labels + expectedMetadata prompb.MetricMetadata + }{ + { + description: "[enableTypeAndUnitLabels: true], the '__type__' label should be attached when the type is the gauge", + enableTypeAndUnitLabels: true, + otlpSeries: createOtelSum("test", "seconds", pmetric.AggregationTemporalityCumulative, ts), + expectedLabels: labels.FromMap(map[string]string{ + "__name__": "test_seconds", + "__type__": "gauge", + "__unit__": "seconds", + "test_label": "test_value", + }), + expectedMetadata: createPromMetadata("test_seconds", "seconds", prompb.MetricMetadata_GAUGE), + }, + { + description: "[enableTypeAndUnitLabels: true], the '__type__' label should not be attached when the type is unknown", + enableTypeAndUnitLabels: true, + allowDeltaTemporality: true, + otlpSeries: createOtelSum("test", "seconds", pmetric.AggregationTemporalityDelta, ts), + expectedLabels: labels.FromMap(map[string]string{ + "__name__": "test_seconds", + "__unit__": "seconds", + "test_label": "test_value", + }), + expectedMetadata: createPromMetadata("test_seconds", "seconds", prompb.MetricMetadata_UNKNOWN), + }, + { + description: "[enableTypeAndUnitLabels: false]", + enableTypeAndUnitLabels: false, + otlpSeries: createOtelSum("test", "seconds", pmetric.AggregationTemporalityCumulative, ts), + expectedLabels: labels.FromMap(map[string]string{ + "__name__": "test_seconds", + "test_label": "test_value", + }), + expectedMetadata: createPromMetadata("test_seconds", "seconds", prompb.MetricMetadata_GAUGE), + }, + } + + for _, test := range 
tests { + t.Run(test.description, func(t *testing.T) { + cfg := distributor.OTLPConfig{ + EnableTypeAndUnitLabels: test.enableTypeAndUnitLabels, + AllowDeltaTemporality: test.allowDeltaTemporality, + } + metrics := pmetric.NewMetrics() + rm := metrics.ResourceMetrics().AppendEmpty() + sm := rm.ScopeMetrics().AppendEmpty() + + test.otlpSeries.CopyTo(sm.Metrics().AppendEmpty()) + + limits := validation.Limits{} + overrides := validation.NewOverrides(limits, nil) + promSeries, metadata, err := convertToPromTS(ctx, metrics, cfg, overrides, "user-1", logger) + require.NoError(t, err) + require.Equal(t, 1, len(promSeries)) + require.Equal(t, prompb.FromLabels(test.expectedLabels, nil), promSeries[0].Labels) + + require.Equal(t, 1, len(metadata)) + require.Equal(t, test.expectedMetadata, metadata[0]) + }) + } +} + +func TestOTLP_AllowDeltaTemporality(t *testing.T) { + logger := log.NewNopLogger() + ctx := context.Background() + ts := time.Now() + + tests := []struct { + description string + allowDeltaTemporality bool + otlpSeries []pmetric.Metric + expectedSeries []prompb.TimeSeries + expectedMetadata []prompb.MetricMetadata + expectedErr string + }{ + { + description: "[allowDeltaTemporality: false] cumulative type should be converted", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelSum("test_1", "", pmetric.AggregationTemporalityCumulative, ts), + createOtelSum("test_2", "", pmetric.AggregationTemporalityCumulative, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromFloatSeries("test_1", ts), + createPromFloatSeries("test_2", ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_1", "", prompb.MetricMetadata_GAUGE), + createPromMetadata("test_2", "", prompb.MetricMetadata_GAUGE), + }, + }, + { + description: "[allowDeltaTemporality: false] delta type should not be converted", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelSum("test_1", "", pmetric.AggregationTemporalityDelta, ts), + createOtelSum("test_2", "", pmetric.AggregationTemporalityDelta, ts), + }, + expectedSeries: []prompb.TimeSeries{}, + expectedMetadata: []prompb.MetricMetadata{}, + expectedErr: `invalid temporality and type combination for metric "test_1"; invalid temporality and type combination for metric "test_2"`, + }, + { + description: "[allowDeltaTemporality: true] delta type should be converted", + allowDeltaTemporality: true, + otlpSeries: []pmetric.Metric{ + createOtelSum("test_1", "", pmetric.AggregationTemporalityDelta, ts), + createOtelSum("test_2", "", pmetric.AggregationTemporalityDelta, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromFloatSeries("test_1", ts), + createPromFloatSeries("test_2", ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_1", "", prompb.MetricMetadata_UNKNOWN), + createPromMetadata("test_2", "", prompb.MetricMetadata_UNKNOWN), + }, + }, + { + description: "[allowDeltaTemporality: false] mixed delta and cumulative, should be converted only for cumulative type", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelSum("test_1", "", pmetric.AggregationTemporalityDelta, ts), + createOtelSum("test_2", "", pmetric.AggregationTemporalityCumulative, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromFloatSeries("test_2", ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_2", "", prompb.MetricMetadata_GAUGE), + }, + expectedErr: `invalid temporality and type combination for metric "test_1"`, + }, + { + 
description: "[allowDeltaTemporality: false, exponential histogram] cumulative histogram should be converted", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelExponentialHistogram("test_1", pmetric.AggregationTemporalityCumulative, ts), + createOtelExponentialHistogram("test_2", pmetric.AggregationTemporalityCumulative, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromNativeHistogramSeries("test_1", prompb.Histogram_UNKNOWN, ts), + createPromNativeHistogramSeries("test_2", prompb.Histogram_UNKNOWN, ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_1", "", prompb.MetricMetadata_HISTOGRAM), + createPromMetadata("test_2", "", prompb.MetricMetadata_HISTOGRAM), + }, + }, + { + description: "[allowDeltaTemporality: false, exponential histogram] delta histogram should not be converted", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelExponentialHistogram("test_1", pmetric.AggregationTemporalityDelta, ts), + createOtelExponentialHistogram("test_2", pmetric.AggregationTemporalityDelta, ts), + }, + expectedSeries: []prompb.TimeSeries{}, + expectedMetadata: []prompb.MetricMetadata{}, + expectedErr: `invalid temporality and type combination for metric "test_1"; invalid temporality and type combination for metric "test_2"`, + }, + { + description: "[allowDeltaTemporality: true, exponential histogram] delta histogram should be converted", + allowDeltaTemporality: true, + otlpSeries: []pmetric.Metric{ + createOtelExponentialHistogram("test_1", pmetric.AggregationTemporalityDelta, ts), + createOtelExponentialHistogram("test_2", pmetric.AggregationTemporalityDelta, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromNativeHistogramSeries("test_1", prompb.Histogram_GAUGE, ts), + createPromNativeHistogramSeries("test_2", prompb.Histogram_GAUGE, ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_1", "", prompb.MetricMetadata_UNKNOWN), + createPromMetadata("test_2", "", prompb.MetricMetadata_UNKNOWN), + }, + }, + { + description: "[allowDeltaTemporality: false, exponential histogram] mixed delta and cumulative histogram, should be converted only for cumulative type", + allowDeltaTemporality: false, + otlpSeries: []pmetric.Metric{ + createOtelExponentialHistogram("test_1", pmetric.AggregationTemporalityDelta, ts), + createOtelExponentialHistogram("test_2", pmetric.AggregationTemporalityCumulative, ts), + }, + expectedSeries: []prompb.TimeSeries{ + createPromNativeHistogramSeries("test_2", prompb.Histogram_UNKNOWN, ts), + }, + expectedMetadata: []prompb.MetricMetadata{ + createPromMetadata("test_2", "", prompb.MetricMetadata_HISTOGRAM), + }, + expectedErr: `invalid temporality and type combination for metric "test_1"`, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + cfg := distributor.OTLPConfig{AllowDeltaTemporality: test.allowDeltaTemporality} + metrics := pmetric.NewMetrics() + rm := metrics.ResourceMetrics().AppendEmpty() + sm := rm.ScopeMetrics().AppendEmpty() + + for _, s := range test.otlpSeries { + s.CopyTo(sm.Metrics().AppendEmpty()) + } + + limits := validation.Limits{} + overrides := validation.NewOverrides(limits, nil) + promSeries, metadata, err := convertToPromTS(ctx, metrics, cfg, overrides, "user-1", logger) + require.Equal(t, sortTimeSeries(test.expectedSeries), sortTimeSeries(promSeries)) + require.Equal(t, test.expectedMetadata, metadata) + if test.expectedErr != "" { + require.Equal(t, test.expectedErr, 
err.Error()) + } else { + require.NoError(t, err) + } + + }) + } +} + +func createPromMetadata(name, unit string, metadataType prompb.MetricMetadata_MetricType) prompb.MetricMetadata { + return prompb.MetricMetadata{ + Type: metadataType, + Unit: unit, + MetricFamilyName: name, + } +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func sortTimeSeries(series []prompb.TimeSeries) []prompb.TimeSeries { + for i := range series { + sort.Slice(series[i].Labels, func(j, k int) bool { + return series[i].Labels[j].Name < series[i].Labels[k].Name + }) + } + + sort.Slice(series, func(i, j int) bool { + return fmt.Sprint(series[i].Labels) < fmt.Sprint(series[j].Labels) + }) + + return series +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func createPromFloatSeries(name string, ts time.Time) prompb.TimeSeries { + return prompb.TimeSeries{ + Labels: []prompb.Label{ + {Name: "__name__", Value: name}, + {Name: "test_label", Value: "test_value"}, + }, + Samples: []prompb.Sample{{ + Value: 5, + Timestamp: ts.UnixMilli(), + }}, + } +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func createOtelSum(name, unit string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric { + metrics := pmetric.NewMetricSlice() + m := metrics.AppendEmpty() + m.SetName(name) + m.SetUnit(unit) + sum := m.SetEmptySum() + sum.SetAggregationTemporality(temporality) + dp := sum.DataPoints().AppendEmpty() + dp.SetDoubleValue(5) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + dp.Attributes().PutStr("test_label", "test_value") + return m +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func createOtelExponentialHistogram(name string, temporality pmetric.AggregationTemporality, ts time.Time) pmetric.Metric { + metrics := pmetric.NewMetricSlice() + m := metrics.AppendEmpty() + m.SetName(name) + hist := m.SetEmptyExponentialHistogram() + hist.SetAggregationTemporality(temporality) + dp := hist.DataPoints().AppendEmpty() + dp.SetCount(1) + dp.SetSum(5) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + dp.Attributes().PutStr("test_label", "test_value") + return m +} + +// copied from: https://github.com/prometheus/prometheus/blob/v3.5.0/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +func createPromNativeHistogramSeries(name string, hint prompb.Histogram_ResetHint, ts time.Time) prompb.TimeSeries { + return prompb.TimeSeries{ + Labels: []prompb.Label{ + {Name: "__name__", Value: name}, + {Name: "test_label", Value: "test_value"}, + }, + Histograms: []prompb.Histogram{ + { + Count: &prompb.Histogram_CountInt{CountInt: 1}, + Sum: 5, + Schema: 0, + ZeroThreshold: 1e-128, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, + Timestamp: ts.UnixMilli(), + ResetHint: hint, + }, + }, + } +} + func TestOTLPConvertToPromTS(t *testing.T) { logger := log.NewNopLogger() ctx := context.Background() diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index 9cabb395228..17413f0e4dd 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -2,22 +2,43 @@ package push import ( "context" + "fmt" "net/http" + "strconv" + "strings" "github.com/go-kit/log/level" + "github.com/prometheus/prometheus/config" 
+ "github.com/prometheus/prometheus/model/labels" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" + "github.com/prometheus/prometheus/util/compression" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/middleware" "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/log" ) +const ( + remoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version" + remoteWriteVersion1HeaderValue = "0.1.0" + remoteWriteVersion20HeaderValue = "2.0.0" + appProtoContentType = "application/x-protobuf" + appProtoV1ContentType = "application/x-protobuf;proto=prometheus.WriteRequest" + appProtoV2ContentType = "application/x-protobuf;proto=io.prometheus.write.v2.Request" + + rw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Samples-Written" + rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Histograms-Written" + rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Exemplars-Written" +) + // Func defines the type of the push. It is similar to http.HandlerFunc. type Func func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) // Handler is a http.Handler which accepts WriteRequests. -func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func) http.Handler { +func Handler(remoteWrite2Enabled bool, maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() logger := log.WithContext(ctx, log.Logger) @@ -28,31 +49,245 @@ func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push F logger = log.WithSourceIPs(source, logger) } } - var req cortexpb.PreallocWriteRequest - err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) - if err != nil { - level.Error(logger).Log("err", err.Error()) - http.Error(w, err.Error(), http.StatusBadRequest) - return + + handlePRW1 := func() { + var req cortexpb.PreallocWriteRequest + err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + req.SkipLabelNameValidation = false + if req.Source == 0 { + req.Source = cortexpb.API + } + + if _, err := push(ctx, &req.WriteRequest); err != nil { + resp, ok := httpgrpc.HTTPResponseFromError(err) + if !ok { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if resp.GetCode()/100 == 5 { + level.Error(logger).Log("msg", "push error", "err", err) + } else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests { + level.Warn(logger).Log("msg", "push refused", "err", err) + } + http.Error(w, string(resp.Body), int(resp.Code)) + } } - req.SkipLabelNameValidation = false - if req.Source == 0 { - req.Source = cortexpb.API + handlePRW2 := func() { + var req writev2.Request + err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + v1Req, err := convertV2RequestToV1(&req) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + v1Req.SkipLabelNameValidation = 
false
+			if v1Req.Source == 0 {
+				v1Req.Source = cortexpb.API
+			}
+
+			if resp, err := push(ctx, &v1Req.WriteRequest); err != nil {
+				resp, ok := httpgrpc.HTTPResponseFromError(err)
+				setPRW2RespHeader(w, 0, 0, 0)
+				if !ok {
+					http.Error(w, err.Error(), http.StatusInternalServerError)
+					return
+				}
+				if resp.GetCode()/100 == 5 {
+					level.Error(logger).Log("msg", "push error", "err", err)
+				} else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests {
+					level.Warn(logger).Log("msg", "push refused", "err", err)
+				}
+				http.Error(w, string(resp.Body), int(resp.Code))
+			} else {
+				setPRW2RespHeader(w, resp.Samples, resp.Histograms, resp.Exemplars)
+			}
+		}

-		if _, err := push(ctx, &req.WriteRequest); err != nil {
-			resp, ok := httpgrpc.HTTPResponseFromError(err)
-			if !ok {
-				http.Error(w, err.Error(), http.StatusInternalServerError)
+		if remoteWrite2Enabled {
+			// follow Prometheus https://github.com/prometheus/prometheus/blob/v3.3.1/storage/remote/write_handler.go#L121
+			contentType := r.Header.Get("Content-Type")
+			if contentType == "" {
+				contentType = appProtoContentType
+			}
+
+			msgType, err := parseProtoMsg(contentType)
+			if err != nil {
+				level.Error(logger).Log("msg", "Error decoding remote write request", "err", err)
+				http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
 				return
 			}
-			if resp.GetCode()/100 == 5 {
-				level.Error(logger).Log("msg", "push error", "err", err)
-			} else if resp.GetCode() != http.StatusAccepted && resp.GetCode() != http.StatusTooManyRequests {
-				level.Warn(logger).Log("msg", "push refused", "err", err)
+
+			if msgType != config.RemoteWriteProtoMsgV1 && msgType != config.RemoteWriteProtoMsgV2 {
+				// parseProtoMsg succeeded at this point, so err is nil here; build an
+				// explicit error instead of dereferencing it.
+				err := fmt.Errorf("%v is not an accepted remote write protobuf message type", msgType)
+				level.Error(logger).Log("msg", "Not accepted msg type", "msgType", msgType, "err", err)
+				http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+				return
 			}
-			http.Error(w, string(resp.Body), int(resp.Code))
+
+			enc := r.Header.Get("Content-Encoding")
+			if enc != "" && enc != compression.Snappy {
+				err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, compression.Snappy)
+				level.Error(logger).Log("msg", "Error decoding remote write request", "err", err)
+				http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
+				return
+			}
+
+			switch msgType {
+			case config.RemoteWriteProtoMsgV1:
+				handlePRW1()
+			case config.RemoteWriteProtoMsgV2:
+				handlePRW2()
+			}
+		} else {
+			handlePRW1()
 		}
 	})
 }
+
+func setPRW2RespHeader(w http.ResponseWriter, samples, histograms, exemplars int64) {
+	w.Header().Set(rw20WrittenSamplesHeader, strconv.FormatInt(samples, 10))
+	w.Header().Set(rw20WrittenHistogramsHeader, strconv.FormatInt(histograms, 10))
+	w.Header().Set(rw20WrittenExemplarsHeader, strconv.FormatInt(exemplars, 10))
+}
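For context, a minimal client-side sketch of the negotiation this handler implements. The endpoint URL and function name are illustrative only; the header values mirror the constants defined above. The handler dispatches purely on the Content-Type proto= parameter, falling back to v1 when it is absent.

package main

import (
	"bytes"
	"net/http"

	"github.com/golang/snappy"
	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

// pushV2 sends a Remote Write 2.0 request to a Cortex push endpoint,
// e.g. http://localhost:9009/api/v1/push (assumed address).
func pushV2(endpoint string, req *writev2.Request) (*http.Response, error) {
	raw, err := req.Marshal()
	if err != nil {
		return nil, err
	}
	// The handler accepts only snappy (or no) Content-Encoding.
	body := snappy.Encode(nil, raw)

	httpReq, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	// proto=io.prometheus.write.v2.Request routes the request to handlePRW2.
	httpReq.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")
	httpReq.Header.Set("Content-Encoding", "snappy")
	httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "2.0.0")
	return http.DefaultClient.Do(httpReq)
}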
+// Refer to parseProtoMsg in https://github.com/prometheus/prometheus/blob/main/storage/remote/write_handler.go
+func parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) {
+	contentType = strings.TrimSpace(contentType)
+
+	parts := strings.Split(contentType, ";")
+	if parts[0] != appProtoContentType {
+		return "", fmt.Errorf("expected %v as the first (media) part, got %v content-type", appProtoContentType, contentType)
+	}
+	// Parse potential https://www.rfc-editor.org/rfc/rfc9110#parameter
+	for _, p := range parts[1:] {
+		pair := strings.Split(p, "=")
+		if len(pair) != 2 {
+			return "", fmt.Errorf("as per https://www.rfc-editor.org/rfc/rfc9110#parameter expected parameters to be key-values, got %v in %v content-type", p, contentType)
+		}
+		if pair[0] == "proto" {
+			ret := config.RemoteWriteProtoMsg(pair[1])
+			if err := ret.Validate(); err != nil {
+				return "", fmt.Errorf("got %v content type; %w", contentType, err)
+			}
+			return ret, nil
+		}
+	}
+	// No "proto=" parameter, assuming v1.
+	return config.RemoteWriteProtoMsgV1, nil
+}
+
+func convertV2RequestToV1(req *writev2.Request) (cortexpb.PreallocWriteRequest, error) {
+	var v1Req cortexpb.PreallocWriteRequest
+	v1Timeseries := make([]cortexpb.PreallocTimeseries, 0, len(req.Timeseries))
+	var v1Metadata []*cortexpb.MetricMetadata
+
+	b := labels.NewScratchBuilder(0)
+	symbols := req.Symbols
+	for _, v2Ts := range req.Timeseries {
+		lbs := v2Ts.ToLabels(&b, symbols)
+		v1Timeseries = append(v1Timeseries, cortexpb.PreallocTimeseries{
+			TimeSeries: &cortexpb.TimeSeries{
+				Labels:     cortexpb.FromLabelsToLabelAdapters(lbs),
+				Samples:    convertV2ToV1Samples(v2Ts.Samples),
+				Exemplars:  convertV2ToV1Exemplars(b, symbols, v2Ts.Exemplars),
+				Histograms: convertV2ToV1Histograms(v2Ts.Histograms),
+			},
+		})
+
+		if shouldConvertV2Metadata(v2Ts.Metadata) {
+			metricName, err := extract.MetricNameFromLabels(lbs)
+			if err != nil {
+				return v1Req, err
+			}
+			v1Metadata = append(v1Metadata, convertV2ToV1Metadata(metricName, symbols, v2Ts.Metadata))
+		}
+	}
+
+	v1Req.Timeseries = v1Timeseries
+	v1Req.Metadata = v1Metadata
+
+	return v1Req, nil
+}
+
+func shouldConvertV2Metadata(metadata writev2.Metadata) bool {
+	return !(metadata.HelpRef == 0 && metadata.UnitRef == 0 && metadata.Type == writev2.Metadata_METRIC_TYPE_UNSPECIFIED) //nolint:staticcheck
+}
+
+func convertV2ToV1Histograms(histograms []writev2.Histogram) []cortexpb.Histogram {
+	v1Histograms := make([]cortexpb.Histogram, 0, len(histograms))
+
+	for _, h := range histograms {
+		v1Histograms = append(v1Histograms, cortexpb.HistogramWriteV2ProtoToHistogramProto(h))
+	}
+
+	return v1Histograms
+}
+
+func convertV2ToV1Samples(samples []writev2.Sample) []cortexpb.Sample {
+	v1Samples := make([]cortexpb.Sample, 0, len(samples))
+
+	for _, s := range samples {
+		v1Samples = append(v1Samples, cortexpb.Sample{
+			Value:       s.Value,
+			TimestampMs: s.Timestamp,
+		})
+	}
+
+	return v1Samples
+}
+
+func convertV2ToV1Metadata(name string, symbols []string, metadata writev2.Metadata) *cortexpb.MetricMetadata {
+	t := cortexpb.UNKNOWN
+
+	switch metadata.Type {
+	case writev2.Metadata_METRIC_TYPE_COUNTER:
+		t = cortexpb.COUNTER
+	case writev2.Metadata_METRIC_TYPE_GAUGE:
+		t = cortexpb.GAUGE
+	case writev2.Metadata_METRIC_TYPE_HISTOGRAM:
+		t = cortexpb.HISTOGRAM
+	case writev2.Metadata_METRIC_TYPE_GAUGEHISTOGRAM:
+		t = cortexpb.GAUGEHISTOGRAM
+	case writev2.Metadata_METRIC_TYPE_SUMMARY:
+		t = cortexpb.SUMMARY
+	case writev2.Metadata_METRIC_TYPE_INFO:
+		t = cortexpb.INFO
+	case writev2.Metadata_METRIC_TYPE_STATESET:
+		t = cortexpb.STATESET
+	}
+
+	return &cortexpb.MetricMetadata{
+		Type:             t,
+		MetricFamilyName: name,
+		Unit:             symbols[metadata.UnitRef],
+		Help:             symbols[metadata.HelpRef],
+	}
+}
+
+func convertV2ToV1Exemplars(b labels.ScratchBuilder, symbols []string, v2Exemplars []writev2.Exemplar) []cortexpb.Exemplar {
+	v1Exemplars := make([]cortexpb.Exemplar, 0, len(v2Exemplars))
+	for _, e := range v2Exemplars {
+		promExemplar := e.ToExemplar(&b, symbols)
+		v1Exemplars = append(v1Exemplars, cortexpb.Exemplar{
+			Labels:      cortexpb.FromLabelsToLabelAdapters(promExemplar.Labels),
+			Value:       e.Value,
+			TimestampMs: e.Timestamp,
+		})
+	}
+	return v1Exemplars
+}
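The v2 request format interns every label name and value in a per-request symbols table; LabelsRefs holds alternating name/value indexes into that table, which is what the conversion above resolves via ToLabels. A small sketch of the encoding, with illustrative symbol values (the spec reserves index 0 for the empty string):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func main() {
	symbols := []string{"", "__name__", "http_requests_total", "job", "api"}
	ts := writev2.TimeSeries{
		// name/value index pairs: __name__=http_requests_total, job=api
		LabelsRefs: []uint32{1, 2, 3, 4},
		Samples:    []writev2.Sample{{Value: 1, Timestamp: 1700000000000}},
	}

	b := labels.NewScratchBuilder(0)
	lbls := ts.ToLabels(&b, symbols)
	fmt.Println(lbls.String()) // {__name__="http_requests_total", job="api"}
}

diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go
index b806011a611..46cb0770f75 100644
--- a/pkg/util/push/push_test.go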
+++ b/pkg/util/push/push_test.go @@ -3,13 +3,17 @@ package push import ( "bytes" "context" + "fmt" "net/http" "net/http/httptest" "testing" "time" "github.com/golang/snappy" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/prompb" + writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/weaveworks/common/middleware" @@ -17,30 +21,374 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" ) +var ( + testHistogram = histogram.Histogram{ + Schema: 2, + ZeroThreshold: 1e-128, + ZeroCount: 0, + Count: 3, + Sum: 20, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []int64{1}, + NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, + NegativeBuckets: []int64{2}, + } +) + +func makeV2ReqWithSeries(num int) *writev2.Request { + ts := make([]writev2.TimeSeries, 0, num) + symbols := []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} + for i := 0; i < num; i++ { + ts = append(ts, writev2.TimeSeries{ + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, + + HelpRef: 15, + UnitRef: 16, + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: 10}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 10}}, + Histograms: []writev2.Histogram{ + writev2.FromIntHistogram(10, &testHistogram), + writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil)), + }, + }) + } + + return &writev2.Request{ + Symbols: symbols, + Timeseries: ts, + } +} + +func createPRW1HTTPRequest(seriesNum int) (*http.Request, error) { + series := makeV2ReqWithSeries(seriesNum) + v1Req, err := convertV2RequestToV1(series) + if err != nil { + return nil, err + } + protobuf, err := v1Req.Marshal() + if err != nil { + return nil, err + } + + body := snappy.Encode(nil, protobuf) + req, err := http.NewRequest("POST", "http://localhost/", newResetReader(body)) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Encoding", "snappy") + req.Header.Set("Content-Type", appProtoContentType) + req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion1HeaderValue) + req.ContentLength = int64(len(body)) + return req, nil +} + +func createPRW2HTTPRequest(seriesNum int) (*http.Request, error) { + series := makeV2ReqWithSeries(seriesNum) + protobuf, err := series.Marshal() + if err != nil { + return nil, err + } + + body := snappy.Encode(nil, protobuf) + req, err := http.NewRequest("POST", "http://localhost/", newResetReader(body)) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Encoding", "snappy") + req.Header.Set("Content-Type", appProtoV2ContentType) + req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion20HeaderValue) + req.ContentLength = int64(len(body)) + return req, nil +} + +func Benchmark_Handler(b *testing.B) { + mockHandler := func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { + // Nothing to do. 
+ return &cortexpb.WriteResponse{}, nil + } + testSeriesNums := []int{10, 100, 500, 1000} + for _, seriesNum := range testSeriesNums { + b.Run(fmt.Sprintf("PRW1 with %d series", seriesNum), func(b *testing.B) { + handler := Handler(true, 1000000, nil, mockHandler) + req, err := createPRW1HTTPRequest(seriesNum) + require.NoError(b, err) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(b, http.StatusOK, resp.Code) + req.Body.(*resetReader).Reset() + } + }) + b.Run(fmt.Sprintf("PRW2 with %d series", seriesNum), func(b *testing.B) { + handler := Handler(true, 1000000, nil, mockHandler) + req, err := createPRW2HTTPRequest(seriesNum) + require.NoError(b, err) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(b, http.StatusOK, resp.Code) + req.Body.(*resetReader).Reset() + } + }) + } +} + +func Benchmark_convertV2RequestToV1(b *testing.B) { + testSeriesNums := []int{100, 500, 1000} + + for _, seriesNum := range testSeriesNums { + b.Run(fmt.Sprintf("%d series", seriesNum), func(b *testing.B) { + series := makeV2ReqWithSeries(seriesNum) + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, err := convertV2RequestToV1(series) + require.NoError(b, err) + } + }) + } +} + +func Test_convertV2RequestToV1(t *testing.T) { + var v2Req writev2.Request + + fh := tsdbutil.GenerateTestFloatHistogram(1) + ph := writev2.FromFloatHistogram(4, fh) + + symbols := []string{"", "__name__", "test_metric", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"} + timeseries := []writev2.TimeSeries{ + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Metadata: writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + + HelpRef: 15, + UnitRef: 16, + }, + Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}}, + }, + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Samples: []writev2.Sample{{Value: 2, Timestamp: 2}}, + }, + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Samples: []writev2.Sample{{Value: 3, Timestamp: 3}}, + }, + { + LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Histograms: []writev2.Histogram{ph, ph}, + Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}}, + }, + } + + v2Req.Symbols = symbols + v2Req.Timeseries = timeseries + v1Req, err := convertV2RequestToV1(&v2Req) + assert.NoError(t, err) + expectedSamples := 3 + expectedExemplars := 2 + expectedHistograms := 2 + countSamples := 0 + countExemplars := 0 + countHistograms := 0 + + for _, ts := range v1Req.Timeseries { + countSamples += len(ts.Samples) + countExemplars += len(ts.Exemplars) + countHistograms += len(ts.Histograms) + } + + assert.Equal(t, expectedSamples, countSamples) + assert.Equal(t, expectedExemplars, countExemplars) + assert.Equal(t, expectedHistograms, countHistograms) + assert.Equal(t, 4, len(v1Req.Timeseries)) + assert.Equal(t, 1, len(v1Req.Metadata)) +} + func TestHandler_remoteWrite(t *testing.T) { - req := createRequest(t, createPrometheusRemoteWriteProtobuf(t)) - resp := httptest.NewRecorder() - handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.API)) - handler.ServeHTTP(resp, req) - assert.Equal(t, 200, resp.Code) + t.Run("remote 
write v1", func(t *testing.T) {
+		handler := Handler(true, 100000, nil, verifyWriteRequestHandler(t, cortexpb.API))
+		req := createRequest(t, createPrometheusRemoteWriteProtobuf(t), false)
+		resp := httptest.NewRecorder()
+		handler.ServeHTTP(resp, req)
+		assert.Equal(t, http.StatusOK, resp.Code)
+	})
+	t.Run("remote write v2", func(t *testing.T) {
+		handler := Handler(true, 100000, nil, verifyWriteRequestHandler(t, cortexpb.API))
+		req := createRequest(t, createPrometheusRemoteWriteV2Protobuf(t), true)
+		resp := httptest.NewRecorder()
+		handler.ServeHTTP(resp, req)
+		assert.Equal(t, http.StatusOK, resp.Code)
+
+		// test header values
+		respHeader := resp.Header()
+		assert.Equal(t, "1", respHeader[rw20WrittenSamplesHeader][0])
+		assert.Equal(t, "1", respHeader[rw20WrittenHistogramsHeader][0])
+		assert.Equal(t, "1", respHeader[rw20WrittenExemplarsHeader][0])
+	})
+}
+
+func TestHandler_ContentTypeAndEncoding(t *testing.T) {
+	sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)")
+	handler := Handler(true, 100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API))
+
+	tests := []struct {
+		description  string
+		reqHeaders   map[string]string
+		expectedCode int
+		isV2         bool
+	}{
+		{
+			description: "[RW 2.0] correct content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":           appProtoV2ContentType,
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         true,
+		},
+		{
+			description: "[RW 1.0] correct content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":           appProtoV1ContentType,
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "0.1.0",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         false,
+		},
+		{
+			description: "[RW 2.0] wrong content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":           "yolo",
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusUnsupportedMediaType,
+			isV2:         true,
+		},
+		{
+			description: "[RW 2.0] wrong proto parameter in content-type",
+			reqHeaders: map[string]string{
+				"Content-Type":           "application/x-protobuf;proto=yolo",
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusUnsupportedMediaType,
+			isV2:         true,
+		},
+		{
+			description: "[RW 2.0] wrong content-encoding",
+			reqHeaders: map[string]string{
+				"Content-Type":           "application/x-protobuf;proto=io.prometheus.write.v2.Request",
+				"Content-Encoding":       "zstd",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusUnsupportedMediaType,
+			isV2:         true,
+		},
+		{
+			description:  "no header, should be treated as RW 1.0",
+			expectedCode: http.StatusOK,
+			isV2:         false,
+		},
+		{
+			description: "missing content-type, should be treated as RW 1.0",
+			reqHeaders: map[string]string{
+				"Content-Encoding":       "snappy",
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         false,
+		},
+		{
+			description: "missing content-encoding",
+			reqHeaders: map[string]string{
+				"Content-Type":           appProtoV2ContentType,
+				remoteWriteVersionHeader: "2.0.0",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         true,
+		},
+		{
+			description: "missing remote write version, should be treated based on Content-Type",
+			reqHeaders: map[string]string{
+				"Content-Type":     appProtoV2ContentType,
+				"Content-Encoding": "snappy",
+			},
+			expectedCode: http.StatusOK,
+			isV2:         true,
+		},
+		{
+			description: "missing remote write version, should be treated based on Content-Type",
+			reqHeaders: map[string]string{
+				"Content-Type":     appProtoV1ContentType,
+				"Content-Encoding": "snappy",
+			},
+			expectedCode:
http.StatusOK, + isV2: false, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + if test.isV2 { + req := createRequestWithHeaders(t, test.reqHeaders, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.API)) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, test.expectedCode, resp.Code) + } else { + req := createRequestWithHeaders(t, test.reqHeaders, createCortexWriteRequestProtobuf(t, false, cortexpb.API)) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, test.expectedCode, resp.Code) + } + }) + } } func TestHandler_cortexWriteRequest(t *testing.T) { - req := createRequest(t, createCortexWriteRequestProtobuf(t, false)) - resp := httptest.NewRecorder() sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)") - handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.RULE)) - handler.ServeHTTP(resp, req) - assert.Equal(t, 200, resp.Code) + handler := Handler(true, 100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API)) + + t.Run("remote write v1", func(t *testing.T) { + req := createRequest(t, createCortexWriteRequestProtobuf(t, false, cortexpb.API), false) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, 200, resp.Code) + }) + t.Run("remote write v2", func(t *testing.T) { + req := createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.API), true) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + assert.Equal(t, 200, resp.Code) + }) } func TestHandler_ignoresSkipLabelNameValidationIfSet(t *testing.T) { for _, req := range []*http.Request{ - createRequest(t, createCortexWriteRequestProtobuf(t, true)), - createRequest(t, createCortexWriteRequestProtobuf(t, false)), + createRequest(t, createCortexWriteRequestProtobuf(t, true, cortexpb.RULE), false), + createRequest(t, createCortexWriteRequestProtobuf(t, true, cortexpb.RULE), false), } { resp := httptest.NewRecorder() - handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE)) + handler := Handler(true, 100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE)) handler.ServeHTTP(resp, req) assert.Equal(t, 200, resp.Code) } @@ -54,21 +402,86 @@ func verifyWriteRequestHandler(t *testing.T, expectSource cortexpb.WriteRequest_ assert.Equal(t, "foo", request.Timeseries[0].Labels[0].Value) assert.Equal(t, expectSource, request.Source) assert.False(t, request.SkipLabelNameValidation) - return &cortexpb.WriteResponse{}, nil + + resp := &cortexpb.WriteResponse{ + Samples: 1, + Histograms: 1, + Exemplars: 1, + } + + return resp, nil } } -func createRequest(t *testing.T, protobuf []byte) *http.Request { +func createRequestWithHeaders(t *testing.T, headers map[string]string, protobuf []byte) *http.Request { t.Helper() inoutBytes := snappy.Encode(nil, protobuf) req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(inoutBytes)) require.NoError(t, err) + + for k, v := range headers { + req.Header.Set(k, v) + } + return req +} + +func createRequest(t *testing.T, protobuf []byte, isV2 bool) *http.Request { + t.Helper() + inoutBytes := snappy.Encode(nil, protobuf) + req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(inoutBytes)) + require.NoError(t, err) + req.Header.Add("Content-Encoding", "snappy") - req.Header.Set("Content-Type", "application/x-protobuf") - req.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") + + if isV2 { + req.Header.Set("Content-Type", appProtoV2ContentType) + 
req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion20HeaderValue)
+		return req
+	}
+
+	req.Header.Set("Content-Type", appProtoContentType)
+	req.Header.Set("X-Prometheus-Remote-Write-Version", remoteWriteVersion1HeaderValue)
 	return req
 }
 
+func createCortexRemoteWriteV2Protobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.WriteRequest_SourceEnum) []byte {
+	t.Helper()
+	input := writev2.Request{
+		Symbols: []string{"", "__name__", "foo"},
+		Timeseries: []writev2.TimeSeries{
+			{
+				LabelsRefs: []uint32{1, 2},
+				Samples: []writev2.Sample{
+					{Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
+				},
+			},
+		},
+	}
+
+	inoutBytes, err := input.Marshal()
+	require.NoError(t, err)
+	return inoutBytes
+}
+
+func createPrometheusRemoteWriteV2Protobuf(t *testing.T) []byte {
+	t.Helper()
+	input := writev2.Request{
+		Symbols: []string{"", "__name__", "foo"},
+		Timeseries: []writev2.TimeSeries{
+			{
+				LabelsRefs: []uint32{1, 2},
+				Samples: []writev2.Sample{
+					{Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()},
+				},
+			},
+		},
+	}
+
+	inoutBytes, err := input.Marshal()
+	require.NoError(t, err)
+	return inoutBytes
+}
+
 func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte {
 	t.Helper()
 	input := prompb.WriteRequest{
@@ -87,7 +500,7 @@ func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte {
 	require.NoError(t, err)
 	return inoutBytes
 }
-func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool) []byte {
+func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.WriteRequest_SourceEnum) []byte {
 	t.Helper()
 	ts := cortexpb.PreallocTimeseries{
 		TimeSeries: &cortexpb.TimeSeries{
@@ -101,7 +514,7 @@ func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool
 	}
 	input := cortexpb.WriteRequest{
 		Timeseries:              []cortexpb.PreallocTimeseries{ts},
-		Source:                  cortexpb.RULE,
+		Source:                  source,
 		SkipLabelNameValidation: skipLabelNameValidation,
 	}
 	inoutBytes, err := input.Marshal()
diff --git a/pkg/util/requestmeta/context.go b/pkg/util/requestmeta/context.go
new file mode 100644
index 00000000000..2efae506d96
--- /dev/null
+++ b/pkg/util/requestmeta/context.go
@@ -0,0 +1,75 @@
+package requestmeta
+
+import (
+	"context"
+	"net/http"
+	"net/textproto"
+
+	"google.golang.org/grpc/metadata"
+)
+
+type contextKey int
+
+const (
+	requestMetadataContextKey contextKey = 0
+	PropagationStringForRequestMetadata string = "x-request-metadata-propagation-string"
+	// HeaderPropagationStringForRequestLogging is used for backwards compatibility
+	HeaderPropagationStringForRequestLogging string = "x-http-header-forwarding-logging"
+)
+
+func ContextWithRequestMetadataMap(ctx context.Context, requestContextMap map[string]string) context.Context {
+	return context.WithValue(ctx, requestMetadataContextKey, requestContextMap)
+}
+
+func MapFromContext(ctx context.Context) map[string]string {
+	requestContextMap, ok := ctx.Value(requestMetadataContextKey).(map[string]string)
+	if !ok {
+		return nil
+	}
+	return requestContextMap
+}
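As a usage sketch (not part of the change itself): the package carries a single string map on the context, and the request ID helper defined in id.go below reads and writes that same map. Everything here other than the requestmeta calls is illustrative.

package main

import (
	"context"
	"fmt"

	"github.com/cortexproject/cortex/pkg/util/requestmeta"
)

func main() {
	ctx := context.Background()

	// Attach a request ID; it lives in the same metadata map that logging
	// headers use, so one context value carries both.
	ctx = requestmeta.ContextWithRequestId(ctx, "req-1234")

	fmt.Println(requestmeta.RequestIdFromContext(ctx)) // req-1234

	// A downstream HTTP hop could forward the whole map as headers:
	//   requestmeta.InjectMetadataIntoHTTPRequestHeaders(requestmeta.MapFromContext(ctx), httpReq)
}

+// ContextWithRequestMetadataMapFromHeaders adds the request-metadata headers to the context,
+// dropping any requested header that is not present. targetHeaders is passed for backwards
+// compatibility; otherwise the set of header keys is carried in the headers themselves.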
+func ContextWithRequestMetadataMapFromHeaders(ctx context.Context, headers map[string]string, targetHeaders []string) context.Context { + headerMap := make(map[string]string) + loggingHeaders := headers[textproto.CanonicalMIMEHeaderKey(LoggingHeadersKey)] + headerKeys := targetHeaders + if loggingHeaders != "" { + headerKeys = LoggingHeaderKeysFromString(loggingHeaders) + headerKeys = append(headerKeys, LoggingHeadersKey) + } + headerKeys = append(headerKeys, RequestIdKey) + for _, header := range headerKeys { + if v, ok := headers[textproto.CanonicalMIMEHeaderKey(header)]; ok { + headerMap[header] = v + } + } + return ContextWithRequestMetadataMap(ctx, headerMap) +} + +func InjectMetadataIntoHTTPRequestHeaders(requestMetadataMap map[string]string, request *http.Request) { + for key, contents := range requestMetadataMap { + request.Header.Add(key, contents) + } +} + +func ContextWithRequestMetadataMapFromMetadata(ctx context.Context, md metadata.MD) context.Context { + headersSlice, ok := md[PropagationStringForRequestMetadata] + + // we want to check old key if no data + if !ok { + headersSlice, ok = md[HeaderPropagationStringForRequestLogging] + } + + if !ok || len(headersSlice)%2 == 1 { + return ctx + } + + requestMetadataMap := make(map[string]string) + for i := 0; i < len(headersSlice); i += 2 { + requestMetadataMap[headersSlice[i]] = headersSlice[i+1] + } + + return ContextWithRequestMetadataMap(ctx, requestMetadataMap) +} diff --git a/pkg/util/requestmeta/context_test.go b/pkg/util/requestmeta/context_test.go new file mode 100644 index 00000000000..23a0d3b4dab --- /dev/null +++ b/pkg/util/requestmeta/context_test.go @@ -0,0 +1,113 @@ +package requestmeta + +import ( + "context" + "net/http" + "net/textproto" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc/metadata" +) + +func TestRequestMetadataMapFromMetadata(t *testing.T) { + md := metadata.New(nil) + md.Append(PropagationStringForRequestMetadata, "TestHeader1", "SomeInformation", "TestHeader2", "ContentsOfTestHeader2") + + ctx := context.Background() + + ctx = ContextWithRequestMetadataMapFromMetadata(ctx, md) + + requestMetadataMap := MapFromContext(ctx) + + require.Contains(t, requestMetadataMap, "TestHeader1") + require.Contains(t, requestMetadataMap, "TestHeader2") + require.Equal(t, "SomeInformation", requestMetadataMap["TestHeader1"]) + require.Equal(t, "ContentsOfTestHeader2", requestMetadataMap["TestHeader2"]) +} + +func TestRequestMetadataMapFromMetadataWithImproperLength(t *testing.T) { + md := metadata.New(nil) + md.Append(PropagationStringForRequestMetadata, "TestHeader1", "SomeInformation", "TestHeader2", "ContentsOfTestHeader2", "Test3") + + ctx := context.Background() + + ctx = ContextWithRequestMetadataMapFromMetadata(ctx, md) + + requestMetadataMap := MapFromContext(ctx) + require.Nil(t, requestMetadataMap) +} + +func TestContextWithRequestMetadataMapFromHeaders_WithLoggingHeaders(t *testing.T) { + headers := map[string]string{ + textproto.CanonicalMIMEHeaderKey("X-Request-ID"): "1234", + textproto.CanonicalMIMEHeaderKey("X-User-ID"): "user5678", + textproto.CanonicalMIMEHeaderKey(LoggingHeadersKey): "X-Request-ID,X-User-ID", + } + + ctx := context.Background() + ctx = ContextWithRequestMetadataMapFromHeaders(ctx, headers, nil) + + requestMetadataMap := MapFromContext(ctx) + + require.Contains(t, requestMetadataMap, "X-Request-ID") + require.Contains(t, requestMetadataMap, "X-User-ID") + require.Equal(t, "1234", requestMetadataMap["X-Request-ID"]) + require.Equal(t, "user5678", 
requestMetadataMap["X-User-ID"]) +} + +func TestContextWithRequestMetadataMapFromHeaders_BackwardCompatibleTargetHeaders(t *testing.T) { + headers := map[string]string{ + textproto.CanonicalMIMEHeaderKey("X-Legacy-Header"): "legacy-value", + } + + ctx := context.Background() + ctx = ContextWithRequestMetadataMapFromHeaders(ctx, headers, []string{"X-Legacy-Header"}) + + requestMetadataMap := MapFromContext(ctx) + + require.Contains(t, requestMetadataMap, "X-Legacy-Header") + require.Equal(t, "legacy-value", requestMetadataMap["X-Legacy-Header"]) +} + +func TestContextWithRequestMetadataMapFromHeaders_OnlyMatchingKeysUsed(t *testing.T) { + headers := map[string]string{ + textproto.CanonicalMIMEHeaderKey("X-Some-Header"): "value1", + textproto.CanonicalMIMEHeaderKey("Unused-Header"): "value2", + textproto.CanonicalMIMEHeaderKey(LoggingHeadersKey): "X-Some-Header", + } + + ctx := context.Background() + ctx = ContextWithRequestMetadataMapFromHeaders(ctx, headers, nil) + + requestMetadataMap := MapFromContext(ctx) + + require.Equal(t, "value1", requestMetadataMap["X-Some-Header"]) +} + +func TestInjectMetadataIntoHTTPRequestHeaders(t *testing.T) { + contentsMap := make(map[string]string) + contentsMap["TestHeader1"] = "RequestID" + contentsMap["TestHeader2"] = "ContentsOfTestHeader2" + + h := http.Header{} + req := &http.Request{ + Method: "GET", + RequestURI: "/HTTPHeaderTest", + Body: http.NoBody, + Header: h, + } + InjectMetadataIntoHTTPRequestHeaders(contentsMap, req) + + header1 := req.Header.Values("TestHeader1") + header2 := req.Header.Values("TestHeader2") + + require.NotNil(t, header1) + require.NotNil(t, header2) + require.Equal(t, 1, len(header1)) + require.Equal(t, 1, len(header2)) + + require.Equal(t, "RequestID", header1[0]) + require.Equal(t, "ContentsOfTestHeader2", header2[0]) + +} diff --git a/pkg/util/requestmeta/id.go b/pkg/util/requestmeta/id.go new file mode 100644 index 00000000000..01b34e430a1 --- /dev/null +++ b/pkg/util/requestmeta/id.go @@ -0,0 +1,22 @@ +package requestmeta + +import "context" + +const RequestIdKey = "x-cortex-request-id" + +func RequestIdFromContext(ctx context.Context) string { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + return "" + } + return metadataMap[RequestIdKey] +} + +func ContextWithRequestId(ctx context.Context, reqId string) context.Context { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + metadataMap = make(map[string]string) + } + metadataMap[RequestIdKey] = reqId + return ContextWithRequestMetadataMap(ctx, metadataMap) +} diff --git a/pkg/util/requestmeta/logging_headers.go b/pkg/util/requestmeta/logging_headers.go new file mode 100644 index 00000000000..cdf6f0d2e2c --- /dev/null +++ b/pkg/util/requestmeta/logging_headers.go @@ -0,0 +1,56 @@ +package requestmeta + +import ( + "context" + "strings" +) + +const ( + LoggingHeadersKey = "x-request-logging-headers-key" + loggingHeadersDelimiter = "," +) + +func LoggingHeaderKeysToString(targetHeaders []string) string { + return strings.Join(targetHeaders, loggingHeadersDelimiter) +} + +func LoggingHeaderKeysFromString(headerKeysString string) []string { + return strings.Split(headerKeysString, loggingHeadersDelimiter) +} + +func LoggingHeadersFromContext(ctx context.Context) map[string]string { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + return nil + } + loggingHeadersString := metadataMap[LoggingHeadersKey] + if loggingHeadersString == "" { + // Backward compatibility: if no specific headers are listed, return all metadata + result 
:= make(map[string]string, len(metadataMap)) + for k, v := range metadataMap { + result[k] = v + } + return result + } + + result := make(map[string]string) + for _, header := range LoggingHeaderKeysFromString(loggingHeadersString) { + if v, ok := metadataMap[header]; ok { + result[header] = v + } + } + return result +} + +func LoggingHeadersAndRequestIdFromContext(ctx context.Context) map[string]string { + metadataMap := MapFromContext(ctx) + if metadataMap == nil { + return nil + } + + loggingHeaders := LoggingHeadersFromContext(ctx) + reqId := RequestIdFromContext(ctx) + loggingHeaders[RequestIdKey] = reqId + + return loggingHeaders +} diff --git a/pkg/util/tls/test/tls_integration_test.go b/pkg/util/tls/test/tls_integration_test.go index ce3bcb4cb95..d37e57f2c6e 100644 --- a/pkg/util/tls/test/tls_integration_test.go +++ b/pkg/util/tls/test/tls_integration_test.go @@ -39,19 +39,6 @@ type grpcHealthCheck struct { healthy bool } -func (h *grpcHealthCheck) List(ctx context.Context, request *grpc_health_v1.HealthListRequest) (*grpc_health_v1.HealthListResponse, error) { - checkResp, err := h.Check(ctx, nil) - if err != nil { - return &grpc_health_v1.HealthListResponse{}, err - } - - return &grpc_health_v1.HealthListResponse{ - Statuses: map[string]*grpc_health_v1.HealthCheckResponse{ - "server": checkResp, - }, - }, nil -} - func (h *grpcHealthCheck) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { if !h.healthy { return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_NOT_SERVING}, nil diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index fcd96fea36b..fd077ebd18c 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -184,7 +184,7 @@ type Limits struct { MaxQueryResponseSize int64 `yaml:"max_query_response_size" json:"max_query_response_size"` MaxCacheFreshness model.Duration `yaml:"max_cache_freshness" json:"max_cache_freshness"` MaxQueriersPerTenant float64 `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"` - QueryVerticalShardSize int `yaml:"query_vertical_shard_size" json:"query_vertical_shard_size" doc:"hidden"` + QueryVerticalShardSize int `yaml:"query_vertical_shard_size" json:"query_vertical_shard_size"` QueryPartialData bool `yaml:"query_partial_data" json:"query_partial_data" doc:"nocli|description=Enable to allow queries to be evaluated with data from a single zone, if other zones are not available.|default=false"` // Parquet Queryable enforced limits. 
@@ -1202,11 +1202,16 @@ outer: defaultPartitionIndex = i continue } - for _, lbl := range lbls.LabelSet { + found := true + lbls.LabelSet.Range(func(l labels.Label) { // We did not find some of the labels on the set - if v := metric.Get(lbl.Name); v != lbl.Value { - continue outer + if v := metric.Get(l.Name); v != l.Value { + found = false } + }) + + if !found { + continue outer } r = append(r, lbls) } diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go index 308067e959e..260686fdb50 100644 --- a/pkg/util/validation/limits_test.go +++ b/pkg/util/validation/limits_test.go @@ -116,11 +116,11 @@ func TestLimits_Validate(t *testing.T) { expected: errMaxLocalNativeHistogramSeriesPerUserValidation, }, "external-labels invalid label name": { - limits: Limits{RulerExternalLabels: labels.Labels{{Name: "123invalid", Value: "good"}}}, + limits: Limits{RulerExternalLabels: labels.FromStrings("123invalid", "good")}, expected: errInvalidLabelName, }, "external-labels invalid label value": { - limits: Limits{RulerExternalLabels: labels.Labels{{Name: "good", Value: string([]byte{0xff, 0xfe, 0xfd})}}}, + limits: Limits{RulerExternalLabels: labels.FromStrings("good", string([]byte{0xff, 0xfe, 0xfd}))}, expected: errInvalidLabelValue, }, } diff --git a/schemas/cortex-config-schema.json b/schemas/cortex-config-schema.json new file mode 100644 index 00000000000..13839752ab6 --- /dev/null +++ b/schemas/cortex-config-schema.json @@ -0,0 +1,8474 @@ +{ + "$id": "https://cortexmetrics.io/schemas/cortex-config.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "definitions": { + "DisabledRuleGroup": { + "properties": { + "name": { + "description": "name of the rule group", + "type": "string" + }, + "namespace": { + "description": "namespace in which the rule group belongs", + "type": "string" + } + }, + "type": "object" + }, + "Label": { + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "LimitsPerLabelSet": { + "properties": { + "label_set": { + "additionalProperties": true, + "default": [], + "description": "LabelSet which the limit should be applied. If no labels are provided, it becomes the default partition which matches any series that doesn't match any other explicitly defined label sets.'", + "type": "object" + }, + "limits": { + "properties": { + "max_series": { + "description": "The maximum number of active series per LabelSet, across the cluster before replication. Setting the value 0 will enable the monitoring (metrics) but would not enforce any limits.", + "type": "number" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "PriorityDef": { + "properties": { + "priority": { + "default": 0, + "description": "Priority level. Must be a unique value.", + "type": "number" + }, + "query_attributes": { + "default": [], + "description": "List of query_attributes to match and assign priority to queries. A query is assigned to this priority if it matches any query_attribute in this list. Each query_attribute has several properties (e.g., regex, time_window, user_agent), and all specified properties must match for a query_attribute to be considered a match. Only the specified properties are checked, and an AND operator is applied to them.", + "items": { + "type": "string" + }, + "type": "array" + }, + "reserved_queriers": { + "default": 0, + "description": "Number of reserved queriers to handle priorities higher or equal to the priority level. 
Value between 0 and 1 will be used as a percentage.", + "type": "number" + } + }, + "type": "object" + }, + "QueryAttribute": { + "properties": { + "api_type": { + "description": "API type for the query. Should be one of the query, query_range, series, labels, label_values. If not set, it won't be checked.", + "type": "string" + }, + "dashboard_uid": { + "description": "Grafana includes X-Dashboard-Uid header in query requests. If this field is provided then X-Dashboard-Uid header of request should match this value. If not set, it won't be checked. This property won't be applied to metadata queries.", + "type": "string" + }, + "panel_id": { + "description": "Grafana includes X-Panel-Id header in query requests. If this field is provided then X-Panel-Id header of request should match this value. If not set, it won't be checked. This property won't be applied to metadata queries.", + "type": "string" + }, + "query_step_limit": { + "description": "If query step provided should be within this limit to match. If not set, it won't be checked. This property only applied to range queries and ignored for other types of queries.", + "properties": { + "max": { + "default": 0, + "description": "Query step should be below or equal to this value to match. If set to 0, it won't be checked.", + "type": "number" + }, + "min": { + "default": 0, + "description": "Query step should be above or equal to this value to match. If set to 0, it won't be checked.", + "type": "number" + } + }, + "type": "object" + }, + "regex": { + "description": "Regex that the query string (or at least one of the matchers in metadata query) should match. If not set, it won't be checked.", + "type": "string" + }, + "time_range_limit": { + "description": "Query time range should be within this limit to match. Depending on where it was used, in most of the use-cases, either min or max value will be used. If not set, it won't be checked.", + "properties": { + "max": { + "default": 0, + "description": "This will be duration (12h, 1d, 15d etc.). Query time range should be below or equal to this value to match. Ex: if this value is 24h, then queries whose range is smaller than or equal to 24h will match.If set to 0, it won't be checked.", + "type": "number" + }, + "min": { + "default": 0, + "description": "This will be duration (12h, 1d, 15d etc.). Query time range should be above or equal to this value to match. Ex: if this value is 20d, then queries whose range is bigger than or equal to 20d will match. If set to 0, it won't be checked.", + "type": "number" + } + }, + "type": "object" + }, + "time_window": { + "description": "Overall data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If not set, it won't be checked.", + "properties": { + "end": { + "default": 0, + "description": "End of the data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If set to 0, it won't be checked.", + "type": "number" + }, + "start": { + "default": 0, + "description": "Start of the data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If set to 0, it won't be checked.", + "type": "number" + } + }, + "type": "object" + }, + "user_agent_regex": { + "description": "Regex that User-Agent header of the request should match. 
If not set, it won't be checked.", + "type": "string" + } + }, + "type": "object" + }, + "alertmanager_config": { + "description": "The alertmanager_config configures the Cortex alertmanager.", + "properties": { + "alertmanager_client": { + "properties": { + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 5s.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy' and '' (disable compression)", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.grpc-compression" + }, + "max_recv_msg_size": { + "default": 16777216, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "alertmanager.alertmanager-client.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 4194304, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "alertmanager.alertmanager-client.grpc-max-send-msg-size" + }, + "remote_timeout": { + "default": "2s", + "description": "Timeout for downstream alertmanagers.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.remote-timeout", + "x-format": "duration" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "alertmanager.alertmanager-client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "alertmanager.alertmanager-client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. 
Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "alertmanager.alertmanager-client.tls-server-name" + } + }, + "type": "object" + }, + "api_concurrency": { + "default": 0, + "description": "Maximum number of concurrent GET API requests before returning an error.", + "type": "number", + "x-cli-flag": "alertmanager.api-concurrency" + }, + "auto_webhook_root": { + "description": "Root of URL to generate if config is http://internal.monitor", + "type": "string", + "x-cli-flag": "alertmanager.configs.auto-webhook-root" + }, + "cluster": { + "properties": { + "advertise_address": { + "description": "Explicit address or hostname to advertise in cluster.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.advertise-address" + }, + "gossip_interval": { + "default": "200ms", + "description": "The interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated across cluster more quickly at the expense of increased bandwidth usage.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.gossip-interval", + "x-format": "duration" + }, + "listen_address": { + "default": "0.0.0.0:9094", + "description": "Listen address and port for the cluster. Not specifying this flag disables high-availability mode.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.listen-address" + }, + "peer_timeout": { + "default": "15s", + "description": "Time to wait between peers to send notifications.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.peer-timeout", + "x-format": "duration" + }, + "peers": { + "description": "Comma-separated list of initial peers.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.peers" + }, + "push_pull_interval": { + "default": "1m0s", + "description": "The interval between gossip state syncs. Setting this interval lower (more frequent) will increase convergence speeds across larger clusters at the expense of increased bandwidth usage.", + "type": "string", + "x-cli-flag": "alertmanager.cluster.push-pull-interval", + "x-format": "duration" + } + }, + "type": "object" + }, + "data_dir": { + "default": "data/", + "description": "Base path for data storage.", + "type": "string", + "x-cli-flag": "alertmanager.storage.path" + }, + "disabled_tenants": { + "description": "Comma separated list of tenants whose alerts this alertmanager cannot process. If specified, a alertmanager that would normally pick the specified tenant(s) for processing will ignore them instead.", + "type": "string", + "x-cli-flag": "alertmanager.disabled-tenants" + }, + "enable_api": { + "default": false, + "description": "Enable the experimental alertmanager config api.", + "type": "boolean", + "x-cli-flag": "experimental.alertmanager.enable-api" + }, + "enabled_tenants": { + "description": "Comma separated list of tenants whose alerts this alertmanager can process. If specified, only these tenants will be handled by alertmanager, otherwise this alertmanager can process alerts from all tenants.", + "type": "string", + "x-cli-flag": "alertmanager.enabled-tenants" + }, + "external_url": { + "description": "The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself. 
If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Alertmanager. If omitted, relevant URL components will be derived automatically.", + "format": "uri", + "type": "string", + "x-cli-flag": "alertmanager.web.external-url" + }, + "fallback_config_file": { + "description": "Filename of fallback config to use if none specified for instance.", + "type": "string", + "x-cli-flag": "alertmanager.configs.fallback" + }, + "gc_interval": { + "default": "30m0s", + "description": "Alertmanager alerts Garbage collection interval.", + "type": "string", + "x-cli-flag": "alertmanager.alerts-gc-interval", + "x-format": "duration" + }, + "max_recv_msg_size": { + "default": 16777216, + "description": "Maximum size (bytes) of an accepted HTTP request body.", + "type": "number", + "x-cli-flag": "alertmanager.max-recv-msg-size" + }, + "persist_interval": { + "default": "15m0s", + "description": "The interval between persisting the current alertmanager state (notification log and silences) to object storage. This is only used when sharding is enabled. This state is read when all replicas for a shard can not be contacted. In this scenario, having persisted the state more frequently will result in potentially fewer lost silences, and fewer duplicate notifications.", + "type": "string", + "x-cli-flag": "alertmanager.persist-interval", + "x-format": "duration" + }, + "poll_interval": { + "default": "15s", + "description": "How frequently to poll Cortex configs", + "type": "string", + "x-cli-flag": "alertmanager.configs.poll-interval", + "x-format": "duration" + }, + "retention": { + "default": "120h0m0s", + "description": "How long to keep data for.", + "type": "string", + "x-cli-flag": "alertmanager.storage.retention", + "x-format": "duration" + }, + "sharding_enabled": { + "default": false, + "description": "Shard tenants across multiple alertmanager instances.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-enabled" + }, + "sharding_ring": { + "properties": { + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-ring.detailed-metrics-enabled" + }, + "final_sleep": { + "default": "0s", + "description": "The sleep seconds when alertmanager is shutting down. Need to be close to or larger than KV Store information propagation delay", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.final-sleep", + "x-format": "duration" + }, + "heartbeat_period": { + "default": "15s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which alertmanagers are considered unhealthy within the ring. 0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.heartbeat-timeout", + "x-format": "duration" + }, + "instance_availability_zone": { + "description": "The availability zone where this instance is running. 
Required if zone-awareness is enabled.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.instance-availability-zone" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "alertmanager.sharding-ring.instance-interface-names" + }, + "keep_instance_in_the_ring_on_shutdown": { + "default": false, + "description": "Keep instance in the ring on shut down.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-ring.keep-instance-in-the-ring-on-shutdown" + }, + "kvstore": { + "description": "The key-value store used to share the hash ring across multiple instances.", + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "alertmanagers/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. 
Supported values are: consul, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.store" + } + }, + "type": "object" + }, + "replication_factor": { + "default": 3, + "description": "The replication factor to use when sharding the alertmanager.", + "type": "number", + "x-cli-flag": "alertmanager.sharding-ring.replication-factor" + }, + "tokens_file_path": { + "description": "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.tokens-file-path" + }, + "wait_instance_state_timeout": { + "default": "10m0s", + "description": "Timeout for waiting on alertmanager to become desired state in the ring.", + "type": "string", + "x-cli-flag": "alertmanager.sharding-ring.wait-instance-state-timeout", + "x-format": "duration" + }, + "zone_awareness_enabled": { + "default": false, + "description": "True to enable zone-awareness and replicate alerts across different availability zones.", + "type": "boolean", + "x-cli-flag": "alertmanager.sharding-ring.zone-awareness-enabled" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "alertmanager_storage_config": { + "description": "The alertmanager_storage_config configures the Cortex alertmanager storage backend.", + "properties": { + "azure": { + "properties": { + "account_key": { + "description": "Azure storage account key", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.account-key" + }, + "account_name": { + "description": "Azure storage account name", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.account-name" + }, + "connection_string": { + "description": "The values of `account-name` and `endpoint-suffix` values will not be ignored if `connection-string` is set. Use this method over `account-key` if you need to authenticate via a SAS token or if you use the Azurite emulator.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.connection-string" + }, + "container_name": { + "description": "Azure storage container name", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.container-name" + }, + "endpoint_suffix": { + "description": "Azure storage endpoint suffix without schema. The account name will be prefixed to this value to create the FQDN", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.endpoint-suffix" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "alertmanager-storage.azure.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 
0 means no limit.", + "type": "number", + "x-cli-flag": "alertmanager-storage.azure.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "alertmanager-storage.azure.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "alertmanager-storage.azure.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a servers response headers.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "max_retries": { + "default": 20, + "description": "Number of retries for recoverable errors", + "type": "number", + "x-cli-flag": "alertmanager-storage.azure.max-retries" + }, + "msi_resource": { + "description": "Deprecated: Azure storage MSI resource. It will be set automatically by Azure SDK.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.msi-resource" + }, + "user_assigned_id": { + "description": "Azure storage MSI resource managed identity client Id. If not supplied default Azure credential will be used. Set it to empty if you need to authenticate via Azure Workload Identity.", + "type": "string", + "x-cli-flag": "alertmanager-storage.azure.user-assigned-id" + } + }, + "type": "object" + }, + "backend": { + "default": "s3", + "description": "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem, configdb, local.", + "type": "string", + "x-cli-flag": "alertmanager-storage.backend" + }, + "configdb": { + "$ref": "#/definitions/configstore_config" + }, + "filesystem": { + "properties": { + "dir": { + "description": "Local filesystem storage directory.", + "type": "string", + "x-cli-flag": "alertmanager-storage.filesystem.dir" + } + }, + "type": "object" + }, + "gcs": { + "properties": { + "bucket_name": { + "description": "GCS bucket name", + "type": "string", + "x-cli-flag": "alertmanager-storage.gcs.bucket-name" + }, + "service_account": { + "description": "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. If empty, fallback to Google default logic.", + "type": "string", + "x-cli-flag": "alertmanager-storage.gcs.service-account" + } + }, + "type": "object" + }, + "local": { + "properties": { + "path": { + "description": "Path at which alertmanager configurations are stored.", + "type": "string", + "x-cli-flag": "alertmanager-storage.local.path" + } + }, + "type": "object" + }, + "s3": { + "properties": { + "access_key_id": { + "description": "S3 access key ID", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.access-key-id" + }, + "bucket_lookup_type": { + "default": "auto", + "description": "The s3 bucket lookup style. 
Supported values are: auto, virtual-hosted, path.", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.bucket-lookup-type" + }, + "bucket_name": { + "description": "S3 bucket name", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.bucket-name" + }, + "disable_dualstack": { + "default": false, + "description": "If enabled, S3 endpoint will use the non-dualstack variant.", + "type": "boolean", + "x-cli-flag": "alertmanager-storage.s3.disable-dualstack" + }, + "endpoint": { + "description": "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.endpoint" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "alertmanager-storage.s3.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "alertmanager-storage.s3.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "alertmanager-storage.s3.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "alertmanager-storage.s3.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a servers response headers.", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "insecure": { + "default": false, + "description": "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.", + "type": "boolean", + "x-cli-flag": "alertmanager-storage.s3.insecure" + }, + "list_objects_version": { + "description": "The list api version. Supported values are: v1, v2, and ''.", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.list-objects-version" + }, + "region": { + "description": "S3 region. 
If unset, the client will issue a S3 GetBucketLocation API call to autodetect it.", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.region" + }, + "secret_access_key": { + "description": "S3 secret access key", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.secret-access-key" + }, + "send_content_md5": { + "default": true, + "description": "If true, attach MD5 checksum when upload objects and S3 uses MD5 checksum algorithm to verify the provided digest. If false, use CRC32C algorithm instead.", + "type": "boolean", + "x-cli-flag": "alertmanager-storage.s3.send-content-md5" + }, + "signature_version": { + "default": "v4", + "description": "The signature version to use for authenticating against S3. Supported values are: v4, v2.", + "type": "string", + "x-cli-flag": "alertmanager-storage.s3.signature-version" + }, + "sse": { + "$ref": "#/definitions/s3_sse_config" + } + }, + "type": "object" + }, + "swift": { + "properties": { + "application_credential_id": { + "description": "OpenStack Swift application credential ID.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.application-credential-id" + }, + "application_credential_name": { + "description": "OpenStack Swift application credential name.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.application-credential-name" + }, + "application_credential_secret": { + "description": "OpenStack Swift application credential secret.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.application-credential-secret" + }, + "auth_url": { + "description": "OpenStack Swift authentication URL", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.auth-url" + }, + "auth_version": { + "default": 0, + "description": "OpenStack Swift authentication API version. 
0 to autodetect.", + "type": "number", + "x-cli-flag": "alertmanager-storage.swift.auth-version" + }, + "connect_timeout": { + "default": "10s", + "description": "Time after which a connection attempt is aborted.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.connect-timeout", + "x-format": "duration" + }, + "container_name": { + "description": "Name of the OpenStack Swift container to put chunks in.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.container-name" + }, + "domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.domain-id" + }, + "domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.domain-name" + }, + "max_retries": { + "default": 3, + "description": "Max retries on request errors.", + "type": "number", + "x-cli-flag": "alertmanager-storage.swift.max-retries" + }, + "password": { + "description": "OpenStack Swift API key.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.password" + }, + "project_domain_id": { + "description": "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.project-domain-id" + }, + "project_domain_name": { + "description": "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.project-domain-name" + }, + "project_id": { + "description": "OpenStack Swift project ID (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.project-id" + }, + "project_name": { + "description": "OpenStack Swift project name (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.project-name" + }, + "region_name": { + "description": "OpenStack Swift Region to use (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.region-name" + }, + "request_timeout": { + "default": "5s", + "description": "Time after which an idle request is aborted. 
The timeout watchdog is reset each time some data is received, so the timeout triggers after X time no data is received on a request.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.request-timeout", + "x-format": "duration" + }, + "user_domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.user-domain-id" + }, + "user_domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.user-domain-name" + }, + "user_id": { + "description": "OpenStack Swift user ID.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.user-id" + }, + "username": { + "description": "OpenStack Swift username.", + "type": "string", + "x-cli-flag": "alertmanager-storage.swift.username" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "blocks_storage_config": { + "description": "The blocks_storage_config configures the blocks storage.", + "properties": { + "azure": { + "properties": { + "account_key": { + "description": "Azure storage account key", + "type": "string", + "x-cli-flag": "blocks-storage.azure.account-key" + }, + "account_name": { + "description": "Azure storage account name", + "type": "string", + "x-cli-flag": "blocks-storage.azure.account-name" + }, + "connection_string": { + "description": "The values of `account-name` and `endpoint-suffix` values will not be ignored if `connection-string` is set. Use this method over `account-key` if you need to authenticate via a SAS token or if you use the Azurite emulator.", + "type": "string", + "x-cli-flag": "blocks-storage.azure.connection-string" + }, + "container_name": { + "description": "Azure storage container name", + "type": "string", + "x-cli-flag": "blocks-storage.azure.container-name" + }, + "endpoint_suffix": { + "description": "Azure storage endpoint suffix without schema. The account name will be prefixed to this value to create the FQDN", + "type": "string", + "x-cli-flag": "blocks-storage.azure.endpoint-suffix" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "blocks-storage.azure.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "blocks-storage.azure.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "blocks-storage.azure.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "blocks-storage.azure.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 
0 means no limit.", + "type": "number", + "x-cli-flag": "blocks-storage.azure.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "blocks-storage.azure.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a servers response headers.", + "type": "string", + "x-cli-flag": "blocks-storage.azure.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "blocks-storage.azure.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "max_retries": { + "default": 20, + "description": "Number of retries for recoverable errors", + "type": "number", + "x-cli-flag": "blocks-storage.azure.max-retries" + }, + "msi_resource": { + "description": "Deprecated: Azure storage MSI resource. It will be set automatically by Azure SDK.", + "type": "string", + "x-cli-flag": "blocks-storage.azure.msi-resource" + }, + "user_assigned_id": { + "description": "Azure storage MSI resource managed identity client Id. If not supplied default Azure credential will be used. Set it to empty if you need to authenticate via Azure Workload Identity.", + "type": "string", + "x-cli-flag": "blocks-storage.azure.user-assigned-id" + } + }, + "type": "object" + }, + "backend": { + "default": "s3", + "description": "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem.", + "type": "string", + "x-cli-flag": "blocks-storage.backend" + }, + "bucket_store": { + "description": "This configures how the querier and store-gateway discover and synchronize blocks stored in the bucket.", + "properties": { + "block_discovery_strategy": { + "default": "concurrent", + "description": "One of concurrent, recursive, bucket_index. When set to concurrent, stores will concurrently issue one call per directory to discover active blocks in the bucket. The recursive strategy iterates through all objects in the bucket, recursively traversing into each directory. This avoids N+1 calls at the expense of having slower bucket iterations. bucket_index strategy can be used in Compactor only and utilizes the existing bucket index to fetch block IDs to sync. This avoids iterating the bucket but can be impacted by delays of cleaner creating bucket index.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.block-discovery-strategy" + }, + "block_sync_concurrency": { + "default": 20, + "description": "Maximum number of concurrent blocks syncing per tenant.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.block-sync-concurrency" + }, + "bucket_index": { + "properties": { + "enabled": { + "default": false, + "description": "True to enable querier and store-gateway to discover blocks in the storage via bucket index instead of bucket scanning.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.bucket-index.enabled" + }, + "idle_timeout": { + "default": "1h0m0s", + "description": "How long a unused bucket index should be cached. Once this timeout expires, the unused bucket index is removed from the in-memory cache. 
This option is used only by querier.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.bucket-index.idle-timeout", + "x-format": "duration" + }, + "max_stale_period": { + "default": "1h0m0s", + "description": "The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, while this check is enforced in the querier (at query time).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.bucket-index.max-stale-period", + "x-format": "duration" + }, + "update_on_error_interval": { + "default": "1m0s", + "description": "How frequently a bucket index, which previously failed to load, should be tried to load again. This option is used only by querier.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.bucket-index.update-on-error-interval", + "x-format": "duration" + } + }, + "type": "object" + }, + "chunks_cache": { + "properties": { + "attributes_ttl": { + "default": "168h0m0s", + "description": "TTL for caching object attributes for chunks.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.attributes-ttl", + "x-format": "duration" + }, + "backend": { + "description": "The chunks cache backend type. Single or Multiple cache backend can be provided. Supported values in single cache: memcached, redis, inmemory, and '' (disable). Supported values in multi level cache: a comma-separated list of (inmemory, memcached, redis)", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.backend" + }, + "inmemory": { + "properties": { + "max_size_bytes": { + "default": 1073741824, + "description": "Maximum size in bytes of in-memory chunks cache used (shared between all tenants).", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.inmemory.max-size-bytes" + } + }, + "type": "object" + }, + "max_get_range_requests": { + "default": 3, + "description": "Maximum number of sub-GetRange requests that a single GetRange request can be split into when fetching chunks. Zero or negative value = unlimited number of sub-requests.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.max-get-range-requests" + }, + "memcached": { + "properties": { + "addresses": { + "description": "Comma separated list of memcached addresses. 
Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.addresses" + }, + "auto_discovery": { + "default": false, + "description": "Use memcached auto-discovery mechanism provided by some cloud provider like GCP and AWS", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.auto-discovery" + }, + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations can occur.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency" + }, + "max_get_multi_batch_size": { + "default": 0, + "description": "The maximum number of keys a single underlying get operation should run. If more keys are specified, internally keys are split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-batch-size" + }, + "max_get_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency" + }, + "max_idle_connections": { + "default": 16, + "description": "The maximum number of idle connections that will be maintained per address.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections" + }, + "max_item_size": { + "default": 1048576, + "description": "The maximum size of an item stored in memcached. Bigger items are not stored. If set to 0, no maximum size is enforced.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.max-item-size" + }, + "set_async_circuit_breaker_config": { + "properties": { + "consecutive_failures": { + "default": 5, + "description": "Consecutive failures to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.consecutive-failures" + }, + "enabled": { + "default": false, + "description": "If true, enable circuit breaker.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.enabled" + }, + "failure_percent": { + "default": 0.05, + "description": "Failure percentage to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.failure-percent" + }, + "half_open_max_requests": { + "default": 10, + "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. 
If set to 0, by default it allows 1 request.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.half-open-max-requests" + }, + "min_requests": { + "default": 50, + "description": "Minimal requests to trigger the circuit breaker.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.min-requests" + }, + "open_duration": { + "default": "5s", + "description": "Period of the open state after which the state of the circuit breaker becomes half-open. If set to 0, by default open duration is 60 seconds.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.set-async.circuit-breaker.open-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "timeout": { + "default": "100ms", + "description": "The socket read/write timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.memcached.timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "multilevel": { + "properties": { + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.multilevel.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations can occur when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.multilevel.max-async-concurrency" + }, + "max_backfill_items": { + "default": 10000, + "description": "The maximum number of items to backfill per asynchronous operation.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.multilevel.max-backfill-items" + } + }, + "type": "object" + }, + "redis": { + "properties": { + "addresses": { + "description": "Comma separated list of redis addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.addresses" + }, + "cache_size": { + "default": 0, + "description": "If not zero then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.cache-size" + }, + "db": { + "default": 0, + "description": "Database to be selected after connecting to the server.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.db" + }, + "dial_timeout": { + "default": "5s", + "description": "Client dial timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.dial-timeout", + "x-format": "duration" + }, + "get_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for mget.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.get-multi-batch-size" + }, + "master_name": { + "description": "Specifies the master's name. 
Must not be empty for Redis Sentinel.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.master-name" + }, + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.max-async-concurrency" + }, + "max_get_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent GetMulti() operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.max-get-multi-concurrency" + }, + "max_set_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.max-set-multi-concurrency" + }, + "password": { + "description": "Redis password.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.password" + }, + "read_timeout": { + "default": "3s", + "description": "Client read timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.read-timeout", + "x-format": "duration" + }, + "set_async_circuit_breaker_config": { + "properties": { + "consecutive_failures": { + "default": 5, + "description": "Consecutive failures to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.consecutive-failures" + }, + "enabled": { + "default": false, + "description": "If true, enable circuit breaker.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.enabled" + }, + "failure_percent": { + "default": 0.05, + "description": "Failure percentage to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.failure-percent" + }, + "half_open_max_requests": { + "default": 10, + "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, by default it allows 1 request.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.half-open-max-requests" + }, + "min_requests": { + "default": 50, + "description": "Minimum number of requests to trigger the circuit breaker.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.min-requests" + }, + "open_duration": { + "default": "5s", + "description": "Period of the open state after which the state of the circuit breaker becomes half-open. 
If set to 0, by default open duration is 60 seconds.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-async.circuit-breaker.open-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "set_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for pipeline set.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.set-multi-batch-size" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Whether to enable tls for redis connection.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.tls-server-name" + }, + "username": { + "description": "Redis username.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.username" + }, + "write_timeout": { + "default": "3s", + "description": "Client write timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.redis.write-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "subrange_size": { + "default": 16000, + "description": "Size of each subrange that bucket object is split into for better caching.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.subrange-size" + }, + "subrange_ttl": { + "default": "24h0m0s", + "description": "TTL for caching individual chunks subranges.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.chunks-cache.subrange-ttl", + "x-format": "duration" + } + }, + "type": "object" + }, + "consistency_delay": { + "default": "0s", + "description": "Minimum age of a block before it's being read. Set it to safe value (e.g 30m) if your object storage is eventually consistent. GCS and S3 are (roughly) strongly consistent.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.consistency-delay", + "x-format": "duration" + }, + "ignore_blocks_before": { + "default": "0s", + "description": "The blocks created before `now() - ignore_blocks_before` will not be synced. 0 to disable.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.ignore-blocks-before", + "x-format": "duration" + }, + "ignore_blocks_within": { + "default": "0s", + "description": "The blocks created since `now() - ignore_blocks_within` will not be synced. 
This should be used together with `-querier.query-store-after` to filter out the blocks that are too new to be queried. A reasonable value for this flag would be `-querier.query-store-after - blocks-storage.bucket-store.bucket-index.max-stale-period` to give some buffer. 0 to disable.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.ignore-blocks-within", + "x-format": "duration" + }, + "ignore_deletion_mark_delay": { + "default": "6h0m0s", + "description": "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. Default is 6h, half of the default value for -compactor.deletion-delay.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.ignore-deletion-marks-delay", + "x-format": "duration" + }, + "index_cache": { + "properties": { + "backend": { + "default": "inmemory", + "description": "The index cache backend type. Multiple cache backend can be provided as a comma-separated ordered list to enable the implementation of a cache hierarchy. Supported values: inmemory, memcached, redis.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.backend" + }, + "inmemory": { + "properties": { + "enabled_items": { + "default": [], + "description": "Selectively cache index item types. Supported values are Postings, ExpandedPostings and Series", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.inmemory.enabled-items" + }, + "max_size_bytes": { + "default": 1073741824, + "description": "Maximum size in bytes of in-memory index cache used to speed up blocks index lookups (shared between all tenants).", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.inmemory.max-size-bytes" + } + }, + "type": "object" + }, + "memcached": { + "properties": { + "addresses": { + "description": "Comma separated list of memcached addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.addresses" + }, + "auto_discovery": { + "default": false, + "description": "Use memcached auto-discovery mechanism provided by some cloud provider like GCP and AWS", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.auto-discovery" + }, + "enabled_items": { + "default": [], + "description": "Selectively cache index item types. 
Supported values are Postings, ExpandedPostings and Series", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.enabled-items" + }, + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations can occur.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency" + }, + "max_get_multi_batch_size": { + "default": 0, + "description": "The maximum number of keys a single underlying get operation should run. If more keys are specified, internally keys are split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-get-multi-batch-size" + }, + "max_get_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency" + }, + "max_idle_connections": { + "default": 16, + "description": "The maximum number of idle connections that will be maintained per address.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-idle-connections" + }, + "max_item_size": { + "default": 1048576, + "description": "The maximum size of an item stored in memcached. Bigger items are not stored. If set to 0, no maximum size is enforced.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.max-item-size" + }, + "set_async_circuit_breaker_config": { + "properties": { + "consecutive_failures": { + "default": 5, + "description": "Consecutive failures to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.consecutive-failures" + }, + "enabled": { + "default": false, + "description": "If true, enable circuit breaker.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.enabled" + }, + "failure_percent": { + "default": 0.05, + "description": "Failure percentage to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.failure-percent" + }, + "half_open_max_requests": { + "default": 10, + "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, by default it allows 1 request.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.half-open-max-requests" + }, + "min_requests": { + "default": 50, + "description": "Minimal requests to trigger the circuit breaker.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.min-requests" + }, + "open_duration": { + "default": "5s", + "description": "Period of the open state after which the state of the circuit breaker becomes half-open. 
If set to 0, by default open duration is 60 seconds.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.set-async.circuit-breaker.open-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "timeout": { + "default": "100ms", + "description": "The socket read/write timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.memcached.timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "multilevel": { + "properties": { + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.multilevel.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations can occur when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.multilevel.max-async-concurrency" + }, + "max_backfill_items": { + "default": 10000, + "description": "The maximum number of items to backfill per asynchronous operation.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.multilevel.max-backfill-items" + } + }, + "type": "object" + }, + "redis": { + "properties": { + "addresses": { + "description": "Comma separated list of redis addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.addresses" + }, + "cache_size": { + "default": 0, + "description": "If not zero then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.cache-size" + }, + "db": { + "default": 0, + "description": "Database to be selected after connecting to the server.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.db" + }, + "dial_timeout": { + "default": "5s", + "description": "Client dial timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.dial-timeout", + "x-format": "duration" + }, + "enabled_items": { + "default": [], + "description": "Selectively cache index item types. Supported values are Postings, ExpandedPostings and Series", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.enabled-items" + }, + "get_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for mget.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.get-multi-batch-size" + }, + "master_name": { + "description": "Specifies the master's name. 
Must not be empty for Redis Sentinel.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.master-name" + }, + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.max-async-concurrency" + }, + "max_get_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent GetMulti() operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.max-get-multi-concurrency" + }, + "max_set_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.max-set-multi-concurrency" + }, + "password": { + "description": "Redis password.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.password" + }, + "read_timeout": { + "default": "3s", + "description": "Client read timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.read-timeout", + "x-format": "duration" + }, + "set_async_circuit_breaker_config": { + "properties": { + "consecutive_failures": { + "default": 5, + "description": "Consecutive failures to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.consecutive-failures" + }, + "enabled": { + "default": false, + "description": "If true, enable circuit breaker.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.enabled" + }, + "failure_percent": { + "default": 0.05, + "description": "Failure percentage to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.failure-percent" + }, + "half_open_max_requests": { + "default": 10, + "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, by default it allows 1 request.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.half-open-max-requests" + }, + "min_requests": { + "default": 50, + "description": "Minimum number of requests to trigger the circuit breaker.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.min-requests" + }, + "open_duration": { + "default": "5s", + "description": "Period of the open state after which the state of the circuit breaker becomes half-open. 
If set to 0, by default open duration is 60 seconds.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-async.circuit-breaker.open-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "set_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for pipeline set.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.set-multi-batch-size" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Whether to enable tls for redis connection.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.tls-server-name" + }, + "username": { + "description": "Redis username.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.username" + }, + "write_timeout": { + "default": "3s", + "description": "Client write timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-cache.redis.write-timeout", + "x-format": "duration" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "index_header_lazy_loading_enabled": { + "default": false, + "description": "If enabled, store-gateway will lazily memory-map an index-header only once required by a query.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.index-header-lazy-loading-enabled" + }, + "index_header_lazy_loading_idle_timeout": { + "default": "20m0s", + "description": "If index-header lazy loading is enabled and this setting is \u003e 0, the store-gateway will release memory-mapped index-headers after 'idle timeout' inactivity.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout", + "x-format": "duration" + }, + "lazy_expanded_posting_group_max_key_series_ratio": { + "default": 100, + "description": "Mark posting group as lazy if it fetches more keys than R * max series the query should fetch. With R set to 100, a posting group which fetches 100K keys will be marked as lazy if the current query only fetches 1000 series. This config is only valid if lazy expanded posting is enabled. 
0 disables the limit.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.lazy-expanded-posting-group-max-key-series-ratio" + }, + "lazy_expanded_postings_enabled": { + "default": false, + "description": "If true, Store Gateway will estimate postings size and try to lazily expand postings if it downloads less data than expanding all postings.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.lazy-expanded-postings-enabled" + }, + "matchers_cache_max_items": { + "default": 0, + "description": "Maximum number of entries in the regex matchers cache. 0 to disable.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.matchers-cache-max-items" + }, + "max_chunk_pool_bytes": { + "default": 2147483648, + "description": "Max size - in bytes - of a chunks pool, used to reduce memory allocations. The pool is shared across all tenants. 0 to disable the limit.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.max-chunk-pool-bytes" + }, + "max_concurrent": { + "default": 100, + "description": "Max number of concurrent queries to execute against the long-term storage. The limit is shared across all tenants.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.max-concurrent" + }, + "max_inflight_requests": { + "default": 0, + "description": "Max number of inflight queries to execute against the long-term storage. The limit is shared across all tenants. 0 to disable.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.max-inflight-requests" + }, + "meta_sync_concurrency": { + "default": 20, + "description": "Number of Go routines to use when syncing block meta files from object storage per tenant.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.meta-sync-concurrency" + }, + "metadata_cache": { + "properties": { + "backend": { + "description": "The metadata cache backend type. Single or Multiple cache backend can be provided. Supported values in single cache: memcached, redis, inmemory, and '' (disable). Supported values in multi level cache: a comma-separated list of (inmemory, memcached, redis)", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.backend" + }, + "block_index_attributes_ttl": { + "default": "168h0m0s", + "description": "How long to cache attributes of the block index.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.block-index-attributes-ttl", + "x-format": "duration" + }, + "bucket_index_content_ttl": { + "default": "5m0s", + "description": "How long to cache content of the bucket index.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.bucket-index-content-ttl", + "x-format": "duration" + }, + "bucket_index_max_size_bytes": { + "default": 1048576, + "description": "Maximum size of bucket index content to cache in bytes. Caching will be skipped if the content exceeds this size. 
This is useful to avoid network round trip for large content if the configured caching backend has a hard limit on cached item size (in this case, you should set this limit to the same limit in the caching backend).", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.bucket-index-max-size-bytes" + }, + "chunks_list_ttl": { + "default": "24h0m0s", + "description": "How long to cache list of chunks for a block.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.chunks-list-ttl", + "x-format": "duration" + }, + "inmemory": { + "properties": { + "max_size_bytes": { + "default": 1073741824, + "description": "Maximum size in bytes of in-memory metadata cache used (shared between all tenants).", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.inmemory.max-size-bytes" + } + }, + "type": "object" + }, + "memcached": { + "properties": { + "addresses": { + "description": "Comma separated list of memcached addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.addresses" + }, + "auto_discovery": { + "default": false, + "description": "Use memcached auto-discovery mechanism provided by some cloud providers like GCP and AWS.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.auto-discovery" + }, + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency" + }, + "max_get_multi_batch_size": { + "default": 0, + "description": "The maximum number of keys a single underlying get operation should fetch. If more keys are specified, internally keys are split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-batch-size" + }, + "max_get_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency" + }, + "max_idle_connections": { + "default": 16, + "description": "The maximum number of idle connections that will be maintained per address.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections" + }, + "max_item_size": { + "default": 1048576, + "description": "The maximum size of an item stored in memcached. Bigger items are not stored. 
If set to 0, no maximum size is enforced.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.max-item-size" + }, + "set_async_circuit_breaker_config": { + "properties": { + "consecutive_failures": { + "default": 5, + "description": "Consecutive failures to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.consecutive-failures" + }, + "enabled": { + "default": false, + "description": "If true, enable circuit breaker.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.enabled" + }, + "failure_percent": { + "default": 0.05, + "description": "Failure percentage to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.failure-percent" + }, + "half_open_max_requests": { + "default": 10, + "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, by default it allows 1 request.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.half-open-max-requests" + }, + "min_requests": { + "default": 50, + "description": "Minimal requests to trigger the circuit breaker.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.min-requests" + }, + "open_duration": { + "default": "5s", + "description": "Period of the open state after which the state of the circuit breaker becomes half-open. If set to 0, by default open duration is 60 seconds.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.set-async.circuit-breaker.open-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "timeout": { + "default": "100ms", + "description": "The socket read/write timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.memcached.timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "metafile_attributes_ttl": { + "default": "168h0m0s", + "description": "How long to cache attributes of the block metafile.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-attributes-ttl", + "x-format": "duration" + }, + "metafile_content_ttl": { + "default": "24h0m0s", + "description": "How long to cache content of the metafile.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-content-ttl", + "x-format": "duration" + }, + "metafile_doesnt_exist_ttl": { + "default": "5m0s", + "description": "How long to cache information that block metafile doesn't exist. Also used for user deletion mark file.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-doesnt-exist-ttl", + "x-format": "duration" + }, + "metafile_exists_ttl": { + "default": "2h0m0s", + "description": "How long to cache information that block metafile exists. Also used for user deletion mark file.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-exists-ttl", + "x-format": "duration" + }, + "metafile_max_size_bytes": { + "default": 1048576, + "description": "Maximum size of metafile content to cache in bytes. Caching will be skipped if the content exceeds this size. 
This is useful to avoid a network round trip for large content if the configured caching backend has a hard limit on cached item size (in this case, you should set this limit to the same limit in the caching backend).", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.metafile-max-size-bytes" + }, + "multilevel": { + "properties": { + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.multilevel.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur when backfilling cache items.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.multilevel.max-async-concurrency" + }, + "max_backfill_items": { + "default": 10000, + "description": "The maximum number of items to backfill per asynchronous operation.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.multilevel.max-backfill-items" + } + }, + "type": "object" + }, + "partitioned_groups_list_ttl": { + "default": "0s", + "description": "How long to cache list of partitioned groups for a user. 0 disables caching.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.partitioned-groups-list-ttl", + "x-format": "duration" + }, + "redis": { + "properties": { + "addresses": { + "description": "Comma separated list of redis addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.addresses" + }, + "cache_size": { + "default": 0, + "description": "If not zero then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.cache-size" + }, + "db": { + "default": 0, + "description": "Database to be selected after connecting to the server.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.db" + }, + "dial_timeout": { + "default": "5s", + "description": "Client dial timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.dial-timeout", + "x-format": "duration" + }, + "get_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for mget.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.get-multi-batch-size" + }, + "master_name": { + "description": "Specifies the master's name.
Must not be empty for Redis Sentinel.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.master-name" + }, + "max_async_buffer_size": { + "default": 10000, + "description": "The maximum number of enqueued asynchronous operations allowed.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.max-async-buffer-size" + }, + "max_async_concurrency": { + "default": 3, + "description": "The maximum number of concurrent asynchronous operations that can occur.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.max-async-concurrency" + }, + "max_get_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent GetMulti() operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.max-get-multi-concurrency" + }, + "max_set_multi_concurrency": { + "default": 100, + "description": "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.max-set-multi-concurrency" + }, + "password": { + "description": "Redis password.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.password" + }, + "read_timeout": { + "default": "3s", + "description": "Client read timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.read-timeout", + "x-format": "duration" + }, + "set_async_circuit_breaker_config": { + "properties": { + "consecutive_failures": { + "default": 5, + "description": "Consecutive failures to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.consecutive-failures" + }, + "enabled": { + "default": false, + "description": "If true, enable circuit breaker.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.enabled" + }, + "failure_percent": { + "default": 0.05, + "description": "Failure percentage to determine if the circuit breaker should open.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.failure-percent" + }, + "half_open_max_requests": { + "default": 10, + "description": "Maximum number of requests allowed to pass through when the circuit breaker is half-open. If set to 0, by default it allows 1 request.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.half-open-max-requests" + }, + "min_requests": { + "default": 50, + "description": "Minimal requests to trigger the circuit breaker.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.min-requests" + }, + "open_duration": { + "default": "5s", + "description": "Period of the open state after which the state of the circuit breaker becomes half-open.
If set to 0, by default open duration is 60 seconds.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-async.circuit-breaker.open-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "set_multi_batch_size": { + "default": 100, + "description": "The maximum size per batch for pipeline set.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.set-multi-batch-size" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Whether to enable tls for redis connection.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.tls-server-name" + }, + "username": { + "description": "Redis username.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.username" + }, + "write_timeout": { + "default": "3s", + "description": "Client write timeout.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.redis.write-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "tenant_blocks_list_ttl": { + "default": "5m0s", + "description": "How long to cache list of blocks for each tenant.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.tenant-blocks-list-ttl", + "x-format": "duration" + }, + "tenants_list_ttl": { + "default": "15m0s", + "description": "How long to cache list of tenants in the bucket.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.metadata-cache.tenants-list-ttl", + "x-format": "duration" + } + }, + "type": "object" + }, + "series_batch_size": { + "default": 10000, + "description": "Controls how many series to fetch per batch in Store Gateway. 
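A comparable sketch for the redis backend, exercising the TLS and Sentinel options listed above; hostnames and paths are illustrative:

blocks_storage:
  bucket_store:
    metadata_cache:
      backend: redis                  # assumed selector; not part of this excerpt
      redis:
        addresses: redis.example.svc.cluster.local:6379
        master_name: mymaster         # only required when using Redis Sentinel
        tls_enabled: true
        tls_ca_path: /etc/ssl/certs/redis-ca.crt
        get_multi_batch_size: 100
        set_multi_batch_size: 100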
Default value is 10000.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.series-batch-size" + }, + "sync_dir": { + "default": "tsdb-sync", + "description": "Directory to store synchronized TSDB index headers.", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.sync-dir" + }, + "sync_interval": { + "default": "15m0s", + "description": "How frequently to scan the bucket, or to refresh the bucket index (if enabled), in order to look for changes (new blocks shipped by ingesters and blocks deleted by retention or compaction).", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.sync-interval", + "x-format": "duration" + }, + "tenant_sync_concurrency": { + "default": 10, + "description": "Maximum number of concurrent tenants syncing blocks.", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.tenant-sync-concurrency" + }, + "token_bucket_bytes_limiter": { + "properties": { + "instance_token_bucket_size": { + "default": 859832320, + "description": "Instance token bucket size", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.token-bucket-bytes-limiter.instance-token-bucket-size" + }, + "mode": { + "default": "disabled", + "description": "Token bucket bytes limiter mode. Supported values are: disabled, dryrun, enabled", + "type": "string", + "x-cli-flag": "blocks-storage.bucket-store.token-bucket-bytes-limiter.mode" + }, + "request_token_bucket_size": { + "default": 4194304, + "description": "Request token bucket size", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.token-bucket-bytes-limiter.request-token-bucket-size" + }, + "user_token_bucket_size": { + "default": 644874240, + "description": "User token bucket size", + "type": "number", + "x-cli-flag": "blocks-storage.bucket-store.token-bucket-bytes-limiter.user-token-bucket-size" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "filesystem": { + "properties": { + "dir": { + "description": "Local filesystem storage directory.", + "type": "string", + "x-cli-flag": "blocks-storage.filesystem.dir" + } + }, + "type": "object" + }, + "gcs": { + "properties": { + "bucket_name": { + "description": "GCS bucket name", + "type": "string", + "x-cli-flag": "blocks-storage.gcs.bucket-name" + }, + "service_account": { + "description": "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. If empty, fallback to Google default logic.", + "type": "string", + "x-cli-flag": "blocks-storage.gcs.service-account" + } + }, + "type": "object" + }, + "s3": { + "properties": { + "access_key_id": { + "description": "S3 access key ID", + "type": "string", + "x-cli-flag": "blocks-storage.s3.access-key-id" + }, + "bucket_lookup_type": { + "default": "auto", + "description": "The s3 bucket lookup style. Supported values are: auto, virtual-hosted, path.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.bucket-lookup-type" + }, + "bucket_name": { + "description": "S3 bucket name", + "type": "string", + "x-cli-flag": "blocks-storage.s3.bucket-name" + }, + "disable_dualstack": { + "default": false, + "description": "If enabled, S3 endpoint will use the non-dualstack variant.", + "type": "boolean", + "x-cli-flag": "blocks-storage.s3.disable-dualstack" + }, + "endpoint": { + "description": "The S3 bucket endpoint. 
It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.endpoint" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "blocks-storage.s3.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "blocks-storage.s3.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "blocks-storage.s3.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "blocks-storage.s3.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a server's response headers.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "insecure": { + "default": false, + "description": "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.", + "type": "boolean", + "x-cli-flag": "blocks-storage.s3.insecure" + }, + "list_objects_version": { + "description": "The list api version. Supported values are: v1, v2, and ''.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.list-objects-version" + }, + "region": { + "description": "S3 region. If unset, the client will issue a S3 GetBucketLocation API call to autodetect it.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.region" + }, + "secret_access_key": { + "description": "S3 secret access key", + "type": "string", + "x-cli-flag": "blocks-storage.s3.secret-access-key" + }, + "send_content_md5": { + "default": true, + "description": "If true, attach an MD5 checksum when uploading objects, and S3 uses the MD5 checksum algorithm to verify the provided digest.
If false, use the CRC32C algorithm instead.", + "type": "boolean", + "x-cli-flag": "blocks-storage.s3.send-content-md5" + }, + "signature_version": { + "default": "v4", + "description": "The signature version to use for authenticating against S3. Supported values are: v4, v2.", + "type": "string", + "x-cli-flag": "blocks-storage.s3.signature-version" + }, + "sse": { + "$ref": "#/definitions/s3_sse_config" + } + }, + "type": "object" + }, + "swift": { + "properties": { + "application_credential_id": { + "description": "OpenStack Swift application credential ID.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.application-credential-id" + }, + "application_credential_name": { + "description": "OpenStack Swift application credential name.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.application-credential-name" + }, + "application_credential_secret": { + "description": "OpenStack Swift application credential secret.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.application-credential-secret" + }, + "auth_url": { + "description": "OpenStack Swift authentication URL", + "type": "string", + "x-cli-flag": "blocks-storage.swift.auth-url" + }, + "auth_version": { + "default": 0, + "description": "OpenStack Swift authentication API version. 0 to autodetect.", + "type": "number", + "x-cli-flag": "blocks-storage.swift.auth-version" + }, + "connect_timeout": { + "default": "10s", + "description": "Time after which a connection attempt is aborted.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.connect-timeout", + "x-format": "duration" + }, + "container_name": { + "description": "Name of the OpenStack Swift container to put chunks in.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.container-name" + }, + "domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.domain-id" + }, + "domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.domain-name" + }, + "max_retries": { + "default": 3, + "description": "Max retries on request errors.", + "type": "number", + "x-cli-flag": "blocks-storage.swift.max-retries" + }, + "password": { + "description": "OpenStack Swift API key.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.password" + }, + "project_domain_id": { + "description": "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.project-domain-id" + }, + "project_domain_name": { + "description": "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.project-domain-name" + }, + "project_id": { + "description": "OpenStack Swift project ID (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "blocks-storage.swift.project-id" + }, + "project_name": { + "description": "OpenStack Swift project name (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "blocks-storage.swift.project-name" + }, + "region_name": { + "description": "OpenStack Swift Region to use (v2,v3 auth only).", + "type": "string", + "x-cli-flag": "blocks-storage.swift.region-name" + }, + "request_timeout": { + "default": "5s", + "description": "Time after which an idle request is aborted.
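To make the S3 client options above concrete, a hedged sketch of an S3 blocks storage backend; the top-level backend selector and all values are illustrative assumptions:

blocks_storage:
  backend: s3                         # assumed selector; not part of this excerpt
  s3:
    bucket_name: cortex-blocks
    endpoint: s3.us-east-1.amazonaws.com
    region: us-east-1
    send_content_md5: true
    http:
      idle_conn_timeout: 1m30s
      response_header_timeout: 2m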
The timeout watchdog is reset each time some data is received, so the timeout triggers after X time no data is received on a request.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.request-timeout", + "x-format": "duration" + }, + "user_domain_id": { + "description": "OpenStack Swift user's domain ID.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.user-domain-id" + }, + "user_domain_name": { + "description": "OpenStack Swift user's domain name.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.user-domain-name" + }, + "user_id": { + "description": "OpenStack Swift user ID.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.user-id" + }, + "username": { + "description": "OpenStack Swift username.", + "type": "string", + "x-cli-flag": "blocks-storage.swift.username" + } + }, + "type": "object" + }, + "tsdb": { + "properties": { + "block_ranges_period": { + "default": "2h0m0s", + "description": "TSDB blocks range period.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "blocks-storage.tsdb.block-ranges-period" + }, + "close_idle_tsdb_timeout": { + "default": "0s", + "description": "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.close-idle-tsdb-timeout", + "x-format": "duration" + }, + "dir": { + "default": "tsdb", + "description": "Local directory to store TSDBs in the ingesters.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.dir" + }, + "expanded_postings_cache": { + "description": "[EXPERIMENTAL] If enabled, ingesters will cache expanded postings when querying blocks. Caching can be configured separately for the head and compacted blocks.", + "properties": { + "blocks": { + "description": "If enabled, ingesters will cache expanded postings for the compacted blocks. The cache is shared between all blocks.", + "properties": { + "enabled": { + "default": false, + "description": "Whether the postings cache is enabled or not", + "type": "boolean", + "x-cli-flag": "blocks-storage.expanded_postings_cache.block.enabled" + }, + "max_bytes": { + "default": 10485760, + "description": "Max bytes for postings cache", + "type": "number", + "x-cli-flag": "blocks-storage.expanded_postings_cache.block.max-bytes" + }, + "ttl": { + "default": "10m0s", + "description": "TTL for postings cache", + "type": "string", + "x-cli-flag": "blocks-storage.expanded_postings_cache.block.ttl", + "x-format": "duration" + } + }, + "type": "object" + }, + "head": { + "description": "If enabled, ingesters will cache expanded postings for the head block. 
Only queries with an equal matcher for metric __name__ are cached.", + "properties": { + "enabled": { + "default": false, + "description": "Whether the postings cache is enabled or not", + "type": "boolean", + "x-cli-flag": "blocks-storage.expanded_postings_cache.head.enabled" + }, + "max_bytes": { + "default": 10485760, + "description": "Max bytes for postings cache", + "type": "number", + "x-cli-flag": "blocks-storage.expanded_postings_cache.head.max-bytes" + }, + "ttl": { + "default": "10m0s", + "description": "TTL for postings cache", + "type": "string", + "x-cli-flag": "blocks-storage.expanded_postings_cache.head.ttl", + "x-format": "duration" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "flush_blocks_on_shutdown": { + "default": false, + "description": "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.", + "type": "boolean", + "x-cli-flag": "blocks-storage.tsdb.flush-blocks-on-shutdown" + }, + "head_chunks_write_buffer_size_bytes": { + "default": 4194304, + "description": "The write buffer size used by the head chunks mapper. Lower values reduce memory utilisation on clusters with a large number of tenants at the cost of increased disk I/O operations.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.head-chunks-write-buffer-size-bytes" + }, + "head_chunks_write_queue_size": { + "default": 0, + "description": "The size of the in-memory queue used before flushing chunks to the disk.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.head-chunks-write-queue-size" + }, + "head_compaction_concurrency": { + "default": 5, + "description": "Maximum number of tenants concurrently compacting TSDB head into a new block", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.head-compaction-concurrency" + }, + "head_compaction_idle_timeout": { + "default": "1h0m0s", + "description": "If TSDB head is idle for this duration, it is compacted. Note that up to 25% jitter is added to the value to avoid ingesters compacting concurrently. 0 means disabled.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.head-compaction-idle-timeout", + "x-format": "duration" + }, + "head_compaction_interval": { + "default": "1m0s", + "description": "How frequently Cortex tries to compact the TSDB head. A block is only created if the data covers the smallest block range. Must be greater than 0 and max 30 minutes. Note that up to 50% jitter is added to the value for the first compaction to avoid ingesters compacting concurrently.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.head-compaction-interval", + "x-format": "duration" + }, + "max_exemplars": { + "default": 0, + "description": "Deprecated, use maxExemplars in limits instead. If the MaxExemplars value in limits is set to zero, Cortex will fall back on this value. This setting enables support for exemplars in TSDB and sets the maximum number that will be stored.
0 or less means disabled.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.max-exemplars" + }, + "max_tsdb_opening_concurrency_on_startup": { + "default": 10, + "description": "Limit the number of TSDBs opened concurrently on startup.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup" + }, + "memory_snapshot_on_shutdown": { + "default": false, + "description": "True to enable snapshotting of in-memory TSDB data on disk when shutting down.", + "type": "boolean", + "x-cli-flag": "blocks-storage.tsdb.memory-snapshot-on-shutdown" + }, + "out_of_order_cap_max": { + "default": 32, + "description": "[EXPERIMENTAL] Configures the maximum number of samples per chunk that can be out-of-order.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.out-of-order-cap-max" + }, + "retention_period": { + "default": "6h0m0s", + "description": "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.retention-period", + "x-format": "duration" + }, + "ship_concurrency": { + "default": 10, + "description": "Maximum number of tenants concurrently shipping blocks to the storage.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.ship-concurrency" + }, + "ship_interval": { + "default": "1m0s", + "description": "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.ship-interval", + "x-format": "duration" + }, + "stripe_size": { + "default": 16384, + "description": "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.stripe-size" + }, + "wal_compression_type": { + "description": "TSDB WAL compression type. Supported values are: 'snappy', 'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "blocks-storage.tsdb.wal-compression-type" + }, + "wal_segment_size_bytes": { + "default": 134217728, + "description": "TSDB WAL segment files max size (bytes).", + "type": "number", + "x-cli-flag": "blocks-storage.tsdb.wal-segment-size-bytes" + } + }, + "type": "object" + }, + "users_scanner": { + "properties": { + "cache_ttl": { + "default": "0s", + "description": "TTL of the cached users. 0 disables caching and relies on caching at bucket client level.", + "type": "string", + "x-cli-flag": "blocks-storage.users-scanner.cache-ttl", + "x-format": "duration" + }, + "max_stale_period": { + "default": "1h0m0s", + "description": "Maximum period of time to consider the user index as stale. Fall back to the base scanner if stale. Only valid when strategy is user_index.", + "type": "string", + "x-cli-flag": "blocks-storage.users-scanner.user-index.max-stale-period", + "x-format": "duration" + }, + "strategy": { + "default": "list", + "description": "Strategy to use to scan users.
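Pulling the TSDB options above together, a minimal ingester-side sketch; the values mostly restate the documented defaults, except where noted:

blocks_storage:
  tsdb:
    dir: /data/tsdb
    block_ranges_period: [2h]
    retention_period: 6h
    ship_interval: 1m
    wal_compression_type: snappy      # non-default: enables WAL compression
    expanded_postings_cache:          # experimental, per the description above
      head:
        enabled: true
        ttl: 10m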
Supported values are: list, user_index.", + "type": "string", + "x-cli-flag": "blocks-storage.users-scanner.strategy" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "compactor_config": { + "description": "The compactor_config configures the compactor for the blocks storage.", + "properties": { + "accept_malformed_index": { + "default": false, + "description": "When enabled, index verification will ignore out of order label names.", + "type": "boolean", + "x-cli-flag": "compactor.accept-malformed-index" + }, + "block_deletion_marks_migration_enabled": { + "default": false, + "description": "When enabled, at compactor startup the bucket will be scanned and all found deletion marks inside the block location will be copied to the markers global location too. This option can (and should) be safely disabled as soon as the compactor has successfully run at least once.", + "type": "boolean", + "x-cli-flag": "compactor.block-deletion-marks-migration-enabled" + }, + "block_files_concurrency": { + "default": 10, + "description": "Number of goroutines to use when fetching/uploading block files from object storage.", + "type": "number", + "x-cli-flag": "compactor.block-files-concurrency" + }, + "block_ranges": { + "default": "2h0m0s,12h0m0s,24h0m0s", + "description": "List of compaction time ranges.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "compactor.block-ranges" + }, + "block_sync_concurrency": { + "default": 20, + "description": "Number of Go routines to use when syncing block index and chunks files from the long term storage.", + "type": "number", + "x-cli-flag": "compactor.block-sync-concurrency" + }, + "blocks_fetch_concurrency": { + "default": 3, + "description": "Number of goroutines to use when fetching blocks from object storage when compacting.", + "type": "number", + "x-cli-flag": "compactor.blocks-fetch-concurrency" + }, + "caching_bucket_enabled": { + "default": false, + "description": "When enabled, caching bucket will be used for compactor, except cleaner service, which serves as the source of truth for block status", + "type": "boolean", + "x-cli-flag": "compactor.caching-bucket-enabled" + }, + "cleaner_caching_bucket_enabled": { + "default": false, + "description": "When enabled, caching bucket will be used for cleaner", + "type": "boolean", + "x-cli-flag": "compactor.cleaner-caching-bucket-enabled" + }, + "cleaner_visit_marker_file_update_interval": { + "default": "5m0s", + "description": "How frequently cleaner visit marker file should be updated when cleaning user.", + "type": "string", + "x-cli-flag": "compactor.cleaner-visit-marker-file-update-interval", + "x-format": "duration" + }, + "cleaner_visit_marker_timeout": { + "default": "10m0s", + "description": "How long cleaner visit marker file should be considered as expired and able to be picked up by cleaner again. 
The value should be smaller than -compactor.cleanup-interval", + "type": "string", + "x-cli-flag": "compactor.cleaner-visit-marker-timeout", + "x-format": "duration" + }, + "cleanup_concurrency": { + "default": 20, + "description": "Max number of tenants for which blocks cleanup and maintenance should run concurrently.", + "type": "number", + "x-cli-flag": "compactor.cleanup-concurrency" + }, + "cleanup_interval": { + "default": "15m0s", + "description": "How frequently compactor should run blocks cleanup and maintenance, as well as update the bucket index.", + "type": "string", + "x-cli-flag": "compactor.cleanup-interval", + "x-format": "duration" + }, + "compaction_concurrency": { + "default": 1, + "description": "Max number of concurrent compactions running.", + "type": "number", + "x-cli-flag": "compactor.compaction-concurrency" + }, + "compaction_interval": { + "default": "1h0m0s", + "description": "The frequency at which the compaction runs", + "type": "string", + "x-cli-flag": "compactor.compaction-interval", + "x-format": "duration" + }, + "compaction_retries": { + "default": 3, + "description": "How many times to retry a failed compaction within a single compaction run.", + "type": "number", + "x-cli-flag": "compactor.compaction-retries" + }, + "compaction_strategy": { + "default": "default", + "description": "The compaction strategy to use. Supported values are: default, partitioning.", + "type": "string", + "x-cli-flag": "compactor.compaction-strategy" + }, + "compaction_visit_marker_file_update_interval": { + "default": "1m0s", + "description": "How frequently compaction visit marker file should be updated during compaction.", + "type": "string", + "x-cli-flag": "compactor.compaction-visit-marker-file-update-interval", + "x-format": "duration" + }, + "compaction_visit_marker_timeout": { + "default": "10m0s", + "description": "How long compaction visit marker file should be considered as expired and able to be picked up by compactor again.", + "type": "string", + "x-cli-flag": "compactor.compaction-visit-marker-timeout", + "x-format": "duration" + }, + "consistency_delay": { + "default": "0s", + "description": "Minimum age of fresh (non-compacted) blocks before they are processed. Malformed blocks older than the maximum of consistency-delay and 48h0m0s will be removed.", + "type": "string", + "x-cli-flag": "compactor.consistency-delay", + "x-format": "duration" + }, + "data_dir": { + "default": "./data", + "description": "Data directory in which to cache blocks and process compactions", + "type": "string", + "x-cli-flag": "compactor.data-dir" + }, + "deletion_delay": { + "default": "12h0m0s", + "description": "Time before a block marked for deletion is deleted from bucket. If not 0, blocks will be marked for deletion and compactor component will permanently delete blocks marked for deletion from the bucket. If 0, blocks will be deleted straight away. Note that deleting blocks immediately can cause query failures.", + "type": "string", + "x-cli-flag": "compactor.deletion-delay", + "x-format": "duration" + }, + "disabled_tenants": { + "description": "Comma separated list of tenants that cannot be compacted by this compactor. If specified, and compactor would normally pick given tenant for compaction (via -compactor.enabled-tenants or sharding), it will be ignored instead.", + "type": "string", + "x-cli-flag": "compactor.disabled-tenants" + }, + "enabled_tenants": { + "description": "Comma separated list of tenants that can be compacted.
If specified, only these tenants will be compacted by compactor, otherwise all tenants can be compacted. Subject to sharding.", + "type": "string", + "x-cli-flag": "compactor.enabled-tenants" + }, + "meta_sync_concurrency": { + "default": 20, + "description": "Number of Go routines to use when syncing block meta files from the long term storage.", + "type": "number", + "x-cli-flag": "compactor.meta-sync-concurrency" + }, + "sharding_enabled": { + "default": false, + "description": "Shard tenants across multiple compactor instances. Sharding is required if you run multiple compactor instances, in order to coordinate compactions and avoid race conditions leading to the same tenant blocks simultaneously compacted by different instances.", + "type": "boolean", + "x-cli-flag": "compactor.sharding-enabled" + }, + "sharding_planner_delay": { + "default": "10s", + "description": "How long the shuffle sharding planner waits before running planning code. This delay prevents double compaction when two compactors claim the same partition in the grouper at the same time.", + "type": "string", + "x-cli-flag": "compactor.sharding-planner-delay", + "x-format": "duration" + }, + "sharding_ring": { + "properties": { + "auto_forget_delay": { + "default": "2m0s", + "description": "Time since last heartbeat before compactor will be removed from ring. 0 to disable", + "type": "string", + "x-cli-flag": "compactor.auto-forget-delay", + "x-format": "duration" + }, + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", + "type": "boolean", + "x-cli-flag": "compactor.ring.detailed-metrics-enabled" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "compactor.ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which compactors are considered unhealthy within the ring. 0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "compactor.ring.heartbeat-timeout", + "x-format": "duration" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "compactor.ring.instance-interface-names" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "compactor.ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests.
Default is 2m.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "compactor.ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "compactor.ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "compactor.ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "compactor.ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "compactor.ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "collectors/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "compactor.ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "compactor.ring.store" + } + }, + "type": "object" + }, + "tokens_file_path": { + "description": "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.", + "type": "string", + "x-cli-flag": "compactor.ring.tokens-file-path" + }, + "unregister_on_shutdown": { + "default": true, + "description": "Unregister the compactor during shutdown if true.", + "type": "boolean", + "x-cli-flag": "compactor.ring.unregister-on-shutdown" + }, + "wait_active_instance_timeout": { + "default": "10m0s", + "description": "Timeout for waiting on compactor to become ACTIVE in the ring.", + "type": "string", + "x-cli-flag": "compactor.ring.wait-active-instance-timeout", + "x-format": "duration" + }, + "wait_stability_max_duration": { + "default": "5m0s", + "description": "Maximum time to wait for ring stability at startup. If the compactor ring keeps changing after this period of time, the compactor will start anyway.", + "type": "string", + "x-cli-flag": "compactor.ring.wait-stability-max-duration", + "x-format": "duration" + }, + "wait_stability_min_duration": { + "default": "1m0s", + "description": "Minimum time to wait for ring stability at startup. 0 to disable.", + "type": "string", + "x-cli-flag": "compactor.ring.wait-stability-min-duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "sharding_strategy": { + "default": "default", + "description": "The sharding strategy to use. 
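As a worked example of the compactor and its sharding ring described above (values illustrative; memberlist is one of the supported ring stores listed):

compactor:
  data_dir: /data/compactor
  compaction_interval: 1h
  sharding_enabled: true
  sharding_ring:
    kvstore:
      store: memberlist
    wait_stability_min_duration: 1m
    unregister_on_shutdown: true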
Supported values are: default, shuffle-sharding.", + "type": "string", + "x-cli-flag": "compactor.sharding-strategy" + }, + "skip_blocks_with_out_of_order_chunks_enabled": { + "default": false, + "description": "When enabled, mark blocks containing index with out-of-order chunks for no compact instead of halting the compaction.", + "type": "boolean", + "x-cli-flag": "compactor.skip-blocks-with-out-of-order-chunks-enabled" + }, + "tenant_cleanup_delay": { + "default": "6h0m0s", + "description": "For tenants marked for deletion, this is time between deleting of last block, and doing final cleanup (marker files, debug files) of the tenant.", + "type": "string", + "x-cli-flag": "compactor.tenant-cleanup-delay", + "x-format": "duration" + } + }, + "type": "object" + }, + "configs_config": { + "description": "The configs_config configures the Cortex Configs DB and API.", + "properties": { + "api": { + "properties": { + "notifications": { + "properties": { + "disable_email": { + "default": false, + "description": "Disable Email notifications for Alertmanager.", + "type": "boolean", + "x-cli-flag": "configs.notifications.disable-email" + }, + "disable_webhook": { + "default": false, + "description": "Disable WebHook notifications for Alertmanager.", + "type": "boolean", + "x-cli-flag": "configs.notifications.disable-webhook" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "database": { + "properties": { + "migrations_dir": { + "description": "Path where the database migration files can be found", + "type": "string", + "x-cli-flag": "configs.database.migrations-dir" + }, + "password_file": { + "description": "File containing password (username goes in URI)", + "type": "string", + "x-cli-flag": "configs.database.password-file" + }, + "uri": { + "default": "postgres://postgres@configs-db.weave.local/configs?sslmode=disable", + "description": "URI where the database can be found (for dev you can use memory://)", + "type": "string", + "x-cli-flag": "configs.database.uri" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "configstore_config": { + "description": "The configstore_config configures the config database storing rules and alerts, and is used by the Cortex alertmanager.", + "properties": { + "client_timeout": { + "default": "5s", + "description": "Timeout for requests to Weave Cloud configs service.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.client-timeout", + "x-format": "duration" + }, + "configs_api_url": { + "description": "URL of configs API server.", + "format": "uri", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.url" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-cert-path" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. 
Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.configs.tls-server-name" + } + }, + "type": "object" + }, + "consul_config": { + "description": "The consul_config configures the consul client.", + "properties": { + "acl_token": { + "description": "ACL Token used to interact with Consul.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.acl-token" + }, + "consistent_reads": { + "default": false, + "description": "Enable consistent reads to Consul.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.consul.consistent-reads" + }, + "host": { + "default": "localhost:8500", + "description": "Hostname and port of Consul.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.hostname" + }, + "http_client_timeout": { + "default": "20s", + "description": "HTTP timeout when talking to Consul.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.client-timeout", + "x-format": "duration" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.consul.tls-server-name" + }, + "watch_burst_size": { + "default": 1, + "description": "Burst size used in rate limit. Values less than 1 are treated as 1.", + "type": "number", + "x-cli-flag": "\u003cprefix\u003e.consul.watch-burst-size" + }, + "watch_rate_limit": { + "default": 1, + "description": "Rate limit when watching key or prefix in Consul, in requests per second. 0 disables the rate limit.", + "type": "number", + "x-cli-flag": "\u003cprefix\u003e.consul.watch-rate-limit" + } + }, + "type": "object" + }, + "distributor_config": { + "description": "The distributor_config configures the Cortex distributor.", + "properties": { + "extend_writes": { + "default": true, + "description": "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. 
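The consul_config above is shared by every component ring; the <prefix> placeholder in its flags is replaced by the consuming component. A sketch for the compactor ring, with illustrative values:

compactor:
  sharding_ring:
    kvstore:
      store: consul
      prefix: collectors/
      consul:
        host: consul.example.svc.cluster.local:8500
        tls_enabled: true
        tls_ca_path: /etc/ssl/certs/consul-ca.crt
        consistent_reads: false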
It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.", + "type": "boolean", + "x-cli-flag": "distributor.extend-writes" + }, + "extra_queue_delay": { + "default": "0s", + "description": "Time to wait before sending more than the minimum successful query requests.", + "type": "string", + "x-cli-flag": "distributor.extra-query-delay", + "x-format": "duration" + }, + "ha_tracker": { + "properties": { + "enable_ha_tracker": { + "default": false, + "description": "Enable the distributor's HA tracker so that it can accept samples from Prometheus HA replicas gracefully (requires labels).", + "type": "boolean", + "x-cli-flag": "distributor.ha-tracker.enable" + }, + "ha_tracker_failover_timeout": { + "default": "30s", + "description": "If we don't receive any samples from the accepted replica for a cluster in this amount of time, we will fail over to the next replica we receive a sample from. This value must be greater than the update timeout.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.failover-timeout", + "x-format": "duration" + }, + "ha_tracker_update_timeout": { + "default": "15s", + "description": "Update the timestamp in the KV store for a given cluster/replica only after this amount of time has passed since the current stored timestamp.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.update-timeout", + "x-format": "duration" + }, + "ha_tracker_update_timeout_jitter_max": { + "default": "5s", + "description": "Maximum jitter applied to the update timeout, in order to spread the HA heartbeats over time.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.update-timeout-jitter-max", + "x-format": "duration" + }, + "kvstore": { + "description": "Backend storage to use for the ring. Please be aware that memberlist is not supported by the HA tracker since gossip propagation is too slow for HA purposes.", + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "distributor.ha-tracker.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests.
Default is 2m.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "distributor.ha-tracker.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "ha-tracker/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.store" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "instance_limits": { + "properties": { + "max_inflight_client_requests": { + "default": 0, + "description": "Max inflight ingester client requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.", + "type": "number", + "x-cli-flag": "distributor.instance-limits.max-inflight-client-requests" + }, + "max_inflight_push_requests": { + "default": 0, + "description": "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.", + "type": "number", + "x-cli-flag": "distributor.instance-limits.max-inflight-push-requests" + }, + "max_ingestion_rate": { + "default": 0, + "description": "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.", + "type": "number", + "x-cli-flag": "distributor.instance-limits.max-ingestion-rate" + } + }, + "type": "object" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "remote_write API max receive message size (bytes).", + "type": "number", + "x-cli-flag": "distributor.max-recv-msg-size" + }, + "num_push_workers": { + "default": 0, + "description": "EXPERIMENTAL: Number of go routines to handle push calls from distributors to ingesters. When no workers are available, a new goroutine will be spawned automatically. 
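A sketch of the HA tracker settings just listed. Since the description above notes that memberlist is not supported here, etcd is used for illustration:

distributor:
  ha_tracker:
    enable_ha_tracker: true
    ha_tracker_failover_timeout: 30s  # must be greater than the update timeout
    ha_tracker_update_timeout: 15s
    kvstore:
      store: etcd
      prefix: ha-tracker/
      etcd:
        endpoints: [etcd-0:2379, etcd-1:2379, etcd-2:2379]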
If set to 0 (default), workers are disabled, and a new goroutine will be created for each push request.", + "type": "number", + "x-cli-flag": "distributor.num-push-workers" + }, + "otlp": { + "properties": { + "allow_delta_temporality": { + "default": false, + "description": "EXPERIMENTAL: If true, allow delta temporality OTLP metrics to be ingested.", + "type": "boolean", + "x-cli-flag": "distributor.otlp.allow-delta-temporality" + }, + "convert_all_attributes": { + "default": false, + "description": "If true, all resource attributes are converted to labels.", + "type": "boolean", + "x-cli-flag": "distributor.otlp.convert-all-attributes" + }, + "disable_target_info": { + "default": false, + "description": "If true, a target_info metric is not ingested. (refer to: https://github.com/prometheus/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems)", + "type": "boolean", + "x-cli-flag": "distributor.otlp.disable-target-info" + } + }, + "type": "object" + }, + "otlp_max_recv_msg_size": { + "default": 104857600, + "description": "Maximum OTLP request size in bytes that the Distributor can accept.", + "type": "number", + "x-cli-flag": "distributor.otlp-max-recv-msg-size" + }, + "pool": { + "properties": { + "client_cleanup_period": { + "default": "15s", + "description": "How frequently to clean up clients for ingesters that have gone away.", + "type": "string", + "x-cli-flag": "distributor.client-cleanup-period", + "x-format": "duration" + }, + "health_check_ingesters": { + "default": true, + "description": "Run a health check on each ingester client during periodic cleanup.", + "type": "boolean", + "x-cli-flag": "distributor.health-check-ingesters" + } + }, + "type": "object" + }, + "remote_timeout": { + "default": "2s", + "description": "Timeout for downstream ingesters.", + "type": "string", + "x-cli-flag": "distributor.remote-timeout", + "x-format": "duration" + }, + "ring": { + "properties": { + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", + "type": "boolean", + "x-cli-flag": "distributor.ring.detailed-metrics-enabled" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "distributor.ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which distributors are considered unhealthy within the ring.
0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "distributor.ring.heartbeat-timeout", + "x-format": "duration" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "distributor.ring.instance-interface-names" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "distributor.ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "distributor.ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "distributor.ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "distributor.ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "distributor.ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "distributor.ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "collectors/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "distributor.ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "distributor.ring.store" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "shard_by_all_labels": { + "default": false, + "description": "Distribute samples based on all labels, as opposed to solely by user and metric name.", + "type": "boolean", + "x-cli-flag": "distributor.shard-by-all-labels" + }, + "sharding_strategy": { + "default": "default", + "description": "The sharding strategy to use. 
Supported values are: default, shuffle-sharding.", + "type": "string", + "x-cli-flag": "distributor.sharding-strategy" + }, + "sign_write_requests": { + "default": false, + "description": "EXPERIMENTAL: If enabled, sign the write request between distributors and ingesters.", + "type": "boolean", + "x-cli-flag": "distributor.sign-write-requests" + }, + "use_stream_push": { + "default": false, + "description": "EXPERIMENTAL: If enabled, distributor would use stream connection to send requests to ingesters.", + "type": "boolean", + "x-cli-flag": "distributor.use-stream-push" + } + }, + "type": "object" + }, + "etcd_config": { + "description": "The etcd_config configures the etcd client.", + "properties": { + "dial_timeout": { + "default": "10s", + "description": "The dial timeout for the etcd connection.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.dial-timeout", + "x-format": "duration" + }, + "endpoints": { + "default": [], + "description": "The etcd endpoints to connect to.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "\u003cprefix\u003e.etcd.endpoints" + }, + "max_retries": { + "default": 10, + "description": "The maximum number of retries to do for failed ops.", + "type": "number", + "x-cli-flag": "\u003cprefix\u003e.etcd.max-retries" + }, + "password": { + "description": "Etcd password.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.password" + }, + "ping-without-stream-allowed": { + "default": true, + "description": "Send Keepalive pings with no streams.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.etcd.ping-without-stream-allowed" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.tls-server-name" + }, + "username": { + "description": "Etcd username.", + "type": "string", + "x-cli-flag": "\u003cprefix\u003e.etcd.username" + } + }, + "type": "object" + }, + "fifo_cache_config": { + "description": "The fifo_cache_config configures the local in-memory cache.", + "properties": { + "max_size_bytes": { + "description": "Maximum memory size of the cache in bytes. 
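[Editor's note: the etcd_config definition above is referenced wherever a ring kvstore uses etcd (the <prefix>.etcd.* flags). A hedged sketch, assuming the distributor ring as the consumer; the endpoint address is illustrative and not part of the schema.]

distributor:
  ring:
    kvstore:
      store: etcd                # supported: consul, etcd, inmemory, memberlist, multi
      prefix: collectors/        # key prefix; should end with a /
      etcd:
        endpoints:
          - http://etcd-0:2379   # illustrative address; the schema default is an empty list
        dial_timeout: 10s
        max_retries: 10          # max retries for failed ops
        tls_enabled: false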
A unit suffix (KB, MB, GB) may be applied.", + "type": "string", + "x-cli-flag": "frontend.fifocache.max-size-bytes" + }, + "max_size_items": { + "default": 0, + "description": "Maximum number of entries in the cache.", + "type": "number", + "x-cli-flag": "frontend.fifocache.max-size-items" + }, + "size": { + "default": 0, + "description": "Deprecated (use max-size-items or max-size-bytes instead): The number of entries to cache. ", + "type": "number", + "x-cli-flag": "frontend.fifocache.size" + }, + "validity": { + "default": "0s", + "description": "The expiry duration for the cache.", + "type": "string", + "x-cli-flag": "frontend.fifocache.duration", + "x-format": "duration" + } + }, + "type": "object" + }, + "flusher_config": { + "description": "The flusher_config configures the WAL flusher target, used to manually run one-time flushes when scaling down ingesters.", + "properties": { + "exit_after_flush": { + "default": true, + "description": "Stop Cortex after flush has finished. If false, Cortex process will keep running, doing nothing.", + "type": "boolean", + "x-cli-flag": "flusher.exit-after-flush" + } + }, + "type": "object" + }, + "frontend_worker_config": { + "description": "The frontend_worker_config configures the worker - running within the Cortex querier - picking up and executing queries enqueued by the query-frontend or query-scheduler.", + "properties": { + "dns_lookup_duration": { + "default": "10s", + "description": "How often to query DNS for query-frontend or query-scheduler address.", + "type": "string", + "x-cli-flag": "querier.dns-lookup-period", + "x-format": "duration" + }, + "frontend_address": { + "description": "Address of query frontend service, in host:port format. If -querier.scheduler-address is set as well, querier will use scheduler instead. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint.", + "type": "string", + "x-cli-flag": "querier.frontend-address" + }, + "grpc_client_config": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "querier.frontend-client.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to backoff and retry before failing.", + "type": "number", + "x-cli-flag": "querier.frontend-client.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "querier.frontend-client.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when we hit ratelimits.", + "type": "boolean", + "x-cli-flag": "querier.frontend-client.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.", + "type": "string", + "x-cli-flag": "querier.frontend-client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. 
Supported values are: 'gzip', 'snappy', 'snappy-block' ,'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "querier.frontend-client.grpc-compression" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "querier.frontend-client.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "querier.frontend-client.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "querier.frontend-client.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "querier.frontend-client.grpc-client-rate-limit-burst" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "querier.frontend-client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "querier.frontend-client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "querier.frontend-client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "querier.frontend-client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "querier.frontend-client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "querier.frontend-client.tls-server-name" + } + }, + "type": "object" + }, + "id": { + "description": "Querier ID, sent to frontend service to identify requests from the same querier. Defaults to hostname.", + "type": "string", + "x-cli-flag": "querier.id" + }, + "match_max_concurrent": { + "default": false, + "description": "Force worker concurrency to match the -querier.max-concurrent option. Overrides querier.worker-parallelism.", + "type": "boolean", + "x-cli-flag": "querier.worker-match-max-concurrent" + }, + "parallelism": { + "default": 10, + "description": "Number of simultaneous queries to process per query-frontend or query-scheduler.", + "type": "number", + "x-cli-flag": "querier.worker-parallelism" + }, + "scheduler_address": { + "description": "Hostname (and port) of scheduler that querier will periodically resolve, connect to and receive queries from. Only one of -querier.frontend-address or -querier.scheduler-address can be set. 
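[Editor's note: the frontend_worker_config block above maps onto YAML roughly as below. The top-level key name is not shown in this excerpt and is assumed here to be frontend_worker; the address is illustrative.]

frontend_worker:
  frontend_address: query-frontend:9095   # illustrative host:port; leave unset to receive queries via HTTP only
  parallelism: 10                         # simultaneous queries per query-frontend or query-scheduler
  match_max_concurrent: false             # true ties concurrency to -querier.max-concurrent instead
  dns_lookup_duration: 10s                # how often to re-resolve the frontend/scheduler address
  grpc_client_config:
    grpc_compression: snappy              # one of gzip, snappy, snappy-block, zstd, or '' to disable
    backoff_config:
      min_period: 100ms
      max_period: 10s
      max_retries: 10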
If neither is set, queries are only received via HTTP endpoint.", + "type": "string", + "x-cli-flag": "querier.scheduler-address" + } + }, + "type": "object" + }, + "ingester_client_config": { + "description": "The ingester_client_config configures how the Cortex distributors connect to the ingesters.", + "properties": { + "grpc_client_config": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "ingester.client.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to backoff and retry before failing.", + "type": "number", + "x-cli-flag": "ingester.client.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "ingester.client.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when we hit ratelimits.", + "type": "boolean", + "x-cli-flag": "ingester.client.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.", + "type": "string", + "x-cli-flag": "ingester.client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "default": "snappy-block", + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block' ,'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "ingester.client.grpc-compression" + }, + "healthcheck_config": { + "description": "EXPERIMENTAL: If enabled, gRPC clients perform health checks for each target and fail the request if the target is marked as unhealthy.", + "properties": { + "interval": { + "default": "5s", + "description": "The approximate amount of time between health checks of an individual target.", + "type": "string", + "x-cli-flag": "ingester.client.healthcheck.interval", + "x-format": "duration" + }, + "timeout": { + "default": "1s", + "description": "The amount of time during which no response from a target means a failed health check.", + "type": "string", + "x-cli-flag": "ingester.client.healthcheck.timeout", + "x-format": "duration" + }, + "unhealthy_threshold": { + "default": 0, + "description": "The number of consecutive failed health checks required before considering a target unhealthy. 
0 means disabled.", + "type": "number", + "x-cli-flag": "ingester.client.healthcheck.unhealthy-threshold" + } + }, + "type": "object" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "ingester.client.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "ingester.client.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "ingester.client.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "ingester.client.grpc-client-rate-limit-burst" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "ingester.client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "ingester.client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "ingester.client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "ingester.client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "ingester.client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "ingester.client.tls-server-name" + } + }, + "type": "object" + }, + "max_inflight_push_requests": { + "default": 0, + "description": "Max inflight push requests that this ingester client can handle. This limit is per-ingester-client. Additional requests will be rejected. 
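[Editor's note: a minimal sketch combining the ingester client options above, assuming the block is exposed as ingester_client in the top-level config; the non-zero unhealthy_threshold is illustrative, since the schema default of 0 disables the check.]

ingester_client:
  grpc_client_config:
    grpc_compression: snappy-block   # the schema default for this client
    connect_timeout: 5s              # 0 falls back to the 20s gRPC default
    max_recv_msg_size: 104857600     # bytes
    max_send_msg_size: 16777216      # bytes
    healthcheck_config:              # EXPERIMENTAL per the schema
      interval: 5s
      timeout: 1s
      unhealthy_threshold: 3         # illustrative; default 0 disables failing on health checks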
0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.client.max-inflight-push-requests" + } + }, + "type": "object" + }, + "ingester_config": { + "description": "The ingester_config configures the Cortex ingester.", + "properties": { + "active_series_metrics_enabled": { + "default": true, + "description": "Enable tracking of active series and export them as metrics.", + "type": "boolean", + "x-cli-flag": "ingester.active-series-metrics-enabled" + }, + "active_series_metrics_idle_timeout": { + "default": "10m0s", + "description": "After what time a series is considered to be inactive.", + "type": "string", + "x-cli-flag": "ingester.active-series-metrics-idle-timeout", + "x-format": "duration" + }, + "active_series_metrics_update_period": { + "default": "1m0s", + "description": "How often to update active series metrics.", + "type": "string", + "x-cli-flag": "ingester.active-series-metrics-update-period", + "x-format": "duration" + }, + "admin_limit_message": { + "default": "please contact administrator to raise it", + "description": "Customize the message contained in limit errors", + "type": "string", + "x-cli-flag": "ingester.admin-limit-message" + }, + "disable_chunk_trimming": { + "default": false, + "description": "Disable trimming of matching series chunks based on query Start and End time. When disabled, the result may contain samples outside the queried time range but select performances may be improved. Note that certain query results might change by changing this option.", + "type": "boolean", + "x-cli-flag": "ingester.disable-chunk-trimming" + }, + "ignore_series_limit_for_metric_names": { + "description": "Comma-separated list of metric names, for which -ingester.max-series-per-metric and -ingester.max-global-series-per-metric limits will be ignored. Does not affect max-series-per-user or max-global-series-per-metric limits.", + "type": "string", + "x-cli-flag": "ingester.ignore-series-limit-for-metric-names" + }, + "instance_limits": { + "properties": { + "max_inflight_push_requests": { + "default": 0, + "description": "Max inflight push requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-inflight-push-requests" + }, + "max_inflight_query_requests": { + "default": 0, + "description": "Max inflight query requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-inflight-query-requests" + }, + "max_ingestion_rate": { + "default": 0, + "description": "Max ingestion rate (samples/sec) that ingester will accept. This limit is per-ingester, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. This limit only works when using blocks engine. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-ingestion-rate" + }, + "max_series": { + "default": 0, + "description": "Max series that this ingester can hold (across all tenants). Requests to create additional series will be rejected. This limit only works when using blocks engine. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-series" + }, + "max_tenants": { + "default": 0, + "description": "Max users that this ingester can hold. Requests from additional users will be rejected. 
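[Editor's note: the instance_limits block above is per-ingester protection across all tenants, not a per-tenant limit. A sketch with illustrative thresholds; every schema default is 0, meaning unlimited.]

ingester:
  instance_limits:
    max_ingestion_rate: 100000         # illustrative; samples/sec EWMA, blocks engine only, 0 = unlimited
    max_series: 1500000                # illustrative; series across all tenants, 0 = unlimited
    max_tenants: 0                     # 0 = unlimited
    max_inflight_push_requests: 30000  # illustrative; 0 = unlimited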
This limit only works when using blocks engine. 0 = unlimited.", + "type": "number", + "x-cli-flag": "ingester.instance-limits.max-tenants" + } + }, + "type": "object" + }, + "labels_string_interning_enabled": { + "default": false, + "description": "Experimental: Enable string interning for metrics labels.", + "type": "boolean", + "x-cli-flag": "ingester.labels-string-interning-enabled" + }, + "lifecycler": { + "properties": { + "availability_zone": { + "description": "The availability zone where this instance is running.", + "type": "string", + "x-cli-flag": "ingester.availability-zone" + }, + "final_sleep": { + "default": "30s", + "description": "Duration to sleep for before exiting, to ensure metrics are scraped.", + "type": "string", + "x-cli-flag": "ingester.final-sleep", + "x-format": "duration" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to consul. 0 = disabled.", + "type": "string", + "x-cli-flag": "ingester.heartbeat-period", + "x-format": "duration" + }, + "interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "ingester.lifecycler.interface" + }, + "join_after": { + "default": "0s", + "description": "Period to wait for a claim from another member; will join automatically after this.", + "type": "string", + "x-cli-flag": "ingester.join-after", + "x-format": "duration" + }, + "min_ready_duration": { + "default": "15s", + "description": "Minimum duration to wait after the internal readiness checks have passed but before succeeding the readiness endpoint. This is used to slowdown deployment controllers (eg. Kubernetes) after an instance is ready and before they proceed with a rolling update, to give the rest of the cluster instances enough time to receive ring updates.", + "type": "string", + "x-cli-flag": "ingester.min-ready-duration", + "x-format": "duration" + }, + "num_tokens": { + "default": 128, + "description": "Number of tokens for each ingester.", + "type": "number", + "x-cli-flag": "ingester.num-tokens" + }, + "observe_period": { + "default": "0s", + "description": "Observe tokens after generating to resolve collisions. Useful when using gossiping ring.", + "type": "string", + "x-cli-flag": "ingester.observe-period", + "x-format": "duration" + }, + "readiness_check_ring_health": { + "default": true, + "description": "When enabled the readiness probe succeeds only after all instances are ACTIVE and healthy in the ring, otherwise only the instance itself is checked. This option should be disabled if in your cluster multiple instances can be rolled out simultaneously, otherwise rolling updates may be slowed down.", + "type": "boolean", + "x-cli-flag": "ingester.readiness-check-ring-health" + }, + "ring": { + "properties": { + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted by the distributors.", + "type": "boolean", + "x-cli-flag": "ring.detailed-metrics-enabled" + }, + "excluded_zones": { + "description": "Comma-separated list of zones to exclude from the ring. 
Instances in excluded zones will be filtered out from the ring.", + "type": "string", + "x-cli-flag": "distributor.excluded-zones" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which ingesters are skipped for reads/writes. 0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "ring.heartbeat-timeout", + "x-format": "duration" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "collectors/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "ring.store" + } + }, + "type": "object" + }, + "replication_factor": { + "default": 3, + "description": "The number of ingesters to write to and read from.", + "type": "number", + "x-cli-flag": "distributor.replication-factor" + }, + "zone_awareness_enabled": { + "default": false, + "description": "True to enable the zone-awareness and replicate ingested samples across different availability zones.", + "type": "boolean", + "x-cli-flag": "distributor.zone-awareness-enabled" + } + }, + "type": "object" + }, + "tokens_file_path": { + "description": "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.", + "type": "string", + "x-cli-flag": "ingester.tokens-file-path" + }, + "tokens_generator_strategy": { + "default": "random", + "description": "EXPERIMENTAL: Algorithm used to generate new ring tokens. 
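[Editor's note: a hedged sketch of the lifecycler settings documented above, including the replication and zone-awareness knobs from the embedded ring block; the availability zone and tokens file path are illustrative values.]

ingester:
  lifecycler:
    num_tokens: 128
    heartbeat_period: 5s
    join_after: 0s
    min_ready_duration: 15s
    availability_zone: us-east-1a     # illustrative
    tokens_file_path: /data/tokens    # illustrative; empty means tokens are not persisted across restarts
    ring:
      replication_factor: 3           # number of ingesters written to and read from
      zone_awareness_enabled: true    # replicate ingested samples across availability zones
      heartbeat_timeout: 1m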
Supported Values: random,minimize-spread", + "type": "string", + "x-cli-flag": "ingester.tokens-generator-strategy" + }, + "unregister_on_shutdown": { + "default": true, + "description": "Unregister from the ring upon clean shutdown. It can be useful to disable for rolling restarts with consistent naming in conjunction with -distributor.extend-writes=false.", + "type": "boolean", + "x-cli-flag": "ingester.unregister-on-shutdown" + } + }, + "type": "object" + }, + "matchers_cache_max_items": { + "default": 0, + "description": "Maximum number of entries in the regex matchers cache. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.matchers-cache-max-items" + }, + "metadata_retain_period": { + "default": "10m0s", + "description": "Period at which metadata we have not seen will remain in memory before being deleted.", + "type": "string", + "x-cli-flag": "ingester.metadata-retain-period", + "x-format": "duration" + }, + "query_protection": { + "properties": { + "rejection": { + "properties": { + "enabled": { + "default": false, + "description": "EXPERIMENTAL: Enable query rejection feature, where the component return 503 to all incoming query requests when the configured thresholds are breached.", + "type": "boolean", + "x-cli-flag": "ingester.query-protection.rejection.enabled" + }, + "threshold": { + "properties": { + "cpu_utilization": { + "default": 0, + "description": "EXPERIMENTAL: Max CPU utilization that this ingester can reach before rejecting new query request (across all tenants) in percentage, between 0 and 1. monitored_resources config must include the resource type. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.query-protection.rejection.threshold.cpu-utilization" + }, + "heap_utilization": { + "default": 0, + "description": "EXPERIMENTAL: Max heap utilization that this ingester can reach before rejecting new query request (across all tenants) in percentage, between 0 and 1. monitored_resources config must include the resource type. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.query-protection.rejection.threshold.heap-utilization" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "rate_update_period": { + "default": "15s", + "description": "Period with which to update the per-user ingestion rates.", + "type": "string", + "x-cli-flag": "ingester.rate-update-period", + "x-format": "duration" + }, + "skip_metadata_limits": { + "default": true, + "description": "If enabled, the metadata API returns all metadata regardless of the limits.", + "type": "boolean", + "x-cli-flag": "ingester.skip-metadata-limits" + }, + "upload_compacted_blocks_enabled": { + "default": true, + "description": "Enable uploading compacted blocks.", + "type": "boolean", + "x-cli-flag": "ingester.upload-compacted-blocks-enabled" + }, + "user_tsdb_configs_update_period": { + "default": "15s", + "description": "Period with which to update the per-user tsdb config.", + "type": "string", + "x-cli-flag": "ingester.user-tsdb-configs-update-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "limits_config": { + "description": "The limits_config configures default and per-tenant limits imposed by Cortex services (ie. 
distributor, ingester, ...).", + "properties": { + "accept_ha_samples": { + "default": false, + "description": "Flag to enable, for all users, handling of samples with external labels identifying replicas in an HA Prometheus setup.", + "type": "boolean", + "x-cli-flag": "distributor.ha-tracker.enable-for-all-users" + }, + "accept_mixed_ha_samples": { + "default": false, + "description": "[Experimental] Flag to enable handling of samples with mixed external labels identifying replicas in an HA Prometheus setup. Supported only if -distributor.ha-tracker.enable-for-all-users is true.", + "type": "boolean", + "x-cli-flag": "experimental.distributor.ha-tracker.mixed-ha-samples" + }, + "alertmanager_max_alerts_count": { + "default": 0, + "description": "Maximum number of alerts that a single user can have. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-alerts-count" + }, + "alertmanager_max_alerts_size_bytes": { + "default": 0, + "description": "Maximum total size of alerts that a single user can have, alert size is the sum of the bytes of its labels, annotations and generatorURL. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-alerts-size-bytes" + }, + "alertmanager_max_config_size_bytes": { + "default": 0, + "description": "Maximum size of configuration file for Alertmanager that tenant can upload via Alertmanager API. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-config-size-bytes" + }, + "alertmanager_max_dispatcher_aggregation_groups": { + "default": 0, + "description": "Maximum number of aggregation groups in Alertmanager's dispatcher that a tenant can have. Each active aggregation group uses single goroutine. When the limit is reached, dispatcher will not dispatch alerts that belong to additional aggregation groups, but existing groups will keep working properly. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-dispatcher-aggregation-groups" + }, + "alertmanager_max_silences_count": { + "default": 0, + "description": "Maximum number of silences that a single user can have, including expired silences. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-silences-count" + }, + "alertmanager_max_silences_size_bytes": { + "default": 0, + "description": "Maximum size of individual silences that a single user can have. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-silences-size-bytes" + }, + "alertmanager_max_template_size_bytes": { + "default": 0, + "description": "Maximum size of single template in tenant's Alertmanager configuration uploaded via Alertmanager API. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-template-size-bytes" + }, + "alertmanager_max_templates_count": { + "default": 0, + "description": "Maximum number of templates in tenant's Alertmanager configuration uploaded via Alertmanager API. 0 = no limit.", + "type": "number", + "x-cli-flag": "alertmanager.max-templates-count" + }, + "alertmanager_notification_rate_limit": { + "default": 0, + "description": "Per-user rate limit for sending notifications from Alertmanager in notifications/sec. 0 = rate limit disabled. 
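[Editor's note: the alertmanager_* limits above live in the per-tenant limits block, assumed here to be a top-level limits key per the limits_config description. All schema defaults are 0, meaning no limit; the values below are illustrative.]

limits:
  alertmanager_max_alerts_count: 1000         # illustrative
  alertmanager_max_alerts_size_bytes: 5242880 # illustrative; sum of label, annotation and generatorURL bytes
  alertmanager_max_config_size_bytes: 1048576 # illustrative cap on uploaded Alertmanager config
  alertmanager_max_silences_count: 500        # illustrative; includes expired silences
  alertmanager_max_templates_count: 10        # illustrative
  alertmanager_notification_rate_limit: 5     # notifications/sec; 0 disables the rate limit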
Negative value = no notifications are allowed.", + "type": "number", + "x-cli-flag": "alertmanager.notification-rate-limit" + }, + "alertmanager_notification_rate_limit_per_integration": { + "additionalProperties": true, + "default": "{}", + "description": "Per-integration notification rate limits. Value is a map, where each key is integration name and value is a rate-limit (float). On command line, this map is given in JSON format. Rate limit has the same meaning as -alertmanager.notification-rate-limit, but only applies for specific integration. Allowed integration names: webhook, email, pagerduty, opsgenie, wechat, slack, victorops, pushover, sns, telegram, discord, webex, msteams, msteamsv2, jira, rocketchat.", + "type": "object", + "x-cli-flag": "alertmanager.notification-rate-limit-per-integration" + }, + "alertmanager_receivers_firewall_block_cidr_networks": { + "description": "Comma-separated list of network CIDRs to block in Alertmanager receiver integrations.", + "type": "string", + "x-cli-flag": "alertmanager.receivers-firewall-block-cidr-networks" + }, + "alertmanager_receivers_firewall_block_private_addresses": { + "default": false, + "description": "True to block private and local addresses in Alertmanager receiver integrations. It blocks private addresses defined by RFC 1918 (IPv4 addresses) and RFC 4193 (IPv6 addresses), as well as loopback, local unicast and local multicast addresses.", + "type": "boolean", + "x-cli-flag": "alertmanager.receivers-firewall-block-private-addresses" + }, + "compactor_blocks_retention_period": { + "default": "0s", + "description": "Delete blocks containing samples older than the specified retention period. 0 to disable.", + "type": "string", + "x-cli-flag": "compactor.blocks-retention-period", + "x-format": "duration" + }, + "compactor_partition_index_size_bytes": { + "default": 68719476736, + "description": "Index size limit in bytes for each compaction partition. 0 means no limit", + "type": "number", + "x-cli-flag": "compactor.partition-index-size-bytes" + }, + "compactor_partition_series_count": { + "default": 0, + "description": "Time series count limit for each compaction partition. 0 means no limit", + "type": "number", + "x-cli-flag": "compactor.partition-series-count" + }, + "compactor_tenant_shard_size": { + "default": 0, + "description": "The default tenant's shard size when the shuffle-sharding strategy is used by the compactor. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. 
If the value is \u003c 1 and \u003e 0 the shard size will be a percentage of the total compactors", + "type": "number", + "x-cli-flag": "compactor.tenant-shard-size" + }, + "creation_grace_period": { + "default": "10m", + "description": "Duration which table will be created/deleted before/after it's needed; we won't accept sample from before this time.", + "type": "string", + "x-cli-flag": "validation.create-grace-period", + "x-format": "duration" + }, + "disabled_rule_groups": { + "default": [], + "description": "list of rule groups to disable", + "items": { + "type": "string" + }, + "type": "array" + }, + "drop_labels": { + "default": [], + "description": "This flag can be used to specify label names that to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "distributor.drop-label" + }, + "enable_native_histograms": { + "default": false, + "description": "[EXPERIMENTAL] True to enable native histogram.", + "type": "boolean", + "x-cli-flag": "blocks-storage.tsdb.enable-native-histograms" + }, + "enforce_metadata_metric_name": { + "default": true, + "description": "Enforce every metadata has a metric name.", + "type": "boolean", + "x-cli-flag": "validation.enforce-metadata-metric-name" + }, + "enforce_metric_name": { + "default": true, + "description": "Enforce every sample has a metric name.", + "type": "boolean", + "x-cli-flag": "validation.enforce-metric-name" + }, + "ha_cluster_label": { + "default": "cluster", + "description": "Prometheus label to look for in samples to identify a Prometheus HA cluster.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.cluster" + }, + "ha_max_clusters": { + "default": 0, + "description": "Maximum number of clusters that HA tracker will keep track of for single user. 0 to disable the limit.", + "type": "number", + "x-cli-flag": "distributor.ha-tracker.max-clusters" + }, + "ha_replica_label": { + "default": "__replica__", + "description": "Prometheus label to look for in samples to identify a Prometheus HA replica.", + "type": "string", + "x-cli-flag": "distributor.ha-tracker.replica" + }, + "ingestion_burst_size": { + "default": 50000, + "description": "Per-user allowed ingestion burst size (in number of samples).", + "type": "number", + "x-cli-flag": "distributor.ingestion-burst-size" + }, + "ingestion_rate": { + "default": 25000, + "description": "Per-user ingestion rate limit in samples per second.", + "type": "number", + "x-cli-flag": "distributor.ingestion-rate-limit" + }, + "ingestion_rate_strategy": { + "default": "local", + "description": "Whether the ingestion rate limit should be applied individually to each distributor instance (local), or evenly shared across the cluster (global).", + "type": "string", + "x-cli-flag": "distributor.ingestion-rate-limit-strategy" + }, + "ingestion_tenant_shard_size": { + "default": 0, + "description": "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set both on ingesters and distributors. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.", + "type": "number", + "x-cli-flag": "distributor.ingestion-tenant-shard-size" + }, + "limits_per_label_set": { + "default": [], + "description": "[Experimental] Enable limits per LabelSet. 
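[Editor's note: the HA-tracker and ingestion-rate limits above combine as in this sketch; values are the schema defaults except accept_ha_samples, which is enabled here for illustration.]

limits:
  accept_ha_samples: true          # handle replica-labelled samples from an HA Prometheus setup
  ha_cluster_label: cluster        # label identifying the HA cluster (default)
  ha_replica_label: __replica__    # label identifying the replica (default)
  ha_max_clusters: 0               # 0 disables the per-user cluster limit
  ingestion_rate: 25000            # per-user samples/sec (default)
  ingestion_burst_size: 50000      # per-user burst, in samples (default)
  ingestion_rate_strategy: local   # per-distributor; or global, shared evenly across the cluster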
Supported limits per labelSet: [max_series]", + "items": { + "type": "string" + }, + "type": "array" + }, + "max_cache_freshness": { + "default": "1m", + "description": "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.", + "type": "string", + "x-cli-flag": "frontend.max-cache-freshness", + "x-format": "duration" + }, + "max_downloaded_bytes_per_request": { + "default": 0, + "description": "The maximum number of data bytes to download per gRPC request in Store Gateway, including Series/LabelNames/LabelValues requests. 0 to disable.", + "type": "number", + "x-cli-flag": "store-gateway.max-downloaded-bytes-per-request" + }, + "max_exemplars": { + "default": 0, + "description": "Enables support for exemplars in TSDB and sets the maximum number that will be stored. less than zero means disabled. If the value is set to zero, cortex will fallback to blocks-storage.tsdb.max-exemplars value.", + "type": "number", + "x-cli-flag": "ingester.max-exemplars" + }, + "max_fetched_chunk_bytes_per_query": { + "default": 0, + "description": "Deprecated (use max-fetched-data-bytes-per-query instead): The maximum size of all chunks in bytes that a query can fetch from each ingester and storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.max-fetched-chunk-bytes-per-query" + }, + "max_fetched_chunks_per_query": { + "default": 2000000, + "description": "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.max-fetched-chunks-per-query" + }, + "max_fetched_data_bytes_per_query": { + "default": 0, + "description": "The maximum combined size of all data that a query can fetch from each ingester and storage. This limit is enforced in the querier and ruler for `query`, `query_range` and `series` APIs. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.max-fetched-data-bytes-per-query" + }, + "max_fetched_series_per_query": { + "default": 0, + "description": "The maximum number of unique series for which a query can fetch samples from each ingesters and blocks storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable", + "type": "number", + "x-cli-flag": "querier.max-fetched-series-per-query" + }, + "max_global_metadata_per_metric": { + "default": 0, + "description": "The maximum number of metadata per metric, across the cluster. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-global-metadata-per-metric" + }, + "max_global_metadata_per_user": { + "default": 0, + "description": "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.", + "type": "number", + "x-cli-flag": "ingester.max-global-metadata-per-user" + }, + "max_global_native_histogram_series_per_user": { + "default": 0, + "description": "The maximum number of active native histogram series per user, across the cluster before replication. 0 to disable. 
Supported only if -distributor.shard-by-all-labels and ingester.active-series-metrics-enabled is true.", + "type": "number", + "x-cli-flag": "ingester.max-global-native-histogram-series-per-user" + }, + "max_global_series_per_metric": { + "default": 0, + "description": "The maximum number of active series per metric name, across the cluster before replication. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-global-series-per-metric" + }, + "max_global_series_per_user": { + "default": 0, + "description": "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.", + "type": "number", + "x-cli-flag": "ingester.max-global-series-per-user" + }, + "max_label_name_length": { + "default": 1024, + "description": "Maximum length accepted for label names", + "type": "number", + "x-cli-flag": "validation.max-length-label-name" + }, + "max_label_names_per_series": { + "default": 30, + "description": "Maximum number of label names per series.", + "type": "number", + "x-cli-flag": "validation.max-label-names-per-series" + }, + "max_label_value_length": { + "default": 2048, + "description": "Maximum length accepted for label value. This setting also applies to the metric name", + "type": "number", + "x-cli-flag": "validation.max-length-label-value" + }, + "max_labels_size_bytes": { + "default": 0, + "description": "Maximum combined size in bytes of all labels and label values accepted for a series. 0 to disable the limit.", + "type": "number", + "x-cli-flag": "validation.max-labels-size-bytes" + }, + "max_metadata_length": { + "default": 1024, + "description": "Maximum length accepted for metric metadata. Metadata refers to Metric Name, HELP and UNIT.", + "type": "number", + "x-cli-flag": "validation.max-metadata-length" + }, + "max_metadata_per_metric": { + "default": 10, + "description": "The maximum number of metadata per metric, per ingester. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-metadata-per-metric" + }, + "max_metadata_per_user": { + "default": 8000, + "description": "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-metadata-per-user" + }, + "max_native_histogram_buckets": { + "default": 0, + "description": "Limit on total number of positive and negative buckets allowed in a single native histogram. The resolution of a histogram with more buckets will be reduced until the number of buckets is within the limit. If the limit cannot be reached, the sample will be discarded. 0 means no limit. Enforced at Distributor.", + "type": "number", + "x-cli-flag": "validation.max-native-histogram-buckets" + }, + "max_native_histogram_sample_size_bytes": { + "default": 0, + "description": "Maximum size in bytes of a native histogram sample. 0 to disable the limit.", + "type": "number", + "x-cli-flag": "validation.max-native-histogram-sample-size-bytes" + }, + "max_native_histogram_series_per_user": { + "default": 0, + "description": "The maximum number of active native histogram series per user, per ingester. 0 to disable. 
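[Editor's note: a sketch of the global series limits and label/metadata validation limits above, using the schema defaults with comments drawn from the descriptions; same assumed top-level limits key as before.]

limits:
  max_global_series_per_user: 0     # cluster-wide, before replication; needs -distributor.shard-by-all-labels=true; 0 disables
  max_global_series_per_metric: 0   # cluster-wide active series per metric name; 0 disables
  max_label_names_per_series: 30
  max_label_name_length: 1024
  max_label_value_length: 2048      # also applies to the metric name
  max_labels_size_bytes: 0          # combined label bytes per series; 0 disables
  max_metadata_per_metric: 10       # per ingester; 0 disables
  max_metadata_per_user: 8000       # per ingester; 0 disables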
Supported only if ingester.active-series-metrics-enabled is true.", + "type": "number", + "x-cli-flag": "ingester.max-native-histogram-series-per-user" + }, + "max_outstanding_requests_per_tenant": { + "default": 100, + "description": "Maximum number of outstanding requests per tenant per request queue (either query frontend or query scheduler); requests beyond this error with HTTP 429.", + "type": "number", + "x-cli-flag": "frontend.max-outstanding-requests-per-tenant" + }, + "max_queriers_per_tenant": { + "default": 0, + "description": "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. If the value is \u003c 1, it will be treated as a percentage and the gets a percentage of the total queriers. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.", + "type": "number", + "x-cli-flag": "frontend.max-queriers-per-tenant" + }, + "max_query_length": { + "default": "0s", + "description": "Limit the query time range (end - start time of range query parameter and max - min of data fetched time range). This limit is enforced in the query-frontend and ruler (on the received query). 0 to disable.", + "type": "string", + "x-cli-flag": "store.max-query-length", + "x-format": "duration" + }, + "max_query_lookback": { + "default": "0s", + "description": "Limit how long back data (series and metadata) can be queried, up until \u003clookback\u003e duration ago. This limit is enforced in the query-frontend, querier and ruler. If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable.", + "type": "string", + "x-cli-flag": "querier.max-query-lookback", + "x-format": "duration" + }, + "max_query_parallelism": { + "default": 14, + "description": "Maximum number of split queries will be scheduled in parallel by the frontend.", + "type": "number", + "x-cli-flag": "querier.max-query-parallelism" + }, + "max_query_response_size": { + "default": 0, + "description": "The maximum total uncompressed query response size. If the query was sharded the limit is applied to the total response size of all shards. This limit is enforced in query-frontend for `query` and `query_range` APIs. 0 to disable.", + "type": "number", + "x-cli-flag": "frontend.max-query-response-size" + }, + "max_series_per_metric": { + "default": 50000, + "description": "The maximum number of active series per metric name, per ingester. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-series-per-metric" + }, + "max_series_per_user": { + "default": 5000000, + "description": "The maximum number of active series per user, per ingester. 0 to disable.", + "type": "number", + "x-cli-flag": "ingester.max-series-per-user" + }, + "metric_relabel_configs": { + "default": [], + "description": "List of metric relabel configurations. Note that in most situations, it is more effective to use metrics relabeling directly in the Prometheus server, e.g. 
remote_write.write_relabel_configs.", + "type": "string" + }, + "native_histogram_ingestion_burst_size": { + "default": 0, + "description": "Per-user allowed native histogram ingestion burst size (in number of samples)", + "type": "number", + "x-cli-flag": "distributor.native-histogram-ingestion-burst-size" + }, + "native_histogram_ingestion_rate": { + "default": 1.7976931348623157e+308, + "description": "Per-user native histogram ingestion rate limit in samples per second. Disabled by default", + "type": "number", + "x-cli-flag": "distributor.native-histogram-ingestion-rate-limit" + }, + "out_of_order_time_window": { + "default": "0s", + "description": "[Experimental] Configures the allowed time window for ingestion of out-of-order samples. Disabled (0s) by default.", + "type": "string", + "x-cli-flag": "ingester.out-of-order-time-window", + "x-format": "duration" + }, + "promote_resource_attributes": { + "description": "Comma separated list of resource attributes that should be converted to labels.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "distributor.promote-resource-attributes" + }, + "query_partial_data": { + "default": false, + "description": "Enable to allow queries to be evaluated with data from a single zone, if other zones are not available.", + "type": "boolean" + }, + "query_priority": { + "description": "Configuration for query priority.", + "properties": { + "default_priority": { + "default": 0, + "description": "Priority assigned to all queries by default. Must be a unique value. Use this as a baseline to make certain queries higher/lower priority.", + "type": "number", + "x-cli-flag": "frontend.query-priority.default-priority" + }, + "enabled": { + "default": false, + "description": "Whether queries are assigned with priorities.", + "type": "boolean", + "x-cli-flag": "frontend.query-priority.enabled" + }, + "priorities": { + "default": [], + "description": "List of priority definitions.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "query_rejection": { + "description": "Configuration for query rejection.", + "properties": { + "enabled": { + "default": false, + "description": "Whether query rejection is enabled.", + "type": "boolean", + "x-cli-flag": "frontend.query-rejection.enabled" + }, + "query_attributes": { + "default": [], + "description": "List of query_attributes to match and reject queries. A query is rejected if it matches any query_attribute in this list. Each query_attribute has several properties (e.g., regex, time_window, user_agent), and all specified properties must match for a query_attribute to be considered a match. 
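[Editor's note: a minimal sketch of the query_priority and query_rejection blocks above. The priority value is illustrative; the priorities and query_attributes lists are left empty because the schema only summarises their item format.]

limits:
  query_priority:
    enabled: true
    default_priority: 0    # baseline priority assigned to all queries
    priorities: []         # list of priority definitions
  query_rejection:
    enabled: false
    query_attributes: []   # each entry matches only if all of its specified properties match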
Only the specified properties are checked, and an AND operator is applied to them.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "reject_old_samples": { + "default": false, + "description": "Reject old samples.", + "type": "boolean", + "x-cli-flag": "validation.reject-old-samples" + }, + "reject_old_samples_max_age": { + "default": "2w", + "description": "Maximum accepted sample age before rejecting.", + "type": "string", + "x-cli-flag": "validation.reject-old-samples.max-age", + "x-format": "duration" + }, + "ruler_evaluation_delay_duration": { + "default": "0s", + "description": "Deprecated(use ruler.query-offset instead) and will be removed in v1.19.0: Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.", + "type": "string", + "x-cli-flag": "ruler.evaluation-delay-duration", + "x-format": "duration" + }, + "ruler_external_labels": { + "additionalProperties": true, + "default": [], + "description": "external labels for alerting rules", + "type": "object" + }, + "ruler_max_rule_groups_per_tenant": { + "default": 0, + "description": "Maximum number of rule groups per-tenant. 0 to disable.", + "type": "number", + "x-cli-flag": "ruler.max-rule-groups-per-tenant" + }, + "ruler_max_rules_per_rule_group": { + "default": 0, + "description": "Maximum number of rules per rule group per-tenant. 0 to disable.", + "type": "number", + "x-cli-flag": "ruler.max-rules-per-rule-group" + }, + "ruler_query_offset": { + "default": "0s", + "description": "Duration to offset all rule evaluation queries per-tenant.", + "type": "string", + "x-cli-flag": "ruler.query-offset", + "x-format": "duration" + }, + "ruler_tenant_shard_size": { + "default": 0, + "description": "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. If the value is \u003c 1 the shard size will be a percentage of the total rulers.", + "type": "number", + "x-cli-flag": "ruler.tenant-shard-size" + }, + "rules_partial_data": { + "default": false, + "description": "Enable to allow rules to be evaluated with data from a single zone, if other zones are not available.", + "type": "boolean" + }, + "s3_sse_kms_encryption_context": { + "description": "S3 server-side encryption KMS encryption context. If unset and the key ID override is set, the encryption context will not be provided to S3. Ignored if the SSE type override is not set.", + "type": "string" + }, + "s3_sse_kms_key_id": { + "description": "S3 server-side encryption KMS Key ID. Ignored if the SSE type override is not set.", + "type": "string" + }, + "s3_sse_type": { + "description": "S3 server-side encryption type. Required to enable server-side encryption overrides for a specific tenant. If not set, the default S3 client settings are used.", + "type": "string" + }, + "store_gateway_tenant_shard_size": { + "default": 0, + "description": "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set when the store-gateway sharding is enabled with the shuffle-sharding strategy. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. 
If the value is \u003c 1 the shard size will be a percentage of the total store-gateways.", + "type": "number", + "x-cli-flag": "store-gateway.tenant-shard-size" + } + }, + "type": "object" + }, + "memberlist_config": { + "description": "The memberlist_config configures the Gossip memberlist.", + "properties": { + "abort_if_cluster_join_fails": { + "default": true, + "description": "If this node fails to join memberlist cluster, abort.", + "type": "boolean", + "x-cli-flag": "memberlist.abort-if-join-fails" + }, + "advertise_addr": { + "description": "Gossip address to advertise to other members in the cluster. Used for NAT traversal.", + "type": "string", + "x-cli-flag": "memberlist.advertise-addr" + }, + "advertise_port": { + "default": 7946, + "description": "Gossip port to advertise to other members in the cluster. Used for NAT traversal.", + "type": "number", + "x-cli-flag": "memberlist.advertise-port" + }, + "bind_addr": { + "default": [], + "description": "IP address to listen on for gossip messages. Multiple addresses may be specified. Defaults to 0.0.0.0", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "memberlist.bind-addr" + }, + "bind_port": { + "default": 7946, + "description": "Port to listen on for gossip messages.", + "type": "number", + "x-cli-flag": "memberlist.bind-port" + }, + "compression_enabled": { + "default": true, + "description": "Enable message compression. This can be used to reduce bandwidth usage at the cost of slightly more CPU utilization.", + "type": "boolean", + "x-cli-flag": "memberlist.compression-enabled" + }, + "dead_node_reclaim_time": { + "default": "0s", + "description": "How soon can dead node's name be reclaimed with new address. 0 to disable.", + "type": "string", + "x-cli-flag": "memberlist.dead-node-reclaim-time", + "x-format": "duration" + }, + "gossip_interval": { + "default": "200ms", + "description": "How often to gossip.", + "type": "string", + "x-cli-flag": "memberlist.gossip-interval", + "x-format": "duration" + }, + "gossip_nodes": { + "default": 3, + "description": "How many nodes to gossip to.", + "type": "number", + "x-cli-flag": "memberlist.gossip-nodes" + }, + "gossip_to_dead_nodes_time": { + "default": "30s", + "description": "How long to keep gossiping to dead nodes, to give them chance to refute their death.", + "type": "string", + "x-cli-flag": "memberlist.gossip-to-dead-nodes-time", + "x-format": "duration" + }, + "join_members": { + "default": [], + "description": "Other cluster members to join. Can be specified multiple times. 
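[Editor's note: a hedged memberlist sketch built from the fields above; all values are the schema defaults except the join entry, which is an illustrative hostname.]

memberlist:
  bind_port: 7946                   # gossip listen port
  advertise_port: 7946              # port advertised for NAT traversal
  abort_if_cluster_join_fails: true
  compression_enabled: true         # trades a little CPU for less bandwidth
  gossip_interval: 200ms
  gossip_nodes: 3
  join_members:
    - cortex-gossip.cortex.svc.cluster.local   # illustrative hostname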
It can be an IP, hostname or an entry specified in the DNS Service Discovery format.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "memberlist.join" + }, + "leave_timeout": { + "default": "5s", + "description": "Timeout for leaving memberlist cluster.", + "type": "string", + "x-cli-flag": "memberlist.leave-timeout", + "x-format": "duration" + }, + "left_ingesters_timeout": { + "default": "5m0s", + "description": "How long to keep LEFT ingesters in the ring.", + "type": "string", + "x-cli-flag": "memberlist.left-ingesters-timeout", + "x-format": "duration" + }, + "max_join_backoff": { + "default": "1m0s", + "description": "Max backoff duration to join other cluster members.", + "type": "string", + "x-cli-flag": "memberlist.max-join-backoff", + "x-format": "duration" + }, + "max_join_retries": { + "default": 10, + "description": "Max number of retries to join other cluster members.", + "type": "number", + "x-cli-flag": "memberlist.max-join-retries" + }, + "message_history_buffer_bytes": { + "default": 0, + "description": "How much space to use for keeping received and sent messages in memory for troubleshooting (two buffers). 0 to disable.", + "type": "number", + "x-cli-flag": "memberlist.message-history-buffer-bytes" + }, + "min_join_backoff": { + "default": "1s", + "description": "Min backoff duration to join other cluster members.", + "type": "string", + "x-cli-flag": "memberlist.min-join-backoff", + "x-format": "duration" + }, + "node_name": { + "description": "Name of the node in memberlist cluster. Defaults to hostname.", + "type": "string", + "x-cli-flag": "memberlist.nodename" + }, + "packet_dial_timeout": { + "default": "5s", + "description": "Timeout used when connecting to other nodes to send packet.", + "type": "string", + "x-cli-flag": "memberlist.packet-dial-timeout", + "x-format": "duration" + }, + "packet_write_timeout": { + "default": "5s", + "description": "Timeout for writing 'packet' data.", + "type": "string", + "x-cli-flag": "memberlist.packet-write-timeout", + "x-format": "duration" + }, + "pull_push_interval": { + "default": "30s", + "description": "How often to use pull/push sync.", + "type": "string", + "x-cli-flag": "memberlist.pullpush-interval", + "x-format": "duration" + }, + "randomize_node_name": { + "default": true, + "description": "Add random suffix to the node name.", + "type": "boolean", + "x-cli-flag": "memberlist.randomize-node-name" + }, + "rejoin_interval": { + "default": "0s", + "description": "If not 0, how often to rejoin the cluster. Occasional rejoin can help to fix the cluster split issue, and is harmless otherwise. For example when using only few components as a seed nodes (via -memberlist.join), then it's recommended to use rejoin. If -memberlist.join points to dynamic service that resolves to all gossiping nodes (eg. 
Kubernetes headless service), then rejoin is not needed.", + "type": "string", + "x-cli-flag": "memberlist.rejoin-interval", + "x-format": "duration" + }, + "retransmit_factor": { + "default": 4, + "description": "Multiplication factor used when sending out messages (factor * log(N+1)).", + "type": "number", + "x-cli-flag": "memberlist.retransmit-factor" + }, + "stream_timeout": { + "default": "10s", + "description": "The timeout for establishing a connection with a remote node, and for read/write operations.", + "type": "string", + "x-cli-flag": "memberlist.stream-timeout", + "x-format": "duration" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "memberlist.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "memberlist.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS on the memberlist transport layer.", + "type": "boolean", + "x-cli-flag": "memberlist.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "memberlist.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "memberlist.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "memberlist.tls-server-name" + } + }, + "type": "object" + }, + "memcached_client_config": { + "description": "The memcached_client_config configures the client used to connect to Memcached.", + "properties": { + "addresses": { + "description": "EXPERIMENTAL: Comma separated addresses list in DNS Service Discovery format: https://cortexmetrics.io/docs/configuration/arguments/#dns-service-discovery", + "type": "string", + "x-cli-flag": "frontend.memcached.addresses" + }, + "circuit_breaker_consecutive_failures": { + "default": 10, + "description": "Trip circuit-breaker after this number of consecutive dial failures (if zero then circuit-breaker is disabled).", + "type": "number", + "x-cli-flag": "frontend.memcached.circuit-breaker-consecutive-failures" + }, + "circuit_breaker_interval": { + "default": "10s", + "description": "Reset circuit-breaker counts after this long (if zero then never reset).", + "type": "string", + "x-cli-flag": "frontend.memcached.circuit-breaker-interval", + "x-format": "duration" + }, + "circuit_breaker_timeout": { + "default": "10s", + "description": "Duration circuit-breaker remains open after tripping (if zero then 60 seconds is used).", + "type": "string", + "x-cli-flag": "frontend.memcached.circuit-breaker-timeout", + "x-format": "duration" + }, + "consistent_hash": { + "default": true, + "description": "Use consistent hashing to distribute to memcache servers.", + "type": "boolean", + "x-cli-flag": "frontend.memcached.consistent-hash" + }, + "host": { + "description": "Hostname for memcached service to use. 
If empty and if addresses is unset, no memcached will be used.", + "type": "string", + "x-cli-flag": "frontend.memcached.hostname" + }, + "max_idle_conns": { + "default": 16, + "description": "Maximum number of idle connections in the pool.", + "type": "number", + "x-cli-flag": "frontend.memcached.max-idle-conns" + }, + "max_item_size": { + "default": 0, + "description": "The maximum size of an item stored in memcached. Bigger items are not stored. If set to 0, no maximum size is enforced.", + "type": "number", + "x-cli-flag": "frontend.memcached.max-item-size" + }, + "service": { + "default": "memcached", + "description": "SRV service used to discover memcache servers.", + "type": "string", + "x-cli-flag": "frontend.memcached.service" + }, + "timeout": { + "default": "100ms", + "description": "Maximum time to wait before giving up on memcached requests.", + "type": "string", + "x-cli-flag": "frontend.memcached.timeout", + "x-format": "duration" + }, + "update_interval": { + "default": "1m0s", + "description": "Period with which to poll DNS for memcache servers.", + "type": "string", + "x-cli-flag": "frontend.memcached.update-interval", + "x-format": "duration" + } + }, + "type": "object" + }, + "memcached_config": { + "description": "The memcached_config block configures how data is stored in Memcached (i.e. expiration).", + "properties": { + "batch_size": { + "default": 1024, + "description": "How many keys to fetch in each batch.", + "type": "number", + "x-cli-flag": "frontend.memcached.batchsize" + }, + "expiration": { + "default": "0s", + "description": "How long keys stay in the memcache.", + "type": "string", + "x-cli-flag": "frontend.memcached.expiration", + "x-format": "duration" + }, + "parallelism": { + "default": 100, + "description": "Maximum active requests to memcache.", + "type": "number", + "x-cli-flag": "frontend.memcached.parallelism" + } + }, + "type": "object" + }, + "querier_config": { + "description": "The querier_config configures the Cortex querier.", + "properties": { + "active_query_tracker_dir": { + "default": "./active-query-tracker", + "description": "The active query tracker monitors active queries and writes them to a file in the given directory. If Cortex discovers any queries in this log during startup, it will log them to the log file. Setting to an empty value disables the active query tracker, which also disables the -querier.max-concurrent option.", + "type": "string", + "x-cli-flag": "querier.active-query-tracker-dir" + }, + "default_evaluation_interval": { + "default": "1m0s", + "description": "The default evaluation interval or step size for subqueries.", + "type": "string", + "x-cli-flag": "querier.default-evaluation-interval", + "x-format": "duration" + }, + "enable_parquet_queryable": { + "default": false, + "description": "[Experimental] If true, the querier will try to query the parquet files if available.", + "type": "boolean", + "x-cli-flag": "querier.enable-parquet-queryable" + }, + "enable_promql_experimental_functions": { + "default": false, + "description": "[Experimental] If true, experimental PromQL functions are enabled.", + "type": "boolean", + "x-cli-flag": "querier.enable-promql-experimental-functions" + }, + "ignore_max_query_length": { + "default": false, + "description": "If enabled, ignore the max query length check at the Querier select method. 
Users can choose to ignore it since the validation can be done before Querier evaluation, such as at the Query Frontend or Ruler.", + "type": "boolean", + "x-cli-flag": "querier.ignore-max-query-length" + }, + "ingester_label_names_with_matchers": { + "default": false, + "description": "Use LabelNames ingester RPCs with match params.", + "type": "boolean", + "x-cli-flag": "querier.ingester-label-names-with-matchers" + }, + "ingester_metadata_streaming": { + "default": true, + "description": "Deprecated (This feature will always be on after v1.18): Use streaming RPCs for metadata APIs from ingester.", + "type": "boolean", + "x-cli-flag": "querier.ingester-metadata-streaming" + }, + "ingester_query_max_attempts": { + "default": 1, + "description": "The maximum number of times we attempt fetching data from ingesters for retryable errors (e.g. partial data returned).", + "type": "number", + "x-cli-flag": "querier.ingester-query-max-attempts" + }, + "lookback_delta": { + "default": "5m0s", + "description": "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.", + "type": "string", + "x-cli-flag": "querier.lookback-delta", + "x-format": "duration" + }, + "max_concurrent": { + "default": 20, + "description": "The maximum number of concurrent queries.", + "type": "number", + "x-cli-flag": "querier.max-concurrent" + }, + "max_query_into_future": { + "default": "10m0s", + "description": "Maximum duration into the future you can query. 0 to disable.", + "type": "string", + "x-cli-flag": "querier.max-query-into-future", + "x-format": "duration" + }, + "max_samples": { + "default": 50000000, + "description": "Maximum number of samples a single query can load into memory.", + "type": "number", + "x-cli-flag": "querier.max-samples" + }, + "max_subquery_steps": { + "default": 0, + "description": "Max number of steps allowed for every subquery expression in a query. The number of steps is calculated using subquery range / step. A value \u003e 0 enables it.", + "type": "number", + "x-cli-flag": "querier.max-subquery-steps" + }, + "parquet_queryable_default_block_store": { + "default": "parquet", + "description": "[Experimental] The Parquet queryable's default block store to query. Valid options are tsdb and parquet. If set to tsdb, the parquet queryable always falls back to the store gateway.", + "type": "string", + "x-cli-flag": "querier.parquet-queryable-default-block-store" + }, + "parquet_queryable_fallback_disabled": { + "default": false, + "description": "[Experimental] Disable the Parquet queryable's fallback of queries to the Store Gateway when a block is not available as Parquet files but is available in TSDB. Setting this to true disables the fallback, so users can remove the Store Gateway, but they need to make sure Parquet files are created before a block becomes queryable.", + "type": "boolean", + "x-cli-flag": "querier.parquet-queryable-fallback-disabled" + }, + "parquet_queryable_shard_cache_size": { + "default": 512, + "description": "[Experimental] Maximum size of the Parquet queryable shard cache. 0 to disable.", + "type": "number", + "x-cli-flag": "querier.parquet-queryable-shard-cache-size" + }, + "per_step_stats_enabled": { + "default": false, + "description": "Enable returning per-step sample stats in the query response.", + "type": "boolean", + "x-cli-flag": "querier.per-step-stats-enabled" + }, + "query_ingesters_within": { + "default": "0s", + "description": "Maximum lookback beyond which queries are not sent to ingester. 
0 means all queries are sent to ingester.", + "type": "string", + "x-cli-flag": "querier.query-ingesters-within", + "x-format": "duration" + }, + "query_store_after": { + "default": "0s", + "description": "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.", + "type": "string", + "x-cli-flag": "querier.query-store-after", + "x-format": "duration" + }, + "response_compression": { + "default": "gzip", + "description": "Use compression for the metrics query API and the instant and range query APIs. Supported compression values are 'gzip', 'snappy', 'zstd' and '' (disable compression).", + "type": "string", + "x-cli-flag": "querier.response-compression" + }, + "shuffle_sharding_ingesters_lookback_period": { + "default": "0s", + "description": "When the distributor's sharding strategy is shuffle-sharding and this setting is \u003e 0, queriers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since 'now - lookback period'. The lookback period should be greater than or equal to the configured 'query store after' and 'query ingesters within'. If this setting is 0, queriers always query all ingesters (ingesters shuffle sharding on the read path is disabled).", + "type": "string", + "x-cli-flag": "querier.shuffle-sharding-ingesters-lookback-period", + "x-format": "duration" + }, + "store_gateway_addresses": { + "description": "Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should be set when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring).", + "type": "string", + "x-cli-flag": "querier.store-gateway-addresses" + }, + "store_gateway_client": { + "properties": { + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 5s.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy' and '' (disable compression)", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.grpc-compression" + }, + "healthcheck_config": { + "description": "EXPERIMENTAL: If enabled, gRPC clients perform health checks for each target and fail the request if the target is marked as unhealthy.", + "properties": { + "interval": { + "default": "5s", + "description": "The approximate amount of time between health checks of an individual target.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.healthcheck.interval", + "x-format": "duration" + }, + "timeout": { + "default": "1s", + "description": "The amount of time during which no response from a target means a failed health check.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.healthcheck.timeout", + "x-format": "duration" + }, + "unhealthy_threshold": { + "default": 0, + "description": "The number of consecutive failed health checks required before considering a target unhealthy. 
0 means disabled.", + "type": "number", + "x-cli-flag": "querier.store-gateway-client.healthcheck.unhealthy-threshold" + } + }, + "type": "object" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS for the gRPC client connecting to store-gateway.", + "type": "boolean", + "x-cli-flag": "querier.store-gateway-client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "querier.store-gateway-client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "querier.store-gateway-client.tls-server-name" + } + }, + "type": "object" + }, + "store_gateway_consistency_check_max_attempts": { + "default": 3, + "description": "The maximum number of times we attempt fetching missing blocks from different store-gateways. If no more store-gateways are left (i.e. due to a lower replication factor), then we'll end the retries earlier.", + "type": "number", + "x-cli-flag": "querier.store-gateway-consistency-check-max-attempts" + }, + "store_gateway_query_stats": { + "default": true, + "description": "If enabled, store gateway query stats will be logged using `info` log level.", + "type": "boolean", + "x-cli-flag": "querier.store-gateway-query-stats-enabled" + }, + "thanos_engine": { + "properties": { + "enable_x_functions": { + "default": false, + "description": "Enable xincrease, xdelta, xrate, etc. from the Thanos engine.", + "type": "boolean", + "x-cli-flag": "querier.enable-x-functions" + }, + "enabled": { + "default": false, + "description": "Experimental. Use the Thanos PromQL engine https://github.com/thanos-io/promql-engine rather than the Prometheus PromQL engine.", + "type": "boolean", + "x-cli-flag": "querier.thanos-engine" + }, + "optimizers": { + "default": "default", + "description": "Logical plan optimizers. Multiple optimizers can be provided as a comma-separated list. 
Supported values: default, all, propagate-matchers, sort-matchers, merge-selects, detect-histogram-stats", + "type": "string", + "x-cli-flag": "querier.optimizers" + } + }, + "type": "object" + }, + "timeout": { + "default": "2m0s", + "description": "The timeout for a query.", + "type": "string", + "x-cli-flag": "querier.timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "query_frontend_config": { + "description": "The query_frontend_config configures the Cortex query-frontend.", + "properties": { + "downstream_url": { + "description": "URL of downstream Prometheus.", + "type": "string", + "x-cli-flag": "frontend.downstream-url" + }, + "enabled_ruler_query_stats_log": { + "default": false, + "description": "If enabled, report the query stats log for queries coming from the ruler to evaluate rules. It only takes effect when '-ruler.frontend-address' is configured.", + "type": "boolean", + "x-cli-flag": "frontend.enabled-ruler-query-stats" + }, + "grpc_client_config": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to backoff and retry before failing.", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when we hit ratelimits.", + "type": "boolean", + "x-cli-flag": "frontend.grpc-client-config.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block' ,'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.grpc-compression" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "frontend.grpc-client-config.grpc-client-rate-limit-burst" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. 
If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "frontend.grpc-client-config.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "frontend.grpc-client-config.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "frontend.grpc-client-config.tls-server-name" + } + }, + "type": "object" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from. This address is sent to query-scheduler and querier, which uses it to send the query response back to query-frontend.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "frontend.instance-interface-names" + }, + "log_queries_longer_than": { + "default": "0s", + "description": "Log queries that are slower than the specified duration. Set to 0 to disable. Set to \u003c 0 to enable on all queries.", + "type": "string", + "x-cli-flag": "frontend.log-queries-longer-than", + "x-format": "duration" + }, + "max_body_size": { + "default": 10485760, + "description": "Max body size for downstream prometheus.", + "type": "number", + "x-cli-flag": "frontend.max-body-size" + }, + "querier_forget_delay": { + "default": "0s", + "description": "If a querier disconnects without sending notification about graceful shutdown, the query-frontend will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.", + "type": "string", + "x-cli-flag": "query-frontend.querier-forget-delay", + "x-format": "duration" + }, + "query_stats_enabled": { + "default": false, + "description": "True to enable query statistics tracking. 
When enabled, a message with some statistics is logged for every query.", + "type": "boolean", + "x-cli-flag": "frontend.query-stats-enabled" + }, + "retry_on_too_many_outstanding_requests": { + "default": false, + "description": "When multiple query-schedulers are available, re-enqueue queries that were rejected due to too many outstanding requests.", + "type": "boolean", + "x-cli-flag": "frontend.retry-on-too-many-outstanding-requests" + }, + "scheduler_address": { + "description": "DNS hostname used for finding query-schedulers.", + "type": "string", + "x-cli-flag": "frontend.scheduler-address" + }, + "scheduler_dns_lookup_period": { + "default": "10s", + "description": "How often to resolve the scheduler-address, in order to look for new query-scheduler instances.", + "type": "string", + "x-cli-flag": "frontend.scheduler-dns-lookup-period", + "x-format": "duration" + }, + "scheduler_worker_concurrency": { + "default": 5, + "description": "Number of concurrent workers forwarding queries to single query-scheduler.", + "type": "number", + "x-cli-flag": "frontend.scheduler-worker-concurrency" + } + }, + "type": "object" + }, + "query_range_config": { + "description": "The query_range_config configures the query splitting and caching in the Cortex query-frontend.", + "properties": { + "align_queries_with_step": { + "default": false, + "description": "Mutate incoming queries to align their start and end with their step.", + "type": "boolean", + "x-cli-flag": "querier.align-querier-with-step" + }, + "cache_results": { + "default": false, + "description": "Cache query results.", + "type": "boolean", + "x-cli-flag": "querier.cache-results" + }, + "dynamic_query_splits": { + "properties": { + "enable_dynamic_vertical_sharding": { + "default": false, + "description": "[EXPERIMENTAL] Dynamically adjust vertical shard size to maximize the total combined number of query shards and splits.", + "type": "boolean", + "x-cli-flag": "querier.enable-dynamic-vertical-sharding" + }, + "max_fetched_data_duration_per_query": { + "default": "0s", + "description": "[EXPERIMENTAL] Max total duration of data fetched from storage by all query shards, 0 disables it. Dynamically uses a multiple of split interval to maintain a total fetched duration of data lower than the value set. It takes into account additional duration fetched by matrix selectors and subqueries.", + "type": "string", + "x-cli-flag": "querier.max-fetched-data-duration-per-query", + "x-format": "duration" + }, + "max_shards_per_query": { + "default": 0, + "description": "[EXPERIMENTAL] Maximum number of shards for a query, 0 disables it. Dynamically uses a multiple of split interval to maintain a total number of shards below the set value. 
If vertical sharding is enabled for a query, the combined total number of interval splits and vertical shards is kept below this value.", + "type": "number", + "x-cli-flag": "querier.max-shards-per-query" + } + }, + "type": "object" + }, + "forward_headers_list": { + "default": [], + "description": "List of headers forwarded by the query Frontend to downstream querier.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "frontend.forward-headers-list" + }, + "max_retries": { + "default": 5, + "description": "Maximum number of retries for a single request; beyond this, the downstream error is returned.", + "type": "number", + "x-cli-flag": "querier.max-retries-per-request" + }, + "results_cache": { + "properties": { + "cache": { + "properties": { + "background": { + "properties": { + "writeback_buffer": { + "default": 10000, + "description": "How many key batches to buffer for background write-back.", + "type": "number", + "x-cli-flag": "frontend.background.write-back-buffer" + }, + "writeback_goroutines": { + "default": 10, + "description": "At what concurrency to write back to cache.", + "type": "number", + "x-cli-flag": "frontend.background.write-back-concurrency" + } + }, + "type": "object" + }, + "default_validity": { + "default": "0s", + "description": "The default validity of entries for caches unless overridden.", + "type": "string", + "x-cli-flag": "frontend.default-validity", + "x-format": "duration" + }, + "enable_fifocache": { + "default": false, + "description": "Enable in-memory cache.", + "type": "boolean", + "x-cli-flag": "frontend.cache.enable-fifocache" + }, + "fifocache": { + "$ref": "#/definitions/fifo_cache_config" + }, + "memcached": { + "$ref": "#/definitions/memcached_config" + }, + "memcached_client": { + "$ref": "#/definitions/memcached_client_config" + }, + "redis": { + "$ref": "#/definitions/redis_config" + } + }, + "type": "object" + }, + "cache_queryable_samples_stats": { + "default": false, + "description": "Cache Statistics queryable samples on results cache.", + "type": "boolean", + "x-cli-flag": "frontend.cache-queryable-samples-stats" + }, + "compression": { + "description": "Use compression in results cache. Supported values are: 'snappy' and '' (disable compression).", + "type": "string", + "x-cli-flag": "frontend.compression" + } + }, + "type": "object" + }, + "split_queries_by_interval": { + "default": "0s", + "description": "Split queries by an interval and execute in parallel, 0 disables it. You should use a multiple of 24 hours (same as the storage bucketing scheme), to avoid queriers downloading and processing the same chunks. This also determines how cache keys are chosen when result caching is enabled", + "type": "string", + "x-cli-flag": "querier.split-queries-by-interval", + "x-format": "duration" + } + }, + "type": "object" + }, + "redis_config": { + "description": "The redis_config configures the Redis backend cache.", + "properties": { + "db": { + "default": 0, + "description": "Database index.", + "type": "number", + "x-cli-flag": "frontend.redis.db" + }, + "endpoint": { + "description": "Redis Server endpoint to use for caching. A comma-separated list of endpoints for Redis Cluster or Redis Sentinel. 
If empty, no redis will be used.", + "type": "string", + "x-cli-flag": "frontend.redis.endpoint" + }, + "expiration": { + "default": "0s", + "description": "How long keys stay in the redis.", + "type": "string", + "x-cli-flag": "frontend.redis.expiration", + "x-format": "duration" + }, + "idle_timeout": { + "default": "0s", + "description": "Close connections after remaining idle for this duration. If the value is zero, then idle connections are not closed.", + "type": "string", + "x-cli-flag": "frontend.redis.idle-timeout", + "x-format": "duration" + }, + "master_name": { + "description": "Redis Sentinel master name. An empty string for Redis Server or Redis Cluster.", + "type": "string", + "x-cli-flag": "frontend.redis.master-name" + }, + "max_connection_age": { + "default": "0s", + "description": "Close connections older than this duration. If the value is zero, then the pool does not close connections based on age.", + "type": "string", + "x-cli-flag": "frontend.redis.max-connection-age", + "x-format": "duration" + }, + "password": { + "description": "Password to use when connecting to redis.", + "type": "string", + "x-cli-flag": "frontend.redis.password" + }, + "pool_size": { + "default": 0, + "description": "Maximum number of connections in the pool.", + "type": "number", + "x-cli-flag": "frontend.redis.pool-size" + }, + "timeout": { + "default": "500ms", + "description": "Maximum time to wait before giving up on redis requests.", + "type": "string", + "x-cli-flag": "frontend.redis.timeout", + "x-format": "duration" + }, + "tls_enabled": { + "default": false, + "description": "Enable connecting to redis with TLS.", + "type": "boolean", + "x-cli-flag": "frontend.redis.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "frontend.redis.tls-insecure-skip-verify" + } + }, + "type": "object" + }, + "ruler_config": { + "description": "The ruler_config configures the Cortex ruler.", + "properties": { + "alertmanager_client": { + "properties": { + "basic_auth_password": { + "description": "HTTP Basic authentication password. It overrides the password set in the URL (if any).", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.basic-auth-password" + }, + "basic_auth_username": { + "description": "HTTP Basic authentication username. It overrides the username set in the URL (if any).", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.basic-auth-username" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.tls-cert-path" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "ruler.alertmanager-client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. 
Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-client.tls-server-name" + } + }, + "type": "object" + }, + "alertmanager_refresh_interval": { + "default": "1m0s", + "description": "How long to wait between refreshing DNS resolutions of Alertmanager hosts.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-refresh-interval", + "x-format": "duration" + }, + "alertmanager_url": { + "description": "Comma-separated list of URL(s) of the Alertmanager(s) to send notifications to. Each Alertmanager URL is treated as a separate group in the configuration. Multiple Alertmanagers in HA per group can be supported by using DNS resolution via -ruler.alertmanager-discovery.", + "type": "string", + "x-cli-flag": "ruler.alertmanager-url" + }, + "api_deduplicate_rules": { + "default": false, + "description": "EXPERIMENTAL: Remove duplicate rules in the prometheus rules and alerts API response. If there are duplicate rules the rule with the latest evaluation timestamp will be kept.", + "type": "boolean", + "x-cli-flag": "experimental.ruler.api-deduplicate-rules" + }, + "concurrent_evals_enabled": { + "default": false, + "description": "If enabled, rules from a single rule group can be evaluated concurrently if there is no dependency between each other. Max concurrency for each rule group is controlled via ruler.max-concurrent-evals flag.", + "type": "boolean", + "x-cli-flag": "ruler.concurrent-evals-enabled" + }, + "disable_rule_group_label": { + "default": false, + "description": "Disable the rule_group label on exported metrics", + "type": "boolean", + "x-cli-flag": "ruler.disable-rule-group-label" + }, + "disabled_tenants": { + "description": "Comma separated list of tenants whose rules this ruler cannot evaluate. If specified, a ruler that would normally pick the specified tenant(s) for processing will ignore them instead. Subject to sharding.", + "type": "string", + "x-cli-flag": "ruler.disabled-tenants" + }, + "enable_alertmanager_discovery": { + "default": false, + "description": "Use DNS SRV records to discover Alertmanager hosts.", + "type": "boolean", + "x-cli-flag": "ruler.alertmanager-discovery" + }, + "enable_api": { + "default": false, + "description": "Enable the ruler api", + "type": "boolean", + "x-cli-flag": "experimental.ruler.enable-api" + }, + "enable_ha_evaluation": { + "default": false, + "description": "Enable high availability", + "type": "boolean", + "x-cli-flag": "ruler.enable-ha-evaluation" + }, + "enable_sharding": { + "default": false, + "description": "Distribute rule evaluation using ring backend", + "type": "boolean", + "x-cli-flag": "ruler.enable-sharding" + }, + "enabled_tenants": { + "description": "Comma separated list of tenants whose rules this ruler can evaluate. If specified, only these tenants will be handled by ruler, otherwise this ruler can process rules from all tenants. 
Subject to sharding.", + "type": "string", + "x-cli-flag": "ruler.enabled-tenants" + }, + "evaluation_interval": { + "default": "1m0s", + "description": "How frequently to evaluate rules", + "type": "string", + "x-cli-flag": "ruler.evaluation-interval", + "x-format": "duration" + }, + "external_labels": { + "additionalProperties": true, + "default": [], + "description": "Labels to add to all alerts.", + "type": "object" + }, + "external_url": { + "description": "URL of alerts return path.", + "format": "uri", + "type": "string", + "x-cli-flag": "ruler.external.url" + }, + "flush_period": { + "default": "1m0s", + "description": "Period with which to attempt to flush rule groups.", + "type": "string", + "x-cli-flag": "ruler.flush-period", + "x-format": "duration" + }, + "for_grace_period": { + "default": "10m0s", + "description": "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period.", + "type": "string", + "x-cli-flag": "ruler.for-grace-period", + "x-format": "duration" + }, + "for_outage_tolerance": { + "default": "1h0m0s", + "description": "Max time to tolerate outage for restoring \"for\" state of alert.", + "type": "string", + "x-cli-flag": "ruler.for-outage-tolerance", + "x-format": "duration" + }, + "frontend_address": { + "description": "[Experimental] GRPC listen address of the Query Frontend, in host:port format. If set, Ruler queries to Query Frontends via gRPC. If not set, ruler queries to Ingesters directly.", + "type": "string", + "x-cli-flag": "ruler.frontend-address" + }, + "frontend_client": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to backoff and retry before failing.", + "type": "number", + "x-cli-flag": "ruler.frontendClient.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when we hit ratelimits.", + "type": "boolean", + "x-cli-flag": "ruler.frontendClient.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. 
Supported values are: 'gzip', 'snappy', 'snappy-block' ,'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "ruler.frontendClient.grpc-compression" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "ruler.frontendClient.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "ruler.frontendClient.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "ruler.frontendClient.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "ruler.frontendClient.grpc-client-rate-limit-burst" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "ruler.frontendClient.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "ruler.frontendClient.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "ruler.frontendClient.tls-server-name" + } + }, + "type": "object" + }, + "liveness_check_timeout": { + "default": "1s", + "description": "Timeout duration for non-primary rulers during liveness checks. If the check times out, the non-primary ruler will evaluate the rule group. 
Applicable when ruler.enable-ha-evaluation is true.", + "type": "string", + "x-cli-flag": "ruler.liveness-check-timeout", + "x-format": "duration" + }, + "max_concurrent_evals": { + "default": 1, + "description": "Max concurrency for a single rule group to evaluate independent rules.", + "type": "number", + "x-cli-flag": "ruler.max-concurrent-evals" + }, + "notification_queue_capacity": { + "default": 10000, + "description": "Capacity of the queue for notifications to be sent to the Alertmanager.", + "type": "number", + "x-cli-flag": "ruler.notification-queue-capacity" + }, + "notification_timeout": { + "default": "10s", + "description": "HTTP timeout duration when sending notifications to the Alertmanager.", + "type": "string", + "x-cli-flag": "ruler.notification-timeout", + "x-format": "duration" + }, + "poll_interval": { + "default": "1m0s", + "description": "How frequently to poll for rule changes.", + "type": "string", + "x-cli-flag": "ruler.poll-interval", + "x-format": "duration" + }, + "query_response_format": { + "default": "protobuf", + "description": "[Experimental] Query response format used to get query results from the Query Frontend during rule evaluation. It will only take effect when `-ruler.frontend-address` is configured. Supported values: json, protobuf", + "type": "string", + "x-cli-flag": "ruler.query-response-format" + }, + "query_stats_enabled": { + "default": false, + "description": "Report query statistics for completed ruler queries as a per-user metric and as an info-level log message.", + "type": "boolean", + "x-cli-flag": "ruler.query-stats-enabled" + }, + "resend_delay": { + "default": "1m0s", + "description": "Minimum amount of time to wait before resending an alert to Alertmanager.", + "type": "string", + "x-cli-flag": "ruler.resend-delay", + "x-format": "duration" + }, + "ring": { + "properties": { + "detailed_metrics_enabled": { + "default": true, + "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", + "type": "boolean", + "x-cli-flag": "ruler.ring.detailed-metrics-enabled" + }, + "final_sleep": { + "default": "0s", + "description": "How long to sleep when the ruler is shutting down. Needs to be close to or larger than the KV store information propagation delay.", + "type": "string", + "x-cli-flag": "ruler.ring.final-sleep", + "x-format": "duration" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "ruler.ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which rulers are considered unhealthy within the ring. 
0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "ruler.ring.heartbeat-timeout", + "x-format": "duration" + }, + "instance_interface_names": { + "default": "[eth0 en0]", + "description": "Name of network interface to read address from.", + "items": { + "type": "string" + }, + "type": "array", + "x-cli-flag": "ruler.ring.instance-interface-names" + }, + "keep_instance_in_the_ring_on_shutdown": { + "default": false, + "description": "Keep instance in the ring on shut down.", + "type": "boolean", + "x-cli-flag": "ruler.ring.keep-instance-in-the-ring-on-shutdown" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "ruler.ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "ruler.ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "ruler.ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "ruler.ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "ruler.ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "ruler.ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "rulers/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "ruler.ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "ruler.ring.store" + } + }, + "type": "object" + }, + "num_tokens": { + "default": 128, + "description": "Number of tokens for each ruler.", + "type": "number", + "x-cli-flag": "ruler.ring.num-tokens" + }, + "replication_factor": { + "default": 1, + "description": "EXPERIMENTAL: The replication factor to use when loading rule groups for API HA.", + "type": "number", + "x-cli-flag": "ruler.ring.replication-factor" + }, + "tokens_file_path": { + "description": "EXPERIMENTAL: File path where tokens are stored. 
If empty, tokens are not stored at shutdown and restored at startup.", + "type": "string", + "x-cli-flag": "ruler.ring.tokens-file-path" + }, + "zone_awareness_enabled": { + "default": false, + "description": "EXPERIMENTAL: True to enable zone-awareness and load rule groups across different availability zones for API HA.", + "type": "boolean", + "x-cli-flag": "ruler.ring.zone-awareness-enabled" + } + }, + "type": "object" + }, + "rule_path": { + "default": "/rules", + "description": "File path to store temporary rule files for the Prometheus rule managers.", + "type": "string", + "x-cli-flag": "ruler.rule-path" + }, + "ruler_client": { + "properties": { + "backoff_config": { + "properties": { + "max_period": { + "default": "10s", + "description": "Maximum delay when backing off.", + "type": "string", + "x-cli-flag": "ruler.client.backoff-max-period", + "x-format": "duration" + }, + "max_retries": { + "default": 10, + "description": "Number of times to backoff and retry before failing.", + "type": "number", + "x-cli-flag": "ruler.client.backoff-retries" + }, + "min_period": { + "default": "100ms", + "description": "Minimum delay when backing off.", + "type": "string", + "x-cli-flag": "ruler.client.backoff-min-period", + "x-format": "duration" + } + }, + "type": "object" + }, + "backoff_on_ratelimits": { + "default": false, + "description": "Enable backoff and retry when we hit ratelimits.", + "type": "boolean", + "x-cli-flag": "ruler.client.backoff-on-ratelimits" + }, + "connect_timeout": { + "default": "5s", + "description": "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.", + "type": "string", + "x-cli-flag": "ruler.client.connect-timeout", + "x-format": "duration" + }, + "grpc_compression": { + "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block', 'zstd' and '' (disable compression)", + "type": "string", + "x-cli-flag": "ruler.client.grpc-compression" + }, + "max_recv_msg_size": { + "default": 104857600, + "description": "gRPC client max receive message size (bytes).", + "type": "number", + "x-cli-flag": "ruler.client.grpc-max-recv-msg-size" + }, + "max_send_msg_size": { + "default": 16777216, + "description": "gRPC client max send message size (bytes).", + "type": "number", + "x-cli-flag": "ruler.client.grpc-max-send-msg-size" + }, + "rate_limit": { + "default": 0, + "description": "Rate limit for gRPC client; 0 means disabled.", + "type": "number", + "x-cli-flag": "ruler.client.grpc-client-rate-limit" + }, + "rate_limit_burst": { + "default": 0, + "description": "Rate limit burst for gRPC client.", + "type": "number", + "x-cli-flag": "ruler.client.grpc-client-rate-limit-burst" + }, + "remote_timeout": { + "default": "2m0s", + "description": "Timeout for downstream rulers.", + "type": "string", + "x-cli-flag": "ruler.client.remote-timeout", + "x-format": "duration" + }, + "tls_ca_path": { + "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", + "type": "string", + "x-cli-flag": "ruler.client.tls-ca-path" + }, + "tls_cert_path": { + "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", + "type": "string", + "x-cli-flag": "ruler.client.tls-cert-path" + }, + "tls_enabled": { + "default": false, + "description": "Enable TLS in the GRPC client. 
This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "ruler.client.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "ruler.client.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "ruler.client.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "ruler.client.tls-server-name" + } + }, + "type": "object" + }, + "search_pending_for": { + "default": "5m0s", + "description": "Time to spend searching for a pending ruler when shutting down.", + "type": "string", + "x-cli-flag": "ruler.search-pending-for", + "x-format": "duration" + }, + "sharding_strategy": { + "default": "default", + "description": "The sharding strategy to use. Supported values are: default, shuffle-sharding.", + "type": "string", + "x-cli-flag": "ruler.sharding-strategy" + }, + "thanos_engine": { + "properties": { + "enable_x_functions": { + "default": false, + "description": "Enable xincrease, xdelta, xrate, etc. from the Thanos engine.", + "type": "boolean", + "x-cli-flag": "ruler.enable-x-functions" + }, + "enabled": { + "default": false, + "description": "Experimental. Use the Thanos PromQL engine https://github.com/thanos-io/promql-engine rather than the Prometheus PromQL engine.", + "type": "boolean", + "x-cli-flag": "ruler.thanos-engine" + }, + "optimizers": { + "default": "default", + "description": "Logical plan optimizers. Multiple optimizers can be provided as a comma-separated list. Supported values: default, all, propagate-matchers, sort-matchers, merge-selects, detect-histogram-stats", + "type": "string", + "x-cli-flag": "ruler.optimizers" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "ruler_storage_config": { + "description": "The ruler_storage_config configures the Cortex ruler storage backend.", + "properties": { + "azure": { + "properties": { + "account_key": { + "description": "Azure storage account key", + "type": "string", + "x-cli-flag": "ruler-storage.azure.account-key" + }, + "account_name": { + "description": "Azure storage account name", + "type": "string", + "x-cli-flag": "ruler-storage.azure.account-name" + }, + "connection_string": { + "description": "The values of `account-name` and `endpoint-suffix` will not be used if `connection-string` is set. Use this method over `account-key` if you need to authenticate via a SAS token or if you use the Azurite emulator.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.connection-string" + }, + "container_name": { + "description": "Azure storage container name", + "type": "string", + "x-cli-flag": "ruler-storage.azure.container-name" + }, + "endpoint_suffix": { + "description": "Azure storage endpoint suffix without scheme. The account name will be prefixed to this value to create the FQDN.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.endpoint-suffix" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 
0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "ruler-storage.azure.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "ruler-storage.azure.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "ruler-storage.azure.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "ruler-storage.azure.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a server's response headers.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "max_retries": { + "default": 20, + "description": "Number of retries for recoverable errors.", + "type": "number", + "x-cli-flag": "ruler-storage.azure.max-retries" + }, + "msi_resource": { + "description": "Deprecated: Azure storage MSI resource. It will be set automatically by the Azure SDK.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.msi-resource" + }, + "user_assigned_id": { + "description": "Azure storage MSI resource managed identity client ID. If not supplied, the default Azure credential will be used. Set it to empty if you need to authenticate via Azure Workload Identity.", + "type": "string", + "x-cli-flag": "ruler-storage.azure.user-assigned-id" + } + }, + "type": "object" + }, + "backend": { + "default": "s3", + "description": "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem, configdb, local.", + "type": "string", + "x-cli-flag": "ruler-storage.backend" + }, + "configdb": { + "$ref": "#/definitions/configstore_config" + }, + "filesystem": { + "properties": { + "dir": { + "description": "Local filesystem storage directory.", + "type": "string", + "x-cli-flag": "ruler-storage.filesystem.dir" + } + }, + "type": "object" + }, + "gcs": { + "properties": { + "bucket_name": { + "description": "GCS bucket name", + "type": "string", + "x-cli-flag": "ruler-storage.gcs.bucket-name" + }, + "service_account": { + "description": "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. 
If empty, fallback to Google default logic.", + "type": "string", + "x-cli-flag": "ruler-storage.gcs.service-account" + } + }, + "type": "object" + }, + "local": { + "properties": { + "directory": { + "description": "Directory to scan for rules", + "type": "string", + "x-cli-flag": "ruler-storage.local.directory" + } + }, + "type": "object" + }, + "s3": { + "properties": { + "access_key_id": { + "description": "S3 access key ID", + "type": "string", + "x-cli-flag": "ruler-storage.s3.access-key-id" + }, + "bucket_lookup_type": { + "default": "auto", + "description": "The s3 bucket lookup style. Supported values are: auto, virtual-hosted, path.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.bucket-lookup-type" + }, + "bucket_name": { + "description": "S3 bucket name", + "type": "string", + "x-cli-flag": "ruler-storage.s3.bucket-name" + }, + "disable_dualstack": { + "default": false, + "description": "If enabled, S3 endpoint will use the non-dualstack variant.", + "type": "boolean", + "x-cli-flag": "ruler-storage.s3.disable-dualstack" + }, + "endpoint": { + "description": "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.endpoint" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "ruler-storage.s3.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "ruler-storage.s3.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "ruler-storage.s3.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "ruler-storage.s3.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a servers response headers.", + "type": "string", + "x-cli-flag": "ruler-storage.s3.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 
0 means no limit.", +            "type": "string", +            "x-cli-flag": "ruler-storage.s3.tls-handshake-timeout", +            "x-format": "duration" +          } +        }, +        "type": "object" +      }, +      "insecure": { +        "default": false, +        "description": "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.", +        "type": "boolean", +        "x-cli-flag": "ruler-storage.s3.insecure" +      }, +      "list_objects_version": { +        "description": "The list API version. Supported values are: v1, v2, and ''.", +        "type": "string", +        "x-cli-flag": "ruler-storage.s3.list-objects-version" +      }, +      "region": { +        "description": "S3 region. If unset, the client will issue an S3 GetBucketLocation API call to autodetect it.", +        "type": "string", +        "x-cli-flag": "ruler-storage.s3.region" +      }, +      "secret_access_key": { +        "description": "S3 secret access key", +        "type": "string", +        "x-cli-flag": "ruler-storage.s3.secret-access-key" +      }, +      "send_content_md5": { +        "default": true, +        "description": "If true, attach an MD5 checksum when uploading objects, and S3 uses the MD5 checksum algorithm to verify the provided digest. If false, use the CRC32C algorithm instead.", +        "type": "boolean", +        "x-cli-flag": "ruler-storage.s3.send-content-md5" +      }, +      "signature_version": { +        "default": "v4", +        "description": "The signature version to use for authenticating against S3. Supported values are: v4, v2.", +        "type": "string", +        "x-cli-flag": "ruler-storage.s3.signature-version" +      }, +      "sse": { +        "$ref": "#/definitions/s3_sse_config" +      } +    }, +    "type": "object" +  }, +  "swift": { +    "properties": { +      "application_credential_id": { +        "description": "OpenStack Swift application credential ID.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.application-credential-id" +      }, +      "application_credential_name": { +        "description": "OpenStack Swift application credential name.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.application-credential-name" +      }, +      "application_credential_secret": { +        "description": "OpenStack Swift application credential secret.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.application-credential-secret" +      }, +      "auth_url": { +        "description": "OpenStack Swift authentication URL", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.auth-url" +      }, +      "auth_version": { +        "default": 0, +        "description": "OpenStack Swift authentication API version. 
0 to autodetect.", +        "type": "number", +        "x-cli-flag": "ruler-storage.swift.auth-version" +      }, +      "connect_timeout": { +        "default": "10s", +        "description": "Time after which a connection attempt is aborted.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.connect-timeout", +        "x-format": "duration" +      }, +      "container_name": { +        "description": "Name of the OpenStack Swift container to put chunks in.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.container-name" +      }, +      "domain_id": { +        "description": "OpenStack Swift user's domain ID.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.domain-id" +      }, +      "domain_name": { +        "description": "OpenStack Swift user's domain name.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.domain-name" +      }, +      "max_retries": { +        "default": 3, +        "description": "Max retries on request errors.", +        "type": "number", +        "x-cli-flag": "ruler-storage.swift.max-retries" +      }, +      "password": { +        "description": "OpenStack Swift API key.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.password" +      }, +      "project_domain_id": { +        "description": "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.project-domain-id" +      }, +      "project_domain_name": { +        "description": "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.project-domain-name" +      }, +      "project_id": { +        "description": "OpenStack Swift project ID (v2,v3 auth only).", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.project-id" +      }, +      "project_name": { +        "description": "OpenStack Swift project name (v2,v3 auth only).", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.project-name" +      }, +      "region_name": { +        "description": "OpenStack Swift Region to use (v2,v3 auth only).", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.region-name" +      }, +      "request_timeout": { +        "default": "5s", +        "description": "Time after which an idle request is aborted. 
The timeout watchdog is reset each time some data is received, so the timeout only triggers when no data has been received for this duration.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.request-timeout", +        "x-format": "duration" +      }, +      "user_domain_id": { +        "description": "OpenStack Swift user's domain ID.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.user-domain-id" +      }, +      "user_domain_name": { +        "description": "OpenStack Swift user's domain name.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.user-domain-name" +      }, +      "user_id": { +        "description": "OpenStack Swift user ID.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.user-id" +      }, +      "username": { +        "description": "OpenStack Swift username.", +        "type": "string", +        "x-cli-flag": "ruler-storage.swift.username" +      } +    }, +    "type": "object" +  } + }, + "type": "object" + }, + "runtime_configuration_storage_config": { + "description": "The runtime_configuration_storage_config configures the storage backend for the runtime configuration file.", + "properties": { +  "azure": { +    "properties": { +      "account_key": { +        "description": "Azure storage account key", +        "type": "string", +        "x-cli-flag": "runtime-config.azure.account-key" +      }, +      "account_name": { +        "description": "Azure storage account name", +        "type": "string", +        "x-cli-flag": "runtime-config.azure.account-name" +      }, +      "connection_string": { +        "description": "The values of `account-name` and `endpoint-suffix` will be ignored if `connection-string` is set. Use this method over `account-key` if you need to authenticate via a SAS token or if you use the Azurite emulator.", +        "type": "string", +        "x-cli-flag": "runtime-config.azure.connection-string" +      }, +      "container_name": { +        "description": "Azure storage container name", +        "type": "string", +        "x-cli-flag": "runtime-config.azure.container-name" +      }, +      "endpoint_suffix": { +        "description": "Azure storage endpoint suffix without scheme. The account name will be prefixed to this value to create the FQDN.", +        "type": "string", +        "x-cli-flag": "runtime-config.azure.endpoint-suffix" +      }, +      "http": { +        "properties": { +          "expect_continue_timeout": { +            "default": "1s", +            "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", +            "type": "string", +            "x-cli-flag": "runtime-config.azure.expect-continue-timeout", +            "x-format": "duration" +          }, +          "idle_conn_timeout": { +            "default": "1m30s", +            "description": "The time an idle connection will remain idle before closing.", +            "type": "string", +            "x-cli-flag": "runtime-config.azure.http.idle-conn-timeout", +            "x-format": "duration" +          }, +          "insecure_skip_verify": { +            "default": false, +            "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", +            "type": "boolean", +            "x-cli-flag": "runtime-config.azure.http.insecure-skip-verify" +          }, +          "max_connections_per_host": { +            "default": 0, +            "description": "Maximum number of connections per host. 0 means no limit.", +            "type": "number", +            "x-cli-flag": "runtime-config.azure.max-connections-per-host" +          }, +          "max_idle_connections": { +            "default": 100, +            "description": "Maximum number of idle (keep-alive) connections across all hosts. 
0 means no limit.", + "type": "number", + "x-cli-flag": "runtime-config.azure.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "runtime-config.azure.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a servers response headers.", + "type": "string", + "x-cli-flag": "runtime-config.azure.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "runtime-config.azure.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "max_retries": { + "default": 20, + "description": "Number of retries for recoverable errors", + "type": "number", + "x-cli-flag": "runtime-config.azure.max-retries" + }, + "msi_resource": { + "description": "Deprecated: Azure storage MSI resource. It will be set automatically by Azure SDK.", + "type": "string", + "x-cli-flag": "runtime-config.azure.msi-resource" + }, + "user_assigned_id": { + "description": "Azure storage MSI resource managed identity client Id. If not supplied default Azure credential will be used. Set it to empty if you need to authenticate via Azure Workload Identity.", + "type": "string", + "x-cli-flag": "runtime-config.azure.user-assigned-id" + } + }, + "type": "object" + }, + "backend": { + "default": "filesystem", + "description": "Backend storage to use. Supported backends are: s3, gcs, azure, swift, filesystem.", + "type": "string", + "x-cli-flag": "runtime-config.backend" + }, + "file": { + "description": "File with the configuration that can be updated in runtime.", + "type": "string", + "x-cli-flag": "runtime-config.file" + }, + "filesystem": { + "properties": { + "dir": { + "description": "Local filesystem storage directory.", + "type": "string", + "x-cli-flag": "runtime-config.filesystem.dir" + } + }, + "type": "object" + }, + "gcs": { + "properties": { + "bucket_name": { + "description": "GCS bucket name", + "type": "string", + "x-cli-flag": "runtime-config.gcs.bucket-name" + }, + "service_account": { + "description": "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. If empty, fallback to Google default logic.", + "type": "string", + "x-cli-flag": "runtime-config.gcs.service-account" + } + }, + "type": "object" + }, + "period": { + "default": "10s", + "description": "How often to check runtime config file.", + "type": "string", + "x-cli-flag": "runtime-config.reload-period", + "x-format": "duration" + }, + "s3": { + "properties": { + "access_key_id": { + "description": "S3 access key ID", + "type": "string", + "x-cli-flag": "runtime-config.s3.access-key-id" + }, + "bucket_lookup_type": { + "default": "auto", + "description": "The s3 bucket lookup style. 
Supported values are: auto, virtual-hosted, path.", + "type": "string", + "x-cli-flag": "runtime-config.s3.bucket-lookup-type" + }, + "bucket_name": { + "description": "S3 bucket name", + "type": "string", + "x-cli-flag": "runtime-config.s3.bucket-name" + }, + "disable_dualstack": { + "default": false, + "description": "If enabled, S3 endpoint will use the non-dualstack variant.", + "type": "boolean", + "x-cli-flag": "runtime-config.s3.disable-dualstack" + }, + "endpoint": { + "description": "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.", + "type": "string", + "x-cli-flag": "runtime-config.s3.endpoint" + }, + "http": { + "properties": { + "expect_continue_timeout": { + "default": "1s", + "description": "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.", + "type": "string", + "x-cli-flag": "runtime-config.s3.expect-continue-timeout", + "x-format": "duration" + }, + "idle_conn_timeout": { + "default": "1m30s", + "description": "The time an idle connection will remain idle before closing.", + "type": "string", + "x-cli-flag": "runtime-config.s3.http.idle-conn-timeout", + "x-format": "duration" + }, + "insecure_skip_verify": { + "default": false, + "description": "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.", + "type": "boolean", + "x-cli-flag": "runtime-config.s3.http.insecure-skip-verify" + }, + "max_connections_per_host": { + "default": 0, + "description": "Maximum number of connections per host. 0 means no limit.", + "type": "number", + "x-cli-flag": "runtime-config.s3.max-connections-per-host" + }, + "max_idle_connections": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.", + "type": "number", + "x-cli-flag": "runtime-config.s3.max-idle-connections" + }, + "max_idle_connections_per_host": { + "default": 100, + "description": "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.", + "type": "number", + "x-cli-flag": "runtime-config.s3.max-idle-connections-per-host" + }, + "response_header_timeout": { + "default": "2m0s", + "description": "The amount of time the client will wait for a servers response headers.", + "type": "string", + "x-cli-flag": "runtime-config.s3.http.response-header-timeout", + "x-format": "duration" + }, + "tls_handshake_timeout": { + "default": "10s", + "description": "Maximum time to wait for a TLS handshake. 0 means no limit.", + "type": "string", + "x-cli-flag": "runtime-config.s3.tls-handshake-timeout", + "x-format": "duration" + } + }, + "type": "object" + }, + "insecure": { + "default": false, + "description": "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.", + "type": "boolean", + "x-cli-flag": "runtime-config.s3.insecure" + }, + "list_objects_version": { + "description": "The list api version. Supported values are: v1, v2, and ''.", + "type": "string", + "x-cli-flag": "runtime-config.s3.list-objects-version" + }, + "region": { + "description": "S3 region. 
If unset, the client will issue an S3 GetBucketLocation API call to autodetect it.", +        "type": "string", +        "x-cli-flag": "runtime-config.s3.region" +      }, +      "secret_access_key": { +        "description": "S3 secret access key", +        "type": "string", +        "x-cli-flag": "runtime-config.s3.secret-access-key" +      }, +      "send_content_md5": { +        "default": true, +        "description": "If true, attach an MD5 checksum when uploading objects, and S3 uses the MD5 checksum algorithm to verify the provided digest. If false, use the CRC32C algorithm instead.", +        "type": "boolean", +        "x-cli-flag": "runtime-config.s3.send-content-md5" +      }, +      "signature_version": { +        "default": "v4", +        "description": "The signature version to use for authenticating against S3. Supported values are: v4, v2.", +        "type": "string", +        "x-cli-flag": "runtime-config.s3.signature-version" +      }, +      "sse": { +        "$ref": "#/definitions/s3_sse_config" +      } +    }, +    "type": "object" +  }, +  "swift": { +    "properties": { +      "application_credential_id": { +        "description": "OpenStack Swift application credential ID.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.application-credential-id" +      }, +      "application_credential_name": { +        "description": "OpenStack Swift application credential name.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.application-credential-name" +      }, +      "application_credential_secret": { +        "description": "OpenStack Swift application credential secret.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.application-credential-secret" +      }, +      "auth_url": { +        "description": "OpenStack Swift authentication URL", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.auth-url" +      }, +      "auth_version": { +        "default": 0, +        "description": "OpenStack Swift authentication API version. 0 to autodetect.", +        "type": "number", +        "x-cli-flag": "runtime-config.swift.auth-version" +      }, +      "connect_timeout": { +        "default": "10s", +        "description": "Time after which a connection attempt is aborted.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.connect-timeout", +        "x-format": "duration" +      }, +      "container_name": { +        "description": "Name of the OpenStack Swift container to put chunks in.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.container-name" +      }, +      "domain_id": { +        "description": "OpenStack Swift user's domain ID.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.domain-id" +      }, +      "domain_name": { +        "description": "OpenStack Swift user's domain name.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.domain-name" +      }, +      "max_retries": { +        "default": 3, +        "description": "Max retries on request errors.", +        "type": "number", +        "x-cli-flag": "runtime-config.swift.max-retries" +      }, +      "password": { +        "description": "OpenStack Swift API key.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.password" +      }, +      "project_domain_id": { +        "description": "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.project-domain-id" +      }, +      "project_domain_name": { +        "description": "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.project-domain-name" +      }, +      "project_id": { +        "description": "OpenStack Swift project ID (v2,v3 auth only).", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.project-id" +      }, +      "project_name": { +        "description": "OpenStack Swift project name (v2,v3 
auth only).", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.project-name" +      }, +      "region_name": { +        "description": "OpenStack Swift Region to use (v2,v3 auth only).", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.region-name" +      }, +      "request_timeout": { +        "default": "5s", +        "description": "Time after which an idle request is aborted. The timeout watchdog is reset each time some data is received, so the timeout only triggers when no data has been received for this duration.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.request-timeout", +        "x-format": "duration" +      }, +      "user_domain_id": { +        "description": "OpenStack Swift user's domain ID.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.user-domain-id" +      }, +      "user_domain_name": { +        "description": "OpenStack Swift user's domain name.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.user-domain-name" +      }, +      "user_id": { +        "description": "OpenStack Swift user ID.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.user-id" +      }, +      "username": { +        "description": "OpenStack Swift username.", +        "type": "string", +        "x-cli-flag": "runtime-config.swift.username" +      } +    }, +    "type": "object" +  } + }, + "type": "object" + }, + "s3_sse_config": { + "description": "The s3_sse_config configures the S3 server-side encryption.", + "properties": { +  "kms_encryption_context": { +    "description": "KMS Encryption Context used for object encryption. It expects a JSON-formatted string.", +    "type": "string", +    "x-cli-flag": "\u003cprefix\u003e.s3.sse.kms-encryption-context" +  }, +  "kms_key_id": { +    "description": "KMS Key ID used to encrypt objects in S3", +    "type": "string", +    "x-cli-flag": "\u003cprefix\u003e.s3.sse.kms-key-id" +  }, +  "type": { +    "description": "Enable AWS Server Side Encryption. 
Supported values: SSE-KMS, SSE-S3.", +    "type": "string", +    "x-cli-flag": "\u003cprefix\u003e.s3.sse.type" +  } + }, + "type": "object" + }, + "server_config": { + "description": "The server_config configures the HTTP and gRPC server of the launched service(s).", + "properties": { +  "graceful_shutdown_timeout": { +    "default": "30s", +    "description": "Timeout for graceful shutdowns", +    "type": "string", +    "x-cli-flag": "server.graceful-shutdown-timeout", +    "x-format": "duration" +  }, +  "grpc_listen_address": { +    "description": "gRPC server listen address.", +    "type": "string", +    "x-cli-flag": "server.grpc-listen-address" +  }, +  "grpc_listen_conn_limit": { +    "default": 0, +    "description": "Maximum number of simultaneous gRPC connections, \u003c=0 to disable", +    "type": "number", +    "x-cli-flag": "server.grpc-conn-limit" +  }, +  "grpc_listen_network": { +    "default": "tcp", +    "description": "gRPC server listen network", +    "type": "string", +    "x-cli-flag": "server.grpc-listen-network" +  }, +  "grpc_listen_port": { +    "default": 9095, +    "description": "gRPC server listen port.", +    "type": "number", +    "x-cli-flag": "server.grpc-listen-port" +  }, +  "grpc_server_keepalive_time": { +    "default": "2h0m0s", +    "description": "Duration after which a keepalive probe is sent in case of no activity over the connection. Default: 2h", +    "type": "string", +    "x-cli-flag": "server.grpc.keepalive.time", +    "x-format": "duration" +  }, +  "grpc_server_keepalive_timeout": { +    "default": "20s", +    "description": "After having pinged for keepalive check, the duration after which an idle connection should be closed. Default: 20s", +    "type": "string", +    "x-cli-flag": "server.grpc.keepalive.timeout", +    "x-format": "duration" +  }, +  "grpc_server_max_concurrent_streams": { +    "default": 100, +    "description": "Limit on the number of concurrent streams for gRPC calls (0 = unlimited)", +    "type": "number", +    "x-cli-flag": "server.grpc-max-concurrent-streams" +  }, +  "grpc_server_max_connection_age": { +    "default": "2562047h47m16.854775807s", +    "description": "The maximum amount of time a connection may exist before it will be closed. Default: infinity", +    "type": "string", +    "x-cli-flag": "server.grpc.keepalive.max-connection-age", +    "x-format": "duration" +  }, +  "grpc_server_max_connection_age_grace": { +    "default": "2562047h47m16.854775807s", +    "description": "An additive period after max-connection-age after which the connection will be forcibly closed. Default: infinity", +    "type": "string", +    "x-cli-flag": "server.grpc.keepalive.max-connection-age-grace", +    "x-format": "duration" +  }, +  "grpc_server_max_connection_idle": { +    "default": "2562047h47m16.854775807s", +    "description": "The duration after which an idle connection should be closed. Default: infinity", +    "type": "string", +    "x-cli-flag": "server.grpc.keepalive.max-connection-idle", +    "x-format": "duration" +  }, +  "grpc_server_max_recv_msg_size": { +    "default": 4194304, +    "description": "Limit on the size of a gRPC message this server can receive (bytes).", +    "type": "number", +    "x-cli-flag": "server.grpc-max-recv-msg-size-bytes" +  }, +  "grpc_server_max_send_msg_size": { +    "default": 4194304, +    "description": "Limit on the size of a gRPC message this server can send (bytes).", +    "type": "number", +    "x-cli-flag": "server.grpc-max-send-msg-size-bytes" +  }, +  "grpc_server_min_time_between_pings": { +    "default": "10s", +    "description": "Minimum amount of time a client should wait before sending a keepalive ping. 
If the client sends keepalive pings more often, the server will send GOAWAY and close the connection.", +    "type": "string", +    "x-cli-flag": "server.grpc.keepalive.min-time-between-pings", +    "x-format": "duration" +  }, +  "grpc_server_num_stream_workers": { +    "default": 0, +    "description": "Number of worker goroutines that should be used to process incoming streams. Setting this to 0 (default) will disable workers and spawn a new goroutine for each stream.", +    "type": "number", +    "x-cli-flag": "server.grpc_server-num-stream-workers" +  }, +  "grpc_server_ping_without_stream_allowed": { +    "default": true, +    "description": "If true, the server allows keepalive pings even when there are no active streams (RPCs). If false, and the client sends a ping when there are no active streams, the server will send GOAWAY and close the connection.", +    "type": "boolean", +    "x-cli-flag": "server.grpc.keepalive.ping-without-stream-allowed" +  }, +  "grpc_tls_config": { +    "properties": { +      "cert_file": { +        "description": "GRPC TLS server cert path.", +        "type": "string", +        "x-cli-flag": "server.grpc-tls-cert-path" +      }, +      "client_auth_type": { +        "description": "GRPC TLS Client Auth type.", +        "type": "string", +        "x-cli-flag": "server.grpc-tls-client-auth" +      }, +      "client_ca_file": { +        "description": "GRPC TLS Client CA path.", +        "type": "string", +        "x-cli-flag": "server.grpc-tls-ca-path" +      }, +      "key_file": { +        "description": "GRPC TLS server key path.", +        "type": "string", +        "x-cli-flag": "server.grpc-tls-key-path" +      } +    }, +    "type": "object" +  }, +  "http_listen_address": { +    "description": "HTTP server listen address.", +    "type": "string", +    "x-cli-flag": "server.http-listen-address" +  }, +  "http_listen_conn_limit": { +    "default": 0, +    "description": "Maximum number of simultaneous HTTP connections, \u003c=0 to disable", +    "type": "number", +    "x-cli-flag": "server.http-conn-limit" +  }, +  "http_listen_network": { +    "default": "tcp", +    "description": "HTTP server listen network, default tcp", +    "type": "string", +    "x-cli-flag": "server.http-listen-network" +  }, +  "http_listen_port": { +    "default": 80, +    "description": "HTTP server listen port.", +    "type": "number", +    "x-cli-flag": "server.http-listen-port" +  }, +  "http_path_prefix": { +    "description": "Base path to serve all API routes from (e.g. 
/v1/)", + "type": "string", + "x-cli-flag": "server.path-prefix" + }, + "http_server_idle_timeout": { + "default": "2m0s", + "description": "Idle timeout for HTTP server", + "type": "string", + "x-cli-flag": "server.http-idle-timeout", + "x-format": "duration" + }, + "http_server_read_timeout": { + "default": "30s", + "description": "Read timeout for HTTP server", + "type": "string", + "x-cli-flag": "server.http-read-timeout", + "x-format": "duration" + }, + "http_server_write_timeout": { + "default": "30s", + "description": "Write timeout for HTTP server", + "type": "string", + "x-cli-flag": "server.http-write-timeout", + "x-format": "duration" + }, + "http_tls_config": { + "properties": { + "cert_file": { + "description": "HTTP server cert path.", + "type": "string", + "x-cli-flag": "server.http-tls-cert-path" + }, + "client_auth_type": { + "description": "HTTP TLS Client Auth type.", + "type": "string", + "x-cli-flag": "server.http-tls-client-auth" + }, + "client_ca_file": { + "description": "HTTP TLS Client CA path.", + "type": "string", + "x-cli-flag": "server.http-tls-ca-path" + }, + "key_file": { + "description": "HTTP server key path.", + "type": "string", + "x-cli-flag": "server.http-tls-key-path" + } + }, + "type": "object" + }, + "log_format": { + "default": "logfmt", + "description": "Output log messages in the given format. Valid formats: [logfmt, json]", + "type": "string", + "x-cli-flag": "log.format" + }, + "log_level": { + "default": "info", + "description": "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error]", + "type": "string", + "x-cli-flag": "log.level" + }, + "log_request_at_info_level_enabled": { + "default": false, + "description": "Optionally log requests at info level instead of debug level. Applies to request headers as well if server.log-request-headers is enabled.", + "type": "boolean", + "x-cli-flag": "server.log-request-at-info-level-enabled" + }, + "log_request_exclude_headers_list": { + "description": "Comma separated list of headers to exclude from loggin. Only used if server.log-request-headers is true.", + "type": "string", + "x-cli-flag": "server.log-request-headers-exclude-list" + }, + "log_request_headers": { + "default": false, + "description": "Optionally log request headers.", + "type": "boolean", + "x-cli-flag": "server.log-request-headers" + }, + "log_source_ips_enabled": { + "default": false, + "description": "Optionally log the source IPs.", + "type": "boolean", + "x-cli-flag": "server.log-source-ips-enabled" + }, + "log_source_ips_header": { + "description": "Header field storing the source IPs. Only used if server.log-source-ips-enabled is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For headers are used", + "type": "string", + "x-cli-flag": "server.log-source-ips-header" + }, + "log_source_ips_regex": { + "description": "Regex for matching the source IPs. Only used if server.log-source-ips-enabled is true. If not set the default Forwarded, X-Real-IP and X-Forwarded-For headers are used", + "type": "string", + "x-cli-flag": "server.log-source-ips-regex" + }, + "register_instrumentation": { + "default": true, + "description": "Register the intrumentation handlers (/metrics etc).", + "type": "boolean", + "x-cli-flag": "server.register-instrumentation" + }, + "tls_cipher_suites": { + "description": "Comma-separated list of cipher suites to use. 
If blank, the default Go cipher suites are used.", +    "type": "string", +    "x-cli-flag": "server.tls-cipher-suites" +  }, +  "tls_min_version": { +    "description": "Minimum TLS version to use. Allowed values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13. If blank, the Go TLS minimum version is used.", +    "type": "string", +    "x-cli-flag": "server.tls-min-version" +  } + }, + "type": "object" + }, + "storage_config": { + "description": "The storage_config configures the storage type Cortex uses.", + "properties": { +  "engine": { +    "default": "blocks", +    "description": "The storage engine to use: blocks is the only supported option today.", +    "type": "string", +    "x-cli-flag": "store.engine" +  } + }, + "type": "object" + }, + "store_gateway_config": { + "description": "The store_gateway_config configures the store-gateway service used by the blocks storage.", + "properties": { +  "disabled_tenants": { +    "description": "Comma-separated list of tenants whose store metrics this storegateway cannot process. If specified, a storegateway that would normally pick the specified tenant(s) for processing will ignore them instead.", +    "type": "string", +    "x-cli-flag": "store-gateway.disabled-tenants" +  }, +  "enabled_tenants": { +    "description": "Comma-separated list of tenants whose store metrics this storegateway can process. If specified, only these tenants will be handled by storegateway, otherwise this storegateway will be enabled for all the tenants in the store-gateway cluster.", +    "type": "string", +    "x-cli-flag": "store-gateway.enabled-tenants" +  }, +  "hedged_request": { +    "properties": { +      "enabled": { +        "default": false, +        "description": "If true, hedged requests are applied to object store calls. It can help with reducing tail latency.", +        "type": "boolean", +        "x-cli-flag": "store-gateway.hedged-request.enabled" +      }, +      "max_requests": { +        "default": 3, +        "description": "Maximum number of hedged requests allowed for each initial request. A high number can reduce latency but increase internal calls.", +        "type": "number", +        "x-cli-flag": "store-gateway.hedged-request.max-requests" +      }, +      "quantile": { +        "default": 0.9, +        "description": "It is used to calculate a latency threshold to trigger hedged requests. For example, additional requests are triggered when the initial request response time exceeds the 90th percentile.", +        "type": "number", +        "x-cli-flag": "store-gateway.hedged-request.quantile" +      } +    }, +    "type": "object" +  }, +  "query_protection": { +    "properties": { +      "rejection": { +        "properties": { +          "enabled": { +            "default": false, +            "description": "EXPERIMENTAL: Enable query rejection feature, where the component returns 503 to all incoming query requests when the configured thresholds are breached.", +            "type": "boolean", +            "x-cli-flag": "store-gateway.query-protection.rejection.enabled" +          }, +          "threshold": { +            "properties": { +              "cpu_utilization": { +                "default": 0, +                "description": "EXPERIMENTAL: Max CPU utilization that this store-gateway can reach before rejecting new query requests (across all tenants) in percentage, between 0 and 1. monitored_resources config must include the resource type. 0 to disable.", +                "type": "number", +                "x-cli-flag": "store-gateway.query-protection.rejection.threshold.cpu-utilization" +              }, +              "heap_utilization": { +                "default": 0, +                "description": "EXPERIMENTAL: Max heap utilization that this store-gateway can reach before rejecting new query requests (across all tenants) in percentage, between 0 and 1. monitored_resources config must include the resource type. 
0 to disable.", +                "type": "number", +                "x-cli-flag": "store-gateway.query-protection.rejection.threshold.heap-utilization" +              } +            }, +            "type": "object" +          } +        }, +        "type": "object" +      } +    }, +    "type": "object" +  }, +  "sharding_enabled": { +    "default": false, +    "description": "Shard blocks across multiple store gateway instances. This option needs to be set on both the store-gateway and querier when running in microservices mode.", +    "type": "boolean", +    "x-cli-flag": "store-gateway.sharding-enabled" +  }, +  "sharding_ring": { +    "description": "The hash ring configuration. This option is required only if blocks sharding is enabled.", +    "properties": { +      "detailed_metrics_enabled": { +        "default": true, +        "description": "Set to true to enable ring detailed metrics. These metrics provide detailed information, such as token count and ownership per tenant. Disabling them can significantly decrease the number of metrics emitted.", +        "type": "boolean", +        "x-cli-flag": "store-gateway.sharding-ring.detailed-metrics-enabled" +      }, +      "final_sleep": { +        "default": "0s", +        "description": "The time to sleep when the store-gateway is shutting down. Needs to be close to or larger than the KV store's information propagation delay.", +        "type": "string", +        "x-cli-flag": "store-gateway.sharding-ring.final-sleep", +        "x-format": "duration" +      }, +      "heartbeat_period": { +        "default": "15s", +        "description": "Period at which to heartbeat to the ring. 0 = disabled.", +        "type": "string", +        "x-cli-flag": "store-gateway.sharding-ring.heartbeat-period", +        "x-format": "duration" +      }, +      "heartbeat_timeout": { +        "default": "1m0s", +        "description": "The heartbeat timeout after which store gateways are considered unhealthy within the ring. 0 = never (timeout disabled). This option needs to be set on both the store-gateway and querier when running in microservices mode.", +        "type": "string", +        "x-cli-flag": "store-gateway.sharding-ring.heartbeat-timeout", +        "x-format": "duration" +      }, +      "instance_availability_zone": { +        "description": "The availability zone where this instance is running. Required if zone-awareness is enabled.", +        "type": "string", +        "x-cli-flag": "store-gateway.sharding-ring.instance-availability-zone" +      }, +      "instance_interface_names": { +        "default": "[eth0 en0]", +        "description": "Name of network interface to read address from.", +        "items": { +          "type": "string" +        }, +        "type": "array", +        "x-cli-flag": "store-gateway.sharding-ring.instance-interface-names" +      }, +      "keep_instance_in_the_ring_on_shutdown": { +        "default": false, +        "description": "True to keep the store gateway instance in the ring when it shuts down. The instance will then be auto-forgotten from the ring after 10*heartbeat_timeout.", +        "type": "boolean", +        "x-cli-flag": "store-gateway.sharding-ring.keep-instance-in-the-ring-on-shutdown" +      }, +      "kvstore": { +        "description": "The key-value store used to share the hash ring across multiple instances. 
This option needs to be set on both the store-gateway and querier when running in microservices mode.", +        "properties": { +          "consul": { +            "$ref": "#/definitions/consul_config" +          }, +          "dynamodb": { +            "properties": { +              "max_cas_retries": { +                "default": 10, +                "description": "Maximum number of retries for DDB KV CAS.", +                "type": "number", +                "x-cli-flag": "store-gateway.sharding-ring.dynamodb.max-cas-retries" +              }, +              "puller_sync_time": { +                "default": "1m0s", +                "description": "Time to refresh local ring with information on dynamodb.", +                "type": "string", +                "x-cli-flag": "store-gateway.sharding-ring.dynamodb.puller-sync-time", +                "x-format": "duration" +              }, +              "region": { +                "description": "Region to access dynamodb.", +                "type": "string", +                "x-cli-flag": "store-gateway.sharding-ring.dynamodb.region" +              }, +              "table_name": { +                "description": "Table name to use on dynamodb.", +                "type": "string", +                "x-cli-flag": "store-gateway.sharding-ring.dynamodb.table-name" +              }, +              "timeout": { +                "default": "2m0s", +                "description": "Timeout of dynamoDbClient requests. Default is 2m.", +                "type": "string", +                "x-cli-flag": "store-gateway.sharding-ring.dynamodb.timeout", +                "x-format": "duration" +              }, +              "ttl": { +                "default": "0s", +                "description": "Time to expire items on dynamodb.", +                "type": "string", +                "x-cli-flag": "store-gateway.sharding-ring.dynamodb.ttl-time", +                "x-format": "duration" +              } +            }, +            "type": "object" +          }, +          "etcd": { +            "$ref": "#/definitions/etcd_config" +          }, +          "multi": { +            "properties": { +              "mirror_enabled": { +                "default": false, +                "description": "Mirror writes to secondary store.", +                "type": "boolean", +                "x-cli-flag": "store-gateway.sharding-ring.multi.mirror-enabled" +              }, +              "mirror_timeout": { +                "default": "2s", +                "description": "Timeout for storing value to secondary store.", +                "type": "string", +                "x-cli-flag": "store-gateway.sharding-ring.multi.mirror-timeout", +                "x-format": "duration" +              }, +              "primary": { +                "description": "Primary backend storage used by multi-client.", +                "type": "string", +                "x-cli-flag": "store-gateway.sharding-ring.multi.primary" +              }, +              "secondary": { +                "description": "Secondary backend storage used by multi-client.", +                "type": "string", +                "x-cli-flag": "store-gateway.sharding-ring.multi.secondary" +              } +            }, +            "type": "object" +          }, +          "prefix": { +            "default": "collectors/", +            "description": "The prefix for the keys in the store. Should end with a /.", +            "type": "string", +            "x-cli-flag": "store-gateway.sharding-ring.prefix" +          }, +          "store": { +            "default": "consul", +            "description": "Backend storage to use for the ring. Supported values are: consul, etcd, inmemory, memberlist, multi.", +            "type": "string", +            "x-cli-flag": "store-gateway.sharding-ring.store" +          } +        }, +        "type": "object" +      }, +      "replication_factor": { +        "default": 3, +        "description": "The replication factor to use when sharding blocks. This option needs to be set on both the store-gateway and querier when running in microservices mode.", +        "type": "number", +        "x-cli-flag": "store-gateway.sharding-ring.replication-factor" +      }, +      "tokens_file_path": { +        "description": "File path where tokens are stored. 
If empty, tokens are neither stored at shutdown nor restored at startup.", +        "type": "string", +        "x-cli-flag": "store-gateway.sharding-ring.tokens-file-path" +      }, +      "wait_instance_state_timeout": { +        "default": "10m0s", +        "description": "Timeout for waiting on the store-gateway to reach the desired state in the ring.", +        "type": "string", +        "x-cli-flag": "store-gateway.sharding-ring.wait-instance-state-timeout", +        "x-format": "duration" +      }, +      "wait_stability_max_duration": { +        "default": "5m0s", +        "description": "Maximum time to wait for ring stability at startup. If the store-gateway ring keeps changing after this period of time, the store-gateway will start anyway.", +        "type": "string", +        "x-cli-flag": "store-gateway.sharding-ring.wait-stability-max-duration", +        "x-format": "duration" +      }, +      "wait_stability_min_duration": { +        "default": "1m0s", +        "description": "Minimum time to wait for ring stability at startup. 0 to disable.", +        "type": "string", +        "x-cli-flag": "store-gateway.sharding-ring.wait-stability-min-duration", +        "x-format": "duration" +      }, +      "zone_awareness_enabled": { +        "default": false, +        "description": "True to enable zone-awareness and replicate blocks across different availability zones.", +        "type": "boolean", +        "x-cli-flag": "store-gateway.sharding-ring.zone-awareness-enabled" +      } +    }, +    "type": "object" +  }, +  "sharding_strategy": { +    "default": "default", +    "description": "The sharding strategy to use. Supported values are: default, shuffle-sharding.", +    "type": "string", +    "x-cli-flag": "store-gateway.sharding-strategy" +  } + }, + "type": "object" + }, + "tracing_config": { + "description": "The tracing_config configures the tracing backends Cortex uses.", + "properties": { +  "otel": { +    "properties": { +      "exporter_type": { +        "description": "Enhance/modify traces/propagators for a specific exporter. If empty, OTEL defaults will apply. Supported values are: `awsxray`.", +        "type": "string", +        "x-cli-flag": "tracing.otel.exporter-type" +      }, +      "otlp_endpoint": { +        "description": "OTLP collector endpoint that the driver will use to send spans.", +        "type": "string", +        "x-cli-flag": "tracing.otel.otlp-endpoint" +      }, +      "round_robin": { +        "default": false, +        "description": "If enabled, use the round_robin gRPC load balancing policy. By default, the pick_first policy is used. For more details, please refer to https://github.com/grpc/grpc/blob/master/doc/load-balancing.md#load-balancing-policies.", +        "type": "boolean", +        "x-cli-flag": "tracing.otel.round-robin" +      }, +      "sample_ratio": { +        "default": 0.001, +        "description": "Fraction of traces to be sampled. Fractions \u003e= 1 mean sampling is off and everything is traced.", +        "type": "number", +        "x-cli-flag": "tracing.otel.sample-ratio" +      }, +      "tls": { +        "properties": { +          "tls_ca_path": { +            "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", +            "type": "string", +            "x-cli-flag": "tracing.otel.tls.tls-ca-path" +          }, +          "tls_cert_path": { +            "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", +            "type": "string", +            "x-cli-flag": "tracing.otel.tls.tls-cert-path" +          }, +          "tls_insecure_skip_verify": { +            "default": false, +            "description": "Skip validating server certificate.", +            "type": "boolean", +            "x-cli-flag": "tracing.otel.tls.tls-insecure-skip-verify" +          }, +          "tls_key_path": { +            "description": "Path to the key file for the client certificate. 
Also requires the client certificate to be configured.", +            "type": "string", +            "x-cli-flag": "tracing.otel.tls.tls-key-path" +          }, +          "tls_server_name": { +            "description": "Override the expected name on the server certificate.", +            "type": "string", +            "x-cli-flag": "tracing.otel.tls.tls-server-name" +          } +        }, +        "type": "object" +      }, +      "tls_enabled": { +        "default": false, +        "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, an insecure connection to the gRPC server will be used.", +        "type": "boolean", +        "x-cli-flag": "tracing.otel.tls-enabled" +      } +    }, +    "type": "object" +  }, +  "type": { +    "default": "jaeger", +    "description": "Tracing type. OTEL and JAEGER are currently supported. For jaeger, the `JAEGER_AGENT_HOST` environment variable should also be set. See: https://cortexmetrics.io/docs/guides/tracing.", +    "type": "string", +    "x-cli-flag": "tracing.type" +  } + }, + "type": "object" + } + }, + "description": "JSON Schema for Cortex configuration file", + "properties": { + "alertmanager": { +  "$ref": "#/definitions/alertmanager_config" + }, + "alertmanager_storage": { +  "$ref": "#/definitions/alertmanager_storage_config" + }, + "api": { +  "properties": { +    "alertmanager_http_prefix": { +      "default": "/alertmanager", +      "description": "HTTP URL path under which the Alertmanager UI and API will be served.", +      "type": "string", +      "x-cli-flag": "http.alertmanager-http-prefix" +    }, +    "build_info_enabled": { +      "default": false, +      "description": "If enabled, the build info API will be served by the query frontend or querier.", +      "type": "boolean", +      "x-cli-flag": "api.build-info-enabled" +    }, +    "cors_origin": { +      "default": ".*", +      "description": "Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\\.com'", +      "type": "string", +      "x-cli-flag": "server.cors-origin" +    }, +    "http_request_headers_to_log": { +      "default": [], +      "description": "Which HTTP request headers to add to logs", +      "items": { +        "type": "string" +      }, +      "type": "array", +      "x-cli-flag": "api.http-request-headers-to-log" +    }, +    "prometheus_http_prefix": { +      "default": "/prometheus", +      "description": "HTTP URL path under which the Prometheus API will be served.", +      "type": "string", +      "x-cli-flag": "http.prometheus-http-prefix" +    }, +    "querier_default_codec": { +      "default": "json", +      "description": "Choose default codec for querier response serialization. Supports 'json' and 'protobuf'.", +      "type": "string", +      "x-cli-flag": "api.querier-default-codec" +    }, +    "request_id_header": { +      "description": "HTTP header that can be used as a request ID", +      "type": "string", +      "x-cli-flag": "api.request-id-header" +    }, +    "response_compression_enabled": { +      "default": false, +      "description": "Use GZIP compression for API responses. 
Some endpoints serve large YAML or JSON blobs which can benefit from compression.", + "type": "boolean", + "x-cli-flag": "api.response-compression-enabled" + } + }, + "type": "object" + }, + "auth_enabled": { + "default": true, + "description": "Set to false to disable auth.", + "type": "boolean", + "x-cli-flag": "auth.enabled" + }, + "blocks_storage": { + "$ref": "#/definitions/blocks_storage_config" + }, + "compactor": { + "$ref": "#/definitions/compactor_config" + }, + "configs": { + "$ref": "#/definitions/configs_config" + }, + "distributor": { + "$ref": "#/definitions/distributor_config" + }, + "flusher": { + "$ref": "#/definitions/flusher_config" + }, + "frontend": { + "$ref": "#/definitions/query_frontend_config" + }, + "frontend_worker": { + "$ref": "#/definitions/frontend_worker_config" + }, + "http_prefix": { + "default": "/api/prom", + "description": "HTTP path prefix for Cortex API.", + "type": "string", + "x-cli-flag": "http.prefix" + }, + "ingester": { + "$ref": "#/definitions/ingester_config" + }, + "ingester_client": { + "$ref": "#/definitions/ingester_client_config" + }, + "limits": { + "$ref": "#/definitions/limits_config" + }, + "memberlist": { + "$ref": "#/definitions/memberlist_config" + }, + "parquet_converter": { + "properties": { + "conversion_interval": { + "default": "1m0s", + "description": "How often to check for new TSDB blocks to convert to parquet format.", + "type": "string", + "x-cli-flag": "parquet-converter.conversion-interval", + "x-format": "duration" + }, + "data_dir": { + "default": "./data", + "description": "Local directory path for caching TSDB blocks during parquet conversion.", + "type": "string", + "x-cli-flag": "parquet-converter.data-dir" + }, + "file_buffer_enabled": { + "default": true, + "description": "Enable disk-based write buffering to reduce memory consumption during parquet file generation.", + "type": "boolean", + "x-cli-flag": "parquet-converter.file-buffer-enabled" + }, + "max_rows_per_row_group": { + "default": 1000000, + "description": "Maximum number of time series per parquet row group. Larger values improve compression but may reduce performance during reads.", + "type": "number", + "x-cli-flag": "parquet-converter.max-rows-per-row-group" + }, + "meta_sync_concurrency": { + "default": 20, + "description": "Maximum concurrent goroutines for downloading block metadata from object storage.", + "type": "number", + "x-cli-flag": "parquet-converter.meta-sync-concurrency" + }, + "ring": { + "properties": { + "auto_forget_delay": { + "default": "2m0s", + "description": "Time since last heartbeat before parquet-converter will be removed from ring. 0 to disable", + "type": "string", + "x-cli-flag": "parquet-converter.auto-forget-delay", + "x-format": "duration" + }, + "heartbeat_period": { + "default": "5s", + "description": "Period at which to heartbeat to the ring. 0 = disabled.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.heartbeat-period", + "x-format": "duration" + }, + "heartbeat_timeout": { + "default": "1m0s", + "description": "The heartbeat timeout after which parquet-converter are considered unhealthy within the ring. 
0 = never (timeout disabled).", + "type": "string", + "x-cli-flag": "parquet-converter.ring.heartbeat-timeout", + "x-format": "duration" + }, + "kvstore": { + "properties": { + "consul": { + "$ref": "#/definitions/consul_config" + }, + "dynamodb": { + "properties": { + "max_cas_retries": { + "default": 10, + "description": "Maximum number of retries for DDB KV CAS.", + "type": "number", + "x-cli-flag": "parquet-converter.ring.dynamodb.max-cas-retries" + }, + "puller_sync_time": { + "default": "1m0s", + "description": "Time to refresh local ring with information on dynamodb.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.puller-sync-time", + "x-format": "duration" + }, + "region": { + "description": "Region to access dynamodb.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.region" + }, + "table_name": { + "description": "Table name to use on dynamodb.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.table-name" + }, + "timeout": { + "default": "2m0s", + "description": "Timeout of dynamoDbClient requests. Default is 2m.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.timeout", + "x-format": "duration" + }, + "ttl": { + "default": "0s", + "description": "Time to expire items on dynamodb.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.dynamodb.ttl-time", + "x-format": "duration" + } + }, + "type": "object" + }, + "etcd": { + "$ref": "#/definitions/etcd_config" + }, + "multi": { + "properties": { + "mirror_enabled": { + "default": false, + "description": "Mirror writes to secondary store.", + "type": "boolean", + "x-cli-flag": "parquet-converter.ring.multi.mirror-enabled" + }, + "mirror_timeout": { + "default": "2s", + "description": "Timeout for storing value to secondary store.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.multi.mirror-timeout", + "x-format": "duration" + }, + "primary": { + "description": "Primary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.multi.primary" + }, + "secondary": { + "description": "Secondary backend storage used by multi-client.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.multi.secondary" + } + }, + "type": "object" + }, + "prefix": { + "default": "collectors/", + "description": "The prefix for the keys in the store. Should end with a /.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.prefix" + }, + "store": { + "default": "consul", + "description": "Backend storage to use for the ring. Supported values are: consul, etcd, inmemory, memberlist, multi.", + "type": "string", + "x-cli-flag": "parquet-converter.ring.store" + } + }, + "type": "object" + }, + "tokens_file_path": { + "description": "File path where tokens are stored. 
If empty, tokens are neither stored at shutdown nor restored at startup.", +            "type": "string", +            "x-cli-flag": "parquet-converter.ring.tokens-file-path" +          } +        }, +        "type": "object" +      } +    }, +    "type": "object" + }, + "querier": { +  "$ref": "#/definitions/querier_config" + }, + "query_range": { +  "$ref": "#/definitions/query_range_config" + }, + "query_scheduler": { +  "properties": { +    "grpc_client_config": { +      "description": "This configures the gRPC client used to report errors back to the query-frontend.", +      "properties": { +        "backoff_config": { +          "properties": { +            "max_period": { +              "default": "10s", +              "description": "Maximum delay when backing off.", +              "type": "string", +              "x-cli-flag": "query-scheduler.grpc-client-config.backoff-max-period", +              "x-format": "duration" +            }, +            "max_retries": { +              "default": 10, +              "description": "Number of times to back off and retry before failing.", +              "type": "number", +              "x-cli-flag": "query-scheduler.grpc-client-config.backoff-retries" +            }, +            "min_period": { +              "default": "100ms", +              "description": "Minimum delay when backing off.", +              "type": "string", +              "x-cli-flag": "query-scheduler.grpc-client-config.backoff-min-period", +              "x-format": "duration" +            } +          }, +          "type": "object" +        }, +        "backoff_on_ratelimits": { +          "default": false, +          "description": "Enable backoff and retry when we hit rate limits.", +          "type": "boolean", +          "x-cli-flag": "query-scheduler.grpc-client-config.backoff-on-ratelimits" +        }, +        "connect_timeout": { +          "default": "5s", +          "description": "The maximum amount of time to establish a connection. A value of 0 means the default gRPC client connect timeout of 20s is used.", +          "type": "string", +          "x-cli-flag": "query-scheduler.grpc-client-config.connect-timeout", +          "x-format": "duration" +        }, +        "grpc_compression": { +          "description": "Use compression when sending messages. Supported values are: 'gzip', 'snappy', 'snappy-block', 'zstd' and '' (disable compression)", +          "type": "string", +          "x-cli-flag": "query-scheduler.grpc-client-config.grpc-compression" +        }, +        "max_recv_msg_size": { +          "default": 104857600, +          "description": "gRPC client max receive message size (bytes).", +          "type": "number", +          "x-cli-flag": "query-scheduler.grpc-client-config.grpc-max-recv-msg-size" +        }, +        "max_send_msg_size": { +          "default": 16777216, +          "description": "gRPC client max send message size (bytes).", +          "type": "number", +          "x-cli-flag": "query-scheduler.grpc-client-config.grpc-max-send-msg-size" +        }, +        "rate_limit": { +          "default": 0, +          "description": "Rate limit for gRPC client; 0 means disabled.", +          "type": "number", +          "x-cli-flag": "query-scheduler.grpc-client-config.grpc-client-rate-limit" +        }, +        "rate_limit_burst": { +          "default": 0, +          "description": "Rate limit burst for gRPC client.", +          "type": "number", +          "x-cli-flag": "query-scheduler.grpc-client-config.grpc-client-rate-limit-burst" +        }, +        "tls_ca_path": { +          "description": "Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used.", +          "type": "string", +          "x-cli-flag": "query-scheduler.grpc-client-config.tls-ca-path" +        }, +        "tls_cert_path": { +          "description": "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.", +          "type": "string", +          "x-cli-flag": "query-scheduler.grpc-client-config.tls-cert-path" +        }, +        "tls_enabled": { +          "default": false, +          "description": "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. 
If set to false, insecure connection to gRPC server will be used.", + "type": "boolean", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-enabled" + }, + "tls_insecure_skip_verify": { + "default": false, + "description": "Skip validating server certificate.", + "type": "boolean", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-insecure-skip-verify" + }, + "tls_key_path": { + "description": "Path to the key file for the client certificate. Also requires the client certificate to be configured.", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-key-path" + }, + "tls_server_name": { + "description": "Override the expected name on the server certificate.", + "type": "string", + "x-cli-flag": "query-scheduler.grpc-client-config.tls-server-name" + } + }, + "type": "object" + }, + "querier_forget_delay": { + "default": "0s", + "description": "If a querier disconnects without sending notification about graceful shutdown, the query-scheduler will keep the querier in the tenant's shard until the forget delay has passed. This feature is useful to reduce the blast radius when shuffle-sharding is enabled.", + "type": "string", + "x-cli-flag": "query-scheduler.querier-forget-delay", + "x-format": "duration" + } + }, + "type": "object" + }, + "resource_monitor": { + "properties": { + "cpu_rate_interval": { + "default": "1m0s", + "description": "Interval to calculate average CPU rate. Must be greater than resource monitor interval.", + "type": "string", + "x-cli-flag": "resource-monitor.cpu-rate-interval", + "x-format": "duration" + }, + "interval": { + "default": "100ms", + "description": "Update interval of resource monitor. Must be greater than 0.", + "type": "string", + "x-cli-flag": "resource-monitor.interval", + "x-format": "duration" + }, + "resources": { + "description": "Comma-separated list of resources to monitor. Supported values are cpu and heap, which tracks metrics from github.com/prometheus/procfs and runtime/metrics that are close estimates. Empty string to disable.", + "type": "string", + "x-cli-flag": "resource-monitor.resources" + } + }, + "type": "object" + }, + "ruler": { + "$ref": "#/definitions/ruler_config" + }, + "ruler_storage": { + "$ref": "#/definitions/ruler_storage_config" + }, + "runtime_config": { + "$ref": "#/definitions/runtime_configuration_storage_config" + }, + "server": { + "$ref": "#/definitions/server_config" + }, + "storage": { + "$ref": "#/definitions/storage_config" + }, + "store_gateway": { + "$ref": "#/definitions/store_gateway_config" + }, + "target": { + "default": "all", + "description": "Comma-separated list of Cortex modules to load. The alias 'all' can be used in the list to load a number of core modules and will enable single-binary mode. Use '-modules' command line flag to get a list of available modules, and to see which modules are included in 'all'.", + "type": "string", + "x-cli-flag": "target" + }, + "tenant_federation": { + "properties": { + "enabled": { + "default": false, + "description": "If enabled on all Cortex services, queries can be federated across multiple tenants. 
The tenant IDs involved need to be specified, separated by a `|` character, in the `X-Scope-OrgID` header (experimental).", + "type": "boolean", + "x-cli-flag": "tenant-federation.enabled" + }, + "max_concurrent": { + "default": 16, + "description": "The number of workers used to process each federated query.", + "type": "number", + "x-cli-flag": "tenant-federation.max-concurrent" + }, + "max_tenant": { + "default": 0, + "description": "A maximum number of tenants to query at once. 0 means no limit.", + "type": "number", + "x-cli-flag": "tenant-federation.max-tenant" + }, + "regex_matcher_enabled": { + "default": false, + "description": "[Experimental] If enabled, the `X-Scope-OrgID` header value can accept a regex, and the matched tenant IDs are automatically included. The regex matching rules follow Prometheus; see https://prometheus.io/docs/prometheus/latest/querying/basics/#regular-expressions for details. User discovery is based on scanning block storage, so new users become queryable after uploading a block (generally within 2h).", + "type": "boolean", + "x-cli-flag": "tenant-federation.regex-matcher-enabled" + }, + "user_sync_interval": { + "default": "5m0s", + "description": "[Experimental] If the regex matcher is enabled, it specifies how frequently to scan users. The scanned users are used to calculate the matched tenant IDs. The scanning strategy depends on the `-blocks-storage.users-scanner.strategy`.", + "type": "string", + "x-cli-flag": "tenant-federation.user-sync-interval", + "x-format": "duration" + } + }, + "type": "object" + }, + "tracing": { + "$ref": "#/definitions/tracing_config" + } + }, + "title": "Cortex Configuration Schema", + "type": "object" +} diff --git a/tools/doc-generator/json_schema_writer.go b/tools/doc-generator/json_schema_writer.go new file mode 100644 index 00000000000..2ff96a13fb3 --- /dev/null +++ b/tools/doc-generator/json_schema_writer.go @@ -0,0 +1,238 @@ +package main + +import ( + "encoding/json" + "fmt" + "io" + "strings" +) + +type JSONSchemaWriter struct { + out io.Writer +} + +func NewJSONSchemaWriter(out io.Writer) *JSONSchemaWriter { + return &JSONSchemaWriter{out: out} +} + +func (w *JSONSchemaWriter) WriteSchema(blocks []*configBlock) error { + schema := w.generateJSONSchema(blocks) + + encoder := json.NewEncoder(w.out) + encoder.SetIndent("", " ") + return encoder.Encode(schema) +} + +func (w *JSONSchemaWriter) generateJSONSchema(blocks []*configBlock) map[string]interface{} { + schema := map[string]interface{}{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://raw.githubusercontent.com/cortexproject/cortex/master/schemas/cortex-config-schema.json", + "title": "Cortex Configuration Schema", + "description": "JSON Schema for Cortex configuration file", + "type": "object", + "properties": map[string]interface{}{}, + "definitions": map[string]interface{}{}, + } + + properties := schema["properties"].(map[string]interface{}) + definitions := schema["definitions"].(map[string]interface{}) + + // Process each config block + for _, block := range blocks { + if block.name == "" { + // This is the root block, process its entries as top-level properties + w.processBlockEntries(block, properties, definitions) + } else { + // This is a named block, add it to definitions + definitions[block.name] = w.generateBlockSchema(block) + } + } + + return schema +} + +func (w *JSONSchemaWriter) processBlockEntries(block *configBlock, properties map[string]interface{}, definitions map[string]interface{}) { + for _, entry := range
block.entries { + switch entry.kind { + case "field": + properties[entry.name] = w.generateFieldSchema(entry) + case "block": + if entry.root { + // Root blocks are referenced via $ref + properties[entry.name] = map[string]interface{}{ + "$ref": fmt.Sprintf("#/definitions/%s", entry.block.name), + } + // Add the block to definitions if not already there + if _, exists := definitions[entry.block.name]; !exists { + definitions[entry.block.name] = w.generateBlockSchema(entry.block) + } + } else { + // Inline blocks are embedded directly + properties[entry.name] = w.generateBlockSchema(entry.block) + } + } + } +} + +func (w *JSONSchemaWriter) generateBlockSchema(block *configBlock) map[string]interface{} { + obj := map[string]interface{}{ + "type": "object", + "properties": map[string]interface{}{}, + } + + if block.desc != "" { + obj["description"] = block.desc + } + + properties := obj["properties"].(map[string]interface{}) + + for _, entry := range block.entries { + switch entry.kind { + case "field": + properties[entry.name] = w.generateFieldSchema(entry) + case "block": + if entry.root { + // Reference to another root block + properties[entry.name] = map[string]interface{}{ + "$ref": fmt.Sprintf("#/definitions/%s", entry.block.name), + } + } else { + // Inline nested block + properties[entry.name] = w.generateBlockSchema(entry.block) + } + } + } + + return obj +} + +func (w *JSONSchemaWriter) generateFieldSchema(entry *configEntry) map[string]interface{} { + prop := map[string]interface{}{ + "type": w.getJSONType(entry.fieldType), + } + + // Add description + if entry.fieldDesc != "" { + prop["description"] = entry.fieldDesc + } + + // Add default value + if entry.fieldDefault != "" { + prop["default"] = w.parseDefaultValue(entry.fieldDefault, entry.fieldType) + } + + // Add CLI flag information + if entry.fieldFlag != "" { + prop["x-cli-flag"] = entry.fieldFlag + } + + // Add format hints based on type + switch entry.fieldType { + case "duration": + prop["x-format"] = "duration" + prop["type"] = "string" + case "url": + prop["format"] = "uri" + prop["type"] = "string" + case "time": + prop["format"] = "date-time" + prop["type"] = "string" + } + + // Handle list types + if strings.HasPrefix(entry.fieldType, "list of ") { + prop["type"] = "array" + itemType := strings.TrimPrefix(entry.fieldType, "list of ") + prop["items"] = map[string]interface{}{ + "type": w.getJSONType(itemType), + } + } + + // Handle map types + if strings.HasPrefix(entry.fieldType, "map of ") { + prop["type"] = "object" + prop["additionalProperties"] = true + } + + // Mark required fields + if entry.required { + prop["x-required"] = true + } + + return prop +} + +func (w *JSONSchemaWriter) getJSONType(goType string) string { + switch goType { + case "string": + return "string" + case "int", "float": + return "number" + case "boolean": + return "boolean" + case "duration", "url", "time": + return "string" + default: + // Handle complex types + if strings.HasPrefix(goType, "list of ") { + return "array" + } + if strings.HasPrefix(goType, "map of ") { + return "object" + } + // Default to string for unknown types + return "string" + } +} + +func (w *JSONSchemaWriter) parseDefaultValue(defaultStr, goType string) interface{} { + if defaultStr == "" { + return nil + } + + switch goType { + case "boolean": + return defaultStr == "true" + case "int": + if val, err := parseInt(defaultStr); err == nil { + return val + } + return defaultStr + case "float": + if val, err := parseFloat(defaultStr); err == nil { + return val + } + 
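+ // Fall back to returning the raw default string when it doesn't parse as a float.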
return defaultStr + default: + // Handle special cases + if defaultStr == "[]" { + return []interface{}{} + } + if strings.HasPrefix(defaultStr, "[") && strings.HasSuffix(defaultStr, "]") { + // Try to parse as JSON array + var arr []interface{} + if err := json.Unmarshal([]byte(defaultStr), &arr); err == nil { + return arr + } + } + return defaultStr + } +} + +// Helper functions for parsing +func parseInt(s string) (int64, error) { + var result int64 + // Reject any scientific notation (e.g. 1e+06), which Sscanf would otherwise silently truncate at the mantissa. + if strings.ContainsAny(s, "eE") { + return 0, fmt.Errorf("scientific notation not supported") + } + _, err := fmt.Sscanf(s, "%d", &result) + return result, err +} + +func parseFloat(s string) (float64, error) { + var result float64 + _, err := fmt.Sscanf(s, "%f", &result) + return result, err +} diff --git a/tools/doc-generator/main.go b/tools/doc-generator/main.go index 58a1787ee27..2e13d3c906e 100644 --- a/tools/doc-generator/main.go +++ b/tools/doc-generator/main.go @@ -3,6 +3,7 @@ package main import ( "flag" "fmt" + "io" "os" "path/filepath" "reflect" @@ -313,11 +314,63 @@ func generateBlockMarkdown(blocks []*configBlock, blockName, fieldName string) s return "" } +func generateJSONSchemaMain(outputFile string) { + // Create a Cortex config instance + cfg := &cortex.Config{} + + // Parse CLI flags to map them to config fields + flags := parseFlags(cfg) + + // Parse the config structure + blocks, err := parseConfig(nil, cfg, flags, map[string]struct{}{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Error parsing config: %s\n", err.Error()) + os.Exit(1) + } + + // Annotate the flags prefix for each root block + annotateFlagPrefix(blocks) + + // Generate JSON schema + var output io.Writer + if outputFile != "" { + file, err := os.Create(outputFile) + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating file: %s\n", err.Error()) + os.Exit(1) + } + defer file.Close() + output = file + } else { + output = os.Stdout + } + + writer := NewJSONSchemaWriter(output) + err = writer.WriteSchema(blocks) + if err != nil { + fmt.Fprintf(os.Stderr, "Error writing JSON schema: %s\n", err.Error()) + os.Exit(1) + } + + if outputFile != "" { + fmt.Printf("JSON schema written to %s\n", outputFile) + } +} + func main() { // Parse the generator flags.
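+ // Example invocations (illustrative): + //   doc-generator -json-schema -output schemas/cortex-config-schema.json + //   doc-generator <template-file>  (existing markdown mode)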
+ jsonSchema := flag.Bool("json-schema", false, "Generate JSON schema instead of markdown documentation") + outputFile := flag.String("output", "", "Output file for schema (default: stdout)") flag.Parse() + + // If JSON schema generation is requested + if *jsonSchema { + generateJSONSchemaMain(*outputFile) + return + } + if flag.NArg() != 1 { - fmt.Fprintf(os.Stderr, "Usage: doc-generator template-file") + fmt.Fprintf(os.Stderr, "Usage: doc-generator [-json-schema] [-output file] template-file") os.Exit(1) } diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 500c34cf445..66131916eb7 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,34 @@ # Changelog +## [0.16.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.1...auth/v0.16.2) (2025-06-04) + + +### Bug Fixes + +* **auth:** Add back DirectPath misconfiguration logging ([#11162](https://github.com/googleapis/google-cloud-go/issues/11162)) ([8d52da5](https://github.com/googleapis/google-cloud-go/commit/8d52da58da5a0ed77a0f6307d1b561bc045406a1)) +* **auth:** Remove s2a fallback option ([#12354](https://github.com/googleapis/google-cloud-go/issues/12354)) ([d5acc59](https://github.com/googleapis/google-cloud-go/commit/d5acc599cd775ddc404349e75906fa02e8ff133e)) + +## [0.16.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.0...auth/v0.16.1) (2025-04-23) + + +### Bug Fixes + +* **auth:** Clone detectopts before assigning TokenBindingType ([#11881](https://github.com/googleapis/google-cloud-go/issues/11881)) ([2167b02](https://github.com/googleapis/google-cloud-go/commit/2167b020fdc43b517c2b6ecca264a10e357ea035)) + +## [0.16.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.15.0...auth/v0.16.0) (2025-04-14) + + +### Features + +* **auth/credentials:** Return X.509 certificate chain as subject token ([#11948](https://github.com/googleapis/google-cloud-go/issues/11948)) ([d445a3f](https://github.com/googleapis/google-cloud-go/commit/d445a3f66272ffd5c39c4939af9bebad4582631c)), refs [#11757](https://github.com/googleapis/google-cloud-go/issues/11757) +* **auth:** Configure DirectPath bound credentials from AllowedHardBoundTokens ([#11665](https://github.com/googleapis/google-cloud-go/issues/11665)) ([0fc40bc](https://github.com/googleapis/google-cloud-go/commit/0fc40bcf4e4673704df0973e9fa65957395d7bb4)) + + +### Bug Fixes + +* **auth:** Allow non-default SA credentials for DP ([#11828](https://github.com/googleapis/google-cloud-go/issues/11828)) ([3a996b4](https://github.com/googleapis/google-cloud-go/commit/3a996b4129e6d0a34dfda6671f535d5aefb26a82)) +* **auth:** Restore calling DialContext ([#11930](https://github.com/googleapis/google-cloud-go/issues/11930)) ([9ec9a29](https://github.com/googleapis/google-cloud-go/commit/9ec9a29494e93197edbaf45aba28984801e9770a)), refs [#11118](https://github.com/googleapis/google-cloud-go/issues/11118) + ## [0.15.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.1...auth/v0.15.0) (2025-02-19) diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go index a8220642348..f4f49f175dc 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -413,7 +413,10 @@ func 
newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") } - return &x509Provider{}, nil + return &x509Provider{ + TrustChainPath: o.CredentialSource.Certificate.TrustChainPath, + ConfigFilePath: o.CredentialSource.Certificate.CertificateConfigLocation, + }, nil } return nil, errors.New("credentials: unable to parse credential source") } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go index 115df5881f1..d86ca593c8c 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -17,27 +17,184 @@ package externalaccount import ( "context" "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/fs" "net/http" + "os" + "strings" "time" "cloud.google.com/go/auth/internal/transport/cert" ) -// x509Provider implements the subjectTokenProvider type for -// x509 workload identity credentials. Because x509 credentials -// rely on an mTLS connection to represent the 3rd party identity -// rather than a subject token, this provider will always return -// an empty string when a subject token is requested by the external account -// token provider. +// x509Provider implements the subjectTokenProvider type for x509 workload +// identity credentials. This provider retrieves and formats a JSON array +// containing the leaf certificate and trust chain (if provided) as +// base64-encoded strings. This JSON array serves as the subject token for +// mTLS authentication. type x509Provider struct { + // TrustChainPath is the path to the file containing the trust chain certificates. + // The file should contain one or more PEM-encoded certificates. + TrustChainPath string + // ConfigFilePath is the path to the configuration file containing the path + // to the leaf certificate file. + ConfigFilePath string } +const pemCertificateHeader = "-----BEGIN CERTIFICATE-----" + func (xp *x509Provider) providerType() string { return x509ProviderType } -func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { - return "", nil +// loadLeafCertificate loads and parses the leaf certificate from the specified +// configuration file. It retrieves the certificate path from the config file, +// reads the certificate file, and parses the certificate data. +func loadLeafCertificate(configFilePath string) (*x509.Certificate, error) { + // Get the path to the certificate file from the configuration file. + path, err := cert.GetCertificatePath(configFilePath) + if err != nil { + return nil, fmt.Errorf("failed to get certificate path from config file: %w", err) + } + leafCertBytes, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read leaf certificate file: %w", err) + } + // Parse the certificate bytes. + return parseCertificate(leafCertBytes) +} + +// encodeCert encodes a x509.Certificate to a base64 string. +func encodeCert(cert *x509.Certificate) string { + // cert.Raw contains the raw DER-encoded certificate. Encode the raw certificate bytes to base64. 
+ return base64.StdEncoding.EncodeToString(cert.Raw) +} + +// parseCertificate parses a PEM-encoded certificate from the given byte slice. +func parseCertificate(certData []byte) (*x509.Certificate, error) { + if len(certData) == 0 { + return nil, errors.New("invalid certificate data: empty input") + } + // Decode the PEM-encoded data. + block, _ := pem.Decode(certData) + if block == nil { + return nil, errors.New("invalid PEM-encoded certificate data: no PEM block found") + } + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("invalid PEM-encoded certificate data: expected CERTIFICATE block type, got %s", block.Type) + } + // Parse the DER-encoded certificate. + certificate, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %w", err) + } + return certificate, nil +} + +// readTrustChain reads a file of PEM-encoded X.509 certificates and returns a slice of parsed certificates. +// It splits the file content into PEM certificate blocks and parses each one. +func readTrustChain(trustChainPath string) ([]*x509.Certificate, error) { + certificateTrustChain := []*x509.Certificate{} + + // If no trust chain path is provided, return an empty slice. + if trustChainPath == "" { + return certificateTrustChain, nil + } + + // Read the trust chain file. + trustChainData, err := os.ReadFile(trustChainPath) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil, fmt.Errorf("trust chain file not found: %w", err) + } + return nil, fmt.Errorf("failed to read trust chain file: %w", err) + } + + // Split the file content into PEM certificate blocks. + certBlocks := strings.Split(string(trustChainData), pemCertificateHeader) + + // Iterate over each certificate block. + for _, certBlock := range certBlocks { + // Trim whitespace from the block. + certBlock = strings.TrimSpace(certBlock) + + if certBlock != "" { + // Add the PEM header to the block. + certData := pemCertificateHeader + "\n" + certBlock + + // Parse the certificate data. + cert, err := parseCertificate([]byte(certData)) + if err != nil { + return nil, fmt.Errorf("error parsing certificate from trust chain file: %w", err) + } + + // Append the certificate to the trust chain. + certificateTrustChain = append(certificateTrustChain, cert) + } + } + + return certificateTrustChain, nil +} + +// subjectToken retrieves the X.509 subject token. It loads the leaf +// certificate and, if a trust chain path is configured, the trust chain +// certificates. It then constructs a JSON array containing the base64-encoded +// leaf certificate and each base64-encoded certificate in the trust chain. +// The leaf certificate must be at the top of the trust chain file. This JSON +// array is used as the subject token for mTLS authentication. +func (xp *x509Provider) subjectToken(context.Context) (string, error) { + // Load the leaf certificate. + leafCert, err := loadLeafCertificate(xp.ConfigFilePath) + if err != nil { + return "", fmt.Errorf("failed to load leaf certificate: %w", err) + } + + // Read the trust chain. + trustChain, err := readTrustChain(xp.TrustChainPath) + if err != nil { + return "", fmt.Errorf("failed to read trust chain: %w", err) + } + + // Initialize the certificate chain with the leaf certificate. + certChain := []string{encodeCert(leafCert)} + + // If there is a trust chain, add certificates to the certificate chain. 
+ if len(trustChain) > 0 { + firstCert := encodeCert(trustChain[0]) + + // If the first certificate in the trust chain is not the same as the leaf certificate, add it to the chain. + if firstCert != certChain[0] { + certChain = append(certChain, firstCert) + } + + // Iterate over the remaining certificates in the trust chain. + for i := 1; i < len(trustChain); i++ { + encoded := encodeCert(trustChain[i]) + + // Return an error if the current certificate is the same as the leaf certificate. + if encoded == certChain[0] { + return "", errors.New("the leaf certificate must be at the top of the trust chain file") + } + + // Add the current certificate to the chain. + certChain = append(certChain, encoded) + } + } + + // Convert the certificate chain to a JSON array of base64-encoded strings. + jsonChain, err := json.Marshal(certChain) + if err != nil { + return "", fmt.Errorf("failed to format certificate data: %w", err) + } + + // Return the JSON-formatted certificate chain. + return string(jsonChain), nil + } // createX509Client creates a new client that is configured with mTLS, using the diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index c541da2b1ac..69d6d0034e4 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -20,13 +20,18 @@ import ( "os" "strconv" "strings" + "time" "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal/compute" + "golang.org/x/time/rate" "google.golang.org/grpc" grpcgoogle "google.golang.org/grpc/credentials/google" ) +var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} + func isDirectPathEnabled(endpoint string, opts *Options) bool { if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath { return false @@ -97,14 +102,36 @@ func isDirectPathXdsUsed(o *Options) bool { return false } +func isDirectPathBoundTokenEnabled(opts *InternalOptions) bool { + for _, ev := range opts.AllowHardBoundTokens { + if ev == "ALTS" { + return true + } + } + return false +} + // configureDirectPath returns some dial options and an endpoint to use if the // configuration allows the use of direct path. If it does not the provided // grpcOpts and endpoint are returned. -func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { +func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string, error) { + logRateLimiter.Do(func() { + logDirectPathMisconfig(endpoint, creds, opts) + }) if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. 
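+ // ALTS hard binding below is applied only when InternalOptions.AllowHardBoundTokens contains "ALTS" and the credentials come from the GCE metadata server (see isDirectPathBoundTokenEnabled and isTokenProviderComputeEngine).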
+ defaultCredetialsOptions := grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}} + if isDirectPathBoundTokenEnabled(opts.InternalOptions) && isTokenProviderComputeEngine(creds) { + optsClone := opts.resolveDetectOptions() + optsClone.TokenBindingType = credentials.ALTSHardBinding + altsCreds, err := credentials.DetectDefault(optsClone) + if err != nil { + return nil, "", err + } + defaultCredetialsOptions.ALTSPerRPCCreds = &grpcCredentialsProvider{creds: altsCreds} + } grpcOpts = []grpc.DialOption{ - grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} + grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(defaultCredetialsOptions))} if timeoutDialerOption != nil { grpcOpts = append(grpcOpts, timeoutDialerOption) } @@ -129,5 +156,22 @@ func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint str } // TODO: add support for system parameters (quota project, request reason) via chained interceptor. } - return grpcOpts, endpoint + return grpcOpts, endpoint, nil +} + +func logDirectPathMisconfig(endpoint string, creds *auth.Credentials, o *Options) { + + // Case 1: does not enable DirectPath + if !isDirectPathEnabled(endpoint, o) { + o.logger().Warn("DirectPath is disabled. To enable, please set the EnableDirectPath option along with the EnableDirectPathXds option.") + } else { + // Case 2: credential is not correctly set + if !isTokenProviderDirectPathCompatible(creds, o) { + o.logger().Warn("DirectPath is disabled. Please make sure the token source is fetched from GCE metadata server and the default service account is used.") + } + // Case 3: not running on GCE + if !compute.OnComputeEngine() { + o.logger().Warn("DirectPath is disabled. DirectPath is only available in a GCE environment.") + } + } } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 4610a485511..834aef41c87 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -304,17 +304,18 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er // This condition is only met for non-DirectPath clients because // TransportTypeMTLSS2A is used only when InternalOptions.EnableDirectPath // is false. + optsClone := opts.resolveDetectOptions() if transportCreds.TransportType == transport.TransportTypeMTLSS2A { // Check that the client allows requesting hard-bound token for the transport type mTLS using S2A. 
for _, ev := range opts.InternalOptions.AllowHardBoundTokens { if ev == "MTLS_S2A" { - opts.DetectOpts.TokenBindingType = credentials.MTLSHardBinding + optsClone.TokenBindingType = credentials.MTLSHardBinding break } } } var err error - creds, err = credentials.DetectDefault(opts.resolveDetectOptions()) + creds, err = credentials.DetectDefault(optsClone) if err != nil { return nil, err } @@ -341,7 +342,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er }), ) // Attempt Direct Path - grpcOpts, transportCreds.Endpoint = configureDirectPath(grpcOpts, opts, transportCreds.Endpoint, creds) + grpcOpts, transportCreds.Endpoint, err = configureDirectPath(grpcOpts, opts, transportCreds.Endpoint, creds) + if err != nil { + return nil, err + } } // Add tracing, but before the other options, so that clients can override the @@ -350,7 +354,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) - return grpc.Dial(transportCreds.Endpoint, grpcOpts...) + return grpc.DialContext(ctx, transportCreds.Endpoint, grpcOpts...) } // grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go index 3be6e5bbb41..606347304cb 100644 --- a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -127,6 +127,7 @@ type ExecutableConfig struct { type CertificateConfig struct { UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` CertificateConfigLocation string `json:"certificate_config_location"` + TrustChainPath string `json:"trust_chain_path"` } // ServiceAccountImpersonationInfo has impersonation configuration. diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index b1f0fcf9374..14bca966ecc 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -31,7 +31,6 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport/cert" "github.com/google/s2a-go" - "github.com/google/s2a-go/fallback" "google.golang.org/grpc/credentials" ) @@ -170,18 +169,9 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (*GRPCTransportCredentials, return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil } - var fallbackOpts *s2a.FallbackOptions - // In case of S2A failure, fall back to the endpoint that would've been used without S2A. - if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { - fallbackOpts = &s2a.FallbackOptions{ - FallbackClientHandshakeFunc: fallbackHandshake, - } - } - s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ S2AAddress: s2aAddr, TransportCreds: transportCredsForS2A, - FallbackOpts: fallbackOpts, }) if err != nil { // Use default if we cannot initialize S2A client transport credentials. @@ -218,23 +208,9 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, return config.clientCertSource, nil, nil } - var fallbackOpts *s2a.FallbackOptions - // In case of S2A failure, fall back to the endpoint that would've been used without S2A. 
- if fallbackURL, err := url.Parse(config.endpoint); err == nil { - if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { - fallbackOpts = &s2a.FallbackOptions{ - FallbackDialer: &s2a.FallbackDialer{ - Dialer: fallbackDialer, - ServerAddr: fallbackServerAddr, - }, - } - } - } - dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ S2AAddress: s2aAddr, TransportCreds: transportCredsForS2A, - FallbackOpts: fallbackOpts, }) return nil, dialTLSContextFunc, nil } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index 347aaced721..b2a3be23c74 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -37,6 +37,36 @@ type certificateConfig struct { CertConfigs certConfigs `json:"cert_configs"` } +// getconfigFilePath determines the path to the certificate configuration file. +// It first checks for the presence of an environment variable that specifies +// the file path. If the environment variable is not set, it falls back to +// a default configuration file path. +func getconfigFilePath() string { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + return envFilePath + } + return util.GetDefaultConfigFilePath() + +} + +// GetCertificatePath retrieves the certificate file path from the provided +// configuration file. If the configFilePath is empty, it attempts to load +// the configuration from a well-known gcloud location. +// This function is exposed to allow other packages, such as the +// externalaccount package, to retrieve the certificate path without needing +// to load the entire certificate configuration. +func GetCertificatePath(configFilePath string) (string, error) { + if configFilePath == "" { + configFilePath = getconfigFilePath() + } + certFile, _, err := getCertAndKeyFiles(configFilePath) + if err != nil { + return "", err + } + return certFile, nil +} + // NewWorkloadX509CertProvider creates a certificate source // that reads a certificate and private key file from the local file system. // This is intended to be used for workload identity federation. @@ -47,14 +77,8 @@ type certificateConfig struct { // a well-known gcloud location. 
func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) { if configFilePath == "" { - envFilePath := util.GetConfigFilePathFromEnv() - if envFilePath != "" { - configFilePath = envFilePath - } else { - configFilePath = util.GetDefaultConfigFilePath() - } + configFilePath = getconfigFilePath() } - certFile, keyFile, err := getCertAndKeyFiles(configFilePath) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 6bfd910506e..7839f3b8951 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,50 @@ # Changes +## [1.5.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.5.1...iam/v1.5.2) (2025-04-15) + + +### Bug Fixes + +* **iam:** Update google.golang.org/api to 0.229.0 ([3319672](https://github.com/googleapis/google-cloud-go/commit/3319672f3dba84a7150772ccb5433e02dab7e201)) + +## [1.5.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.5.0...iam/v1.5.1) (2025-04-15) + + +### Documentation + +* **iam:** Formatting update for ListPolicyBindingsRequest ([dfdf404](https://github.com/googleapis/google-cloud-go/commit/dfdf404138728724aa6305c5c465ecc6fe5b1264)) +* **iam:** Minor doc update for ListPrincipalAccessBoundaryPoliciesResponse ([20f762c](https://github.com/googleapis/google-cloud-go/commit/20f762c528726a3f038d3e1f37e8a4952118badf)) +* **iam:** Minor doc update for ListPrincipalAccessBoundaryPoliciesResponse ([20f762c](https://github.com/googleapis/google-cloud-go/commit/20f762c528726a3f038d3e1f37e8a4952118badf)) + +## [1.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.2...iam/v1.5.0) (2025-03-31) + + +### Features + +* **iam:** New client(s) ([#11933](https://github.com/googleapis/google-cloud-go/issues/11933)) ([d5cb2e5](https://github.com/googleapis/google-cloud-go/commit/d5cb2e58334c6963cc46885f565fe3b19c52cb63)) + +## [1.4.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.1...iam/v1.4.2) (2025-03-13) + + +### Bug Fixes + +* **iam:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd)) + +## [1.4.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.0...iam/v1.4.1) (2025-03-06) + + +### Bug Fixes + +* **iam:** Fix out-of-sync version.go ([28f0030](https://github.com/googleapis/google-cloud-go/commit/28f00304ebb13abfd0da2f45b9b79de093cca1ec)) + +## [1.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.1...iam/v1.4.0) (2025-02-12) + + +### Features + +* **iam/admin:** Regenerate client ([#11570](https://github.com/googleapis/google-cloud-go/issues/11570)) ([eab87d7](https://github.com/googleapis/google-cloud-go/commit/eab87d73bea884c636ec88f03b9aa90102a2833f)), refs [#8219](https://github.com/googleapis/google-cloud-go/issues/8219) + ## [1.3.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.0...iam/v1.3.1) (2025-01-02) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index f975d76191b..2b57ae3b82d 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
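For reference, a unit-test-style sketch of how the new JSONSchemaWriter in tools/doc-generator/json_schema_writer.go (earlier in this diff) is driven. This is a hypothetical example: the configBlock/configEntry literals use only field names that the writer itself accesses above, and the real structs defined elsewhere in doc-generator may carry more fields.

// Hypothetical example, compiled inside the doc-generator main package ("os" imported).
func ExampleJSONSchemaWriter() {
	blocks := []*configBlock{{
		// The unnamed root block: its entries become top-level properties.
		name: "",
		entries: []*configEntry{{
			kind:         "field",
			name:         "target",
			fieldType:    "string",
			fieldDefault: "all",
			fieldFlag:    "target",
			fieldDesc:    "Comma-separated list of Cortex modules to load.",
		}},
	}}
	// Emits a draft 2020-12 schema whose "properties" contain
	// {"target": {"type": "string", "default": "all", "x-cli-flag": "target", ...}}.
	_ = NewJSONSchemaWriter(os.Stdout).WriteSchema(blocks)
}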
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 0c82db752bd..745de05ba25 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index a2e42f87869..0eba150896b 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go index 361d79752ad..c3339e26c45 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index b1a50e87388..d72e823299d 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -959,16 +959,6 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/dataform/apiv1alpha2": { - "api_shortname": "dataform", - "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", - "description": "Dataform API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/dataform/apiv1beta1": { "api_shortname": "dataform", "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", @@ -1299,6 +1289,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/financialservices/apiv1": { + "api_shortname": "financialservices", + "distribution_name": "cloud.google.com/go/financialservices/apiv1", + "description": "Financial Services API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/financialservices/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/firestore": { "api_shortname": "firestore", "distribution_name": "cloud.google.com/go/firestore", @@ -1789,6 +1789,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/modelarmor/apiv1": { + "api_shortname": "modelarmor", + "distribution_name": "cloud.google.com/go/modelarmor/apiv1", + "description": "Model Armor API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/modelarmor/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/monitoring/apiv3/v2": { "api_shortname": "monitoring", "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", @@ -2269,16 +2279,6 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/resourcesettings/apiv1": { - "api_shortname": "resourcesettings", - "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", - "description": "Resource Settings API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1", - "release_level": "stable", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/retail/apiv2": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2", diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go index 222e1d170a1..24ca1414bb3 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go index 02103f8cd49..ba0c4f65f2c 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go index e301262a2fa..81b8c8f5e46 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go index 0dbf58e4351..0c3ac5a1c8a 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go index 11d1a62d35b..c35046ac71c 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go index 3cfa112bb45..fbdf9ef54f1 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go index 1961a1e3a5c..ae7eea5b6fa 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go index 9e7cbcdd2f1..39b9595241b 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go index 5fd4f338075..e03d89efe4d 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go index 48d69d1431d..0d5cacbecb0 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go index 9ae6580b1b4..fd0230036da 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go index b1f18a6d253..6402f18ca11 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go index aa462351d7c..a9d2ae8cb67 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go index 01520d88a2c..08c2e08e264 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go index ef7fbded0c5..861e045f2d4 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go index bfe661ea702..c562d60bcc7 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go index 3555d6e0a1c..23f42835f14 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go index 7e122ade520..f303ac25156 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go index d2958b86589..9ea159bbd2d 100644 --- a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go +++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go index 291a237fe1c..e199c1168a1 100644 --- a/vendor/cloud.google.com/go/monitoring/internal/version.go +++ b/vendor/cloud.google.com/go/monitoring/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.24.0" +const Version = "1.24.2" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 926ed3882cd..d99d530934b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,12 +1,18 @@ # Release History +## 1.18.1 (2025-07-10) + +### Bugs Fixed + +* Fixed incorrect request/response logging try info when logging a request that's being retried. 
+* Fixed a data race in `ResourceID.String()` + ## 1.18.0 (2025-04-03) ### Features Added * Added `AccessToken.RefreshOn` and updated `BearerTokenPolicy` to consider nonzero values of it when deciding whether to request a new token - ## 1.17.1 (2025-03-20) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index d9a4e36dccb..a08d3d0ffa6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -27,7 +27,8 @@ var RootResourceID = &ResourceID{ } // ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`. -// Don't create this type directly, use ParseResourceID instead. +// Don't create this type directly, use [ParseResourceID] instead. Fields are considered immutable and shouldn't be +// modified after creation. type ResourceID struct { // Parent is the parent ResourceID of this instance. // Can be nil if there is no parent. @@ -85,28 +86,6 @@ func ParseResourceID(id string) (*ResourceID, error) { // String returns the string of the ResourceID func (id *ResourceID) String() string { - if len(id.stringValue) > 0 { - return id.stringValue - } - - if id.Parent == nil { - return "" - } - - builder := strings.Builder{} - builder.WriteString(id.Parent.String()) - - if id.isChild { - builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType())) - if len(id.Name) > 0 { - builder.WriteString(fmt.Sprintf("/%s", id.Name)) - } - } else { - builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name)) - } - - id.stringValue = builder.String() - return id.stringValue } @@ -185,6 +164,15 @@ func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name s id.isChild = isChild id.ResourceType = resourceType id.Name = name + id.stringValue = id.Parent.String() + if id.isChild { + id.stringValue += "/" + id.ResourceType.lastType() + if id.Name != "" { + id.stringValue += "/" + id.Name + } + } else { + id.stringValue += fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name) + } } func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml index 99348527b54..b81b6210384 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -27,3 +27,5 @@ extends: template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: ServiceDirectory: azcore + TriggeringPaths: + - /eng/ diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index e3e2d4e588a..9b3f5badb5e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -71,7 +71,8 @@ func (ov opValues) get(value any) bool { // NewRequestFromRequest creates a new policy.Request with an existing *http.Request // Exported as runtime.NewRequestFromRequest(). 
func NewRequestFromRequest(req *http.Request) (*Request, error) { - policyReq := &Request{req: req} + // populate values so that the same instance is propagated across policies + policyReq := &Request{req: req, values: opValues{}} if req.Body != nil { // we can avoid a body copy here if the underlying stream is already a @@ -117,7 +118,8 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Reque if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) } - return &Request{req: req}, nil + // populate values so that the same instance is propagated across policies + return &Request{req: req, values: opValues{}}, nil } // Body returns the original body specified when the Request was created. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 85514db3b84..23788b14d92 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.18.0" + Version = "v1.18.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index bb37a5efb4e..368a2199e08 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -103,7 +103,7 @@ type RetryOptions struct { // RetryDelay specifies the initial amount of delay to use before retrying an operation. // The value is used only if the HTTP response does not contain a Retry-After header. // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. - // The default value is four seconds. A value less than zero means no delay between retries. + // The default value is 800 milliseconds. A value less than zero means no delay between retries. RetryDelay time.Duration // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index f5bd8586b9d..84e7941e4f3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,10 @@ # Release History +## 1.10.1 (2025-06-10) + +### Bugs Fixed +- `AzureCLICredential` and `AzureDeveloperCLICredential` could wait indefinitely for subprocess output + ## 1.10.0 (2025-05-14) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index 2bda7f2a7f8..da2094e36b1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -27,6 +27,7 @@ Persistent caches are encrypted at rest using a mechanism that depends on the op | Linux | kernel key retention service (keyctl) | Cache data is lost on system shutdown because kernel keys are stored in memory. 
Depending on kernel compile options, data may also be lost on logout, or storage may be impossible because the key retention service isn't available. | | macOS | Keychain | Building requires cgo and native build tools. Keychain access requires a graphical session, so persistent caching isn't possible in a headless environment such as an SSH session (macOS as host). | | Windows | Data Protection API (DPAPI) | No specific limitations. | + Persistent caching requires encryption. When the required encryption facility is unusable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example]. ### Credentials supporting token caching diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 10a4009c376..91f4f05cc0c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -219,7 +219,7 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul | Error Message |Description| Mitigation | |---|---|---| -|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
  • If your application runs on Azure Kubernetes Servide (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
  • If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions` +|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
    • If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
    • If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions` ## Troubleshoot AzurePipelinesCredential authentication issues diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index 36e359a099e..0fd03f45634 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -148,8 +148,14 @@ var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes [] cliCmd.Env = os.Environ() var stderr bytes.Buffer cliCmd.Stderr = &stderr + cliCmd.WaitDelay = 100 * time.Millisecond - output, err := cliCmd.Output() + stdout, err := cliCmd.Output() + if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 { + // The child process wrote to stdout and exited without closing it. + // Swallow this error and return stdout because it may contain a token. + return stdout, nil + } if err != nil { msg := stderr.String() var exErr *exec.ExitError @@ -162,7 +168,7 @@ var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes [] return nil, newCredentialUnavailableError(credNameAzureCLI, msg) } - return output, nil + return stdout, nil } func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go index 46d0b551922..1bd3720b649 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -130,7 +130,14 @@ var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes cliCmd.Env = os.Environ() var stderr bytes.Buffer cliCmd.Stderr = &stderr - output, err := cliCmd.Output() + cliCmd.WaitDelay = 100 * time.Millisecond + + stdout, err := cliCmd.Output() + if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 { + // The child process wrote to stdout and exited without closing it. + // Swallow this error and return stdout because it may contain a token. + return stdout, nil + } if err != nil { msg := stderr.String() var exErr *exec.ExitError @@ -144,7 +151,7 @@ var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes } return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg) } - return output, nil + return stdout, nil } func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index e859fba3a00..2b767762fa8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. 
- version = "v1.10.0" + version = "v1.10.1" ) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index a015cc5b20c..7098087408f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -6,6 +6,7 @@ import ( smithybearer "github.com/aws/smithy-go/auth/bearer" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" ) // HTTPClient provides the interface to provide custom HTTPClients. Generally @@ -192,6 +193,9 @@ type Config struct { // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or // the shared config profile attribute "response_checksum_validation". ResponseChecksumValidation ResponseChecksumValidation + + // Registry of HTTP interceptors. + Interceptors smithyhttp.InterceptorRegistry } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 8e930fc6f87..af3a23a5253 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.36.3" +const goModuleVersion = "1.37.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go index 66aa2bd6ab0..32875e07798 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go @@ -59,7 +59,7 @@ func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte prevSignature := s.prevSignature - st := v4Internal.NewSigningTime(signingTime) + st := v4Internal.NewSigningTime(signingTime.UTC()) sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index eae3e16af7d..b604152d5fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,21 @@ +# v1.4.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.37 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.36 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.35 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.34 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index eddabe6344c..c2c39f91937 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.34" +const goModuleVersion = "1.4.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 5f0779997de..619c1f5d8f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -11,7 +11,7 @@ func GetPartition(region string) *PartitionConfig { var partitions = []Partition{ { ID: "aws", - RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + RegionRegex: "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ Name: "aws", DnsSuffix: "amazonaws.com", @@ -35,6 +35,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-east-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "ap-northeast-1": { Name: nil, DnsSuffix: nil, @@ -98,6 +105,20 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-southeast-5": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-7": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "aws-global": { Name: nil, DnsSuffix: nil, @@ -196,6 +217,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "mx-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "sa-east-1": { Name: nil, DnsSuffix: nil, @@ -378,6 +406,13 @@ var partitions = []Partition{ ImplicitGlobalRegion: "eu-isoe-west-1", }, Regions: map[string]RegionOverrides{ + "aws-iso-e-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "eu-isoe-west-1": { Name: nil, DnsSuffix: nil, @@ -398,6 +433,49 @@ var partitions = []Partition{ SupportsDualStack: false, ImplicitGlobalRegion: "us-isof-south-1", }, - Regions: map[string]RegionOverrides{}, + Regions: map[string]RegionOverrides{ + "aws-iso-f-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isof-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-eusc", + RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-eusc", + DnsSuffix: "amazonaws.eu", + DualStackDnsSuffix: "amazonaws.eu", + 
SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "eusc-de-east-1", + }, + Regions: map[string]RegionOverrides{ + "eusc-de-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index e19224f1b86..456b07fca67 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -17,6 +17,9 @@ "ap-east-1" : { "description" : "Asia Pacific (Hong Kong)" }, + "ap-east-2" : { + "description" : "Asia Pacific (Taipei)" + }, "ap-northeast-1" : { "description" : "Asia Pacific (Tokyo)" }, @@ -208,6 +211,9 @@ }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", "regions" : { + "aws-iso-e-global" : { + "description" : "AWS ISOE (Europe) global region" + }, "eu-isoe-west-1" : { "description" : "EU ISOE West" } @@ -234,6 +240,22 @@ "description" : "US ISOF SOUTH" } } + }, { + "id" : "aws-eusc", + "outputs" : { + "dnsSuffix" : "amazonaws.eu", + "dualStackDnsSuffix" : "amazonaws.eu", + "implicitGlobalRegion" : "eusc-de-east-1", + "name" : "aws-eusc", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$", + "regions" : { + "eusc-de-east-1" : { + "description" : "EU (Germany)" + } + } } ], "version" : "1.1" } \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 83e5bd28a72..4760d92ef7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,21 @@ +# v2.7.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.37 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.36 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.35 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.34 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 735dba7ac79..056246dc4c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.34" +const goModuleVersion = "2.7.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index c81265a25df..32c9d515746 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.13.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. 
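The "HTTP interceptors" feature these changelog entries announce surfaces as the `Interceptors smithyhttp.InterceptorRegistry` field added to `aws.Config` earlier in this diff; the registry and hook interfaces are the new smithy-go files later in this diff. A hedged sketch of caller-side registration follows — the `traceInterceptor` type and its logging are illustrative only, and how generated service clients consume the registry is internal to the SDK:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// traceInterceptor is a hypothetical interceptor implementing two of the hooks.
type traceInterceptor struct{}

// BeforeTransmit runs inside the retry loop, right before each HTTP send.
func (traceInterceptor) BeforeTransmit(ctx context.Context, in *smithyhttp.InterceptorContext) error {
	log.Printf("sending %s %s", in.Request.Method, in.Request.URL)
	return nil
}

// AfterAttempt runs after every attempt. Per the field-availability notes on the
// interfaces, Response and Output may be unset, so guard before using them.
func (traceInterceptor) AfterAttempt(ctx context.Context, in *smithyhttp.InterceptorContext) error {
	status := 0
	if in.Response != nil { // set only if the HTTP request was actually transmitted
		status = in.Response.StatusCode
	}
	log.Printf("attempt done: status=%d haveOutput=%v", status, in.Output != nil)
	return nil
}

func main() {
	cfg := aws.Config{Region: "us-east-1"}
	cfg.Interceptors.AddBeforeTransmit(traceInterceptor{})
	cfg.Interceptors.AddAfterAttempt(traceInterceptor{})
	// Per the registry docs later in this diff, clients copy the registry on
	// each operation call, so per-operation additions don't leak across calls.
	log.Printf("registered %d BeforeTransmit hook(s)", len(cfg.Interceptors.BeforeTransmit))
}
```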
+ +# v1.12.4 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. + # v1.12.3 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index d83e533effd..f4b9f0b9488 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.3" +const goModuleVersion = "1.13.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 2b5ceb4b512..869246098ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,21 @@ +# v1.13.0 (2025-07-28) + +* **Feature**: Add support for HTTP interceptors. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.18 (2025-07-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.17 (2025-06-17) + +* **Dependency Update**: Update to smithy-go v1.22.4. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.16 (2025-06-10) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.15 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index a165a100f8d..beae329a8f3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.15" +const goModuleVersion = "1.13.0" diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 4df632dce80..1d60def6d1b 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,13 +1,33 @@ -# Release (2025-02-17) +# Release (2025-07-24) ## General Highlights * **Dependency Update**: Updated to the latest SDK module versions ## Module Highlights -* `github.com/aws/smithy-go`: v1.22.3 +* `github.com/aws/smithy-go`: v1.22.5 + * **Bug Fix**: Fix HTTP metrics data race. + * **Bug Fix**: Replace usages of deprecated ioutil package. + +# Release (2025-06-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.4 + * **Bug Fix**: Fix CBOR serd empty check for string and enum fields * **Bug Fix**: Fix HTTP metrics data race. * **Bug Fix**: Replace usages of deprecated ioutil package. +# Release (2025-02-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.3 + * **Dependency Update**: Bump minimum Go version to 1.22 per our language support policy. 
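The recurring "Replace usages of deprecated ioutil package" bullets refer to the standard Go 1.16+ migration from `io/ioutil` to its `io`/`os` replacements. A minimal sketch of the substitutions involved, with illustrative reader and directory names:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// io.ReadAll replaces ioutil.ReadAll.
	data, err := io.ReadAll(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	// os.MkdirTemp replaces ioutil.TempDir (and os.WriteFile replaces ioutil.WriteFile).
	dir, err := os.MkdirTemp("", "example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fmt.Println(string(data), dir)
}
```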
+ # Release (2025-01-21) ## General Highlights diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile index a3c2cf173de..34b17ab2fe0 100644 --- a/vendor/github.com/aws/smithy-go/Makefile +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -30,6 +30,24 @@ smithy-build: smithy-clean: cd codegen && ./gradlew clean +GRADLE_RETRIES := 3 +GRADLE_SLEEP := 2 + +# We're making a call to ./gradlew to trigger downloading Gradle and +# starting the daemon. Any call works, so using `./gradlew help` +ensure-gradle-up: + @cd codegen && for i in $(shell seq 1 $(GRADLE_RETRIES)); do \ + echo "Checking if Gradle daemon is up, attempt $$i..."; \ + if ./gradlew help; then \ + echo "Gradle daemon is up!"; \ + exit 0; \ + fi; \ + echo "Failed to start Gradle, retrying in $(GRADLE_SLEEP) seconds..."; \ + sleep $(GRADLE_SLEEP); \ + done; \ + echo "Failed to start Gradle after $(GRADLE_RETRIES) attempts."; \ + exit 1 + ################## # Linting/Verify # ################## @@ -51,12 +69,10 @@ cover: .PHONY: unit unit-race unit-test unit-race-test unit: verify - go vet ${BUILD_TAGS} --all ./... && \ go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ go test -timeout=1m ${UNIT_TEST_TAGS} ./... unit-race: verify - go vet ${BUILD_TAGS} --all ./... && \ go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index 08df74589a8..c9ba5ea5e4b 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -4,7 +4,7 @@ [Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. -The smithy-go runtime requires a minimum version of Go 1.20. +The smithy-go runtime requires a minimum version of Go 1.22. **WARNING: All interfaces are subject to change.** @@ -77,7 +77,7 @@ example created from `smithy init`: "service": "example.weather#Weather", "module": "github.com/example/weather", "generateGoMod": true, - "goDirective": "1.20" + "goDirective": "1.22" } } } diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go index a9352839748..f778272be30 100644 --- a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go +++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go @@ -9,7 +9,7 @@ import ( // Endpoint is the endpoint object returned by Endpoint resolution V2 type Endpoint struct { - // The complete URL minimally specfiying the scheme and host. + // The complete URL minimally specifying the scheme and host. // May optionally specify the port and base path component. 
URI url.URL diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index d12d95891d2..cbbaabee9ef 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.3" +const goModuleVersion = "1.22.5" diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml index 9d94b7cbd0a..aac582fa2ce 100644 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ b/vendor/github.com/aws/smithy-go/modman.toml @@ -1,5 +1,4 @@ [dependencies] - "github.com/jmespath/go-jmespath" = "v0.4.0" [modules] diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go new file mode 100644 index 00000000000..e21f2632a6e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go @@ -0,0 +1,321 @@ +package http + +import ( + "context" +) + +func icopy[T any](v []T) []T { + s := make([]T, len(v)) + copy(s, v) + return s +} + +// InterceptorContext is all the information available in different +// interceptors. +// +// Not all information is available in each interceptor, see each interface +// definition for more details. +type InterceptorContext struct { + Input any + Request *Request + + Output any + Response *Response +} + +// InterceptorRegistry holds a list of operation interceptors. +// +// Interceptors allow callers to insert custom behavior at well-defined points +// within a client's operation lifecycle. +// +// # Interceptor context +// +// All interceptors are invoked with a context object that contains input and +// output containers for the operation. The individual fields that are +// available will depend on what the interceptor is and, in certain +// interceptors, how far the operation was able to progress. See the +// documentation for each interface definition for more information about field +// availability. +// +// Implementations MUST NOT directly mutate the values of the fields in the +// interceptor context. They are free to mutate the existing values _pointed +// to_ by those fields, however. +// +// # Returning errors +// +// All interceptors can return errors. If an interceptor returns an error +// _before_ the client's retry loop, the operation will fail immediately. If +// one returns an error _within_ the retry loop, the error WILL be considered +// according to the client's retry policy. +// +// # Adding interceptors +// +// Idiomatically you will simply use one of the Add() receiver methods to +// register interceptors as desired. However, the list for each interface is +// exported on the registry struct and the caller is free to manipulate it +// directly, for example, to register a number of interceptors all at once, or +// to remove one that was previously registered. +// +// The base SDK client WILL NOT add any interceptors. SDK operations and +// customizations are implemented in terms of middleware. +// +// Modifications to the registry will not persist across operation calls when +// using per-operation functional options. This means you can register +// interceptors on a per-operation basis without affecting other operations. 
+type InterceptorRegistry struct { + BeforeExecution []BeforeExecutionInterceptor + BeforeSerialization []BeforeSerializationInterceptor + AfterSerialization []AfterSerializationInterceptor + BeforeRetryLoop []BeforeRetryLoopInterceptor + BeforeAttempt []BeforeAttemptInterceptor + BeforeSigning []BeforeSigningInterceptor + AfterSigning []AfterSigningInterceptor + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor + BeforeDeserialization []BeforeDeserializationInterceptor + AfterDeserialization []AfterDeserializationInterceptor + AfterAttempt []AfterAttemptInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// Copy returns a deep copy of the registry. This is used by SDK clients on +// each operation call in order to prevent per-op config mutation from +// persisting. +func (i *InterceptorRegistry) Copy() InterceptorRegistry { + return InterceptorRegistry{ + BeforeExecution: icopy(i.BeforeExecution), + BeforeSerialization: icopy(i.BeforeSerialization), + AfterSerialization: icopy(i.AfterSerialization), + BeforeRetryLoop: icopy(i.BeforeRetryLoop), + BeforeAttempt: icopy(i.BeforeAttempt), + BeforeSigning: icopy(i.BeforeSigning), + AfterSigning: icopy(i.AfterSigning), + BeforeTransmit: icopy(i.BeforeTransmit), + AfterTransmit: icopy(i.AfterTransmit), + BeforeDeserialization: icopy(i.BeforeDeserialization), + AfterDeserialization: icopy(i.AfterDeserialization), + AfterAttempt: icopy(i.AfterAttempt), + AfterExecution: icopy(i.AfterExecution), + } +} + +// AddBeforeExecution registers the provided BeforeExecutionInterceptor. +func (i *InterceptorRegistry) AddBeforeExecution(v BeforeExecutionInterceptor) { + i.BeforeExecution = append(i.BeforeExecution, v) +} + +// AddBeforeSerialization registers the provided BeforeSerializationInterceptor. +func (i *InterceptorRegistry) AddBeforeSerialization(v BeforeSerializationInterceptor) { + i.BeforeSerialization = append(i.BeforeSerialization, v) +} + +// AddAfterSerialization registers the provided AfterSerializationInterceptor. +func (i *InterceptorRegistry) AddAfterSerialization(v AfterSerializationInterceptor) { + i.AfterSerialization = append(i.AfterSerialization, v) +} + +// AddBeforeRetryLoop registers the provided BeforeRetryLoopInterceptor. +func (i *InterceptorRegistry) AddBeforeRetryLoop(v BeforeRetryLoopInterceptor) { + i.BeforeRetryLoop = append(i.BeforeRetryLoop, v) +} + +// AddBeforeAttempt registers the provided BeforeAttemptInterceptor. +func (i *InterceptorRegistry) AddBeforeAttempt(v BeforeAttemptInterceptor) { + i.BeforeAttempt = append(i.BeforeAttempt, v) +} + +// AddBeforeSigning registers the provided BeforeSigningInterceptor. +func (i *InterceptorRegistry) AddBeforeSigning(v BeforeSigningInterceptor) { + i.BeforeSigning = append(i.BeforeSigning, v) +} + +// AddAfterSigning registers the provided AfterSigningInterceptor. +func (i *InterceptorRegistry) AddAfterSigning(v AfterSigningInterceptor) { + i.AfterSigning = append(i.AfterSigning, v) +} + +// AddBeforeTransmit registers the provided BeforeTransmitInterceptor. +func (i *InterceptorRegistry) AddBeforeTransmit(v BeforeTransmitInterceptor) { + i.BeforeTransmit = append(i.BeforeTransmit, v) +} + +// AddAfterTransmit registers the provided AfterTransmitInterceptor. +func (i *InterceptorRegistry) AddAfterTransmit(v AfterTransmitInterceptor) { + i.AfterTransmit = append(i.AfterTransmit, v) +} + +// AddBeforeDeserialization registers the provided BeforeDeserializationInterceptor. 
+func (i *InterceptorRegistry) AddBeforeDeserialization(v BeforeDeserializationInterceptor) { + i.BeforeDeserialization = append(i.BeforeDeserialization, v) +} + +// AddAfterDeserialization registers the provided AfterDeserializationInterceptor. +func (i *InterceptorRegistry) AddAfterDeserialization(v AfterDeserializationInterceptor) { + i.AfterDeserialization = append(i.AfterDeserialization, v) +} + +// AddAfterAttempt registers the provided AfterAttemptInterceptor. +func (i *InterceptorRegistry) AddAfterAttempt(v AfterAttemptInterceptor) { + i.AfterAttempt = append(i.AfterAttempt, v) +} + +// AddAfterExecution registers the provided AfterExecutionInterceptor. +func (i *InterceptorRegistry) AddAfterExecution(v AfterExecutionInterceptor) { + i.AfterExecution = append(i.AfterExecution, v) +} + +// BeforeExecutionInterceptor runs before anything else in the operation +// lifecycle. +// +// Available InterceptorContext fields: +// - Input +type BeforeExecutionInterceptor interface { + BeforeExecution(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSerializationInterceptor runs before the operation input is serialized +// into its transport request. +// +// Serialization occurs before the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +type BeforeSerializationInterceptor interface { + BeforeSerialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterSerializationInterceptor runs after the operation input is serialized +// into its transport request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSerializationInterceptor interface { + AfterSerialization(ctx context.Context, in *InterceptorContext) error +} + +// BeforeRetryLoopInterceptor runs right before the operation enters the retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeRetryLoopInterceptor interface { + BeforeRetryLoop(ctx context.Context, in *InterceptorContext) error +} + +// BeforeAttemptInterceptor runs right before every attempt in the retry loop. +// +// If this interceptor returns an error, AfterAttempt interceptors WILL NOT be +// invoked. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeAttemptInterceptor interface { + BeforeAttempt(ctx context.Context, in *InterceptorContext) error +} + +// BeforeSigningInterceptor runs right before the request is signed. +// +// Signing occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeSigningInterceptor interface { + BeforeSigning(ctx context.Context, in *InterceptorContext) error +} + +// AfterSigningInterceptor runs right after the request is signed. +// +// It is unsafe to modify the outgoing HTTP request at or past this hook, since +// doing so may invalidate the signature of the request. +// +// Available InterceptorContext fields: +// - Input +// - Request +type AfterSigningInterceptor interface { + AfterSigning(ctx context.Context, in *InterceptorContext) error +} + +// BeforeTransmitInterceptor runs right before the HTTP request is sent. +// +// HTTP transmit occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +type BeforeTransmitInterceptor interface { + BeforeTransmit(ctx context.Context, in *InterceptorContext) error +} + +// AfterTransmitInterceptor runs right after the HTTP response is received. 
+// +// It will always be invoked when a response is received, regardless of its +// status code. Conversely, it WILL NOT be invoked if the HTTP round-trip was +// not successful, e.g. because of a DNS resolution error +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type AfterTransmitInterceptor interface { + AfterTransmit(ctx context.Context, in *InterceptorContext) error +} + +// BeforeDeserializationInterceptor runs right before the incoming HTTP response +// is deserialized. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Deserialization occurs within the operation's retry loop. +// +// Available InterceptorContext fields: +// - Input +// - Request +// - Response +type BeforeDeserializationInterceptor interface { + BeforeDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterDeserializationInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error. +// +// This interceptor IS NOT invoked if the HTTP round-trip was not successful. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request +// - Response +type AfterDeserializationInterceptor interface { + AfterDeserialization(ctx context.Context, in *InterceptorContext) error +} + +// AfterAttemptInterceptor runs right after the incoming HTTP response +// is deserialized. This hook is invoked regardless of whether the deserialized +// result was an error, or if another interceptor within the retry loop +// returned an error. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterAttemptInterceptor interface { + AfterAttempt(ctx context.Context, in *InterceptorContext) error +} + +// AfterExecutionInterceptor runs after everything else. It runs regardless of +// how far the operation progressed in its lifecycle, and regardless of whether +// the operation succeeded or failed. +// +// Available InterceptorContext fields: +// - Input +// - Output (IF the operation had a success-level response) +// - Request (IF the operation did not return an error during serialization) +// - Response (IF the operation was able to transmit the HTTP request) +type AfterExecutionInterceptor interface { + AfterExecution(ctx context.Context, in *InterceptorContext) error +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go new file mode 100644 index 00000000000..2cc4b57f894 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go @@ -0,0 +1,325 @@ +package http + +import ( + "context" + "errors" + + "github.com/aws/smithy-go/middleware" +) + +type ictxKey struct{} + +func withIctx(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, ictxKey{}, &InterceptorContext{}) +} + +func getIctx(ctx context.Context) *InterceptorContext { + return middleware.GetStackValue(ctx, ictxKey{}).(*InterceptorContext) +} + +// InterceptExecution runs Before/AfterExecutionInterceptors. 
+type InterceptExecution struct { + BeforeExecution []BeforeExecutionInterceptor + AfterExecution []AfterExecutionInterceptor +} + +// ID identifies the middleware. +func (m *InterceptExecution) ID() string { + return "InterceptExecution" +} + +// HandleInitialize runs the interceptors. +func (m *InterceptExecution) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, md middleware.Metadata, err error, +) { + ctx = withIctx(ctx) + getIctx(ctx).Input = in.Parameters + + for _, i := range m.BeforeExecution { + if err := i.BeforeExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleInitialize(ctx, in) + + for _, i := range m.AfterExecution { + if err := i.AfterExecution(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeSerialization runs BeforeSerializationInterceptors. +type InterceptBeforeSerialization struct { + Interceptors []BeforeSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSerialization) ID() string { + return "InterceptBeforeSerialization" +} + +// HandleSerialize runs the interceptors. +func (m *InterceptBeforeSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptAfterSerialization runs AfterSerializationInterceptors. +type InterceptAfterSerialization struct { + Interceptors []AfterSerializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSerialization) ID() string { + return "InterceptAfterSerialization" +} + +// HandleSerialize runs the interceptors. +func (m *InterceptAfterSerialization) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, md middleware.Metadata, err error, +) { + getIctx(ctx).Request = in.Request.(*Request) + + for _, i := range m.Interceptors { + if err := i.AfterSerialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleSerialize(ctx, in) +} + +// InterceptBeforeRetryLoop runs BeforeRetryLoopInterceptors. +type InterceptBeforeRetryLoop struct { + Interceptors []BeforeRetryLoopInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeRetryLoop) ID() string { + return "InterceptBeforeRetryLoop" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptBeforeRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeRetryLoop(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptBeforeSigning runs BeforeSigningInterceptors. +type InterceptBeforeSigning struct { + Interceptors []BeforeSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeSigning) ID() string { + return "InterceptBeforeSigning" +} + +// HandleFinalize runs the interceptors. 
+func (m *InterceptBeforeSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.BeforeSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptAfterSigning runs AfterSigningInterceptors. +type InterceptAfterSigning struct { + Interceptors []AfterSigningInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterSigning) ID() string { + return "InterceptAfterSigning" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptAfterSigning) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.Interceptors { + if err := i.AfterSigning(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return next.HandleFinalize(ctx, in) +} + +// InterceptTransmit runs BeforeTransmitInterceptors and AfterTransmitInterceptors. +type InterceptTransmit struct { + BeforeTransmit []BeforeTransmitInterceptor + AfterTransmit []AfterTransmitInterceptor +} + +// ID identifies the middleware. +func (m *InterceptTransmit) ID() string { + return "InterceptTransmit" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptTransmit) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeTransmit { + if err := i.BeforeTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, md, err + } + + // the root of the decorated middleware guarantees this will be here + // (client.go: ClientHandler.Handle) + getIctx(ctx).Response = out.RawResponse.(*Response) + + for _, i := range m.AfterTransmit { + if err := i.AfterTransmit(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptBeforeDeserialization runs BeforeDeserializationInterceptors. +type InterceptBeforeDeserialization struct { + Interceptors []BeforeDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptBeforeDeserialization) ID() string { + return "InterceptBeforeDeserialization" +} + +// HandleDeserialize runs the interceptors. +func (m *InterceptBeforeDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + for _, i := range m.Interceptors { + if err := i.BeforeDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAfterDeserialization runs AfterDeserializationInterceptors. +type InterceptAfterDeserialization struct { + Interceptors []AfterDeserializationInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAfterDeserialization) ID() string { + return "InterceptAfterDeserialization" +} + +// HandleDeserialize runs the interceptors. 
+func (m *InterceptAfterDeserialization) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, md middleware.Metadata, err error, +) { + out, md, err = next.HandleDeserialize(ctx, in) + if err != nil { + var terr *RequestSendError + if errors.As(err, &terr) { + return out, md, err + } + } + + getIctx(ctx).Output = out.Result + + for _, i := range m.Interceptors { + if err := i.AfterDeserialization(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} + +// InterceptAttempt runs AfterAttemptInterceptors. +type InterceptAttempt struct { + BeforeAttempt []BeforeAttemptInterceptor + AfterAttempt []AfterAttemptInterceptor +} + +// ID identifies the middleware. +func (m *InterceptAttempt) ID() string { + return "InterceptAttempt" +} + +// HandleFinalize runs the interceptors. +func (m *InterceptAttempt) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, md middleware.Metadata, err error, +) { + for _, i := range m.BeforeAttempt { + if err := i.BeforeAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + out, md, err = next.HandleFinalize(ctx, in) + + for _, i := range m.AfterAttempt { + if err := i.AfterAttempt(ctx, getIctx(ctx)); err != nil { + return out, md, err + } + } + + return out, md, err +} diff --git a/vendor/github.com/go-chi/chi/v5/.gitignore b/vendor/github.com/go-chi/chi/v5/.gitignore new file mode 100644 index 00000000000..ba22c99a99e --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/.gitignore @@ -0,0 +1,3 @@ +.idea +*.sw? +.vscode diff --git a/vendor/github.com/go-chi/chi/v5/CHANGELOG.md b/vendor/github.com/go-chi/chi/v5/CHANGELOG.md new file mode 100644 index 00000000000..25b45b97430 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/CHANGELOG.md @@ -0,0 +1,341 @@ +# Changelog + +## v5.0.12 (2024-02-16) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.11...v5.0.12 + + +## v5.0.11 (2023-12-19) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.10...v5.0.11 + + +## v5.0.10 (2023-07-13) + +- Fixed small edge case in tests of v5.0.9 for older Go versions +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.9...v5.0.10 + + +## v5.0.9 (2023-07-13) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.8...v5.0.9 + + +## v5.0.8 (2022-12-07) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.7...v5.0.8 + + +## v5.0.7 (2021-11-18) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.6...v5.0.7 + + +## v5.0.6 (2021-11-15) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.5...v5.0.6 + + +## v5.0.5 (2021-10-27) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.4...v5.0.5 + + +## v5.0.4 (2021-08-29) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.3...v5.0.4 + + +## v5.0.3 (2021-04-29) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.2...v5.0.3 + + +## v5.0.2 (2021-03-25) + +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.1...v5.0.2 + + +## v5.0.1 (2021-03-10) + +- Small improvements +- History of changes: see https://github.com/go-chi/chi/compare/v5.0.0...v5.0.1 + + +## v5.0.0 (2021-02-27) + +- chi v5, `github.com/go-chi/chi/v5` introduces the adoption of Go's SIV to adhere to the current state-of-the-tools in Go. 
+- chi v1.5.x did not work out as planned, as the Go tooling is too powerful and chi's adoption is too wide. + The most responsible thing to do for everyone's benefit is to just release v5 with SIV, so I present to you all, + chi v5 at `github.com/go-chi/chi/v5`. I hope someday the developer experience and ergonomics I've been seeking + will still come to fruition in some form, see https://github.com/golang/go/issues/44550 +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.4...v5.0.0 + + +## v1.5.4 (2021-02-27) + +- Undo prior retraction in v1.5.3 as we prepare for v5.0.0 release +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.3...v1.5.4 + + +## v1.5.3 (2021-02-21) + +- Update go.mod to go 1.16 with new retract directive marking all versions without prior go.mod support +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.2...v1.5.3 + + +## v1.5.2 (2021-02-10) + +- Reverting allocation optimization as a precaution as go test -race fails. +- Minor improvements, see history below +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.1...v1.5.2 + + +## v1.5.1 (2020-12-06) + +- Performance improvement: removing 1 allocation by foregoing context.WithValue, thank you @bouk for + your contribution (https://github.com/go-chi/chi/pull/555). Note: new benchmarks posted in README. +- `middleware.CleanPath`: new middleware that cleans request path of double slashes +- deprecate & remove `chi.ServerBaseContext` in favour of stdlib `http.Server#BaseContext` +- plus other tiny improvements, see full commit history below +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.2...v1.5.1 + + +## v1.5.0 (2020-11-12) - now with go.mod support + +`chi` dates back to 2016 with its original implementation as one of the first routers to adopt the newly introduced +context.Context api to the stdlib -- set out to design a router that is faster, more modular and simpler than anything +else out there -- while not introducing any custom handler types or dependencies. Today, `chi` still has zero dependencies, +and in many ways is future proofed from changes, given its minimal nature. Between versions, chi's iterations have been very +incremental, with the architecture and api being the same today as it was originally designed in 2016. For this reason it +makes chi a pretty easy project to maintain, as well thanks to the many amazing community contributions over the years +to who all help make chi better (total of 86 contributors to date -- thanks all!). + +Chi has been a labour of love, art and engineering, with the goals to offer beautiful ergonomics, flexibility, performance +and simplicity when building HTTP services with Go. I've strived to keep the router very minimal in surface area / code size, +and always improving the code wherever possible -- and as of today the `chi` package is just 1082 lines of code (not counting +middlewares, which are all optional). As well, I don't have the exact metrics, but from my analysis and email exchanges from +companies and developers, chi is used by thousands of projects around the world -- thank you all as there is no better form of +joy for me than to have art I had started be helpful and enjoyed by others. And of course I use chi in all of my own projects too :) + +For me, the aesthetics of chi's code and usage are very important.
With the introduction of Go's module support +(which I'm a big fan of), chi's past versioning scheme choice to v2, v3 and v4 would mean I'd require the import path +of "github.com/go-chi/chi/v4", leading to the lengthy discussion at https://github.com/go-chi/chi/issues/462. +Haha, to some, you may be scratching your head why I've spent > 1 year stalling to adopt "/vXX" convention in the import +path -- which isn't horrible in general -- but for chi, I'm unable to accept it as I strive for perfection in its API design, +aesthetics and simplicity. It just doesn't feel good to me given chi's simple nature -- I do not foresee a "v5" or "v6", +and upgrading between versions in the future will also be just incremental. + +I do understand versioning is a part of the API design as well, which is why the solution for a while has been to "do nothing", +as Go supports both old and new import paths with/out go.mod. However, now that Go module support has had time to iron out kinks and +is adopted everywhere, it's time for chi to get with the times. Luckily, I've discovered a path forward that will make me happy, +while also not breaking anyone's app who adopted a prior versioning from tags in v2/v3/v4. I've made an experimental release of +v1.5.0 with go.mod silently, and tested it with new and old projects, to ensure the developer experience is preserved, and it's +largely unnoticed. Fortunately, Go's toolchain will check the tags of a repo and consider the "latest" tag the one with go.mod. +However, you can still request a specific older tag such as v4.1.2, and everything will "just work". But new users can just +`go get github.com/go-chi/chi` or `go get github.com/go-chi/chi@latest` and they will get the latest version which contains +go.mod support, which is v1.5.0+. `chi` will not change very much over the years, just like it hasn't changed much from 4 years ago. +Therefore, we will stay on v1.x from here on, starting from v1.5.0. Any breaking changes will bump a "minor" release and +backwards-compatible improvements/fixes will bump a "tiny" release. + +For existing projects who want to upgrade to the latest go.mod version, run: `go get -u github.com/go-chi/chi@v1.5.0`, +which will get you on the go.mod version line (as Go's mod cache may still remember v4.x). Brand new systems can run +`go get -u github.com/go-chi/chi` or `go get -u github.com/go-chi/chi@latest` to install chi, which will install v1.5.0+ +built with go.mod support. + +My apologies to the developers who will disagree with the decisions above, but, hope you'll try it and see it's a very +minor request which is backwards compatible and won't break your existing installations. + +Cheers all, happy coding! + + +--- + + +## v4.1.2 (2020-06-02) + +- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution +- fix to replace nested wildcards correctly in RoutePattern, thank you @unmultimedio for your contribution +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2 + + +## v4.1.1 (2020-04-16) + +- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp + route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix!
+- new middleware.RouteHeaders as a simple router for request headers with wildcard support +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1 + + +## v4.1.0 (2020-04-1) + +- middleware.LogEntry: Write method on interface now passes the response header + and an extra interface type useful for custom logger implementations. +- middleware.WrapResponseWriter: minor fix +- middleware.Recoverer: a bit prettier +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0 + +## v4.0.4 (2020-03-24) + +- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496) +- a few minor improvements and fixes +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4 + + +## v4.0.3 (2020-01-09) + +- core: fix regexp routing to include default value when param is not matched +- middleware: rewrite of middleware.Compress +- middleware: suppress http.ErrAbortHandler in middleware.Recoverer +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3 + + +## v4.0.2 (2019-02-26) + +- Minor fixes +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2 + + +## v4.0.1 (2019-01-21) + +- Fixes issue with compress middleware: #382 #385 +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1 + + +## v4.0.0 (2019-01-10) + +- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8 +- router: respond with 404 on router with no routes (#362) +- router: additional check to ensure wildcard is at the end of a url pattern (#333) +- middleware: deprecate use of http.CloseNotifier (#347) +- middleware: fix RedirectSlashes to include query params on redirect (#334) +- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0 + + +## v3.3.4 (2019-01-07) + +- Minor middleware improvements. No changes to core library/router. Moving v3 into its +- own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11 +- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4 + + +## v3.3.3 (2018-08-27) + +- Minor release +- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3 + + +## v3.3.2 (2017-12-22) + +- Support to route trailing slashes on mounted sub-routers (#281) +- middleware: new `ContentCharset` to check matching charsets. Thank you + @csucu for your community contribution! + + +## v3.3.1 (2017-11-20) + +- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types +- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value +- Minor bug fixes + + +## v3.3.0 (2017-10-10) + +- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage +- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function + + +## v3.2.1 (2017-08-31) + +- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface + and `Mux`. 
+- Add new `RouteMethod` to `*Context`
+- Add new `Routes` pointer to `*Context`
+- Add new `middleware.GetHead` to route missing HEAD requests to GET handler
+- Updated benchmarks (see README)
+
+
+## v3.1.5 (2017-08-02)
+
+- Set up golint and go vet for the project
+- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler`
+  to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler`
+
+
+## v3.1.0 (2017-07-10)
+
+- Fix a few minor issues after the v3 release
+- Move `docgen` sub-pkg to https://github.com/go-chi/docgen
+- Move `render` sub-pkg to https://github.com/go-chi/render
+- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime
+  suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in
+  https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage.
+
+
+## v3.0.0 (2017-06-21)
+
+- Major update to the chi library with many exciting updates, but also some *breaking changes*
+- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as
+  `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the
+  same router
+- Support for regexps in routing patterns, in the form of `/{paramKey:regExp}`, for example:
+  `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")`
+- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as
+  `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like
+  in `_examples/custom-handler`
+- Deprecating the `mux#FileServer` helper function. Instead, we encourage users to create their
+  own file handler with the stdlib, see `_examples/fileserver` for an example
+- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()`
+- Moved the chi project to its own organization, to allow chi-related community packages to
+  be easily discovered and supported, at: https://github.com/go-chi
+- *NOTE:* please update your import paths to `"github.com/go-chi/chi"`
+- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2
+
+
+## v2.1.0 (2017-03-30)
+
+- Minor improvements and updates to the chi core library
+- Introduced a brand new `chi/render` sub-package to complete the story of building
+  APIs, offering a pattern for managing well-defined request / response payloads. Please
+  check out the updated `_examples/rest` example for how it works.
+- Added `MethodNotAllowed(h http.HandlerFunc)` to the chi.Router interface
+
+
+## v2.0.0 (2017-01-06)
+
+- After many months of v2 being in an RC state, with many companies and users running it in
+  production, and the inclusion of some improvements to the middlewares, we are very pleased to
+  announce v2.0.0 of chi.
+
+
+## v2.0.0-rc1 (2016-07-26)
+
+- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular
+  community `"net/context"` package has been included in the standard library as `"context"` and
+  utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other
+  request-scoped values. We're very excited about the new context addition and are proud to
+  introduce chi v2, a minimal and powerful routing package for building large HTTP services,
+  with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
+  stdlib HTTP handlers and middlewares.
+- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc` +- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()` +- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`, + which provides direct access to URL routing parameters, the routing path and the matching + routing patterns. +- Users upgrading from chi v1 to v2, need to: + 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to + the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)` + 2. Use `chi.URLParam(r *http.Request, paramKey string) string` + or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value + + +## v1.0.0 (2016-07-01) + +- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older. + + +## v0.9.0 (2016-03-31) + +- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33) +- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters + has changed to: `chi.URLParam(ctx, "id")` diff --git a/vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md new file mode 100644 index 00000000000..b4a6268d575 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing + +## Prerequisites + +1. [Install Go][go-install]. +2. Download the sources and switch the working directory: + + ```bash + go get -u -d github.com/go-chi/chi + cd $GOPATH/src/github.com/go-chi/chi + ``` + +## Submitting a Pull Request + +A typical workflow is: + +1. [Fork the repository.][fork] +2. [Create a topic branch.][branch] +3. Add tests for your change. +4. Run `go test`. If your tests pass, return to the step 3. +5. Implement the change and ensure the steps from the previous step pass. +6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline. +7. [Add, commit and push your changes.][git-help] +8. [Submit a pull request.][pull-req] + +[go-install]: https://golang.org/doc/install +[fork]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo +[branch]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-branches +[git-help]: https://docs.github.com/en +[pull-req]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests + diff --git a/vendor/github.com/go-chi/chi/v5/LICENSE b/vendor/github.com/go-chi/chi/v5/LICENSE new file mode 100644 index 00000000000..d99f02ffac5 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc. 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/go-chi/chi/v5/Makefile b/vendor/github.com/go-chi/chi/v5/Makefile new file mode 100644 index 00000000000..e0f18c7da21 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/Makefile @@ -0,0 +1,22 @@ +.PHONY: all +all: + @echo "**********************************************************" + @echo "** chi build tool **" + @echo "**********************************************************" + + +.PHONY: test +test: + go clean -testcache && $(MAKE) test-router && $(MAKE) test-middleware + +.PHONY: test-router +test-router: + go test -race -v . + +.PHONY: test-middleware +test-middleware: + go test -race -v ./middleware + +.PHONY: docs +docs: + npx docsify-cli serve ./docs diff --git a/vendor/github.com/go-chi/chi/v5/README.md b/vendor/github.com/go-chi/chi/v5/README.md new file mode 100644 index 00000000000..c58a0e20cea --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/README.md @@ -0,0 +1,505 @@ +# chi + + +[![GoDoc Widget]][GoDoc] + +`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's +especially good at helping you write large REST API services that are kept maintainable as your +project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to +handle signaling, cancelation and request-scoped values across a handler chain. + +The focus of the project has been to seek out an elegant and comfortable design for writing +REST API servers, written during the development of the Pressly API service that powers our +public API service, which in turn powers all of our client-side applications. + +The key considerations of chi's design are: project structure, maintainability, standard http +handlers (stdlib-only), developer productivity, and deconstructing a large system into many small +parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also +included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) +and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too! 
+ +## Install + +```sh +go get -u github.com/go-chi/chi/v5 +``` + + +## Features + +* **Lightweight** - cloc'd in ~1000 LOC for the chi router +* **Fast** - yes, see [benchmarks](#benchmarks) +* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http` +* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and sub-router mounting +* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts +* **Robust** - in production at Pressly, Cloudflare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91)) +* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown +* **Go.mod support** - as of v5, go.mod support (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md)) +* **No external dependencies** - plain ol' Go stdlib + net/http + + +## Examples + +See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples. + + +**As easy as:** + +```go +package main + +import ( + "net/http" + + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" +) + +func main() { + r := chi.NewRouter() + r.Use(middleware.Logger) + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("welcome")) + }) + http.ListenAndServe(":3000", r) +} +``` + +**REST Preview:** + +Here is a little preview of what routing looks like with chi. Also take a look at the generated routing docs +in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in +Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)). + +I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed +above, they will show you all the features of chi and serve as a good form of documentation. + +```go +import ( + //... + "context" + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" +) + +func main() { + r := chi.NewRouter() + + // A good base middleware stack + r.Use(middleware.RequestID) + r.Use(middleware.RealIP) + r.Use(middleware.Logger) + r.Use(middleware.Recoverer) + + // Set a timeout value on the request context (ctx), that will signal + // through ctx.Done() that the request has timed out and further + // processing should be stopped. 
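+	// Note: middleware.Timeout only signals via ctx.Done(); each handler is
+	// expected to watch the request context and stop its own work when the
+	// deadline fires.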
+ r.Use(middleware.Timeout(60 * time.Second)) + + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hi")) + }) + + // RESTy routes for "articles" resource + r.Route("/articles", func(r chi.Router) { + r.With(paginate).Get("/", listArticles) // GET /articles + r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017 + + r.Post("/", createArticle) // POST /articles + r.Get("/search", searchArticles) // GET /articles/search + + // Regexp url parameters: + r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto + + // Subrouters: + r.Route("/{articleID}", func(r chi.Router) { + r.Use(ArticleCtx) + r.Get("/", getArticle) // GET /articles/123 + r.Put("/", updateArticle) // PUT /articles/123 + r.Delete("/", deleteArticle) // DELETE /articles/123 + }) + }) + + // Mount the admin sub-router + r.Mount("/admin", adminRouter()) + + http.ListenAndServe(":3333", r) +} + +func ArticleCtx(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + articleID := chi.URLParam(r, "articleID") + article, err := dbGetArticle(articleID) + if err != nil { + http.Error(w, http.StatusText(404), 404) + return + } + ctx := context.WithValue(r.Context(), "article", article) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func getArticle(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + article, ok := ctx.Value("article").(*Article) + if !ok { + http.Error(w, http.StatusText(422), 422) + return + } + w.Write([]byte(fmt.Sprintf("title:%s", article.Title))) +} + +// A completely separate router for administrator routes +func adminRouter() http.Handler { + r := chi.NewRouter() + r.Use(AdminOnly) + r.Get("/", adminIndex) + r.Get("/accounts", adminListAccounts) + return r +} + +func AdminOnly(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + perm, ok := ctx.Value("acl.permission").(YourPermissionType) + if !ok || !perm.IsAdmin() { + http.Error(w, http.StatusText(403), 403) + return + } + next.ServeHTTP(w, r) + }) +} +``` + + +## Router interface + +chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree). +The router is fully compatible with `net/http`. + +Built on top of the tree is the `Router` interface: + +```go +// Router consisting of the core routing methods used by chi's Mux, +// using only the standard net/http. +type Router interface { + http.Handler + Routes + + // Use appends one or more middlewares onto the Router stack. + Use(middlewares ...func(http.Handler) http.Handler) + + // With adds inline middlewares for an endpoint handler. + With(middlewares ...func(http.Handler) http.Handler) Router + + // Group adds a new inline-Router along the current routing + // path, with a fresh middleware stack for the inline-Router. + Group(fn func(r Router)) Router + + // Route mounts a sub-Router along a `pattern` string. + Route(pattern string, fn func(r Router)) Router + + // Mount attaches another http.Handler along ./pattern/* + Mount(pattern string, h http.Handler) + + // Handle and HandleFunc adds routes for `pattern` that matches + // all HTTP methods. + Handle(pattern string, h http.Handler) + HandleFunc(pattern string, h http.HandlerFunc) + + // Method and MethodFunc adds routes for `pattern` that matches + // the `method` HTTP method. 
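+	// e.g. r.Method("GET", "/users/{userID}", userHandler), where userHandler
+	// is any http.Handler (a hypothetical name, shown for illustration).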
+ Method(method, pattern string, h http.Handler) + MethodFunc(method, pattern string, h http.HandlerFunc) + + // HTTP-method routing along `pattern` + Connect(pattern string, h http.HandlerFunc) + Delete(pattern string, h http.HandlerFunc) + Get(pattern string, h http.HandlerFunc) + Head(pattern string, h http.HandlerFunc) + Options(pattern string, h http.HandlerFunc) + Patch(pattern string, h http.HandlerFunc) + Post(pattern string, h http.HandlerFunc) + Put(pattern string, h http.HandlerFunc) + Trace(pattern string, h http.HandlerFunc) + + // NotFound defines a handler to respond whenever a route could + // not be found. + NotFound(h http.HandlerFunc) + + // MethodNotAllowed defines a handler to respond whenever a method is + // not allowed. + MethodNotAllowed(h http.HandlerFunc) +} + +// Routes interface adds two methods for router traversal, which is also +// used by the github.com/go-chi/docgen package to generate documentation for Routers. +type Routes interface { + // Routes returns the routing tree in an easily traversable structure. + Routes() []Route + + // Middlewares returns the list of middlewares in use by the router. + Middlewares() Middlewares + + // Match searches the routing tree for a handler that matches + // the method/path - similar to routing a http request, but without + // executing the handler thereafter. + Match(rctx *Context, method, path string) bool +} +``` + +Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern +supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters +can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters +and `chi.URLParam(r, "*")` for a wildcard parameter. + + +### Middleware handlers + +chi's middlewares are just stdlib net/http middleware handlers. There is nothing special +about them, which means the router and all the tooling is designed to be compatible and +friendly with any middleware in the community. This offers much better extensibility and reuse +of packages and is at the heart of chi's purpose. + +Here is an example of a standard net/http middleware where we assign a context key `"user"` +the value of `"123"`. This middleware sets a hypothetical user identifier on the request +context and calls the next handler in the chain. + +```go +// HTTP middleware setting a value on the request context +func MyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // create new context from `r` request context, and assign key `"user"` + // to value of `"123"` + ctx := context.WithValue(r.Context(), "user", "123") + + // call the next handler in the chain, passing the response writer and + // the updated request object with the new context value. + // + // note: context.Context values are nested, so any previously set + // values will be accessible as well, and the new `"user"` key + // will be accessible from this point forward. + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} +``` + + +### Request handlers + +chi uses standard net/http request handlers. This little snippet is an example of a http.Handler +func that reads a user identifier from the request context - hypothetically, identifying +the user sending an authenticated request, validated+set by a previous middleware handler. + +```go +// HTTP handler accessing data from the request context. 
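+// Note: the bare type assertion below panics if MyMiddleware did not run
+// first; the two-value form `user, ok := ...` is the safer pattern.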
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+	// here we read from the request context and fetch the `"user"` key set in
+	// the MyMiddleware example above.
+	user := r.Context().Value("user").(string)
+
+	// respond to the client
+	w.Write([]byte(fmt.Sprintf("hi %s", user)))
+}
+```
+
+
+### URL parameters
+
+chi's router parses and stores URL parameters right onto the request context. Here is
+an example of how to access URL params in your net/http handlers. And of course, middlewares
+are able to access the same information.
+
+```go
+// HTTP handler accessing the url routing parameters.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+	// fetch the url parameter `"userID"` from the request of a matching
+	// routing pattern. An example routing pattern could be: /users/{userID}
+	userID := chi.URLParam(r, "userID")
+
+	// fetch `"key"` from the request context
+	ctx := r.Context()
+	key := ctx.Value("key").(string)
+
+	// respond to the client
+	w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
+}
+```
+
+
+## Middlewares
+
+chi comes equipped with an optional `middleware` package, providing a suite of standard
+`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible
+with `net/http` can be used with chi's mux.
+
+### Core middlewares
+
+----------------------------------------------------------------------------------------------------
+| chi/middleware Handler | description                                                              |
+| :--------------------- | :----------------------------------------------------------------------- |
+| [AllowContentEncoding] | Enforces a whitelist of request Content-Encoding headers                 |
+| [AllowContentType]     | Explicit whitelist of accepted request Content-Types                     |
+| [BasicAuth]            | Basic HTTP authentication                                                |
+| [Compress]             | Gzip compression for clients that accept compressed responses            |
+| [ContentCharset]       | Ensure charset for Content-Type request headers                          |
+| [CleanPath]            | Clean double slashes from request path                                   |
+| [GetHead]              | Automatically route undefined HEAD requests to GET handlers              |
+| [Heartbeat]            | Monitoring endpoint to check the server's pulse                          |
+| [Logger]               | Logs the start and end of each request with the elapsed processing time  |
+| [NoCache]              | Sets response headers to prevent clients from caching                    |
+| [Profiler]             | Easily attach net/http/pprof to your routers                             |
+| [RealIP]               | Sets an http.Request's RemoteAddr to either X-Real-IP or X-Forwarded-For |
+| [Recoverer]            | Gracefully absorbs panics and prints the stack trace                     |
+| [RequestID]            | Injects a request ID into the context of each request                    |
+| [RedirectSlashes]      | Redirect slashes on routing paths                                        |
+| [RouteHeaders]         | Route handling for request headers                                       |
+| [SetHeader]            | Short-hand middleware to set a response header key/value                 |
+| [StripSlashes]         | Strip slashes on routing paths                                           |
+| [Sunset]               | Sets Deprecation/Sunset headers on the response                          |
+| [Throttle]             | Puts a ceiling on the number of concurrent requests                      |
+| [Timeout]              | Signals to the request context when the timeout deadline is reached      |
+| [URLFormat]            | Parse extension from url and put it on request context                   |
+| [WithValue]            | Short-hand middleware to set a key/value on the request context          |
+----------------------------------------------------------------------------------------------------
+
+[AllowContentEncoding]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentEncoding
+[AllowContentType]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentType
+[BasicAuth]: 
https://pkg.go.dev/github.com/go-chi/chi/middleware#BasicAuth +[Compress]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compress +[ContentCharset]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ContentCharset +[CleanPath]: https://pkg.go.dev/github.com/go-chi/chi/middleware#CleanPath +[GetHead]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetHead +[GetReqID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetReqID +[Heartbeat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Heartbeat +[Logger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Logger +[NoCache]: https://pkg.go.dev/github.com/go-chi/chi/middleware#NoCache +[Profiler]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Profiler +[RealIP]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RealIP +[Recoverer]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Recoverer +[RedirectSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RedirectSlashes +[RequestLogger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestLogger +[RequestID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestID +[RouteHeaders]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RouteHeaders +[SetHeader]: https://pkg.go.dev/github.com/go-chi/chi/middleware#SetHeader +[StripSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#StripSlashes +[Sunset]: https://pkg.go.dev/github.com/go-chi/chi/v5/middleware#Sunset +[Throttle]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Throttle +[ThrottleBacklog]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleBacklog +[ThrottleWithOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleWithOpts +[Timeout]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Timeout +[URLFormat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#URLFormat +[WithLogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithLogEntry +[WithValue]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithValue +[Compressor]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compressor +[DefaultLogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#DefaultLogFormatter +[EncoderFunc]: https://pkg.go.dev/github.com/go-chi/chi/middleware#EncoderFunc +[HeaderRoute]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRoute +[HeaderRouter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRouter +[LogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogEntry +[LogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogFormatter +[LoggerInterface]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LoggerInterface +[ThrottleOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleOpts +[WrapResponseWriter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WrapResponseWriter + +### Extra middlewares & packages + +Please see https://github.com/go-chi for additional packages. 
+
+--------------------------------------------------------------------------------------------------------------------
+| package                                            | description                                                   |
+|:---------------------------------------------------|:--------------------------------------------------------------|
+| [cors](https://github.com/go-chi/cors)             | Cross-origin resource sharing (CORS)                          |
+| [docgen](https://github.com/go-chi/docgen)         | Print chi.Router routes at runtime                            |
+| [jwtauth](https://github.com/go-chi/jwtauth)       | JWT authentication                                            |
+| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing                             |
+| [httplog](https://github.com/go-chi/httplog)       | Small but powerful structured HTTP request logging            |
+| [httprate](https://github.com/go-chi/httprate)     | HTTP request rate limiter                                     |
+| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library                      |
+| [httpvcr](https://github.com/go-chi/httpvcr)       | Write deterministic tests for external sources                |
+| [stampede](https://github.com/go-chi/stampede)     | HTTP request coalescer                                        |
+--------------------------------------------------------------------------------------------------------------------
+
+
+## context?
+
+`context` is a tiny pkg that provides a simple interface to signal context across call stacks
+and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
+and has been available in the stdlib since go1.7.
+
+Learn more at https://blog.golang.org/context
+
+and:
+* Docs: https://golang.org/pkg/context
+* Source: https://github.com/golang/go/tree/master/src/context
+
+
+## Benchmarks
+
+The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
+
+Results as of Nov 29, 2020 with Go 1.15.5 on Linux AMD 3950x
+
+```shell
+BenchmarkChi_Param            3075895       384 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_Param5           2116603       566 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_Param20           964117      1227 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_ParamWrite       2863413       420 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_GithubStatic     3045488       395 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_GithubParam      2204115       540 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_GithubAll          10000    113811 ns/op    81203 B/op    406 allocs/op
+BenchmarkChi_GPlusStatic      3337485       359 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_GPlusParam       2825853       423 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_GPlus2Params     2471697       483 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_GPlusAll          194220      5950 ns/op     5200 B/op     26 allocs/op
+BenchmarkChi_ParseStatic      3365324       356 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_ParseParam       2976614       404 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_Parse2Params     2638084       439 ns/op      400 B/op      2 allocs/op
+BenchmarkChi_ParseAll          109567     11295 ns/op    10400 B/op     52 allocs/op
+BenchmarkChi_StaticAll          16846     71308 ns/op    62802 B/op    314 allocs/op
+```
+
+Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
+
+NOTE: the allocs in the benchmark above are from the calls to http.Request's
+`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
+on the duplicated (alloc'd) request and returns the new request object. This is just
+how setting context on a request in Go works.
+
+
+## Credits
+
+* Carl Jackson for https://github.com/zenazn/goji
+  * Parts of chi's thinking come from goji, and chi's middleware package
+    sources from [goji](https://github.com/zenazn/goji/tree/master/web/middleware).
+ * Please see goji's [LICENSE](https://github.com/zenazn/goji/blob/master/LICENSE) (MIT) +* Armon Dadgar for https://github.com/armon/go-radix +* Contributions: [@VojtechVitek](https://github.com/VojtechVitek) + +We'll be more than happy to see [your contributions](./CONTRIBUTING.md)! + + +## Beyond REST + +chi is just a http router that lets you decompose request handling into many smaller layers. +Many companies use chi to write REST services for their public APIs. But, REST is just a convention +for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server +system or network of microservices. + +Looking beyond REST, I also recommend some newer works in the field: +* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen +* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs +* [graphql](https://github.com/99designs/gqlgen) - Declarative query language +* [NATS](https://nats.io) - lightweight pub-sub + + +## License + +Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka) + +Licensed under [MIT License](./LICENSE) + +[GoDoc]: https://pkg.go.dev/github.com/go-chi/chi/v5 +[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg +[Travis]: https://travis-ci.org/go-chi/chi +[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master diff --git a/vendor/github.com/go-chi/chi/v5/SECURITY.md b/vendor/github.com/go-chi/chi/v5/SECURITY.md new file mode 100644 index 00000000000..7e937f87f30 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/SECURITY.md @@ -0,0 +1,5 @@ +# Reporting Security Issues + +We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions. + +To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/go-chi/chi/security/advisories/new) tab. diff --git a/vendor/github.com/go-chi/chi/v5/chain.go b/vendor/github.com/go-chi/chi/v5/chain.go new file mode 100644 index 00000000000..a2278414f40 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/chain.go @@ -0,0 +1,49 @@ +package chi + +import "net/http" + +// Chain returns a Middlewares type from a slice of middleware handlers. +func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares { + return Middlewares(middlewares) +} + +// Handler builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) Handler(h http.Handler) http.Handler { + return &ChainHandler{h, chain(mws, h), mws} +} + +// HandlerFunc builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler { + return &ChainHandler{h, chain(mws, h), mws} +} + +// ChainHandler is a http.Handler with support for handler composition and +// execution. +type ChainHandler struct { + Endpoint http.Handler + chain http.Handler + Middlewares Middlewares +} + +func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + c.chain.ServeHTTP(w, r) +} + +// chain builds a http.Handler composed of an inline middleware stack and endpoint +// handler in the order they are passed. 
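+//
+// For example, given hypothetical middlewares m1 and m2,
+// chain([]func(http.Handler) http.Handler{m1, m2}, h) returns m1(m2(h)),
+// so m1 is the outermost wrapper and runs first on each request.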
+func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler { + // Return ahead of time if there aren't any middlewares for the chain + if len(middlewares) == 0 { + return endpoint + } + + // Wrap the end handler with the middleware chain + h := middlewares[len(middlewares)-1](endpoint) + for i := len(middlewares) - 2; i >= 0; i-- { + h = middlewares[i](h) + } + + return h +} diff --git a/vendor/github.com/go-chi/chi/v5/chi.go b/vendor/github.com/go-chi/chi/v5/chi.go new file mode 100644 index 00000000000..2b6ebd337c0 --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/chi.go @@ -0,0 +1,137 @@ +// Package chi is a small, idiomatic and composable router for building HTTP services. +// +// chi requires Go 1.14 or newer. +// +// Example: +// +// package main +// +// import ( +// "net/http" +// +// "github.com/go-chi/chi/v5" +// "github.com/go-chi/chi/v5/middleware" +// ) +// +// func main() { +// r := chi.NewRouter() +// r.Use(middleware.Logger) +// r.Use(middleware.Recoverer) +// +// r.Get("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("root.")) +// }) +// +// http.ListenAndServe(":3333", r) +// } +// +// See github.com/go-chi/chi/_examples/ for more in-depth examples. +// +// URL patterns allow for easy matching of path components in HTTP +// requests. The matching components can then be accessed using +// chi.URLParam(). All patterns must begin with a slash. +// +// A simple named placeholder {name} matches any sequence of characters +// up to the next / or the end of the URL. Trailing slashes on paths must +// be handled explicitly. +// +// A placeholder with a name followed by a colon allows a regular +// expression match, for example {number:\\d+}. The regular expression +// syntax is Go's normal regexp RE2 syntax, except that / will never be +// matched. An anonymous regexp pattern is allowed, using an empty string +// before the colon in the placeholder, such as {:\\d+} +// +// The special placeholder of asterisk matches the rest of the requested +// URL. Any trailing characters in the pattern are ignored. This is the only +// placeholder which will match / characters. +// +// Examples: +// +// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" +// "/user/{name}/info" matches "/user/jsmith/info" +// "/page/*" matches "/page/intro/latest" +// "/page/{other}/latest" also matches "/page/intro/latest" +// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" +package chi + +import "net/http" + +// NewRouter returns a new Mux object that implements the Router interface. +func NewRouter() *Mux { + return NewMux() +} + +// Router consisting of the core routing methods used by chi's Mux, +// using only the standard net/http. +type Router interface { + http.Handler + Routes + + // Use appends one or more middlewares onto the Router stack. + Use(middlewares ...func(http.Handler) http.Handler) + + // With adds inline middlewares for an endpoint handler. + With(middlewares ...func(http.Handler) http.Handler) Router + + // Group adds a new inline-Router along the current routing + // path, with a fresh middleware stack for the inline-Router. + Group(fn func(r Router)) Router + + // Route mounts a sub-Router along a `pattern`` string. + Route(pattern string, fn func(r Router)) Router + + // Mount attaches another http.Handler along ./pattern/* + Mount(pattern string, h http.Handler) + + // Handle and HandleFunc adds routes for `pattern` that matches + // all HTTP methods. 
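+	// With chi's Mux, the pattern may additionally embed a method prefix,
+	// e.g. Handle("GET /users", h), which is split and dispatched via Method
+	// (see Mux.Handle).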
+	Handle(pattern string, h http.Handler)
+	HandleFunc(pattern string, h http.HandlerFunc)
+
+	// Method and MethodFunc add routes for `pattern` that matches
+	// the `method` HTTP method.
+	Method(method, pattern string, h http.Handler)
+	MethodFunc(method, pattern string, h http.HandlerFunc)
+
+	// HTTP-method routing along `pattern`
+	Connect(pattern string, h http.HandlerFunc)
+	Delete(pattern string, h http.HandlerFunc)
+	Get(pattern string, h http.HandlerFunc)
+	Head(pattern string, h http.HandlerFunc)
+	Options(pattern string, h http.HandlerFunc)
+	Patch(pattern string, h http.HandlerFunc)
+	Post(pattern string, h http.HandlerFunc)
+	Put(pattern string, h http.HandlerFunc)
+	Trace(pattern string, h http.HandlerFunc)
+
+	// NotFound defines a handler to respond whenever a route could
+	// not be found.
+	NotFound(h http.HandlerFunc)
+
+	// MethodNotAllowed defines a handler to respond whenever a method is
+	// not allowed.
+	MethodNotAllowed(h http.HandlerFunc)
+}
+
+// Routes interface adds methods for router traversal, which are also
+// used by the `docgen` subpackage to generate documentation for Routers.
+type Routes interface {
+	// Routes returns the routing tree in an easily traversable structure.
+	Routes() []Route
+
+	// Middlewares returns the list of middlewares in use by the router.
+	Middlewares() Middlewares
+
+	// Match searches the routing tree for a handler that matches
+	// the method/path - similar to routing a http request, but without
+	// executing the handler thereafter.
+	Match(rctx *Context, method, path string) bool
+
+	// Find searches the routing tree for the pattern that matches
+	// the method/path.
+	Find(rctx *Context, method, path string) string
+}
+
+// Middlewares type is a slice of standard middleware handlers with methods
+// to compose middleware chains and http.Handlers.
+type Middlewares []func(http.Handler) http.Handler
diff --git a/vendor/github.com/go-chi/chi/v5/context.go b/vendor/github.com/go-chi/chi/v5/context.go
new file mode 100644
index 00000000000..aacf6eff727
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/context.go
@@ -0,0 +1,165 @@
+package chi
+
+import (
+	"context"
+	"net/http"
+	"strings"
+)
+
+// URLParam returns the url parameter from an http.Request object.
+func URLParam(r *http.Request, key string) string {
+	if rctx := RouteContext(r.Context()); rctx != nil {
+		return rctx.URLParam(key)
+	}
+	return ""
+}
+
+// URLParamFromCtx returns the url parameter from an http.Request Context.
+func URLParamFromCtx(ctx context.Context, key string) string {
+	if rctx := RouteContext(ctx); rctx != nil {
+		return rctx.URLParam(key)
+	}
+	return ""
+}
+
+// RouteContext returns chi's routing Context object from an
+// http.Request Context.
+func RouteContext(ctx context.Context) *Context {
+	val, _ := ctx.Value(RouteCtxKey).(*Context)
+	return val
+}
+
+// NewRouteContext returns a new routing Context object.
+func NewRouteContext() *Context {
+	return &Context{}
+}
+
+var (
+	// RouteCtxKey is the context.Context key to store the request context.
+	RouteCtxKey = &contextKey{"RouteContext"}
+)
+
+// Context is the default routing context set on the root node of a
+// request context to track route patterns, URL parameters and
+// an optional routing path.
+type Context struct {
+	Routes Routes
+
+	// parentCtx is the parent of this one, for using Context as a
+	// context.Context directly. This is an optimization that saves
+	// 1 allocation.
+ parentCtx context.Context + + // Routing path/method override used during the route search. + // See Mux#routeHTTP method. + RoutePath string + RouteMethod string + + // URLParams are the stack of routeParams captured during the + // routing lifecycle across a stack of sub-routers. + URLParams RouteParams + + // Route parameters matched for the current sub-router. It is + // intentionally unexported so it can't be tampered. + routeParams RouteParams + + // The endpoint routing pattern that matched the request URI path + // or `RoutePath` of the current sub-router. This value will update + // during the lifecycle of a request passing through a stack of + // sub-routers. + routePattern string + + // Routing pattern stack throughout the lifecycle of the request, + // across all connected routers. It is a record of all matching + // patterns across a stack of sub-routers. + RoutePatterns []string + + methodsAllowed []methodTyp // allowed methods in case of a 405 + methodNotAllowed bool +} + +// Reset a routing context to its initial state. +func (x *Context) Reset() { + x.Routes = nil + x.RoutePath = "" + x.RouteMethod = "" + x.RoutePatterns = x.RoutePatterns[:0] + x.URLParams.Keys = x.URLParams.Keys[:0] + x.URLParams.Values = x.URLParams.Values[:0] + + x.routePattern = "" + x.routeParams.Keys = x.routeParams.Keys[:0] + x.routeParams.Values = x.routeParams.Values[:0] + x.methodNotAllowed = false + x.methodsAllowed = x.methodsAllowed[:0] + x.parentCtx = nil +} + +// URLParam returns the corresponding URL parameter value from the request +// routing context. +func (x *Context) URLParam(key string) string { + for k := len(x.URLParams.Keys) - 1; k >= 0; k-- { + if x.URLParams.Keys[k] == key { + return x.URLParams.Values[k] + } + } + return "" +} + +// RoutePattern builds the routing pattern string for the particular +// request, at the particular point during routing. This means, the value +// will change throughout the execution of a request in a router. That is +// why it's advised to only use this value after calling the next handler. +// +// For example, +// +// func Instrument(next http.Handler) http.Handler { +// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// next.ServeHTTP(w, r) +// routePattern := chi.RouteContext(r.Context()).RoutePattern() +// measure(w, r, routePattern) +// }) +// } +func (x *Context) RoutePattern() string { + if x == nil { + return "" + } + routePattern := strings.Join(x.RoutePatterns, "") + routePattern = replaceWildcards(routePattern) + if routePattern != "/" { + routePattern = strings.TrimSuffix(routePattern, "//") + routePattern = strings.TrimSuffix(routePattern, "/") + } + return routePattern +} + +// replaceWildcards takes a route pattern and recursively replaces all +// occurrences of "/*/" to "/". +func replaceWildcards(p string) string { + if strings.Contains(p, "/*/") { + return replaceWildcards(strings.Replace(p, "/*/", "/", -1)) + } + return p +} + +// RouteParams is a structure to track URL routing parameters efficiently. +type RouteParams struct { + Keys, Values []string +} + +// Add will append a URL parameter to the end of the route param +func (s *RouteParams) Add(key, value string) { + s.Keys = append(s.Keys, key) + s.Values = append(s.Values, value) +} + +// contextKey is a value for use with context.WithValue. It's used as +// a pointer so it fits in an interface{} without allocation. This technique +// for defining context keys was copied from Go 1.7's new use of context in net/http. 
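+// Because contextKey is unexported and the key is compared by pointer,
+// no other package can construct a colliding context key.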
+type contextKey struct { + name string +} + +func (k *contextKey) String() string { + return "chi context value " + k.name +} diff --git a/vendor/github.com/go-chi/chi/v5/mux.go b/vendor/github.com/go-chi/chi/v5/mux.go new file mode 100644 index 00000000000..f1266971b4c --- /dev/null +++ b/vendor/github.com/go-chi/chi/v5/mux.go @@ -0,0 +1,527 @@ +package chi + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" +) + +var _ Router = &Mux{} + +// Mux is a simple HTTP route multiplexer that parses a request path, +// records any URL params, and executes an end handler. It implements +// the http.Handler interface and is friendly with the standard library. +// +// Mux is designed to be fast, minimal and offer a powerful API for building +// modular and composable HTTP services with a large set of handlers. It's +// particularly useful for writing large REST API services that break a handler +// into many smaller parts composed of middlewares and end handlers. +type Mux struct { + // The computed mux handler made of the chained middleware stack and + // the tree router + handler http.Handler + + // The radix trie router + tree *node + + // Custom method not allowed handler + methodNotAllowedHandler http.HandlerFunc + + // A reference to the parent mux used by subrouters when mounting + // to a parent mux + parent *Mux + + // Routing context pool + pool *sync.Pool + + // Custom route not found handler + notFoundHandler http.HandlerFunc + + // The middleware stack + middlewares []func(http.Handler) http.Handler + + // Controls the behaviour of middleware chain generation when a mux + // is registered as an inline group inside another mux. + inline bool +} + +// NewMux returns a newly initialized Mux object that implements the Router +// interface. +func NewMux() *Mux { + mux := &Mux{tree: &node{}, pool: &sync.Pool{}} + mux.pool.New = func() interface{} { + return NewRouteContext() + } + return mux +} + +// ServeHTTP is the single method of the http.Handler interface that makes +// Mux interoperable with the standard library. It uses a sync.Pool to get and +// reuse routing contexts for each request. +func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Ensure the mux has some routes defined on the mux + if mx.handler == nil { + mx.NotFoundHandler().ServeHTTP(w, r) + return + } + + // Check if a routing context already exists from a parent router. + rctx, _ := r.Context().Value(RouteCtxKey).(*Context) + if rctx != nil { + mx.handler.ServeHTTP(w, r) + return + } + + // Fetch a RouteContext object from the sync pool, and call the computed + // mx.handler that is comprised of mx.middlewares + mx.routeHTTP. + // Once the request is finished, reset the routing context and put it back + // into the pool for reuse from another request. + rctx = mx.pool.Get().(*Context) + rctx.Reset() + rctx.Routes = mx + rctx.parentCtx = r.Context() + + // NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation + r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx)) + + // Serve the request and once its done, put the request context back in the sync pool + mx.handler.ServeHTTP(w, r) + mx.pool.Put(rctx) +} + +// Use appends a middleware handler to the Mux middleware stack. +// +// The middleware stack for any Mux will execute before searching for a matching +// route to a specific handler, which provides opportunity to respond early, +// change the course of the request execution, or set request-scoped values for +// the next http.Handler. 
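+//
+// A typical stack, as a sketch (middlewares from chi's optional middleware
+// subpackage):
+//
+//	r := chi.NewRouter()
+//	r.Use(middleware.RequestID, middleware.Logger, middleware.Recoverer)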
+func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) { + if mx.handler != nil { + panic("chi: all middlewares must be defined before routes on a mux") + } + mx.middlewares = append(mx.middlewares, middlewares...) +} + +// Handle adds the route `pattern` that matches any http method to +// execute the `handler` http.Handler. +func (mx *Mux) Handle(pattern string, handler http.Handler) { + if method, rest, found := strings.Cut(pattern, " "); found { + mx.Method(method, rest, handler) + return + } + + mx.handle(mALL, pattern, handler) +} + +// HandleFunc adds the route `pattern` that matches any http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) { + if method, rest, found := strings.Cut(pattern, " "); found { + mx.Method(method, rest, handlerFn) + return + } + + mx.handle(mALL, pattern, handlerFn) +} + +// Method adds the route `pattern` that matches `method` http method to +// execute the `handler` http.Handler. +func (mx *Mux) Method(method, pattern string, handler http.Handler) { + m, ok := methodMap[strings.ToUpper(method)] + if !ok { + panic(fmt.Sprintf("chi: '%s' http method is not supported.", method)) + } + mx.handle(m, pattern, handler) +} + +// MethodFunc adds the route `pattern` that matches `method` http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) { + mx.Method(method, pattern, handlerFn) +} + +// Connect adds the route `pattern` that matches a CONNECT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mCONNECT, pattern, handlerFn) +} + +// Delete adds the route `pattern` that matches a DELETE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mDELETE, pattern, handlerFn) +} + +// Get adds the route `pattern` that matches a GET http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mGET, pattern, handlerFn) +} + +// Head adds the route `pattern` that matches a HEAD http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mHEAD, pattern, handlerFn) +} + +// Options adds the route `pattern` that matches an OPTIONS http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mOPTIONS, pattern, handlerFn) +} + +// Patch adds the route `pattern` that matches a PATCH http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPATCH, pattern, handlerFn) +} + +// Post adds the route `pattern` that matches a POST http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPOST, pattern, handlerFn) +} + +// Put adds the route `pattern` that matches a PUT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPUT, pattern, handlerFn) +} + +// Trace adds the route `pattern` that matches a TRACE http method to +// execute the `handlerFn` http.HandlerFunc. 
+func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mTRACE, pattern, handlerFn) +} + +// NotFound sets a custom http.HandlerFunc for routing paths that could +// not be found. The default 404 handler is `http.NotFound`. +func (mx *Mux) NotFound(handlerFn http.HandlerFunc) { + // Build NotFound handler chain + m := mx + hFn := handlerFn + if mx.inline && mx.parent != nil { + m = mx.parent + hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP + } + + // Update the notFoundHandler from this point forward + m.notFoundHandler = hFn + m.updateSubRoutes(func(subMux *Mux) { + if subMux.notFoundHandler == nil { + subMux.NotFound(hFn) + } + }) +} + +// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the +// method is unresolved. The default handler returns a 405 with an empty body. +func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) { + // Build MethodNotAllowed handler chain + m := mx + hFn := handlerFn + if mx.inline && mx.parent != nil { + m = mx.parent + hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP + } + + // Update the methodNotAllowedHandler from this point forward + m.methodNotAllowedHandler = hFn + m.updateSubRoutes(func(subMux *Mux) { + if subMux.methodNotAllowedHandler == nil { + subMux.MethodNotAllowed(hFn) + } + }) +} + +// With adds inline middlewares for an endpoint handler. +func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router { + // Similarly as in handle(), we must build the mux handler once additional + // middleware registration isn't allowed for this stack, like now. + if !mx.inline && mx.handler == nil { + mx.updateRouteHandler() + } + + // Copy middlewares from parent inline muxs + var mws Middlewares + if mx.inline { + mws = make(Middlewares, len(mx.middlewares)) + copy(mws, mx.middlewares) + } + mws = append(mws, middlewares...) + + im := &Mux{ + pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws, + notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler, + } + + return im +} + +// Group creates a new inline-Mux with a copy of middleware stack. It's useful +// for a group of handlers along the same routing path that use an additional +// set of middlewares. See _examples/. +func (mx *Mux) Group(fn func(r Router)) Router { + im := mx.With() + if fn != nil { + fn(im) + } + return im +} + +// Route creates a new Mux and mounts it along the `pattern` as a subrouter. +// Effectively, this is a short-hand call to Mount. See _examples/. +func (mx *Mux) Route(pattern string, fn func(r Router)) Router { + if fn == nil { + panic(fmt.Sprintf("chi: attempting to Route() a nil subrouter on '%s'", pattern)) + } + subRouter := NewRouter() + fn(subRouter) + mx.Mount(pattern, subRouter) + return subRouter +} + +// Mount attaches another http.Handler or chi Router as a subrouter along a routing +// path. It's very useful to split up a large API as many independent routers and +// compose them as a single service using Mount. See _examples/. +// +// Note that Mount() simply sets a wildcard along the `pattern` that will continue +// routing at the `handler`, which in most cases is another chi.Router. As a result, +// if you define two Mount() routes on the exact same pattern the mount will panic. 
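+//
+// For example, with a hypothetical userRoutes chi.Router:
+//
+//	r.Mount("/users", userRoutes) // serves /users, /users/ and /users/*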
+func (mx *Mux) Mount(pattern string, handler http.Handler) { + if handler == nil { + panic(fmt.Sprintf("chi: attempting to Mount() a nil handler on '%s'", pattern)) + } + + // Provide runtime safety for ensuring a pattern isn't mounted on an existing + // routing pattern. + if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") { + panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern)) + } + + // Assign sub-Router's with the parent not found & method not allowed handler if not specified. + subr, ok := handler.(*Mux) + if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil { + subr.NotFound(mx.notFoundHandler) + } + if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil { + subr.MethodNotAllowed(mx.methodNotAllowedHandler) + } + + mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + rctx := RouteContext(r.Context()) + + // shift the url path past the previous subrouter + rctx.RoutePath = mx.nextRoutePath(rctx) + + // reset the wildcard URLParam which connects the subrouter + n := len(rctx.URLParams.Keys) - 1 + if n >= 0 && rctx.URLParams.Keys[n] == "*" && len(rctx.URLParams.Values) > n { + rctx.URLParams.Values[n] = "" + } + + handler.ServeHTTP(w, r) + }) + + if pattern == "" || pattern[len(pattern)-1] != '/' { + mx.handle(mALL|mSTUB, pattern, mountHandler) + mx.handle(mALL|mSTUB, pattern+"/", mountHandler) + pattern += "/" + } + + method := mALL + subroutes, _ := handler.(Routes) + if subroutes != nil { + method |= mSTUB + } + n := mx.handle(method, pattern+"*", mountHandler) + + if subroutes != nil { + n.subroutes = subroutes + } +} + +// Routes returns a slice of routing information from the tree, +// useful for traversing available routes of a router. +func (mx *Mux) Routes() []Route { + return mx.tree.routes() +} + +// Middlewares returns a slice of middleware handler functions. +func (mx *Mux) Middlewares() Middlewares { + return mx.middlewares +} + +// Match searches the routing tree for a handler that matches the method/path. +// It's similar to routing a http request, but without executing the handler +// thereafter. +// +// Note: the *Context state is updated during execution, so manage +// the state carefully or make a NewRouteContext(). +func (mx *Mux) Match(rctx *Context, method, path string) bool { + return mx.Find(rctx, method, path) != "" +} + +// Find searches the routing tree for the pattern that matches +// the method/path. +// +// Note: the *Context state is updated during execution, so manage +// the state carefully or make a NewRouteContext(). +func (mx *Mux) Find(rctx *Context, method, path string) string { + m, ok := methodMap[method] + if !ok { + return "" + } + + node, _, _ := mx.tree.FindRoute(rctx, m, path) + pattern := rctx.routePattern + + if node != nil { + if node.subroutes == nil { + e := node.endpoints[m] + return e.pattern + } + + rctx.RoutePath = mx.nextRoutePath(rctx) + subPattern := node.subroutes.Find(rctx, method, rctx.RoutePath) + if subPattern == "" { + return "" + } + + pattern = strings.TrimSuffix(pattern, "/*") + pattern += subPattern + } + + return pattern +} + +// NotFoundHandler returns the default Mux 404 responder whenever a route +// cannot be found. +func (mx *Mux) NotFoundHandler() http.HandlerFunc { + if mx.notFoundHandler != nil { + return mx.notFoundHandler + } + return http.NotFound +} + +// MethodNotAllowedHandler returns the default Mux 405 responder whenever +// a method cannot be resolved for a route. 
+func (mx *Mux) MethodNotAllowedHandler(methodsAllowed ...methodTyp) http.HandlerFunc { + if mx.methodNotAllowedHandler != nil { + return mx.methodNotAllowedHandler + } + return methodNotAllowedHandler(methodsAllowed...) +} + +// handle registers a http.Handler in the routing tree for a particular http method +// and routing pattern. +func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node { + if len(pattern) == 0 || pattern[0] != '/' { + panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern)) + } + + // Build the computed routing handler for this routing pattern. + if !mx.inline && mx.handler == nil { + mx.updateRouteHandler() + } + + // Build endpoint handler with inline middlewares for the route + var h http.Handler + if mx.inline { + mx.handler = http.HandlerFunc(mx.routeHTTP) + h = Chain(mx.middlewares...).Handler(handler) + } else { + h = handler + } + + // Add the endpoint to the tree and return the node + return mx.tree.InsertRoute(method, pattern, h) +} + +// routeHTTP routes a http.Request through the Mux routing tree to serve +// the matching handler for a particular http method. +func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) { + // Grab the route context object + rctx := r.Context().Value(RouteCtxKey).(*Context) + + // The request routing path + routePath := rctx.RoutePath + if routePath == "" { + if r.URL.RawPath != "" { + routePath = r.URL.RawPath + } else { + routePath = r.URL.Path + } + if routePath == "" { + routePath = "/" + } + } + + // Check if method is supported by chi + if rctx.RouteMethod == "" { + rctx.RouteMethod = r.Method + } + method, ok := methodMap[rctx.RouteMethod] + if !ok { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + return + } + + // Find the route + if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil { + if supportsPathValue { + setPathValue(rctx, r) + } + + h.ServeHTTP(w, r) + return + } + if rctx.methodNotAllowed { + mx.MethodNotAllowedHandler(rctx.methodsAllowed...).ServeHTTP(w, r) + } else { + mx.NotFoundHandler().ServeHTTP(w, r) + } +} + +func (mx *Mux) nextRoutePath(rctx *Context) string { + routePath := "/" + nx := len(rctx.routeParams.Keys) - 1 // index of last param in list + if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx { + routePath = "/" + rctx.routeParams.Values[nx] + } + return routePath +} + +// Recursively update data on child routers. +func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) { + for _, r := range mx.tree.routes() { + subMux, ok := r.SubRoutes.(*Mux) + if !ok { + continue + } + fn(subMux) + } +} + +// updateRouteHandler builds the single mux handler that is a chain of the middleware +// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this +// point, no other middlewares can be registered on this Mux's stack. But you can still +// compose additional middlewares via Group()'s or using a chained middleware handler. +func (mx *Mux) updateRouteHandler() { + mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP)) +} + +// methodNotAllowedHandler is a helper function to respond with a 405, +// method not allowed. It sets the Allow header with the list of allowed +// methods for the route. 
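+// (RFC 9110 requires that a 405 response include an Allow header listing
+// the target resource's supported methods.)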
+func methodNotAllowedHandler(methodsAllowed ...methodTyp) func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
+		for _, m := range methodsAllowed {
+			w.Header().Add("Allow", reverseMethodMap[m])
+		}
+		w.WriteHeader(405)
+		w.Write(nil)
+	}
+}
diff --git a/vendor/github.com/go-chi/chi/v5/path_value.go b/vendor/github.com/go-chi/chi/v5/path_value.go
new file mode 100644
index 00000000000..77c840f0191
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/path_value.go
@@ -0,0 +1,21 @@
+//go:build go1.22 && !tinygo
+// +build go1.22,!tinygo
+
+
+package chi
+
+import "net/http"
+
+// supportsPathValue is true if the Go version is 1.22 and above.
+//
+// If this is true, `net/http.Request` has methods `SetPathValue` and `PathValue`.
+const supportsPathValue = true
+
+// setPathValue sets the path values in the Request value
+// based on the provided request context.
+func setPathValue(rctx *Context, r *http.Request) {
+	for i, key := range rctx.URLParams.Keys {
+		value := rctx.URLParams.Values[i]
+		r.SetPathValue(key, value)
+	}
+}
diff --git a/vendor/github.com/go-chi/chi/v5/path_value_fallback.go b/vendor/github.com/go-chi/chi/v5/path_value_fallback.go
new file mode 100644
index 00000000000..749a8520a75
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/path_value_fallback.go
@@ -0,0 +1,19 @@
+//go:build !go1.22 || tinygo
+// +build !go1.22 tinygo
+
+package chi
+
+import "net/http"
+
+// supportsPathValue is true if the Go version is 1.22 and above.
+//
+// If this is true, `net/http.Request` has methods `SetPathValue` and `PathValue`.
+const supportsPathValue = false
+
+// setPathValue sets the path values in the Request value
+// based on the provided request context.
+//
+// setPathValue is only supported in Go 1.22 and above so
+// this is just a blank function so that it compiles.
+func setPathValue(rctx *Context, r *http.Request) {
+}
diff --git a/vendor/github.com/go-chi/chi/v5/tree.go b/vendor/github.com/go-chi/chi/v5/tree.go
new file mode 100644
index 00000000000..85fcfdbb8d4
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/tree.go
@@ -0,0 +1,890 @@
+package chi
+
+// Radix tree implementation below is based on the original work by
+// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
+// (MIT licensed). It's been heavily modified for use as an HTTP routing tree.
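Aside: the two path_value files above are the bridge between chi and the request-scoped path values added in Go 1.22. On go1.22 builds (excluding tinygo) the mux mirrors every matched URL parameter into the request through SetPathValue; on older toolchains the fallback compiles the same call to a no-op, so the router code needs no build tags of its own. A minimal sketch of what this enables for handlers, illustrative only (the route, handler body, and port are invented, not part of this patch):

package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()
	r.Get("/users/{id}", func(w http.ResponseWriter, req *http.Request) {
		// On Go 1.22+ both lookups should return the same value, since the mux
		// copies each matched URL param into the request via req.SetPathValue.
		fmt.Fprintf(w, "chi=%s stdlib=%s\n", chi.URLParam(req, "id"), req.PathValue("id"))
	})
	_ = http.ListenAndServe(":3000", r)
}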
+ +import ( + "fmt" + "net/http" + "regexp" + "sort" + "strconv" + "strings" +) + +type methodTyp uint + +const ( + mSTUB methodTyp = 1 << iota + mCONNECT + mDELETE + mGET + mHEAD + mOPTIONS + mPATCH + mPOST + mPUT + mTRACE +) + +var mALL = mCONNECT | mDELETE | mGET | mHEAD | + mOPTIONS | mPATCH | mPOST | mPUT | mTRACE + +var methodMap = map[string]methodTyp{ + http.MethodConnect: mCONNECT, + http.MethodDelete: mDELETE, + http.MethodGet: mGET, + http.MethodHead: mHEAD, + http.MethodOptions: mOPTIONS, + http.MethodPatch: mPATCH, + http.MethodPost: mPOST, + http.MethodPut: mPUT, + http.MethodTrace: mTRACE, +} + +var reverseMethodMap = map[methodTyp]string{ + mCONNECT: http.MethodConnect, + mDELETE: http.MethodDelete, + mGET: http.MethodGet, + mHEAD: http.MethodHead, + mOPTIONS: http.MethodOptions, + mPATCH: http.MethodPatch, + mPOST: http.MethodPost, + mPUT: http.MethodPut, + mTRACE: http.MethodTrace, +} + +// RegisterMethod adds support for custom HTTP method handlers, available +// via Router#Method and Router#MethodFunc +func RegisterMethod(method string) { + if method == "" { + return + } + method = strings.ToUpper(method) + if _, ok := methodMap[method]; ok { + return + } + n := len(methodMap) + if n > strconv.IntSize-2 { + panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize)) + } + mt := methodTyp(2 << n) + methodMap[method] = mt + mALL |= mt +} + +type nodeTyp uint8 + +const ( + ntStatic nodeTyp = iota // /home + ntRegexp // /{id:[0-9]+} + ntParam // /{user} + ntCatchAll // /api/v1/* +) + +type node struct { + // subroutes on the leaf node + subroutes Routes + + // regexp matcher for regexp nodes + rex *regexp.Regexp + + // HTTP handler endpoints on the leaf node + endpoints endpoints + + // prefix is the common prefix we ignore + prefix string + + // child nodes should be stored in-order for iteration, + // in groups of the node type. + children [ntCatchAll + 1]nodes + + // first byte of the child prefix + tail byte + + // node type: static, regexp, param, catchAll + typ nodeTyp + + // first byte of the prefix + label byte +} + +// endpoints is a mapping of http method constants to handlers +// for a given route. 
+type endpoints map[methodTyp]*endpoint + +type endpoint struct { + // endpoint handler + handler http.Handler + + // pattern is the routing pattern for handler nodes + pattern string + + // parameter keys recorded on handler nodes + paramKeys []string +} + +func (s endpoints) Value(method methodTyp) *endpoint { + mh, ok := s[method] + if !ok { + mh = &endpoint{} + s[method] = mh + } + return mh +} + +func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node { + var parent *node + search := pattern + + for { + // Handle key exhaustion + if len(search) == 0 { + // Insert or update the node's leaf handler + n.setEndpoint(method, handler, pattern) + return n + } + + // We're going to be searching for a wild node next, + // in this case, we need to get the tail + var label = search[0] + var segTail byte + var segEndIdx int + var segTyp nodeTyp + var segRexpat string + if label == '{' || label == '*' { + segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search) + } + + var prefix string + if segTyp == ntRegexp { + prefix = segRexpat + } + + // Look for the edge to attach to + parent = n + n = n.getEdge(segTyp, label, segTail, prefix) + + // No edge, create one + if n == nil { + child := &node{label: label, tail: segTail, prefix: search} + hn := parent.addChild(child, search) + hn.setEndpoint(method, handler, pattern) + + return hn + } + + // Found an edge to match the pattern + + if n.typ > ntStatic { + // We found a param node, trim the param from the search path and continue. + // This param/wild pattern segment would already be on the tree from a previous + // call to addChild when creating a new node. + search = search[segEndIdx:] + continue + } + + // Static nodes fall below here. + // Determine longest prefix of the search key on match. + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + // the common prefix is as long as the current node's prefix we're attempting to insert. + // keep the search going. + search = search[commonPrefix:] + continue + } + + // Split the node + child := &node{ + typ: ntStatic, + prefix: search[:commonPrefix], + } + parent.replaceChild(search[0], segTail, child) + + // Restore the existing node + n.label = n.prefix[commonPrefix] + n.prefix = n.prefix[commonPrefix:] + child.addChild(n, n.prefix) + + // If the new key is a subset, set the method/handler on this node and finish. + search = search[commonPrefix:] + if len(search) == 0 { + child.setEndpoint(method, handler, pattern) + return child + } + + // Create a new edge for the node + subchild := &node{ + typ: ntStatic, + label: search[0], + prefix: search, + } + hn := child.addChild(subchild, search) + hn.setEndpoint(method, handler, pattern) + return hn + } +} + +// addChild appends the new `child` node to the tree using the `pattern` as the trie key. +// For a URL router like chi's, we split the static, param, regexp and wildcard segments +// into different nodes. In addition, addChild will recursively call itself until every +// pattern segment is added to the url pattern tree as individual nodes, depending on type. +func (n *node) addChild(child *node, prefix string) *node { + search := prefix + + // handler leaf node added to the tree is the child. 
+	// this may be overridden later down the flow
+	hn := child
+
+	// Parse next segment
+	segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)
+
+	// Add child depending on next up segment
+	switch segTyp {
+
+	case ntStatic:
+		// Search prefix is all static (that is, has no params in path)
+		// noop
+
+	default:
+		// Search prefix contains a param, regexp or wildcard
+
+		if segTyp == ntRegexp {
+			rex, err := regexp.Compile(segRexpat)
+			if err != nil {
+				panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
+			}
+			child.prefix = segRexpat
+			child.rex = rex
+		}
+
+		if segStartIdx == 0 {
+			// Route starts with a param
+			child.typ = segTyp
+
+			if segTyp == ntCatchAll {
+				segStartIdx = -1
+			} else {
+				segStartIdx = segEndIdx
+			}
+			if segStartIdx < 0 {
+				segStartIdx = len(search)
+			}
+			child.tail = segTail // for params, we set the tail
+
+			if segStartIdx != len(search) {
+				// add static edge for the remaining part, split the end.
+				// it's not possible to have adjacent param nodes, so it's certainly
+				// going to be a static node next.
+
+				search = search[segStartIdx:] // advance search position
+
+				nn := &node{
+					typ:    ntStatic,
+					label:  search[0],
+					prefix: search,
+				}
+				hn = child.addChild(nn, search)
+			}
+
+		} else if segStartIdx > 0 {
+			// Route has some param
+
+			// starts with a static segment
+			child.typ = ntStatic
+			child.prefix = search[:segStartIdx]
+			child.rex = nil
+
+			// add the param edge node
+			search = search[segStartIdx:]
+
+			nn := &node{
+				typ:   segTyp,
+				label: search[0],
+				tail:  segTail,
+			}
+			hn = child.addChild(nn, search)
+
+		}
+	}
+
+	n.children[child.typ] = append(n.children[child.typ], child)
+	n.children[child.typ].Sort()
+	return hn
+}
+
+func (n *node) replaceChild(label, tail byte, child *node) {
+	for i := 0; i < len(n.children[child.typ]); i++ {
+		if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail {
+			n.children[child.typ][i] = child
+			n.children[child.typ][i].label = label
+			n.children[child.typ][i].tail = tail
+			return
+		}
+	}
+	panic("chi: replacing missing child")
+}
+
+func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node {
+	nds := n.children[ntyp]
+	for i := 0; i < len(nds); i++ {
+		if nds[i].label == label && nds[i].tail == tail {
+			if ntyp == ntRegexp && nds[i].prefix != prefix {
+				continue
+			}
+			return nds[i]
+		}
+	}
+	return nil
+}
+
+func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
+	// Set the handler for the method type on the node
+	if n.endpoints == nil {
+		n.endpoints = make(endpoints)
+	}
+
+	paramKeys := patParamKeys(pattern)
+
+	if method&mSTUB == mSTUB {
+		n.endpoints.Value(mSTUB).handler = handler
+	}
+	if method&mALL == mALL {
+		h := n.endpoints.Value(mALL)
+		h.handler = handler
+		h.pattern = pattern
+		h.paramKeys = paramKeys
+		for _, m := range methodMap {
+			h := n.endpoints.Value(m)
+			h.handler = handler
+			h.pattern = pattern
+			h.paramKeys = paramKeys
+		}
+	} else {
+		h := n.endpoints.Value(method)
+		h.handler = handler
+		h.pattern = pattern
+		h.paramKeys = paramKeys
+	}
+}
+
+func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
+	// Reset the context routing pattern and params
+	rctx.routePattern = ""
+	rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
+	rctx.routeParams.Values = rctx.routeParams.Values[:0]
+
+	// Find the routing handlers for the path
+	rn := n.findRoute(rctx, method, path)
+	if rn == nil {
+		return nil, nil, nil
+	}
+
+	// Record the routing params in the request lifecycle
+	rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
+	rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)
+
+	// Record the routing pattern in the request lifecycle
+	if rn.endpoints[method].pattern != "" {
+		rctx.routePattern = rn.endpoints[method].pattern
+		rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
+	}
+
+	return rn, rn.endpoints, rn.endpoints[method].handler
+}
+
+// Recursive edge traversal by checking all nodeTyp groups along the way.
+// It's like searching through a multi-dimensional radix trie.
+func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
+	nn := n
+	search := path
+
+	for t, nds := range nn.children {
+		ntyp := nodeTyp(t)
+		if len(nds) == 0 {
+			continue
+		}
+
+		var xn *node
+		xsearch := search
+
+		var label byte
+		if search != "" {
+			label = search[0]
+		}
+
+		switch ntyp {
+		case ntStatic:
+			xn = nds.findEdge(label)
+			if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
+				continue
+			}
+			xsearch = xsearch[len(xn.prefix):]
+
+		case ntParam, ntRegexp:
+			// short-circuit and return no matching route for empty param values
+			if xsearch == "" {
+				continue
+			}
+
+			// serially loop through each node grouped by the tail delimiter
+			for idx := 0; idx < len(nds); idx++ {
+				xn = nds[idx]
+
+				// label for param nodes is the delimiter byte
+				p := strings.IndexByte(xsearch, xn.tail)
+
+				if p < 0 {
+					if xn.tail == '/' {
+						p = len(xsearch)
+					} else {
+						continue
+					}
+				} else if ntyp == ntRegexp && p == 0 {
+					continue
+				}
+
+				if ntyp == ntRegexp && xn.rex != nil {
+					if !xn.rex.MatchString(xsearch[:p]) {
+						continue
+					}
+				} else if strings.IndexByte(xsearch[:p], '/') != -1 {
+					// avoid a match across path segments
+					continue
+				}
+
+				prevlen := len(rctx.routeParams.Values)
+				rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
+				xsearch = xsearch[p:]
+
+				if len(xsearch) == 0 {
+					if xn.isLeaf() {
+						h := xn.endpoints[method]
+						if h != nil && h.handler != nil {
+							rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
+							return xn
+						}
+
+						for endpoints := range xn.endpoints {
+							if endpoints == mALL || endpoints == mSTUB {
+								continue
+							}
+							rctx.methodsAllowed = append(rctx.methodsAllowed, endpoints)
+						}
+
+						// flag that the routing context found a route, but not a corresponding
+						// supported method
+						rctx.methodNotAllowed = true
+					}
+				}
+
+				// recursively find the next node on this branch
+				fin := xn.findRoute(rctx, method, xsearch)
+				if fin != nil {
+					return fin
+				}
+
+				// not found on this branch, reset vars
+				rctx.routeParams.Values = rctx.routeParams.Values[:prevlen]
+				xsearch = search
+			}
+
+			rctx.routeParams.Values = append(rctx.routeParams.Values, "")
+
+		default:
+			// catch-all nodes
+			rctx.routeParams.Values = append(rctx.routeParams.Values, search)
+			xn = nds[0]
+			xsearch = ""
+		}
+
+		if xn == nil {
+			continue
+		}
+
+		// did we find it yet?
+		if len(xsearch) == 0 {
+			if xn.isLeaf() {
+				h := xn.endpoints[method]
+				if h != nil && h.handler != nil {
+					rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
+					return xn
+				}
+
+				for endpoints := range xn.endpoints {
+					if endpoints == mALL || endpoints == mSTUB {
+						continue
+					}
+					rctx.methodsAllowed = append(rctx.methodsAllowed, endpoints)
+				}
+
+				// flag that the routing context found a route, but not a corresponding
+				// supported method
+				rctx.methodNotAllowed = true
+			}
+		}
+
+		// recursively find the next node..
+ fin := xn.findRoute(rctx, method, xsearch) + if fin != nil { + return fin + } + + // Did not find final handler, let's remove the param here if it was set + if xn.typ > ntStatic { + if len(rctx.routeParams.Values) > 0 { + rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1] + } + } + + } + + return nil +} + +func (n *node) findEdge(ntyp nodeTyp, label byte) *node { + nds := n.children[ntyp] + num := len(nds) + idx := 0 + + switch ntyp { + case ntStatic, ntParam, ntRegexp: + i, j := 0, num-1 + for i <= j { + idx = i + (j-i)/2 + if label > nds[idx].label { + i = idx + 1 + } else if label < nds[idx].label { + j = idx - 1 + } else { + i = num // breaks cond + } + } + if nds[idx].label != label { + return nil + } + return nds[idx] + + default: // catch all + return nds[idx] + } +} + +func (n *node) isLeaf() bool { + return n.endpoints != nil +} + +func (n *node) findPattern(pattern string) bool { + nn := n + for _, nds := range nn.children { + if len(nds) == 0 { + continue + } + + n = nn.findEdge(nds[0].typ, pattern[0]) + if n == nil { + continue + } + + var idx int + var xpattern string + + switch n.typ { + case ntStatic: + idx = longestPrefix(pattern, n.prefix) + if idx < len(n.prefix) { + continue + } + + case ntParam, ntRegexp: + idx = strings.IndexByte(pattern, '}') + 1 + + case ntCatchAll: + idx = longestPrefix(pattern, "*") + + default: + panic("chi: unknown node type") + } + + xpattern = pattern[idx:] + if len(xpattern) == 0 { + return true + } + + return n.findPattern(xpattern) + } + return false +} + +func (n *node) routes() []Route { + rts := []Route{} + + n.walk(func(eps endpoints, subroutes Routes) bool { + if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil { + return false + } + + // Group methodHandlers by unique patterns + pats := make(map[string]endpoints) + + for mt, h := range eps { + if h.pattern == "" { + continue + } + p, ok := pats[h.pattern] + if !ok { + p = endpoints{} + pats[h.pattern] = p + } + p[mt] = h + } + + for p, mh := range pats { + hs := make(map[string]http.Handler) + if mh[mALL] != nil && mh[mALL].handler != nil { + hs["*"] = mh[mALL].handler + } + + for mt, h := range mh { + if h.handler == nil { + continue + } + m := methodTypString(mt) + if m == "" { + continue + } + hs[m] = h.handler + } + + rt := Route{subroutes, hs, p} + rts = append(rts, rt) + } + + return false + }) + + return rts +} + +func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool { + // Visit the leaf values if any + if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) { + return true + } + + // Recurse on the children + for _, ns := range n.children { + for _, cn := range ns { + if cn.walk(fn) { + return true + } + } + } + return false +} + +// patNextSegment returns the next segment details from a pattern: +// node type, param key, regexp string, param tail byte, param starting index, param ending index +func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) { + ps := strings.Index(pattern, "{") + ws := strings.Index(pattern, "*") + + if ps < 0 && ws < 0 { + return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing + } + + // Sanity check + if ps >= 0 && ws >= 0 && ws < ps { + panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'") + } + + var tail byte = '/' // Default endpoint tail to / byte + + if ps >= 0 { + // Param/Regexp pattern is next + nt := ntParam + + // Read to closing } taking into account opens and closes in 
+		// the curly-brace count (cc)
+		cc := 0
+		pe := ps
+		for i, c := range pattern[ps:] {
+			if c == '{' {
+				cc++
+			} else if c == '}' {
+				cc--
+				if cc == 0 {
+					pe = ps + i
+					break
+				}
+			}
+		}
+		if pe == ps {
+			panic("chi: route param closing delimiter '}' is missing")
+		}
+
+		key := pattern[ps+1 : pe]
+		pe++ // set end to next position
+
+		if pe < len(pattern) {
+			tail = pattern[pe]
+		}
+
+		key, rexpat, isRegexp := strings.Cut(key, ":")
+		if isRegexp {
+			nt = ntRegexp
+		}
+
+		if len(rexpat) > 0 {
+			if rexpat[0] != '^' {
+				rexpat = "^" + rexpat
+			}
+			if rexpat[len(rexpat)-1] != '$' {
+				rexpat += "$"
+			}
+		}
+
+		return nt, key, rexpat, tail, ps, pe
+	}
+
+	// Wildcard pattern as finale
+	if ws < len(pattern)-1 {
+		panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
+	}
+	return ntCatchAll, "*", "", 0, ws, len(pattern)
+}
+
+func patParamKeys(pattern string) []string {
+	pat := pattern
+	paramKeys := []string{}
+	for {
+		ptyp, paramKey, _, _, _, e := patNextSegment(pat)
+		if ptyp == ntStatic {
+			return paramKeys
+		}
+		for i := 0; i < len(paramKeys); i++ {
+			if paramKeys[i] == paramKey {
+				panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey))
+			}
+		}
+		paramKeys = append(paramKeys, paramKey)
+		pat = pat[e:]
+	}
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+func methodTypString(method methodTyp) string {
+	for s, t := range methodMap {
+		if method == t {
+			return s
+		}
+	}
+	return ""
+}
+
+type nodes []*node
+
+// Sort the list of nodes by label
+func (ns nodes) Sort()              { sort.Sort(ns); ns.tailSort() }
+func (ns nodes) Len() int           { return len(ns) }
+func (ns nodes) Swap(i, j int)      { ns[i], ns[j] = ns[j], ns[i] }
+func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
+
+// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
+// The list order determines the traversal order.
+func (ns nodes) tailSort() {
+	for i := len(ns) - 1; i >= 0; i-- {
+		if ns[i].typ > ntStatic && ns[i].tail == '/' {
+			ns.Swap(i, len(ns)-1)
+			return
+		}
+	}
+}
+
+func (ns nodes) findEdge(label byte) *node {
+	num := len(ns)
+	idx := 0
+	i, j := 0, num-1
+	for i <= j {
+		idx = i + (j-i)/2
+		if label > ns[idx].label {
+			i = idx + 1
+		} else if label < ns[idx].label {
+			j = idx - 1
+		} else {
+			i = num // breaks cond
+		}
+	}
+	if ns[idx].label != label {
+		return nil
+	}
+	return ns[idx]
+}
+
+// Route describes the details of a routing handler.
+// Handlers map key is an HTTP method
+type Route struct {
+	SubRoutes Routes
+	Handlers  map[string]http.Handler
+	Pattern   string
+}
+
+// WalkFunc is the type of the function called for each method and route visited by Walk.
+type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
+
+// Walk walks any router tree that implements Routes interface.
+func Walk(r Routes, walkFn WalkFunc) error {
+	return walk(r, walkFn, "")
+}
+
+func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
+	for _, route := range r.Routes() {
+		mws := make([]func(http.Handler) http.Handler, len(parentMw))
+		copy(mws, parentMw)
+		mws = append(mws, r.Middlewares()...)
+ + if route.SubRoutes != nil { + if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil { + return err + } + continue + } + + for method, handler := range route.Handlers { + if method == "*" { + // Ignore a "catchAll" method, since we pass down all the specific methods for each route. + continue + } + + fullRoute := parentRoute + route.Pattern + fullRoute = strings.Replace(fullRoute, "/*/", "/", -1) + + if chain, ok := handler.(*ChainHandler); ok { + if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil { + return err + } + } else { + if err := walkFn(method, fullRoute, handler, mws...); err != nil { + return err + } + } + } + } + + return nil +} diff --git a/vendor/github.com/go-jose/go-jose/v4/.gitignore b/vendor/github.com/go-jose/go-jose/v4/.gitignore deleted file mode 100644 index eb29ebaefd8..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -jose-util/jose-util -jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml b/vendor/github.com/go-jose/go-jose/v4/.golangci.yml deleted file mode 100644 index 2a577a8f95b..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/.golangci.yml +++ /dev/null @@ -1,53 +0,0 @@ -# https://github.com/golangci/golangci-lint - -run: - skip-files: - - doc_test.go - modules-download-mode: readonly - -linters: - enable-all: true - disable: - - gochecknoglobals - - goconst - - lll - - maligned - - nakedret - - scopelint - - unparam - - funlen # added in 1.18 (requires go-jose changes before it can be enabled) - -linters-settings: - gocyclo: - min-complexity: 35 - -issues: - exclude-rules: - - text: "don't use ALL_CAPS in Go names" - linters: - - golint - - text: "hardcoded credentials" - linters: - - gosec - - text: "weak cryptographic primitive" - linters: - - gosec - - path: json/ - linters: - - dupl - - errcheck - - gocritic - - gocyclo - - golint - - govet - - ineffassign - - staticcheck - - structcheck - - stylecheck - - unused - - path: _test\.go - linters: - - scopelint - - path: jwk.go - linters: - - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v4/.travis.yml b/vendor/github.com/go-jose/go-jose/v4/.travis.yml deleted file mode 100644 index 48de631b003..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -language: go - -matrix: - fast_finish: true - allow_failures: - - go: tip - -go: - - "1.13.x" - - "1.14.x" - - tip - -before_script: - - export PATH=$HOME/.local/bin:$PATH - -before_install: - - go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 - - pip install cram --user - -script: - - go test -v -covermode=count -coverprofile=profile.cov . - - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner - - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher - - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt - - go test -v ./json # no coverage for forked encoding/json package - - golangci-lint run - - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util - - cd .. 
- -after_success: - - gocovmerge *.cov */*.cov > merged.coverprofile - - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md deleted file mode 100644 index 6f717dbd86e..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -# v4.0.4 - -## Fixed - - - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a - breaking change. See #136 / #137. - -# v4.0.3 - -## Changed - - - Allow unmarshalling JSONWebKeySets with unsupported key types (#130) - - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129) - - Dependency updates - -# v4.0.2 - -## Changed - - - Improved documentation of Verify() to note that JSONWebKeySet is a supported - argument type (#104) - - Defined exported error values for missing x5c header and unsupported elliptic - curves error cases (#117) - -# v4.0.1 - -## Fixed - - - An attacker could send a JWE containing compressed data that used large - amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`. - Those functions now return an error if the decompressed data would exceed - 250kB or 10x the compressed size (whichever is larger). Thanks to - Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj) - for reporting. - -# v4.0.0 - -This release makes some breaking changes in order to more thoroughly -address the vulnerabilities discussed in [Three New Attacks Against JSON Web -Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot -token". - -## Changed - - - Limit JWT encryption types (exclude password or public key types) (#78) - - Enforce minimum length for HMAC keys (#85) - - jwt: match any audience in a list, rather than requiring all audiences (#81) - - jwt: accept only Compact Serialization (#75) - - jws: Add expected algorithms for signatures (#74) - - Require specifying expected algorithms for ParseEncrypted, - ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned, - jwt.ParseSignedAndEncrypted (#69, #74) - - Usually there is a small, known set of appropriate algorithms for a program - to use and it's a mistake to allow unexpected algorithms. For instance the - "billion hash attack" relies in part on programs accepting the PBES2 - encryption algorithm and doing the necessary work even if they weren't - specifically configured to allow PBES2. - - Revert "Strip padding off base64 strings" (#82) - - The specs require base64url encoding without padding. - - Minimum supported Go version is now 1.21 - -## Added - - - ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON. - - These allow parsing a specific serialization, as opposed to ParseSigned and - ParseEncrypted, which try to automatically detect which serialization was - provided. It's common to require a specific serialization for a specific - protocol - for instance JWT requires Compact serialization. 
- -[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf - -# v3.0.2 - -## Fixed - - - DecryptMulti: handle decompression error (#19) - -## Changed - - - jwe/CompactSerialize: improve performance (#67) - - Increase the default number of PBKDF2 iterations to 600k (#48) - - Return the proper algorithm for ECDSA keys (#45) - -## Added - - - Add Thumbprint support for opaque signers (#38) - -# v3.0.1 - -## Fixed - - - Security issue: an attacker specifying a large "p2c" value can cause - JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large - amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the - disclosure and to Tom Tervoort for originally publishing the category of attack. - https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf diff --git a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md deleted file mode 100644 index 4b4805add65..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/CONTRIBUTING.md +++ /dev/null @@ -1,9 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. diff --git a/vendor/github.com/go-jose/go-jose/v4/README.md b/vendor/github.com/go-jose/go-jose/v4/README.md deleted file mode 100644 index 02b5749546b..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/README.md +++ /dev/null @@ -1,106 +0,0 @@ -# Go JOSE - -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4) -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) -[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) -[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v4)](https://github.com/go-jose/go-jose/actions) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. This includes support for JSON Web Encryption, -JSON Web Signature, and JSON Web Token standards. - -## Overview - -The implementation follows the -[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516), -[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. -Tables of supported algorithms are shown below. The library supports both -the compact and JWS/JWE JSON Serialization formats, and has optional support for -multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). 
-This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. - -### Versions - -[Version 4](https://github.com/go-jose/go-jose) -([branch](https://github.com/go-jose/go-jose/tree/main), -[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version: - - import "github.com/go-jose/go-jose/v4" - -The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which -are still useable but not actively developed anymore. - -Version 3, in this repo, is still receiving security fixes but not functionality -updates. - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518) -standard where possible. The Godoc reference has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - Ed25519 | EdDSA2 - -2. Only available in version 2 of the package - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which -allows attaching a key id. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) - ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) - AES, HMAC | []byte - -1. Only available in version 2 or later of the package - -## Examples - -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4) -[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt) - -Examples can be found in the Godoc -reference for this package. The -[`jose-util`](https://github.com/go-jose/go-jose/tree/main/jose-util) -subdirectory also contains a small command-line utility which might be useful -as an example as well. 
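Aside: for context on the API surface the README removed above documented, signing and verifying with go-jose v4 follows the shape below. This is an illustrative sketch against the documented v4 API, not code from this repository; note how v4's ParseSigned makes the caller list the signature algorithms it expects, per the CHANGELOG entries above.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Sign a payload with RS256 (RSASSA-PKCS#1v1.5, from the algorithm table above).
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte("hello"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// v4 requires stating the expected signature algorithms up front.
	parsed, err := jose.ParseSigned(compact, []jose.SignatureAlgorithm{jose.RS256})
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(&key.PublicKey)
	fmt.Println(string(payload), err)
}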
diff --git a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md b/vendor/github.com/go-jose/go-jose/v4/SECURITY.md deleted file mode 100644 index 2f18a75a822..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/SECURITY.md +++ /dev/null @@ -1,13 +0,0 @@ -# Security Policy -This document explains how to contact the Let's Encrypt security team to report security vulnerabilities. - -## Supported Versions -| Version | Supported | -| ------- | ----------| -| >= v3 | ✓ | -| v2 | ✗ | -| v1 | ✗ | - -## Reporting a vulnerability - -Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email. diff --git a/vendor/github.com/go-jose/go-jose/v4/asymmetric.go b/vendor/github.com/go-jose/go-jose/v4/asymmetric.go deleted file mode 100644 index f8d5774ef56..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/asymmetric.go +++ /dev/null @@ -1,595 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto" - "crypto/aes" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "errors" - "fmt" - "math/big" - - josecipher "github.com/go-jose/go-jose/v4/cipher" - "github.com/go-jose/go-jose/v4/json" -) - -// A generic RSA-based encrypter/verifier -type rsaEncrypterVerifier struct { - publicKey *rsa.PublicKey -} - -// A generic RSA-based decrypter/signer -type rsaDecrypterSigner struct { - privateKey *rsa.PrivateKey -} - -// A generic EC-based encrypter/verifier -type ecEncrypterVerifier struct { - publicKey *ecdsa.PublicKey -} - -type edEncrypterVerifier struct { - publicKey ed25519.PublicKey -} - -// A key generator for ECDH-ES -type ecKeyGenerator struct { - size int - algID string - publicKey *ecdsa.PublicKey -} - -// A generic EC-based decrypter/signer -type ecDecrypterSigner struct { - privateKey *ecdsa.PrivateKey -} - -type edDecrypterSigner struct { - privateKey ed25519.PrivateKey -} - -// newRSARecipient creates recipientKeyInfo based on the given key. -func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &rsaEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newRSASigner creates a recipientSigInfo based on the given key. 
-func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &rsaDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { - if sigAlg != EdDSA { - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &edDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// newECDHRecipient creates recipientKeyInfo based on the given key. -func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch keyAlg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientKeyInfo{}, ErrUnsupportedAlgorithm - } - - if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return recipientKeyInfo{}, errors.New("invalid public key") - } - - return recipientKeyInfo{ - keyAlg: keyAlg, - keyEncrypter: &ecEncrypterVerifier{ - publicKey: publicKey, - }, - }, nil -} - -// newECDSASigner creates a recipientSigInfo based on the given key. -func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { - // Verify that key management algorithm is supported by this encrypter - switch sigAlg { - case ES256, ES384, ES512: - default: - return recipientSigInfo{}, ErrUnsupportedAlgorithm - } - - if privateKey == nil { - return recipientSigInfo{}, errors.New("invalid private key") - } - - return recipientSigInfo{ - sigAlg: sigAlg, - publicKey: staticPublicKey(&JSONWebKey{ - Key: privateKey.Public(), - }), - signer: &ecDecrypterSigner{ - privateKey: privateKey, - }, - }, nil -} - -// Encrypt the given payload and update the object. -func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - encryptedKey, err := ctx.encrypt(cek, alg) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: encryptedKey, - header: &rawHeader{}, - }, nil -} - -// Encrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { - switch alg { - case RSA1_5: - return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) - case RSA_OAEP: - return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) - case RSA_OAEP_256: - return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Decrypt the given payload and return the content encryption key. 
-func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) -} - -// Decrypt the given payload. Based on the key encryption algorithm, -// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). -func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { - // Note: The random reader on decrypt operations is only used for blinding, - // so stubbing is meanlingless (hence the direct use of rand.Reader). - switch alg { - case RSA1_5: - defer func() { - // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload - // because of an index out of bounds error, which we want to ignore. - // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() - // only exists for preventing crashes with unpatched versions. - // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k - // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 - _ = recover() - }() - - // Perform some input validation. - keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 - if keyBytes != len(jek) { - // Input size is incorrect, the encrypted payload should always match - // the size of the public modulus (e.g. using a 2048 bit key will - // produce 256 bytes of output). Reject this since it's invalid input. - return nil, ErrCryptoFailure - } - - cek, _, err := generator.genKey() - if err != nil { - return nil, ErrCryptoFailure - } - - // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to - // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing - // the Million Message Attack on Cryptographic Message Syntax". We are - // therefore deliberately ignoring errors here. - _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) - - return cek, nil - case RSA_OAEP: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - case RSA_OAEP_256: - // Use rand.Reader for RSA blinding - return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) - } - - return nil, ErrUnsupportedAlgorithm -} - -// Sign the given payload -func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return Signature{}, ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - var out []byte - var err error - - switch alg { - case RS256, RS384, RS512: - // TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the - // random parameter is legacy and ignored, and it can be nil. 
- // https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1 - out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) - case PS256, PS384, PS512: - out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthEqualsHash, - }) - } - - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var hash crypto.Hash - - switch alg { - case RS256, PS256: - hash = crypto.SHA256 - case RS384, PS384: - hash = crypto.SHA384 - case RS512, PS512: - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - switch alg { - case RS256, RS384, RS512: - return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature) - case PS256, PS384, PS512: - return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) - } - - return ErrUnsupportedAlgorithm -} - -// Encrypt the given payload and update the object. -func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { - switch alg { - case ECDH_ES: - // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. - return recipientInfo{ - header: &rawHeader{}, - }, nil - case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - default: - return recipientInfo{}, ErrUnsupportedAlgorithm - } - - generator := ecKeyGenerator{ - algID: string(alg), - publicKey: ctx.publicKey, - } - - switch alg { - case ECDH_ES_A128KW: - generator.size = 16 - case ECDH_ES_A192KW: - generator.size = 24 - case ECDH_ES_A256KW: - generator.size = 32 - } - - kek, header, err := generator.genKey() - if err != nil { - return recipientInfo{}, err - } - - block, err := aes.NewCipher(kek) - if err != nil { - return recipientInfo{}, err - } - - jek, err := josecipher.KeyWrap(block, cek) - if err != nil { - return recipientInfo{}, err - } - - return recipientInfo{ - encryptedKey: jek, - header: &header, - }, nil -} - -// Get key size for EC key generator -func (ctx ecKeyGenerator) keySize() int { - return ctx.size -} - -// Get a content encryption key for ECDH-ES -func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { - priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) - if err != nil { - return nil, rawHeader{}, err - } - - out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) - - b, err := json.Marshal(&JSONWebKey{ - Key: &priv.PublicKey, - }) - if err != nil { - return nil, nil, err - } - - headers := rawHeader{ - headerEPK: makeRawMessage(b), - } - - return out, headers, nil -} - -// Decrypt the given payload and return the content encryption key. 
-func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { - epk, err := headers.getEPK() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid epk header") - } - if epk == nil { - return nil, errors.New("go-jose/go-jose: missing epk header") - } - - publicKey, ok := epk.Key.(*ecdsa.PublicKey) - if publicKey == nil || !ok { - return nil, errors.New("go-jose/go-jose: invalid epk header") - } - - if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return nil, errors.New("go-jose/go-jose: invalid public key in epk header") - } - - apuData, err := headers.getAPU() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid apu header") - } - apvData, err := headers.getAPV() - if err != nil { - return nil, errors.New("go-jose/go-jose: invalid apv header") - } - - deriveKey := func(algID string, size int) []byte { - return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) - } - - var keySize int - - algorithm := headers.getAlgorithm() - switch algorithm { - case ECDH_ES: - // ECDH-ES uses direct key agreement, no key unwrapping necessary. - return deriveKey(string(headers.getEncryption()), generator.keySize()), nil - case ECDH_ES_A128KW: - keySize = 16 - case ECDH_ES_A192KW: - keySize = 24 - case ECDH_ES_A256KW: - keySize = 32 - default: - return nil, ErrUnsupportedAlgorithm - } - - key := deriveKey(string(algorithm), keySize) - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - return josecipher.KeyUnwrap(block, recipient.encryptedKey) -} - -func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - if alg != EdDSA { - return Signature{}, ErrUnsupportedAlgorithm - } - - sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) - if err != nil { - return Signature{}, err - } - - return Signature{ - Signature: sig, - protected: &rawHeader{}, - }, nil -} - -func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - if alg != EdDSA { - return ErrUnsupportedAlgorithm - } - ok := ed25519.Verify(ctx.publicKey, payload, signature) - if !ok { - return errors.New("go-jose/go-jose: ed25519 signature failed to verify") - } - return nil -} - -// Sign the given payload -func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { - var expectedBitSize int - var hash crypto.Hash - - switch alg { - case ES256: - expectedBitSize = 256 - hash = crypto.SHA256 - case ES384: - expectedBitSize = 384 - hash = crypto.SHA384 - case ES512: - expectedBitSize = 521 - hash = crypto.SHA512 - } - - curveBits := ctx.privateKey.Curve.Params().BitSize - if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) - if err != nil { - return Signature{}, err - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes++ - } - - // We serialize the outputs (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. 
- rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) - - return Signature{ - Signature: out, - protected: &rawHeader{}, - }, nil -} - -// Verify the given payload -func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { - var keySize int - var hash crypto.Hash - - switch alg { - case ES256: - keySize = 32 - hash = crypto.SHA256 - case ES384: - keySize = 48 - hash = crypto.SHA384 - case ES512: - keySize = 66 - hash = crypto.SHA512 - default: - return ErrUnsupportedAlgorithm - } - - if len(signature) != 2*keySize { - return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) - } - - hasher := hash.New() - - // According to documentation, Write() on hash never fails - _, _ = hasher.Write(payload) - hashed := hasher.Sum(nil) - - r := big.NewInt(0).SetBytes(signature[:keySize]) - s := big.NewInt(0).SetBytes(signature[keySize:]) - - match := ecdsa.Verify(ctx.publicKey, hashed, r, s) - if !match { - return errors.New("go-jose/go-jose: ecdsa signature failed to verify") - } - - return nil -} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go deleted file mode 100644 index af029cec0ba..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/cipher/cbc_hmac.go +++ /dev/null @@ -1,196 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/sha512" - "crypto/subtle" - "encoding/binary" - "errors" - "hash" -) - -const ( - nonceBytes = 16 -) - -// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. -func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { - keySize := len(key) / 2 - integrityKey := key[:keySize] - encryptionKey := key[keySize:] - - blockCipher, err := newBlockCipher(encryptionKey) - if err != nil { - return nil, err - } - - var hash func() hash.Hash - switch keySize { - case 16: - hash = sha256.New - case 24: - hash = sha512.New384 - case 32: - hash = sha512.New - } - - return &cbcAEAD{ - hash: hash, - blockCipher: blockCipher, - authtagBytes: keySize, - integrityKey: integrityKey, - }, nil -} - -// An AEAD based on CBC+HMAC -type cbcAEAD struct { - hash func() hash.Hash - authtagBytes int - integrityKey []byte - blockCipher cipher.Block -} - -func (ctx *cbcAEAD) NonceSize() int { - return nonceBytes -} - -func (ctx *cbcAEAD) Overhead() int { - // Maximum overhead is block size (for padding) plus auth tag length, where - // the length of the auth tag is equivalent to the key size. 
- return ctx.blockCipher.BlockSize() + ctx.authtagBytes -} - -// Seal encrypts and authenticates the plaintext. -func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { - // Output buffer -- must take care not to mangle plaintext input. - ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] - copy(ciphertext, plaintext) - ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) - - cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) - - cbc.CryptBlocks(ciphertext, ciphertext) - authtag := ctx.computeAuthTag(data, nonce, ciphertext) - - ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) - copy(out, ciphertext) - copy(out[len(ciphertext):], authtag) - - return ret -} - -// Open decrypts and authenticates the ciphertext. -func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { - if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") - } - - offset := len(ciphertext) - ctx.authtagBytes - expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) - match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) - if match != 1 { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") - } - - cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) - - // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, ciphertext[:offset]...) - - if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") - } - - cbc.CryptBlocks(buffer, buffer) - - // Remove padding - plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) - if err != nil { - return nil, err - } - - ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) - copy(out, plaintext) - - return ret, nil -} - -// Compute an authentication tag -func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { - buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) - n := 0 - n += copy(buffer, aad) - n += copy(buffer[n:], nonce) - n += copy(buffer[n:], ciphertext) - binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) - - // According to documentation, Write() on hash.Hash never fails. - hmac := hmac.New(ctx.hash, ctx.integrityKey) - _, _ = hmac.Write(buffer) - - return hmac.Sum(nil)[:ctx.authtagBytes] -} - -// resize ensures that the given slice has a capacity of at least n bytes. -// If the capacity of the slice is less than n, a new slice is allocated -// and the existing data will be copied. 
-func resize(in []byte, n uint64) (head, tail []byte) { - if uint64(cap(in)) >= n { - head = in[:n] - } else { - head = make([]byte, n) - copy(head, in) - } - - tail = head[len(in):] - return -} - -// Apply padding -func padBuffer(buffer []byte, blockSize int) []byte { - missing := blockSize - (len(buffer) % blockSize) - ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) - padding := bytes.Repeat([]byte{byte(missing)}, missing) - copy(out, padding) - return ret -} - -// Remove padding -func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { - if len(buffer)%blockSize != 0 { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - last := buffer[len(buffer)-1] - count := int(last) - - if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - padding := bytes.Repeat([]byte{last}, count) - if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("go-jose/go-jose: invalid padding") - } - - return buffer[:len(buffer)-count], nil -} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go deleted file mode 100644 index f62c3bdba5d..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/cipher/concat_kdf.go +++ /dev/null @@ -1,75 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto" - "encoding/binary" - "hash" - "io" -) - -type concatKDF struct { - z, info []byte - i uint32 - cache []byte - hasher hash.Hash -} - -// NewConcatKDF builds a KDF reader based on the given inputs. -func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { - buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) - n := 0 - n += copy(buffer, algID) - n += copy(buffer[n:], ptyUInfo) - n += copy(buffer[n:], ptyVInfo) - n += copy(buffer[n:], supPubInfo) - copy(buffer[n:], supPrivInfo) - - hasher := hash.New() - - return &concatKDF{ - z: z, - info: buffer, - hasher: hasher, - cache: []byte{}, - i: 1, - } -} - -func (ctx *concatKDF) Read(out []byte) (int, error) { - copied := copy(out, ctx.cache) - ctx.cache = ctx.cache[copied:] - - for copied < len(out) { - ctx.hasher.Reset() - - // Write on a hash.Hash never fails - _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) - _, _ = ctx.hasher.Write(ctx.z) - _, _ = ctx.hasher.Write(ctx.info) - - hash := ctx.hasher.Sum(nil) - chunkCopied := copy(out[copied:], hash) - copied += chunkCopied - ctx.cache = hash[chunkCopied:] - - ctx.i++ - } - - return copied, nil -} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go deleted file mode 100644 index 093c646740b..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/cipher/ecdh_es.go +++ /dev/null @@ -1,86 +0,0 @@ -/*- - * Copyright 2014 Square Inc. 
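// Editor's sketch (hedged): the concatKDF reader deleted above implements the
// Concat KDF from NIST SP 800-56A §5.8.1 with SHA-256 -- the output stream is
// H(counter || Z || OtherInfo) for counter = 1, 2, ... A compact standalone
// equivalent of one full derivation:
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

func concatKDF(z, info []byte, size int) []byte {
	out := make([]byte, 0, size)
	var counter uint32 = 1
	for len(out) < size {
		var ctr [4]byte
		binary.BigEndian.PutUint32(ctr[:], counter) // 32-bit big-endian round counter
		h := sha256.New()
		h.Write(ctr[:])
		h.Write(z)
		h.Write(info)
		out = append(out, h.Sum(nil)...)
		counter++
	}
	return out[:size]
}

func main() {
	key := concatKDF([]byte("shared-secret"), []byte("other-info"), 32)
	fmt.Printf("%x\n", key)
}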
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/binary" -) - -// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. -// It is an error to call this function with a private/public key that are not on the same -// curve. Callers must ensure that the keys are valid before calling this function. Output -// size may be at most 1<<16 bytes (64 KiB). -func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { - if size > 1<<16 { - panic("ECDH-ES output size too large, must be less than or equal to 1<<16") - } - - // algId, partyUInfo, partyVInfo inputs must be prefixed with the length - algID := lengthPrefixed([]byte(alg)) - ptyUInfo := lengthPrefixed(apuData) - ptyVInfo := lengthPrefixed(apvData) - - // suppPubInfo is the encoded length of the output size in bits - supPubInfo := make([]byte, 4) - binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) - - if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { - panic("public key not on same curve as private key") - } - - z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) - zBytes := z.Bytes() - - // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from - // the returned byte array. This can lead to a problem where zBytes will be - // shorter than expected which breaks the key derivation. Therefore we must pad - // to the full length of the expected coordinate here before calling the KDF. - octSize := dSize(priv.Curve) - if len(zBytes) != octSize { - zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) - } - - reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) - key := make([]byte, size) - - // Read on the KDF will never fail - _, _ = reader.Read(key) - - return key -} - -// dSize returns the size in octets for a coordinate on a elliptic curve. -func dSize(curve elliptic.Curve) int { - order := curve.Params().P - bitLen := order.BitLen() - size := bitLen / 8 - if bitLen%8 != 0 { - size++ - } - return size -} - -func lengthPrefixed(data []byte) []byte { - out := make([]byte, len(data)+4) - binary.BigEndian.PutUint32(out, uint32(len(data))) - copy(out[4:], data) - return out -} diff --git a/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go deleted file mode 100644 index b9effbca8a4..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/cipher/key_wrap.go +++ /dev/null @@ -1,109 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
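// Editor's sketch (hedged): DeriveECDHES above computes the shared secret via
// ScalarMult and must re-pad the stripped big.Int coordinate. The stdlib
// crypto/ecdh package (Go 1.20+) returns a fixed-width secret directly, which
// sidesteps that padding concern; illustration only, not go-jose code.
package main

import (
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := ecdh.P256()
	alice, _ := curve.GenerateKey(rand.Reader)
	bob, _ := curve.GenerateKey(rand.Reader)

	// ECDH returns the X coordinate as a fixed-length byte slice; both sides
	// derive the same value, which would then feed the Concat KDF as Z.
	z1, _ := alice.ECDH(bob.PublicKey())
	z2, _ := bob.ECDH(alice.PublicKey())
	fmt.Println(len(z1) == 32, string(z1) == string(z2)) // true true
}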
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "crypto/cipher" - "crypto/subtle" - "encoding/binary" - "errors" -) - -var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} - -// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. -func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { - if len(cek)%8 != 0 { - return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") - } - - n := len(cek) / 8 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], cek[i*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer, defaultIV) - - for t := 0; t < 6*n; t++ { - copy(buffer[8:], r[t%n]) - - block.Encrypt(buffer, buffer) - - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] ^= tBytes[i] - } - copy(r[t%n], buffer[8:]) - } - - out := make([]byte, (n+1)*8) - copy(out, buffer[:8]) - for i := range r { - copy(out[(i+1)*8:], r[i]) - } - - return out, nil -} - -// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. -func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { - if len(ciphertext)%8 != 0 { - return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") - } - - n := (len(ciphertext) / 8) - 1 - r := make([][]byte, n) - - for i := range r { - r[i] = make([]byte, 8) - copy(r[i], ciphertext[(i+1)*8:]) - } - - buffer := make([]byte, 16) - tBytes := make([]byte, 8) - copy(buffer[:8], ciphertext[:8]) - - for t := 6*n - 1; t >= 0; t-- { - binary.BigEndian.PutUint64(tBytes, uint64(t+1)) - - for i := 0; i < 8; i++ { - buffer[i] ^= tBytes[i] - } - copy(buffer[8:], r[t%n]) - - block.Decrypt(buffer, buffer) - - copy(r[t%n], buffer[8:]) - } - - if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("go-jose/go-jose: failed to unwrap key") - } - - out := make([]byte, n*8) - for i := range r { - copy(out[i*8:], r[i]) - } - - return out, nil -} diff --git a/vendor/github.com/go-jose/go-jose/v4/crypter.go b/vendor/github.com/go-jose/go-jose/v4/crypter.go deleted file mode 100644 index d81b03b4474..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/crypter.go +++ /dev/null @@ -1,599 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rsa" - "errors" - "fmt" - - "github.com/go-jose/go-jose/v4/json" -) - -// Encrypter represents an encrypter which produces an encrypted JWE object. 
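// Editor's sketch (hedged): usage of the RFC 3394 key-wrap helpers deleted
// above, assuming the exported josecipher package path shown in the diff.
// A 16-byte CEK wraps to 24 bytes: the 8-byte integrity block plus the key.
package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "github.com/go-jose/go-jose/v4/cipher"
)

func main() {
	kek := make([]byte, 16) // key-encryption key, e.g. for A128KW
	cek := make([]byte, 16) // content-encryption key to protect
	_, _ = rand.Read(kek)
	_, _ = rand.Read(cek)

	block, _ := aes.NewCipher(kek)
	wrapped, _ := josecipher.KeyWrap(block, cek)
	unwrapped, _ := josecipher.KeyUnwrap(block, wrapped)
	fmt.Println(len(wrapped), string(unwrapped) == string(cek)) // 24 true
}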
-type Encrypter interface { - Encrypt(plaintext []byte) (*JSONWebEncryption, error) - EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) - Options() EncrypterOptions -} - -// A generic content cipher -type contentCipher interface { - keySize() int - encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) - decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) -} - -// A key generator (for generating/getting a CEK) -type keyGenerator interface { - keySize() int - genKey() ([]byte, rawHeader, error) -} - -// A generic key encrypter -type keyEncrypter interface { - encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key -} - -// A generic key decrypter -type keyDecrypter interface { - decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key -} - -// A generic encrypter based on the given key encrypter and content cipher. -type genericEncrypter struct { - contentAlg ContentEncryption - compressionAlg CompressionAlgorithm - cipher contentCipher - recipients []recipientKeyInfo - keyGenerator keyGenerator - extraHeaders map[HeaderKey]interface{} -} - -type recipientKeyInfo struct { - keyID string - keyAlg KeyAlgorithm - keyEncrypter keyEncrypter -} - -// EncrypterOptions represents options that can be set on new encrypters. -type EncrypterOptions struct { - Compression CompressionAlgorithm - - // Optional map of name/value pairs to be inserted into the protected - // header of a JWS object. Some specifications which make use of - // JWS require additional values here. - // - // Values will be serialized by [json.Marshal] and must be valid inputs to - // that function. - // - // [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal - ExtraHeaders map[HeaderKey]interface{} -} - -// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it -// if necessary, and returns the updated EncrypterOptions. -// -// The v parameter will be serialized by [json.Marshal] and must be a valid -// input to that function. -// -// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal -func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { - if eo.ExtraHeaders == nil { - eo.ExtraHeaders = map[HeaderKey]interface{}{} - } - eo.ExtraHeaders[k] = v - return eo -} - -// WithContentType adds a content type ("cty") header and returns the updated -// EncrypterOptions. -func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderContentType, contentType) -} - -// WithType adds a type ("typ") header and returns the updated EncrypterOptions. -func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { - return eo.WithHeader(HeaderType, typ) -} - -// Recipient represents an algorithm/key to encrypt messages to. -// -// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used -// on the password-based encryption algorithms PBES2-HS256+A128KW, -// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe -// default of 100000 will be used for the count and a 128-bit random salt will -// be generated. -type Recipient struct { - Algorithm KeyAlgorithm - // Key must have one of these types: - // - ed25519.PublicKey - // - *ecdsa.PublicKey - // - *rsa.PublicKey - // - *JSONWebKey - // - JSONWebKey - // - []byte (a symmetric key) - // - Any type that satisfies the OpaqueKeyEncrypter interface - // - // The type of Key must match the value of Algorithm. 
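// Editor's sketch (hedged): how the Encrypter/EncrypterOptions/Recipient API
// declared above is used from the public go-jose v4 surface -- a single
// RSA-OAEP + A128GCM recipient with an extra "typ" protected header. Key
// generation here is illustrative only.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	key, _ := rsa.GenerateKey(rand.Reader, 2048)

	opts := (&jose.EncrypterOptions{}).WithType("JWE")
	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &key.PublicKey},
		opts,
	)
	if err != nil {
		panic(err)
	}

	obj, _ := enc.Encrypt([]byte("hello"))
	compact, _ := obj.CompactSerialize()
	fmt.Println(compact[:20], "...") // eyJhbGciOi... (compact JWE)
}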
- Key interface{} - KeyID string - PBES2Count int - PBES2Salt []byte -} - -// NewEncrypter creates an appropriate encrypter based on the key type -func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: getContentCipher(enc), - } - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - if encrypter.cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - - var keyID string - var rawKey interface{} - switch encryptionKey := rcpt.Key.(type) { - case JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case *JSONWebKey: - keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key - case OpaqueKeyEncrypter: - keyID, rawKey = encryptionKey.KeyID(), encryptionKey - default: - rawKey = encryptionKey - } - - switch rcpt.Algorithm { - case DIRECT: - // Direct encryption mode must be treated differently - keyBytes, ok := rawKey.([]byte) - if !ok { - return nil, ErrUnsupportedKeyType - } - if encrypter.cipher.keySize() != len(keyBytes) { - return nil, ErrInvalidKeySize - } - encrypter.keyGenerator = staticKeyGenerator{ - key: keyBytes, - } - recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - case ECDH_ES: - // ECDH-ES (w/o key wrapping) is similar to DIRECT mode - keyDSA, ok := rawKey.(*ecdsa.PublicKey) - if !ok { - return nil, ErrUnsupportedKeyType - } - encrypter.keyGenerator = ecKeyGenerator{ - size: encrypter.cipher.keySize(), - algID: string(enc), - publicKey: keyDSA, - } - recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA) - recipientInfo.keyID = keyID - if rcpt.KeyID != "" { - recipientInfo.keyID = rcpt.KeyID - } - encrypter.recipients = []recipientKeyInfo{recipientInfo} - return encrypter, nil - default: - // Can just add a standard recipient - encrypter.keyGenerator = randomKeyGenerator{ - size: encrypter.cipher.keySize(), - } - err := encrypter.addRecipient(rcpt) - return encrypter, err - } -} - -// NewMultiEncrypter creates a multi-encrypter based on the given parameters -func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { - cipher := getContentCipher(enc) - - if cipher == nil { - return nil, ErrUnsupportedAlgorithm - } - if len(rcpts) == 0 { - return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") - } - - encrypter := &genericEncrypter{ - contentAlg: enc, - recipients: []recipientKeyInfo{}, - cipher: cipher, - keyGenerator: randomKeyGenerator{ - size: cipher.keySize(), - }, - } - - if opts != nil { - encrypter.compressionAlg = opts.Compression - encrypter.extraHeaders = opts.ExtraHeaders - } - - for _, recipient := range rcpts { - err := encrypter.addRecipient(recipient) - if err != nil { - return nil, err - } - } - - return encrypter, nil -} - -func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { - var recipientInfo recipientKeyInfo - - switch recipient.Algorithm { - case DIRECT, ECDH_ES: - return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) - } - - recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) - if recipient.KeyID != "" { - recipientInfo.keyID = recipient.KeyID - } - - switch 
recipient.Algorithm { - case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: - if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { - sr.p2c = recipient.PBES2Count - sr.p2s = recipient.PBES2Salt - } - } - - if err == nil { - ctx.recipients = append(ctx.recipients, recipientInfo) - } - return err -} - -func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { - switch encryptionKey := encryptionKey.(type) { - case *rsa.PublicKey: - return newRSARecipient(alg, encryptionKey) - case *ecdsa.PublicKey: - return newECDHRecipient(alg, encryptionKey) - case []byte: - return newSymmetricRecipient(alg, encryptionKey) - case string: - return newSymmetricRecipient(alg, []byte(encryptionKey)) - case *JSONWebKey: - recipient, err := makeJWERecipient(alg, encryptionKey.Key) - recipient.keyID = encryptionKey.KeyID - return recipient, err - case OpaqueKeyEncrypter: - return newOpaqueKeyEncrypter(alg, encryptionKey) - } - return recipientKeyInfo{}, ErrUnsupportedKeyType -} - -// newDecrypter creates an appropriate decrypter based on the key type -func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { - switch decryptionKey := decryptionKey.(type) { - case *rsa.PrivateKey: - return &rsaDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case *ecdsa.PrivateKey: - return &ecDecrypterSigner{ - privateKey: decryptionKey, - }, nil - case []byte: - return &symmetricKeyCipher{ - key: decryptionKey, - }, nil - case string: - return &symmetricKeyCipher{ - key: []byte(decryptionKey), - }, nil - case JSONWebKey: - return newDecrypter(decryptionKey.Key) - case *JSONWebKey: - return newDecrypter(decryptionKey.Key) - case OpaqueKeyDecrypter: - return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil - default: - return nil, ErrUnsupportedKeyType - } -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { - return ctx.EncryptWithAuthData(plaintext, nil) -} - -// Implementation of encrypt method producing a JWE object. -func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { - obj := &JSONWebEncryption{} - obj.aad = aad - - obj.protected = &rawHeader{} - err := obj.protected.set(headerEncryption, ctx.contentAlg) - if err != nil { - return nil, err - } - - obj.recipients = make([]recipientInfo, len(ctx.recipients)) - - if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") - } - - cek, headers, err := ctx.keyGenerator.genKey() - if err != nil { - return nil, err - } - - obj.protected.merge(&headers) - - for i, info := range ctx.recipients { - recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) - if err != nil { - return nil, err - } - - err = recipient.header.set(headerAlgorithm, info.keyAlg) - if err != nil { - return nil, err - } - - if info.keyID != "" { - err = recipient.header.set(headerKeyID, info.keyID) - if err != nil { - return nil, err - } - } - obj.recipients[i] = recipient - } - - if len(ctx.recipients) == 1 { - // Move per-recipient headers into main protected header if there's - // only a single recipient. 
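// Editor's sketch (hedged): the multi-recipient path above rejects DIRECT and
// ECDH_ES, so each recipient must use a key-wrapping algorithm, and the
// result requires the full JSON serialization (compact supports only one
// recipient). Illustrative usage of NewMultiEncrypter:
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	k1, _ := rsa.GenerateKey(rand.Reader, 2048)
	k2, _ := rsa.GenerateKey(rand.Reader, 2048)

	enc, err := jose.NewMultiEncrypter(jose.A128GCM, []jose.Recipient{
		{Algorithm: jose.RSA_OAEP, Key: &k1.PublicKey, KeyID: "k1"},
		{Algorithm: jose.RSA_OAEP, Key: &k2.PublicKey, KeyID: "k2"},
	}, nil)
	if err != nil {
		panic(err)
	}

	obj, _ := enc.Encrypt([]byte("hello"))
	fmt.Println(len(obj.FullSerialize()) > 0) // true; one CEK, two wrapped keys
}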
- obj.protected.merge(obj.recipients[0].header) - obj.recipients[0].header = nil - } - - if ctx.compressionAlg != NONE { - plaintext, err = compress(ctx.compressionAlg, plaintext) - if err != nil { - return nil, err - } - - err = obj.protected.set(headerCompression, ctx.compressionAlg) - if err != nil { - return nil, err - } - } - - for k, v := range ctx.extraHeaders { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - (*obj.protected)[k] = makeRawMessage(b) - } - - authData := obj.computeAuthData() - parts, err := ctx.cipher.encrypt(cek, authData, plaintext) - if err != nil { - return nil, err - } - - obj.iv = parts.iv - obj.ciphertext = parts.ciphertext - obj.tag = parts.tag - - return obj, nil -} - -func (ctx *genericEncrypter) Options() EncrypterOptions { - return EncrypterOptions{ - Compression: ctx.compressionAlg, - ExtraHeaders: ctx.extraHeaders, - } -} - -// Decrypt and validate the object and return the plaintext. This -// function does not support multi-recipient. If you desire multi-recipient -// decryption use DecryptMulti instead. -// -// The decryptionKey argument must contain a private or symmetric key -// and must have one of these types: -// - *ecdsa.PrivateKey -// - *rsa.PrivateKey -// - *JSONWebKey -// - JSONWebKey -// - *JSONWebKeySet -// - JSONWebKeySet -// - []byte (a symmetric key) -// - string (a symmetric key) -// - Any type that satisfies the OpaqueKeyDecrypter interface. -// -// Note that ed25519 is only available for signatures, not encryption, so is -// not an option here. -// -// Automatically decompresses plaintext, but returns an error if the decompressed -// data would be >250kB or >10x the size of the compressed data, whichever is larger. -func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { - headers := obj.mergedHeaders(nil) - - if len(obj.recipients) > 1 { - return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") - } - - critical, err := headers.getCritical() - if err != nil { - return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") - } - - key, err := tryJWKS(decryptionKey, obj.Header) - if err != nil { - return nil, err - } - decrypter, err := newDecrypter(key) - if err != nil { - return nil, err - } - - cipher := getContentCipher(headers.getEncryption()) - if cipher == nil { - return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - var plaintext []byte - recipient := obj.recipients[0] - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - } - - if plaintext == nil { - return nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. 
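// Editor's sketch (hedged): a parse-and-decrypt round trip against the
// Decrypt method documented above. In v4, ParseEncrypted takes explicit
// allow-lists of key and content-encryption algorithms.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func main() {
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	enc, _ := jose.NewEncrypter(jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &key.PublicKey}, nil)
	obj, _ := enc.Encrypt([]byte("hello"))
	compact, _ := obj.CompactSerialize()

	parsed, err := jose.ParseEncrypted(compact,
		[]jose.KeyAlgorithm{jose.RSA_OAEP},
		[]jose.ContentEncryption{jose.A128GCM})
	if err != nil {
		panic(err)
	}
	plaintext, err := parsed.Decrypt(key)
	fmt.Println(string(plaintext), err) // hello <nil>
}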
- if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - if err != nil { - return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) - } - } - - return plaintext, nil -} - -// DecryptMulti decrypts and validates the object and returns the plaintexts, -// with support for multiple recipients. It returns the index of the recipient -// for which the decryption was successful, the merged headers for that recipient, -// and the plaintext. -// -// The decryptionKey argument must have one of the types allowed for the -// decryptionKey argument of Decrypt(). -// -// Automatically decompresses plaintext, but returns an error if the decompressed -// data would be >250kB or >3x the size of the compressed data, whichever is larger. -func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { - globalHeaders := obj.mergedHeaders(nil) - - critical, err := globalHeaders.getCritical() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") - } - - if len(critical) > 0 { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") - } - - key, err := tryJWKS(decryptionKey, obj.Header) - if err != nil { - return -1, Header{}, nil, err - } - decrypter, err := newDecrypter(key) - if err != nil { - return -1, Header{}, nil, err - } - - encryption := globalHeaders.getEncryption() - cipher := getContentCipher(encryption) - if cipher == nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) - } - - generator := randomKeyGenerator{ - size: cipher.keySize(), - } - - parts := &aeadParts{ - iv: obj.iv, - ciphertext: obj.ciphertext, - tag: obj.tag, - } - - authData := obj.computeAuthData() - - index := -1 - var plaintext []byte - var headers rawHeader - - for i, recipient := range obj.recipients { - recipientHeaders := obj.mergedHeaders(&recipient) - - cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) - if err == nil { - // Found a valid CEK -- let's try to decrypt. - plaintext, err = cipher.decrypt(cek, authData, parts) - if err == nil { - index = i - headers = recipientHeaders - break - } - } - } - - if plaintext == nil { - return -1, Header{}, nil, ErrCryptoFailure - } - - // The "zip" header parameter may only be present in the protected header. - if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err) - } - } - - sanitized, err := headers.sanitized() - if err != nil { - return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) - } - - return index, sanitized, plaintext, err -} diff --git a/vendor/github.com/go-jose/go-jose/v4/encoding.go b/vendor/github.com/go-jose/go-jose/v4/encoding.go deleted file mode 100644 index 4f6e0d4a5cf..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/encoding.go +++ /dev/null @@ -1,228 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "compress/flate" - "encoding/base64" - "encoding/binary" - "fmt" - "io" - "math/big" - "strings" - "unicode" - - "github.com/go-jose/go-jose/v4/json" -) - -// Helper function to serialize known-good objects. -// Precondition: value is not a nil pointer. -func mustSerializeJSON(value interface{}) []byte { - out, err := json.Marshal(value) - if err != nil { - panic(err) - } - // We never want to serialize the top-level value "null," since it's not a - // valid JOSE message. But if a caller passes in a nil pointer to this method, - // MarshalJSON will happily serialize it as the top-level value "null". If - // that value is then embedded in another operation, for instance by being - // base64-encoded and fed as input to a signing algorithm - // (https://github.com/go-jose/go-jose/issues/22), the result will be - // incorrect. Because this method is intended for known-good objects, and a nil - // pointer is not a known-good object, we are free to panic in this case. - // Note: It's not possible to directly check whether the data pointed at by an - // interface is a nil pointer, so we do this hacky workaround. - // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I - if string(out) == "null" { - panic("Tried to serialize a nil pointer.") - } - return out -} - -// Strip all newlines and whitespace -func stripWhitespace(data string) string { - buf := strings.Builder{} - buf.Grow(len(data)) - for _, r := range data { - if !unicode.IsSpace(r) { - buf.WriteRune(r) - } - } - return buf.String() -} - -// Perform compression based on algorithm -func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return deflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// Perform decompression based on algorithm -func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { - switch algorithm { - case DEFLATE: - return inflate(input) - default: - return nil, ErrUnsupportedAlgorithm - } -} - -// deflate compresses the input. -func deflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - - // Writing to byte buffer, err is always nil - writer, _ := flate.NewWriter(output, 1) - _, _ = io.Copy(writer, bytes.NewBuffer(input)) - - err := writer.Close() - return output.Bytes(), err -} - -// inflate decompresses the input. -// -// Errors if the decompressed data would be >250kB or >10x the size of the -// compressed data, whichever is larger. -func inflate(input []byte) ([]byte, error) { - output := new(bytes.Buffer) - reader := flate.NewReader(bytes.NewBuffer(input)) - - maxCompressedSize := max(250_000, 10*int64(len(input))) - - limit := maxCompressedSize + 1 - n, err := io.CopyN(output, reader, limit) - if err != nil && err != io.EOF { - return nil, err - } - if n == limit { - return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxCompressedSize) - } - - err = reader.Close() - return output.Bytes(), err -} - -// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
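// Editor's sketch (hedged): the bounded inflate above defends against
// decompression bombs by copying at most limit+1 bytes and treating a hit on
// the limit as an error, capping expansion at max(250 kB, 10x compressed
// size). Standalone equivalent:
package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
)

func inflateBounded(input []byte) ([]byte, error) {
	maxSize := int64(250_000)
	if tenX := 10 * int64(len(input)); tenX > maxSize {
		maxSize = tenX
	}

	out := new(bytes.Buffer)
	r := flate.NewReader(bytes.NewReader(input))
	defer r.Close()

	// Copy one byte past the cap; reaching it means the payload is too large.
	n, err := io.CopyN(out, r, maxSize+1)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if n == maxSize+1 {
		return nil, fmt.Errorf("uncompressed data would be too large (>%d bytes)", maxSize)
	}
	return out.Bytes(), nil
}

func main() {
	var buf bytes.Buffer
	w, _ := flate.NewWriter(&buf, flate.BestSpeed) // level 1, as in deflate above
	w.Write([]byte("hello hello hello"))
	w.Close()

	plain, err := inflateBounded(buf.Bytes())
	fmt.Println(string(plain), err) // hello hello hello <nil>
}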
-type byteBuffer struct { - data []byte -} - -func newBuffer(data []byte) *byteBuffer { - if data == nil { - return nil - } - return &byteBuffer{ - data: data, - } -} - -func newFixedSizeBuffer(data []byte, length int) *byteBuffer { - if len(data) > length { - panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") - } - pad := make([]byte, length-len(data)) - return newBuffer(append(pad, data...)) -} - -func newBufferFromInt(num uint64) *byteBuffer { - data := make([]byte, 8) - binary.BigEndian.PutUint64(data, num) - return newBuffer(bytes.TrimLeft(data, "\x00")) -} - -func (b *byteBuffer) MarshalJSON() ([]byte, error) { - return json.Marshal(b.base64()) -} - -func (b *byteBuffer) UnmarshalJSON(data []byte) error { - var encoded string - err := json.Unmarshal(data, &encoded) - if err != nil { - return err - } - - if encoded == "" { - return nil - } - - decoded, err := base64.RawURLEncoding.DecodeString(encoded) - if err != nil { - return err - } - - *b = *newBuffer(decoded) - - return nil -} - -func (b *byteBuffer) base64() string { - return base64.RawURLEncoding.EncodeToString(b.data) -} - -func (b *byteBuffer) bytes() []byte { - // Handling nil here allows us to transparently handle nil slices when serializing. - if b == nil { - return nil - } - return b.data -} - -func (b byteBuffer) bigInt() *big.Int { - return new(big.Int).SetBytes(b.data) -} - -func (b byteBuffer) toInt() int { - return int(b.bigInt().Int64()) -} - -func base64EncodeLen(sl []byte) int { - return base64.RawURLEncoding.EncodedLen(len(sl)) -} - -func base64JoinWithDots(inputs ...[]byte) string { - if len(inputs) == 0 { - return "" - } - - // Count of dots. - totalCount := len(inputs) - 1 - - for _, input := range inputs { - totalCount += base64EncodeLen(input) - } - - out := make([]byte, totalCount) - startEncode := 0 - for i, input := range inputs { - base64.RawURLEncoding.Encode(out[startEncode:], input) - - if i == len(inputs)-1 { - continue - } - - startEncode += base64EncodeLen(input) - out[startEncode] = '.' - startEncode++ - } - - return string(out) -} diff --git a/vendor/github.com/go-jose/go-jose/v4/json/LICENSE b/vendor/github.com/go-jose/go-jose/v4/json/LICENSE deleted file mode 100644 index 74487567632..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
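// Editor's sketch (hedged): base64JoinWithDots above builds the JOSE compact
// serialization in a single allocation; the simple equivalent is unpadded
// base64url segments joined by '.'. Illustration only:
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func joinWithDots(segments ...[]byte) string {
	parts := make([]string, len(segments))
	for i, seg := range segments {
		parts[i] = base64.RawURLEncoding.EncodeToString(seg)
	}
	return strings.Join(parts, ".")
}

func main() {
	header := []byte(`{"alg":"dir","enc":"A128GCM"}`)
	// header..iv.ciphertext.tag -- the five-part compact JWE shape.
	fmt.Println(joinWithDots(header, nil, []byte("iv"), []byte("ct"), []byte("tag")))
}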
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-jose/go-jose/v4/json/README.md b/vendor/github.com/go-jose/go-jose/v4/json/README.md deleted file mode 100644 index 86de5e5581f..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/github.com/go-jose/go-jose/v4/json/decode.go b/vendor/github.com/go-jose/go-jose/v4/json/decode.go deleted file mode 100644 index 50634dd8478..00000000000 --- a/vendor/github.com/go-jose/go-jose/v4/json/decode.go +++ /dev/null @@ -1,1216 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "math" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. 
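// Editor's sketch (hedged): the fork's README above says duplicate object
// keys are rejected outright, where stock encoding/json silently keeps the
// last value. A standalone checker with the same intent, written against the
// stdlib token stream (not the fork's internals):
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func checkNoDuplicateKeys(dec *json.Decoder) error {
	tok, err := dec.Token()
	if err != nil {
		return err
	}
	switch delim, _ := tok.(json.Delim); delim {
	case '{':
		seen := map[string]bool{}
		for dec.More() {
			keyTok, err := dec.Token() // object keys are string tokens
			if err != nil {
				return err
			}
			key := keyTok.(string)
			if seen[key] {
				return fmt.Errorf("json: duplicate key '%s' in object", key)
			}
			seen[key] = true
			if err := checkNoDuplicateKeys(dec); err != nil { // recurse into value
				return err
			}
		}
		_, err = dec.Token() // consume closing '}'
		return err
	case '[':
		for dec.More() {
			if err := checkNoDuplicateKeys(dec); err != nil {
				return err
			}
		}
		_, err = dec.Token() // consume closing ']'
		return err
	}
	return nil // scalar value, nothing to check
}

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"a":1,"a":2}`))
	fmt.Println(checkNoDuplicateKeys(dec)) // json: duplicate key 'a' in object
}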
-// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. -// -// To unmarshal a JSON object into a string-keyed map, Unmarshal first -// establishes a map to use, If the map is nil, Unmarshal allocates a new map. -// Otherwise Unmarshal reuses the existing map, keeping existing entries. -// Unmarshal then stores key-value pairs from the JSON object into the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// “not present,” unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) 
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -type NumberUnmarshalType int - -const ( - // unmarshal a JSON number into an interface{} as a float64 - UnmarshalFloat NumberUnmarshalType = iota - // unmarshal a JSON number into an interface{} as a `json.Number` - UnmarshalJSONNumber - // unmarshal a JSON number into an interface{} as a int64 - // if value is an integer otherwise float64 - UnmarshalIntOrFloat -) - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - numberType NumberUnmarshalType -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. 
-func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
-func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. 
- u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - keys := map[string]bool{} - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Check for duplicate keys. - _, ok = keys[key] - if !ok { - keys[key] = true - } else { - d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, []byte(key)) { - f = ff - break - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). 
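// Editor's sketch (hedged): the field lookup above uses bytes.Equal, i.e. an
// exact-match-only policy, which is the fork's other departure from stock
// encoding/json. The stdlib falls back to case-insensitive matching, as this
// standalone demo shows:
package main

import (
	"encoding/json"
	"fmt"
)

type msg struct {
	Alg string `json:"alg"`
}

func main() {
	var m msg
	// Stock encoding/json still populates Alg from "ALG" via its
	// case-insensitive fallback; the vendored fork would leave it empty.
	_ = json.Unmarshal([]byte(`{"ALG":"ES256"}`), &m)
	fmt.Printf("%+v\n", m) // {Alg:ES256} under stock encoding/json
}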
-func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64, int64 or a Number -// depending on d.numberDecodeType. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - switch d.numberType { - - case UnmarshalJSONNumber: - return Number(s), nil - case UnmarshalIntOrFloat: - v, err := strconv.ParseInt(s, 10, 64) - if err == nil { - return v, nil - } - - // tries to parse integer number in scientific notation - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - - // if it has no decimal value use int64 - if fi, fd := math.Modf(f); fd == 0.0 { - return int64(fi), nil - } - return f, nil - default: - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil - } - -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), 
-
-// literalStore decodes a literal stored in item into v.
-//
-// fromQuoted indicates whether this literal came from unwrapping a
-// string from the ",string" struct tag option. this is used only to
-// produce more helpful error messages.
-func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
-	// Check for unmarshaler.
-	if len(item) == 0 {
-		//Empty string given
-		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-		return
-	}
-	wantptr := item[0] == 'n' // null
-	u, ut, pv := d.indirect(v, wantptr)
-	if u != nil {
-		err := u.UnmarshalJSON(item)
-		if err != nil {
-			d.error(err)
-		}
-		return
-	}
-	if ut != nil {
-		if item[0] != '"' {
-			if fromQuoted {
-				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
-			}
-			return
-		}
-		s, ok := unquoteBytes(item)
-		if !ok {
-			if fromQuoted {
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.error(errPhase)
-			}
-		}
-		err := ut.UnmarshalText(s)
-		if err != nil {
-			d.error(err)
-		}
-		return
-	}
-
-	v = pv
-
-	switch c := item[0]; c {
-	case 'n': // null
-		switch v.Kind() {
-		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
-			v.Set(reflect.Zero(v.Type()))
-			// otherwise, ignore null for primitives/string
-		}
-	case 't', 'f': // true, false
-		value := c == 't'
-		switch v.Kind() {
-		default:
-			if fromQuoted {
-				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
-			}
-		case reflect.Bool:
-			v.SetBool(value)
-		case reflect.Interface:
-			if v.NumMethod() == 0 {
-				v.Set(reflect.ValueOf(value))
-			} else {
-				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
-			}
-		}
-
-	case '"': // string
-		s, ok := unquoteBytes(item)
-		if !ok {
-			if fromQuoted {
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.error(errPhase)
-			}
-		}
-		switch v.Kind() {
-		default:
-			d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
-		case reflect.Slice:
-			if v.Type().Elem().Kind() != reflect.Uint8 {
-				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
-				break
-			}
-			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
-			n, err := base64.StdEncoding.Decode(b, s)
-			if err != nil {
-				d.saveError(err)
-				break
-			}
-			v.SetBytes(b[:n])
-		case reflect.String:
-			v.SetString(string(s))
-		case reflect.Interface:
-			if v.NumMethod() == 0 {
-				v.Set(reflect.ValueOf(string(s)))
-			} else {
-				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
-			}
-		}
-
-	default: // number
-		if c != '-' && (c < '0' || c > '9') {
-			if fromQuoted {
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.error(errPhase)
-			}
-		}
-		s := string(item)
-		switch v.Kind() {
-		default:
-			if v.Kind() == reflect.String && v.Type() == numberType {
-				v.SetString(s)
-				if !isValidNumber(s) {
-					d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
-				}
-				break
-			}
-			if fromQuoted {
-				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
-			} else {
-				d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
-			}
-		case reflect.Interface:
-			n, err := d.convertNumber(s)
-			if err != nil {
-				d.saveError(err)
-				break
-			}
-			if v.NumMethod() != 0 {
-				d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
-				break
-			}
-			v.Set(reflect.ValueOf(n))
-
-		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-			n, err := strconv.ParseInt(s, 10, 64)
-			if err != nil || v.OverflowInt(n) {
-				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
-				break
-			}
-			v.SetInt(n)
-
-		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-			n, err := strconv.ParseUint(s, 10, 64)
-			if err != nil || v.OverflowUint(n) {
-				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
-				break
-			}
-			v.SetUint(n)
-
-		case reflect.Float32, reflect.Float64:
-			n, err := strconv.ParseFloat(s, v.Type().Bits())
-			if err != nil || v.OverflowFloat(n) {
-				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
-				break
-			}
-			v.SetFloat(n)
-		}
-	}
-}
-
-// The xxxInterface routines build up a value to be stored
-// in an empty interface. They are not strictly necessary,
-// but they avoid the weight of reflection in this common case.
-
-// valueInterface is like value but returns interface{}
-func (d *decodeState) valueInterface() interface{} {
-	switch d.scanWhile(scanSkipSpace) {
-	default:
-		d.error(errPhase)
-		panic("unreachable")
-	case scanBeginArray:
-		return d.arrayInterface()
-	case scanBeginObject:
-		return d.objectInterface()
-	case scanBeginLiteral:
-		return d.literalInterface()
-	}
-}
-
-// arrayInterface is like array but returns []interface{}.
-func (d *decodeState) arrayInterface() []interface{} {
-	var v = make([]interface{}, 0)
-	for {
-		// Look ahead for ] - can only happen on first iteration.
-		op := d.scanWhile(scanSkipSpace)
-		if op == scanEndArray {
-			break
-		}
-
-		// Back up so d.value can have the byte we just read.
-		d.off--
-		d.scan.undo(op)
-
-		v = append(v, d.valueInterface())
-
-		// Next token must be , or ].
-		op = d.scanWhile(scanSkipSpace)
-		if op == scanEndArray {
-			break
-		}
-		if op != scanArrayValue {
-			d.error(errPhase)
-		}
-	}
-	return v
-}
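
The reflect.Slice arm of literalStore's string case treats a JSON string aimed at a []byte target as base64, exactly as the standard library does, so encoding/json can illustrate the same rule:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A JSON string decoded into []byte is base64-decoded, per the
	// base64.StdEncoding call in literalStore above.
	var b []byte
	if err := json.Unmarshal([]byte(`"aGVsbG8="`), &b); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", b) // hello
}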
-
-// objectInterface is like object but returns map[string]interface{}.
-func (d *decodeState) objectInterface() map[string]interface{} {
-	m := make(map[string]interface{})
-	keys := map[string]bool{}
-
-	for {
-		// Read opening " of string key or closing }.
-		op := d.scanWhile(scanSkipSpace)
-		if op == scanEndObject {
-			// closing } - can only happen on first iteration.
-			break
-		}
-		if op != scanBeginLiteral {
-			d.error(errPhase)
-		}
-
-		// Read string key.
-		start := d.off - 1
-		op = d.scanWhile(scanContinue)
-		item := d.data[start : d.off-1]
-		key, ok := unquote(item)
-		if !ok {
-			d.error(errPhase)
-		}
-
-		// Check for duplicate keys.
-		_, ok = keys[key]
-		if !ok {
-			keys[key] = true
-		} else {
-			d.error(fmt.Errorf("json: duplicate key '%s' in object", key))
-		}
-
-		// Read : before value.
-		if op == scanSkipSpace {
-			op = d.scanWhile(scanSkipSpace)
-		}
-		if op != scanObjectKey {
-			d.error(errPhase)
-		}
-
-		// Read value.
-		m[key] = d.valueInterface()
-
-		// Next token must be , or }.
-		op = d.scanWhile(scanSkipSpace)
-		if op == scanEndObject {
-			break
-		}
-		if op != scanObjectValue {
-			d.error(errPhase)
-		}
-	}
-	return m
-}
-
-// literalInterface is like literal but returns an interface value.
-func (d *decodeState) literalInterface() interface{} {
-	// All bytes inside literal return scanContinue op code.
-	start := d.off - 1
-	op := d.scanWhile(scanContinue)
-
-	// Scan read one byte too far; back up.
-	d.off--
-	d.scan.undo(op)
-	item := d.data[start:d.off]
-
-	switch c := item[0]; c {
-	case 'n': // null
-		return nil
-
-	case 't', 'f': // true, false
-		return c == 't'
-
-	case '"': // string
-		s, ok := unquote(item)
-		if !ok {
-			d.error(errPhase)
-		}
-		return s
-
-	default: // number
-		if c != '-' && (c < '0' || c > '9') {
-			d.error(errPhase)
-		}
-		n, err := d.convertNumber(string(item))
-		if err != nil {
-			d.saveError(err)
-		}
-		return n
-	}
-}
-
-// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
-// or it returns -1.
-func getu4(s []byte) rune {
-	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
-		return -1
-	}
-	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
-	if err != nil {
-		return -1
-	}
-	return rune(r)
-}
-
-// unquote converts a quoted JSON string literal s into an actual string t.
-// The rules are different than for Go, so cannot use strconv.Unquote.
-func unquote(s []byte) (t string, ok bool) {
-	s, ok = unquoteBytes(s)
-	t = string(s)
-	return
-}
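
The xxxInterface routines above fix the generic shapes for untyped decoding: objects become map[string]interface{}, arrays become []interface{}, and literals become nil, bool, string, or a number per convertNumber. A short sketch, again assuming the fork is imported by its module path:

package main

import (
	"fmt"

	josejson "github.com/go-jose/go-jose/v4/json"
)

func main() {
	var v interface{}
	if err := josejson.Unmarshal([]byte(`{"a":[1,"x",null]}`), &v); err != nil {
		panic(err)
	}
	// map[string]interface{}{"a": []interface{}{1, "x", nil}}; the 1 is a
	// float64 under the default number mode.
	fmt.Printf("%#v\n", v)
}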
-
-func unquoteBytes(s []byte) (t []byte, ok bool) {
-	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
-		return
-	}
-	s = s[1 : len(s)-1]
-
-	// Check for unusual characters. If there are none,
-	// then no unquoting is needed, so return a slice of the
-	// original bytes.
-	r := 0
-	for r < len(s) {
-		c := s[r]
-		if c == '\\' || c == '"' || c < ' ' {
-			break
-		}
-		if c < utf8.RuneSelf {
-			r++
-			continue
-		}
-		rr, size := utf8.DecodeRune(s[r:])
-		if rr == utf8.RuneError && size == 1 {
-			break
-		}
-		r += size
-	}
-	if r == len(s) {
-		return s, true
-	}
-
-	b := make([]byte, len(s)+2*utf8.UTFMax)
-	w := copy(b, s[0:r])
-	for r < len(s) {
-		// Out of room? Can only happen if s is full of
-		// malformed UTF-8 and we're replacing each
-		// byte with RuneError.
-		if w >= len(b)-2*utf8.UTFMax {
-			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
-			copy(nb, b[0:w])
-			b = nb
-		}
-		switch c := s[r]; {
-		case c == '\\':
-			r++
-			if r >= len(s) {
-				return
-			}
-			switch s[r] {
-			default:
-				return
-			case '"', '\\', '/', '\'':
-				b[w] = s[r]
-				r++
-				w++
-			case 'b':
-				b[w] = '\b'
-				r++
-				w++
-			case 'f':
-				b[w] = '\f'
-				r++
-				w++
-			case 'n':
-				b[w] = '\n'
-				r++
-				w++
-			case 'r':
-				b[w] = '\r'
-				r++
-				w++
-			case 't':
-				b[w] = '\t'
-				r++
-				w++
-			case 'u':
-				r--
-				rr := getu4(s[r:])
-				if rr < 0 {
-					return
-				}
-				r += 6
-				if utf16.IsSurrogate(rr) {
-					rr1 := getu4(s[r:])
-					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
-						// A valid pair; consume.
-						r += 6
-						w += utf8.EncodeRune(b[w:], dec)
-						break
-					}
-					// Invalid surrogate; fall back to replacement rune.
-					rr = unicode.ReplacementChar
-				}
-				w += utf8.EncodeRune(b[w:], rr)
-			}
-
-		// Quote, control characters are invalid.
-		case c == '"', c < ' ':
-			return
-
-		// ASCII
-		case c < utf8.RuneSelf:
-			b[w] = c
-			r++
-			w++
-
-		// Coerce to well-formed UTF-8.
-		default:
-			rr, size := utf8.DecodeRune(s[r:])
-			r += size
-			w += utf8.EncodeRune(b[w:], rr)
-		}
-	}
-	return b[0:w], true
-}
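
unquoteBytes and getu4 above keep encoding/json's \uXXXX semantics: a valid surrogate pair combines into a single rune, while a lone surrogate degrades to U+FFFD instead of erroring. The standard library shows the same behavior:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var s string
	_ = json.Unmarshal([]byte(`"\ud83d\ude00"`), &s) // valid surrogate pair
	fmt.Printf("%q\n", s)                            // "😀"
	_ = json.Unmarshal([]byte(`"\ud83d"`), &s) // lone high surrogate
	fmt.Printf("%q\n", s)                      // "\ufffd"
}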
diff --git a/vendor/github.com/go-jose/go-jose/v4/json/encode.go b/vendor/github.com/go-jose/go-jose/v4/json/encode.go
deleted file mode 100644
index 98de68ce1e9..00000000000
--- a/vendor/github.com/go-jose/go-jose/v4/json/encode.go
+++ /dev/null
@@ -1,1197 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package json implements encoding and decoding of JSON objects as defined in
-// RFC 4627. The mapping between JSON objects and Go values is described
-// in the documentation for the Marshal and Unmarshal functions.
-//
-// See "JSON and Go" for an introduction to this package:
-// https://golang.org/doc/articles/json_and_go.html
-package json
-
-import (
-	"bytes"
-	"encoding"
-	"encoding/base64"
-	"fmt"
-	"math"
-	"reflect"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"unicode"
-	"unicode/utf8"
-)
-
-// Marshal returns the JSON encoding of v.
-//
-// Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. If no MarshalJSON method is present but the
-// value implements encoding.TextMarshaler instead, Marshal calls
-// its MarshalText method.
-// The nil pointer exception is not strictly necessary
-// but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
-//
-// Otherwise, Marshal uses the following type-dependent default encodings:
-//
-// Boolean values encode as JSON booleans.
-//
-// Floating point, integer, and Number values encode as JSON numbers.
-//
-// String values encode as JSON strings coerced to valid UTF-8,
-// replacing invalid bytes with the Unicode replacement rune.
-// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
-// to keep some browsers from misinterpreting JSON output as HTML.
-// Ampersand "&" is also escaped to "\u0026" for the same reason.
-//
-// Array and slice values encode as JSON arrays, except that
-// []byte encodes as a base64-encoded string, and a nil slice
-// encodes as the null JSON object.
-//
-// Struct values encode as JSON objects. Each exported struct field
-// becomes a member of the object unless
-//   - the field's tag is "-", or
-//   - the field is empty and its tag specifies the "omitempty" option.
-//
-// The empty values are false, 0, any
-// nil pointer or interface value, and any array, slice, map, or string of
-// length zero. The object's default key string is the struct field name
-// but can be specified in the struct field's tag value. The "json" key in
-// the struct field's tag value is the key name, followed by an optional comma
-// and options. Examples:
-//
-//   // Field is ignored by this package.
-//   Field int `json:"-"`
-//
-//   // Field appears in JSON as key "myName".
-//   Field int `json:"myName"`
-//
-//   // Field appears in JSON as key "myName" and
-//   // the field is omitted from the object if its value is empty,
-//   // as defined above.
-//   Field int `json:"myName,omitempty"`
-//
-//   // Field appears in JSON as key "Field" (the default), but
-//   // the field is skipped if empty.
-//   // Note the leading comma.
-//   Field int `json:",omitempty"`
-//
-// The "string" option signals that a field is stored as JSON inside a
-// JSON-encoded string. It applies only to fields of string, floating point,
-// integer, or boolean types. This extra level of encoding is sometimes used
-// when communicating with JavaScript programs:
-//
-//   Int64String int64 `json:",string"`
-//
-// The key name will be used if it's a non-empty string consisting of
-// only Unicode letters, digits, dollar signs, percent signs, hyphens,
-// underscores and slashes.
-//
-// Anonymous struct fields are usually marshaled as if their inner exported fields
-// were fields in the outer struct, subject to the usual Go visibility rules amended
-// as described in the next paragraph.
-// An anonymous struct field with a name given in its JSON tag is treated as
-// having that name, rather than being anonymous.
-// An anonymous struct field of interface type is treated the same as having
-// that type as its name, rather than being anonymous.
-//
-// The Go visibility rules for struct fields are amended for JSON when
-// deciding which field to marshal or unmarshal. If there are
-// multiple fields at the same level, and that level is the least
-// nested (and would therefore be the nesting level selected by the
-// usual Go rules), the following extra rules apply:
-//
-// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
-// even if there are multiple untagged fields that would otherwise conflict.
-// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
-// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
-//
-// Handling of anonymous struct fields is new in Go 1.1.
-// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
-// an anonymous struct field in both current and earlier versions, give the field
-// a JSON tag of "-".
-//
-// Map values encode as JSON objects.
-// The map's key type must be string; the map keys are used as JSON object
-// keys, subject to the UTF-8 coercion described for string values above.
-//
-// Pointer values encode as the value pointed to.
-// A nil pointer encodes as the null JSON object.
-//
-// Interface values encode as the value contained in the interface.
-// A nil interface value encodes as the null JSON object.
-//
-// Channel, complex, and function values cannot be encoded in JSON.
-// Attempting to encode such a value causes Marshal to return
-// an UnsupportedTypeError.
-//
-// JSON cannot represent cyclic data structures and Marshal does not
-// handle them. Passing cyclic structures to Marshal will result in
-// an infinite recursion.
-func Marshal(v interface{}) ([]byte, error) {
-	e := &encodeState{}
-	err := e.marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	return e.Bytes(), nil
-}
-
-// MarshalIndent is like Marshal but applies Indent to format the output.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
-	b, err := Marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	var buf bytes.Buffer
-	err = Indent(&buf, b, prefix, indent)
-	if err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
-// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
-// so that the JSON will be safe to embed inside HTML