diff --git a/CHANGELOG.md b/CHANGELOG.md index 44bfad38748..695606c17d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,10 +14,13 @@ * [CHANGE] Ingester: Chunks flushed via /flush stay in memory until retention period is reached. This affects `cortex_ingester_memory_chunks` metric. #2778 * [CHANGE] Querier: the error message returned when the query time range exceeds `-store.max-query-length` has changed from `invalid query, length > limit (X > Y)` to `the query time range exceeds the limit (query length: X, limit: Y)`. #2826 * [CHANGE] KV: The `role` label which was a label of `multi` KV store client only has been added to metrics of every KV store client. If KV store client is not `multi`, then the value of `role` label is `primary`. #2837 +* [CHANGE] Added the `engine` label to the metrics exposed by the Prometheus query engine, to distinguish between `ruler` and `querier` metrics. #2854 +* [CHANGE] Added ruler to the single binary when started with `-target=all` (default). #2854 * [CHANGE] Experimental TSDB: compact head when opening TSDB. This should only affect ingester startup after it was unable to compact head in previous run. #2870 * [FEATURE] Introduced `ruler.for-outage-tolerance`, Max time to tolerate outage for restoring "for" state of alert. #2783 * [FEATURE] Introduced `ruler.for-grace-period`, Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. #2783 * [FEATURE] Introduced `ruler.resend-delay`, Minimum amount of time to wait before resending an alert to Alertmanager. #2783 +* [FEATURE] Ruler: added `local` filesystem support to store rules (read-only). #2854 * [ENHANCEMENT] Upgraded Docker base images to `alpine:3.12`. #2862 * [ENHANCEMENT] Experimental: Querier can now optionally query secondary store. This is specified by using `-querier.second-store-engine` option, with values `chunks` or `tsdb`. 
Standard configuration options for this store are used. Additionally, this querying can be configured to happen only for queries that need data older than `-querier.use-second-store-before-time`. Default value of zero will always query secondary store. #2747 * [ENHANCEMENT] Query-tee: increased the `cortex_querytee_request_duration_seconds` metric buckets granularity. #2799 diff --git a/development/tsdb-blocks-storage-s3-single-binary/.data-minio/.gitignore b/development/tsdb-blocks-storage-s3-single-binary/.data-minio/.gitignore index 7efb0530d7d..dfd690b42f6 100644 --- a/development/tsdb-blocks-storage-s3-single-binary/.data-minio/.gitignore +++ b/development/tsdb-blocks-storage-s3-single-binary/.data-minio/.gitignore @@ -1,3 +1,4 @@ * +!cortex-rules !cortex-tsdb !.gitignore diff --git a/development/tsdb-blocks-storage-s3-single-binary/.data-minio/cortex-rules/.gitignore b/development/tsdb-blocks-storage-s3-single-binary/.data-minio/cortex-rules/.gitignore new file mode 100644 index 00000000000..d6b7ef32c84 --- /dev/null +++ b/development/tsdb-blocks-storage-s3-single-binary/.data-minio/cortex-rules/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/development/tsdb-blocks-storage-s3-single-binary/config/cortex.yaml b/development/tsdb-blocks-storage-s3-single-binary/config/cortex.yaml index f7c64f642b4..93e8aa8f9d9 100644 --- a/development/tsdb-blocks-storage-s3-single-binary/config/cortex.yaml +++ b/development/tsdb-blocks-storage-s3-single-binary/config/cortex.yaml @@ -56,3 +56,19 @@ tsdb: storage: engine: tsdb + +ruler: + enable_api: true + enable_sharding: true + poll_interval: 2s + storage: + type: s3 + s3: + bucketnames: cortex-rules + s3forcepathstyle: true + s3: http://cortex:supersecret@minio.:9000 + ring: + kvstore: + store: consul + consul: + host: consul:8500 diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index 4e62b77efc8..112e902e2a3 100644 --- a/docs/configuration/config-file-reference.md 
+++ b/docs/configuration/config-file-reference.md @@ -818,7 +818,8 @@ ruler_client: [poll_interval: | default = 1m] storage: - # Method to use for backend rule storage (configdb, azure, gcs, s3) + # Method to use for backend rule storage (configdb, azure, gcs, s3, swift, + # local) # CLI flag: -ruler.storage.type [type: | default = "configdb"] @@ -998,6 +999,11 @@ storage: # CLI flag: -ruler.storage.swift.container-name [container_name: | default = "cortex"] + local: + # Directory to scan for rules + # CLI flag: -ruler.storage.local.directory + [directory: | default = ""] + # file path to store temporary rule files for the prometheus rule managers # CLI flag: -ruler.rule-path [rule_path: | default = "/rules"] diff --git a/docs/configuration/single-process-config-blocks-gossip-1.yaml b/docs/configuration/single-process-config-blocks-gossip-1.yaml index 3b13746d569..892cdf3133b 100644 --- a/docs/configuration/single-process-config-blocks-gossip-1.yaml +++ b/docs/configuration/single-process-config-blocks-gossip-1.yaml @@ -80,4 +80,15 @@ tsdb: dir: /tmp/cortex/storage frontend_worker: - match_max_concurrent: true \ No newline at end of file + match_max_concurrent: true + +ruler: + enable_api: true + enable_sharding: true + storage: + type: local + local: + directory: /tmp/cortex/rules + ring: + kvstore: + store: memberlist \ No newline at end of file diff --git a/docs/configuration/single-process-config-blocks-gossip-2.yaml b/docs/configuration/single-process-config-blocks-gossip-2.yaml index b3867c49169..6f70e23870a 100644 --- a/docs/configuration/single-process-config-blocks-gossip-2.yaml +++ b/docs/configuration/single-process-config-blocks-gossip-2.yaml @@ -79,4 +79,15 @@ tsdb: dir: /tmp/cortex/storage frontend_worker: - match_max_concurrent: true \ No newline at end of file + match_max_concurrent: true + +ruler: + enable_api: true + enable_sharding: true + storage: + type: local + local: + directory: /tmp/cortex/rules + ring: + kvstore: + store: memberlist \ No 
newline at end of file diff --git a/docs/configuration/single-process-config-blocks-tls.yaml b/docs/configuration/single-process-config-blocks-tls.yaml index 3a5b9214c31..72f4630b891 100644 --- a/docs/configuration/single-process-config-blocks-tls.yaml +++ b/docs/configuration/single-process-config-blocks-tls.yaml @@ -98,3 +98,11 @@ frontend_worker: tls_cert_path: "client.crt" tls_key_path: "client.key" tls_ca_path: "root.crt" + +ruler: + enable_api: true + enable_sharding: false + storage: + type: local + local: + directory: /tmp/cortex/rules \ No newline at end of file diff --git a/docs/configuration/single-process-config-blocks.yaml b/docs/configuration/single-process-config-blocks.yaml index 033ec1618fa..ccc3f64d8e8 100644 --- a/docs/configuration/single-process-config-blocks.yaml +++ b/docs/configuration/single-process-config-blocks.yaml @@ -84,4 +84,12 @@ compactor: store: inmemory frontend_worker: - match_max_concurrent: true \ No newline at end of file + match_max_concurrent: true + +ruler: + enable_api: true + enable_sharding: false + storage: + type: local + local: + directory: /tmp/cortex/rules \ No newline at end of file diff --git a/docs/configuration/single-process-config.md b/docs/configuration/single-process-config.md index 291d10847b6..08463bfc918 100644 --- a/docs/configuration/single-process-config.md +++ b/docs/configuration/single-process-config.md @@ -76,4 +76,14 @@ storage: # to max_concurrent on the queriers. 
frontend_worker: match_max_concurrent: true + +# Configure the ruler to scan the /tmp/cortex/rules directory for prometheus +# rules: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules +ruler: + enable_api: true + enable_sharding: false + storage: + type: local + local: + directory: /tmp/cortex/rules ``` diff --git a/docs/configuration/single-process-config.yaml b/docs/configuration/single-process-config.yaml index 665a9645cb3..d56294e3273 100644 --- a/docs/configuration/single-process-config.yaml +++ b/docs/configuration/single-process-config.yaml @@ -82,4 +82,12 @@ purger: object_store_type: filesystem frontend_worker: - match_max_concurrent: true \ No newline at end of file + match_max_concurrent: true + +ruler: + enable_api: true + enable_sharding: false + storage: + type: local + local: + directory: /tmp/cortex/rules \ No newline at end of file diff --git a/docs/guides/sharded_ruler.md b/docs/guides/sharded_ruler.md index e2705c898d4..5158222c6ee 100644 --- a/docs/guides/sharded_ruler.md +++ b/docs/guides/sharded_ruler.md @@ -26,3 +26,23 @@ In addition the ruler requires it's own ring to be configured, for instance: The only configuration that is required is to enable sharding and configure a key value store. From there the rulers will shard and handle the division of rules automatically. Unlike ingesters, rulers do not hand over responsibility: all rules are re-sharded randomly every time a ruler is added to or removed from the ring. + +## Ruler Storage + +The ruler supports six kinds of storage (configdb, azure, gcs, s3, swift, local). Most kinds of storage work with the sharded ruler configuration in an obvious way. i.e. configure all rulers to use the same backend. + +The local implementation reads [Prometheus recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) off of the local filesystem. 
This is a read only backend that does not support the creation and deletion of rules through [the API](https://cortexmetrics.io/docs/apis/#ruler). Despite the fact that it reads the local filesystem this method can still be used in a sharded ruler configuration if the operator takes care to load the same rules to every ruler. For instance this could be accomplished by mounting a [Kubernetes ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) onto every ruler pod. + +A typical local config may look something like: +``` + -ruler.storage.type=local + -ruler.storage.local.directory=/tmp/cortex/rules +``` + +With the above configuration the ruler would expect the following layout: +``` +/tmp/cortex/rules//rules1.yaml + /rules2.yaml +``` +Yaml files are expected to be in the [Prometheus format](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules). + diff --git a/integration/api_ruler_test.go b/integration/api_ruler_test.go index 8da8eb2c123..0f1e02aa3f3 100644 --- a/integration/api_ruler_test.go +++ b/integration/api_ruler_test.go @@ -3,6 +3,7 @@ package main import ( + "path/filepath" "testing" "github.com/stretchr/testify/require" @@ -94,3 +95,43 @@ func TestRulerAPI(t *testing.T) { // Ensure no service-specific metrics prefix is used by the wrong service. assertServiceMetricsPrefixes(t, Ruler, ruler) } + +func TestRulerAPISingleBinary(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + namespace := "ns" + user := "fake" + + configOverrides := map[string]string{ + "-ruler.storage.local.directory": filepath.Join(e2e.ContainerSharedDir, "ruler_configs"), + "-ruler.poll-interval": "2s", + } + + // Start Cortex components. 
+ require.NoError(t, copyFileToSharedDir(s, "docs/configuration/single-process-config.yaml", cortexConfigFile)) + require.NoError(t, writeFileToSharedDir(s, filepath.Join("ruler_configs", user, namespace), []byte(cortexRulerUserConfigYaml))) + cortex := e2ecortex.NewSingleBinaryWithConfigFile("cortex", cortexConfigFile, configOverrides, "", 9009, 9095) + require.NoError(t, s.StartAndWaitReady(cortex)) + + // Create a client with the ruler address configured + c, err := e2ecortex.NewClient("", "", "", cortex.HTTPEndpoint(), "") + require.NoError(t, err) + + // Wait until the user manager is created + require.NoError(t, cortex.WaitSumMetrics(e2e.Equals(1), "cortex_ruler_managers_total")) + + // Check to ensure the rules running in the cortex match what was set + rgs, err := c.GetRuleGroups() + require.NoError(t, err) + + retrievedNamespace, exists := rgs[namespace] + require.True(t, exists) + require.Len(t, retrievedNamespace, 1) + require.Equal(t, retrievedNamespace[0].Name, "rule") + + // Check to make sure prometheus engine metrics are available for both engine types + require.NoError(t, cortex.WaitForMetricWithLabels(e2e.EqualsSingle(0), "prometheus_engine_queries", map[string]string{"engine": "querier"})) + require.NoError(t, cortex.WaitForMetricWithLabels(e2e.EqualsSingle(0), "prometheus_engine_queries", map[string]string{"engine": "ruler"})) +} diff --git a/integration/configs.go b/integration/configs.go index 60bbfc5a32d..02666a44752 100644 --- a/integration/configs.go +++ b/integration/configs.go @@ -45,6 +45,18 @@ const ( receivers: - name: "example_receiver" ` + + cortexRulerUserConfigYaml = `groups: +- name: rule + interval: 100s + rules: + - record: test_rule + alert: "" + expr: up + for: 0s + labels: {} + annotations: {} +` ) var ( diff --git a/pkg/cortex/cortex_test.go b/pkg/cortex/cortex_test.go index 14c44e21a27..bd41d0d6c62 100644 --- a/pkg/cortex/cortex_test.go +++ b/pkg/cortex/cortex_test.go @@ -1,20 +1,27 @@ package cortex import ( + "net/url" 
"testing" "github.com/stretchr/testify/require" + "github.com/cortexproject/cortex/pkg/chunk/aws" "github.com/cortexproject/cortex/pkg/chunk/storage" "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/storage/backend/s3" "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" ) func TestCortex(t *testing.T) { + rulerURL, err := url.Parse("inmemory:///rules") + require.NoError(t, err) + cfg := Config{ Storage: storage.Config{ Engine: storage.StorageEngineTSDB, // makes config easier @@ -47,6 +54,16 @@ func TestCortex(t *testing.T) { }, }, }, + Ruler: ruler.Config{ + StoreConfig: ruler.RuleStoreConfig{ + Type: "s3", + S3: aws.S3Config{ + S3: flagext.URLValue{ + URL: rulerURL, + }, + }, + }, + }, Target: All, } diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 8051de5db78..7e2cb94d57c 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -169,7 +169,8 @@ func (t *Cortex) initDistributor() (serv services.Service, err error) { } func (t *Cortex) initQuerier() (serv services.Service, err error) { - queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, prometheus.DefaultRegisterer) + querierRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "querier"}, prometheus.DefaultRegisterer) + queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, querierRegisterer) // Prometheus histograms for requests to the querier. 
querierRequestDuration := promauto.With(prometheus.DefaultRegisterer).NewHistogramVec(prometheus.HistogramOpts{ @@ -454,9 +455,19 @@ func (t *Cortex) initTableManager() (services.Service, error) { } func (t *Cortex) initRuler() (serv services.Service, err error) { + // if the ruler is not configured and we're in single binary then let's just log an error and continue + // unfortunately there is no way to generate a "default" config and compare default against actual + // to determine if it's unconfigured. the following check, however, correctly tests this. + // Single binary integration tests will break if this ever drifts + if t.Cfg.Target == All && t.Cfg.Ruler.StoreConfig.Type == "configdb" && t.Cfg.Ruler.StoreConfig.ConfigDB.ConfigsAPIURL.URL == nil { + level.Info(util.Logger).Log("msg", "Ruler is not configured in single binary mode and will not be started.") + return nil, nil + } + t.Cfg.Ruler.Ring.ListenPort = t.Cfg.Server.GRPCListenPort t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, prometheus.DefaultRegisterer) + rulerRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "ruler"}, prometheus.DefaultRegisterer) + queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, rulerRegisterer) t.Ruler, err = ruler.NewRuler(t.Cfg.Ruler, engine, queryable, t.Distributor, prometheus.DefaultRegisterer, util.Logger) if err != nil { @@ -602,7 +613,7 @@ func (t *Cortex) setupModuleManager() error { Compactor: {API}, StoreGateway: {API}, Purger: {Store, DeleteRequestsStore, API}, - All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway}, + All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway, Ruler}, } for mod, targets := range deps { if err := mm.AddDependency(mod, targets...); err != nil { 
diff --git a/pkg/ruler/rules/local/local.go b/pkg/ruler/rules/local/local.go new file mode 100644 index 00000000000..c4c39963547 --- /dev/null +++ b/pkg/ruler/rules/local/local.go @@ -0,0 +1,136 @@ +package local + +import ( + "context" + "flag" + "io/ioutil" + "path/filepath" + + "github.com/pkg/errors" + + rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt" + "github.com/cortexproject/cortex/pkg/ruler/rules" +) + +type Config struct { + Directory string `yaml:"directory"` +} + +// RegisterFlagsWithPrefix registers flags, prepending the given prefix to each flag name. +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.Directory, prefix+"local.directory", "", "Directory to scan for rules") +} + +// Client expects to load already existing rules located at: +// cfg.Directory / userID / namespace +type Client struct { + cfg Config +} + +func NewLocalRulesClient(cfg Config) (*Client, error) { + if cfg.Directory == "" { + return nil, errors.New("directory required for local rules config") + } + + return &Client{ + cfg: cfg, + }, nil +} + +// ListAllRuleGroups implements RuleStore +func (l *Client) ListAllRuleGroups(ctx context.Context) (map[string]rules.RuleGroupList, error) { + lists := make(map[string]rules.RuleGroupList) + + root := l.cfg.Directory + infos, err := ioutil.ReadDir(root) + if err != nil { + return nil, errors.Wrapf(err, "unable to read dir %s", root) + } + + for _, info := range infos { + if !info.IsDir() { + continue + } + + list, err := l.listAllRulesGroupsForUser(ctx, info.Name()) + if err != nil { + return nil, errors.Wrapf(err, "failed to list rule groups for user %s", info.Name()) + } + + lists[info.Name()] = list + } + + return lists, nil +} + +// ListRuleGroups implements RuleStore +func (l *Client) ListRuleGroups(ctx context.Context, userID string, namespace string) (rules.RuleGroupList, error) { + if namespace != "" { + return l.listAllRulesGroupsForUserAndNamespace(ctx, userID, namespace) + } + + return l.listAllRulesGroupsForUser(ctx,
userID) +} + +// GetRuleGroup implements RuleStore +func (l *Client) GetRuleGroup(ctx context.Context, userID, namespace, group string) (*rules.RuleGroupDesc, error) { + return nil, errors.New("GetRuleGroup unsupported in rule local store") +} + +// SetRuleGroup implements RuleStore +func (l *Client) SetRuleGroup(ctx context.Context, userID, namespace string, group *rules.RuleGroupDesc) error { + return errors.New("SetRuleGroup unsupported in rule local store") +} + +// DeleteRuleGroup implements RuleStore +func (l *Client) DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error { + return errors.New("DeleteRuleGroup unsupported in rule local store") +} + +func (l *Client) listAllRulesGroupsForUser(ctx context.Context, userID string) (rules.RuleGroupList, error) { + var allLists rules.RuleGroupList + + root := filepath.Join(l.cfg.Directory, userID) + infos, err := ioutil.ReadDir(root) + if err != nil { + return nil, errors.Wrapf(err, "unable to read dir %s", root) + } + + for _, info := range infos { + if info.IsDir() { + continue + } + + list, err := l.listAllRulesGroupsForUserAndNamespace(ctx, userID, info.Name()) + if err != nil { + return nil, errors.Wrapf(err, "failed to list rule group for user %s and namespace %s", userID, info.Name()) + } + + allLists = append(allLists, list...) 
+ } + + return allLists, nil +} + +func (l *Client) listAllRulesGroupsForUserAndNamespace(ctx context.Context, userID string, namespace string) (rules.RuleGroupList, error) { + filename := filepath.Join(l.cfg.Directory, userID, namespace) + + rulegroups, allErrors := rulefmt.ParseFile(filename) + if len(allErrors) > 0 { + return nil, errors.Wrapf(allErrors[0], "error parsing %s", filename) + } + + allErrors = rulegroups.Validate() + if len(allErrors) > 0 { + return nil, errors.Wrapf(allErrors[0], "error validating %s", filename) + } + + var list rules.RuleGroupList + + for _, group := range rulegroups.Groups { + desc := rules.ToProto(userID, namespace, group) + list = append(list, desc) + } + + return list, nil +} diff --git a/pkg/ruler/rules/local/local_test.go b/pkg/ruler/rules/local/local_test.go new file mode 100644 index 00000000000..202942bf0d5 --- /dev/null +++ b/pkg/ruler/rules/local/local_test.go @@ -0,0 +1,69 @@ +package local + +import ( + "context" + "io/ioutil" + "os" + "path" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" + + rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt" + "github.com/cortexproject/cortex/pkg/ruler/rules" +) + +func TestClient_ListAllRuleGroups(t *testing.T) { + user := "user" + namespace := "ns" + + dir, err := ioutil.TempDir("", "") + require.NoError(t, err) + defer os.RemoveAll(dir) + + ruleGroups := rulefmt.RuleGroups{ + Groups: []rulefmt.RuleGroup{ + { + Name: "rule", + Interval: model.Duration(100 * time.Second), + Rules: []rulefmt.Rule{ + { + Record: "test_rule", + Expr: "up", + }, + }, + }, + }, + } + + b, err := yaml.Marshal(ruleGroups) + require.NoError(t, err) + + err = os.MkdirAll(path.Join(dir, user), 0777) + require.NoError(t, err) + + err = ioutil.WriteFile(path.Join(dir, user, namespace), b, 0777) + require.NoError(t, err) + + client, err := NewLocalRulesClient(Config{ + Directory: dir, + }) + require.NoError(t, err) + + 
ctx := context.Background() + userMap, err := client.ListAllRuleGroups(ctx) + require.NoError(t, err) + + actual, found := userMap[user] + require.True(t, found) + + require.Equal(t, len(ruleGroups.Groups), len(actual)) + for i, actualGroup := range actual { + expected := rules.ToProto(user, namespace, ruleGroups.Groups[i]) + + require.Equal(t, expected, actualGroup) + } +} diff --git a/pkg/ruler/storage.go b/pkg/ruler/storage.go index 8ab6657b7c3..dd82a0ba25f 100644 --- a/pkg/ruler/storage.go +++ b/pkg/ruler/storage.go @@ -14,6 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/openstack" "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/cortexproject/cortex/pkg/ruler/rules/local" "github.com/cortexproject/cortex/pkg/ruler/rules/objectclient" ) @@ -27,6 +28,7 @@ type RuleStoreConfig struct { GCS gcp.GCSConfig `yaml:"gcs"` S3 aws.S3Config `yaml:"s3"` Swift openstack.SwiftConfig `yaml:"swift"` + Local local.Config `yaml:"local"` mock rules.RuleStore `yaml:"-"` } @@ -38,7 +40,9 @@ func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.GCS.RegisterFlagsWithPrefix("ruler.storage.", f) cfg.S3.RegisterFlagsWithPrefix("ruler.storage.", f) cfg.Swift.RegisterFlagsWithPrefix("ruler.storage.", f) - f.StringVar(&cfg.Type, "ruler.storage.type", "configdb", "Method to use for backend rule storage (configdb, azure, gcs, s3)") + cfg.Local.RegisterFlagsWithPrefix("ruler.storage.", f) + + f.StringVar(&cfg.Type, "ruler.storage.type", "configdb", "Method to use for backend rule storage (configdb, azure, gcs, s3, swift, local)") } // Validate config and returns error on failure @@ -72,8 +76,10 @@ func NewRuleStorage(cfg RuleStoreConfig) (rules.RuleStore, error) { return newObjRuleStore(aws.NewS3ObjectClient(cfg.S3, "")) case "swift": return newObjRuleStore(openstack.NewSwiftObjectClient(cfg.Swift, "")) + case "local": + return local.NewLocalRulesClient(cfg.Local) default: - return nil, 
fmt.Errorf("Unrecognized rule storage mode %v, choose one of: configdb, gcs, s3, swift, azure", cfg.Type) + return nil, fmt.Errorf("Unrecognized rule storage mode %v, choose one of: configdb, gcs, s3, swift, azure, local", cfg.Type) } }