diff --git a/CHANGELOG.md b/CHANGELOG.md
index a05116f7ca8..b08c34d8c5c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -73,6 +73,7 @@
 * [CHANGE] Query Frontend: instant queries now honor the `-querier.max-retries-per-request` flag. #630
 * [CHANGE] Alertmanager: removed `-alertmanager.storage.*` configuration options, with the exception of the CLI flags `-alertmanager.storage.path` and `-alertmanager.storage.retention`. Use `-alertmanager-storage.*` instead. #632
 * [CHANGE] Ingester: active series metrics `cortex_ingester_active_series` and `cortex_ingester_active_series_custom_tracker` are now removed when their value is zero. #672 #690
+* [CHANGE] Querier / ruler: removed the `-store.query-chunk-limit` flag (and its respective YAML config option `max_chunks_per_query`). `-querier.max-fetched-chunks-per-query` (and its respective YAML config option `max_fetched_chunks_per_query`) should be used instead. #705
 * [FEATURE] Query Frontend: Add `cortex_query_fetched_chunks_total` per-user counter to expose the number of chunks fetched as part of queries. This metric can be enabled with the `-frontend.query-stats-enabled` flag (or its respective YAML config option `query_stats_enabled`). #31
 * [FEATURE] Query Frontend: Add experimental querysharding for the blocks storage (instant and range queries). You can now enable querysharding for blocks storage (`-store.engine=blocks`) by setting `-query-frontend.parallelize-shardable-queries` to `true`. The following additional config and exported metrics have been added. #79 #80 #100 #124 #140 #148 #150 #151 #153 #154 #155 #156 #157 #158 #159 #160 #163 #169 #172 #196 #205 #225 #226 #227 #228 #230 #235 #240 #239 #246 #244 #319 #330 #371 #385 #400 #458 #586 #630 #660
 * New config options:
diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index 6ce0575a591..6d59d79e166 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -3335,21 +3335,11 @@ The `limits_config` configures default and per-tenant limits imposed by services
 # CLI flag: -ingester.max-global-exemplars-per-user
 [max_global_exemplars_per_user: <int> | default = 0]
 
-# Deprecated. Use -querier.max-fetched-chunks-per-query CLI flag and its
-# respective YAML config option instead. Maximum number of chunks that can be
-# fetched in a single query. This limit is enforced when fetching chunks from
-# the long-term storage only. When using chunks storage, this limit is enforced
-# in the querier and ruler, while when using blocks storage this limit is
-# enforced in the querier, ruler and store-gateway. 0 to disable.
-# CLI flag: -store.query-chunk-limit
-[max_chunks_per_query: <int> | default = 2000000]
-
 # Maximum number of chunks that can be fetched in a single query from ingesters
 # and long-term storage. This limit is enforced in the querier, ruler and
-# store-gateway. Takes precedence over the deprecated -store.query-chunk-limit.
-# 0 to disable.
+# store-gateway. 0 to disable.
 # CLI flag: -querier.max-fetched-chunks-per-query
-[max_fetched_chunks_per_query: <int> | default = 0]
+[max_fetched_chunks_per_query: <int> | default = 2000000]
 
 # The maximum number of unique series for which a query can fetch samples from
 # each ingesters and blocks storage. This limit is enforced in the querier only
diff --git a/pkg/chunk/chunk_store.go b/pkg/chunk/chunk_store.go
index d5ef95ecd01..1a420016470 100644
--- a/pkg/chunk/chunk_store.go
+++ b/pkg/chunk/chunk_store.go
@@ -360,7 +360,7 @@ func (c *store) getMetricNameChunks(ctx context.Context, userID string, from, th
 	filtered := filterChunksByTime(from, through, chunks)
 	level.Debug(log).Log("Chunks post filtering", len(chunks))
 
-	maxChunksPerQuery := c.limits.MaxChunksPerQueryFromStore(userID)
+	maxChunksPerQuery := c.limits.MaxChunksPerQuery(userID)
 	if maxChunksPerQuery > 0 && len(filtered) > maxChunksPerQuery {
 		err := QueryError(fmt.Sprintf("Query %v fetched too many chunks (%d > %d)", allMatchers, len(filtered), maxChunksPerQuery))
 		level.Error(log).Log("err", err)
diff --git a/pkg/chunk/composite_store.go b/pkg/chunk/composite_store.go
index a7bac7032d9..79c4f10de96 100644
--- a/pkg/chunk/composite_store.go
+++ b/pkg/chunk/composite_store.go
@@ -19,7 +19,7 @@ import (
 
 // StoreLimits helps get Limits specific to Queries for Stores
 type StoreLimits interface {
-	MaxChunksPerQueryFromStore(userID string) int
+	MaxChunksPerQuery(userID string) int
 	MaxQueryLength(userID string) time.Duration
 }
 
diff --git a/pkg/chunk/series_store.go b/pkg/chunk/series_store.go
index aebe5d0ac1f..88f6d3a6337 100644
--- a/pkg/chunk/series_store.go
+++ b/pkg/chunk/series_store.go
@@ -117,7 +117,7 @@ func (c *seriesStore) Get(ctx context.Context, userID string, from, through mode
 	chunks := chks[0]
 	fetcher := fetchers[0]
 	// Protect ourselves against OOMing.
-	maxChunksPerQuery := c.limits.MaxChunksPerQueryFromStore(userID)
+	maxChunksPerQuery := c.limits.MaxChunksPerQuery(userID)
 	if maxChunksPerQuery > 0 && len(chunks) > maxChunksPerQuery {
 		err := QueryError(fmt.Sprintf("Query %v fetched too many chunks (%d > %d)", allMatchers, len(chunks), maxChunksPerQuery))
 		level.Error(log).Log("err", err)
diff --git a/pkg/chunk/storage/factory.go b/pkg/chunk/storage/factory.go
index 47fc2ef6e61..af7049d62a9 100644
--- a/pkg/chunk/storage/factory.go
+++ b/pkg/chunk/storage/factory.go
@@ -78,7 +78,7 @@ func RegisterIndexStore(name string, indexClientFactory IndexClientFactoryFunc,
 // StoreLimits helps get Limits specific to Queries for Stores
 type StoreLimits interface {
 	CardinalityLimit(userID string) int
-	MaxChunksPerQueryFromStore(userID string) int
+	MaxChunksPerQuery(userID string) int
 	MaxQueryLength(userID string) time.Duration
 }
 
diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go
index ba139a2a38b..7f5ceea787d 100644
--- a/pkg/querier/blocks_store_queryable.go
+++ b/pkg/querier/blocks_store_queryable.go
@@ -100,7 +100,7 @@ type BlocksStoreLimits interface {
 	bucket.TenantConfigProvider
 
 	MaxLabelsQueryLength(userID string) time.Duration
-	MaxChunksPerQueryFromStore(userID string) int
+	MaxChunksPerQuery(userID string) int
 	StoreGatewayTenantShardSize(userID string) int
 }
 
@@ -452,7 +452,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...*
 		resSeriesSets = []storage.SeriesSet(nil)
 		resWarnings   = storage.Warnings(nil)
 
-		maxChunksLimit  = q.limits.MaxChunksPerQueryFromStore(q.userID)
+		maxChunksLimit  = q.limits.MaxChunksPerQuery(q.userID)
 		leftChunksLimit = maxChunksLimit
 
 		resultMtx sync.Mutex
diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go
index 7270fc63f27..3bd9ad4d22e 100644
--- a/pkg/querier/blocks_store_queryable_test.go
+++ b/pkg/querier/blocks_store_queryable_test.go
@@ -1761,7 +1761,7 @@ func (m *blocksStoreLimitsMock) MaxLabelsQueryLength(_ string) time.Duration {
 	return m.maxLabelsQueryLength
 }
 
-func (m *blocksStoreLimitsMock) MaxChunksPerQueryFromStore(_ string) int {
+func (m *blocksStoreLimitsMock) MaxChunksPerQuery(_ string) int {
 	return m.maxChunksPerQuery
 }
 
diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go
index ec708d708fe..b1852aa292c 100644
--- a/pkg/storegateway/bucket_stores.go
+++ b/pkg/storegateway/bucket_stores.go
@@ -622,7 +622,7 @@ func newChunksLimiterFactory(limits *validation.Overrides, userID string) Chunks
 		// Since limit overrides could be live reloaded, we have to get the current user's limit
 		// each time a new limiter is instantiated.
 		return &chunkLimiter{
-			limiter: NewLimiter(uint64(limits.MaxChunksPerQueryFromStore(userID)), failedCounter),
+			limiter: NewLimiter(uint64(limits.MaxChunksPerQuery(userID)), failedCounter),
 		}
 	}
 }
diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go
index f5dd8319504..65f4afe86a9 100644
--- a/pkg/storegateway/gateway_test.go
+++ b/pkg/storegateway/gateway_test.go
@@ -1193,7 +1193,7 @@ func TestStoreGateway_SeriesQueryingShouldEnforceMaxChunksPerQueryLimit(t *testi
 		t.Run(testName, func(t *testing.T) {
 			// Customise the limits.
 			limits := defaultLimitsConfig()
-			limits.MaxChunksPerQueryFromStore = testData.limit
+			limits.MaxChunksPerQuery = testData.limit
 			overrides, err := validation.NewOverrides(limits, nil)
 			require.NoError(t, err)
 
diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go
index 88b2a16de5c..45e60a44f91 100644
--- a/pkg/util/validation/limits.go
+++ b/pkg/util/validation/limits.go
@@ -77,7 +77,6 @@ type Limits struct {
 	MaxGlobalExemplarsPerUser int `yaml:"max_global_exemplars_per_user" json:"max_global_exemplars_per_user"`
 
 	// Querier enforced limits.
-	MaxChunksPerQueryFromStore   int `yaml:"max_chunks_per_query" json:"max_chunks_per_query"` // TODO Remove in Cortex 1.12.
 	MaxChunksPerQuery            int `yaml:"max_fetched_chunks_per_query" json:"max_fetched_chunks_per_query"`
 	MaxFetchedSeriesPerQuery     int `yaml:"max_fetched_series_per_query" json:"max_fetched_series_per_query"`
 	MaxFetchedChunkBytesPerQuery int `yaml:"max_fetched_chunk_bytes_per_query" json:"max_fetched_chunk_bytes_per_query"`
@@ -168,8 +167,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.")
 	f.IntVar(&l.MaxGlobalExemplarsPerUser, "ingester.max-global-exemplars-per-user", 0, "The maximum number of exemplars in memory, across the cluster. 0 to disable exemplars ingestion.")
 
-	f.IntVar(&l.MaxChunksPerQueryFromStore, "store.query-chunk-limit", 2e6, "Deprecated. Use -querier.max-fetched-chunks-per-query CLI flag and its respective YAML config option instead. Maximum number of chunks that can be fetched in a single query. This limit is enforced when fetching chunks from the long-term storage only. When using chunks storage, this limit is enforced in the querier and ruler, while when using blocks storage this limit is enforced in the querier, ruler and store-gateway. 0 to disable.")
-	f.IntVar(&l.MaxChunksPerQuery, "querier.max-fetched-chunks-per-query", 0, "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. Takes precedence over the deprecated -store.query-chunk-limit. 0 to disable.")
+	f.IntVar(&l.MaxChunksPerQuery, "querier.max-fetched-chunks-per-query", 2e6, "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.")
 	f.IntVar(&l.MaxFetchedSeriesPerQuery, "querier.max-fetched-series-per-query", 0, "The maximum number of unique series for which a query can fetch samples from each ingesters and blocks storage. This limit is enforced in the querier only when running with blocks storage. 0 to disable")
 	f.IntVar(&l.MaxFetchedChunkBytesPerQuery, "querier.max-fetched-chunk-bytes-per-query", 0, "The maximum size of all chunks in bytes that a query can fetch from each ingester and storage. This limit is enforced in the querier and ruler only when running with blocks storage. 0 to disable.")
 	f.Var(&l.MaxQueryLength, "store.max-query-length", "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query), in the querier (on the query possibly split by the query-frontend) and in the chunks storage. 0 to disable.")
@@ -422,18 +420,6 @@ func (o *Overrides) MaxGlobalSeriesPerMetric(userID string) int {
 	return o.getOverridesForUser(userID).MaxGlobalSeriesPerMetric
 }
 
-// MaxChunksPerQueryFromStore returns the maximum number of chunks allowed per query when fetching
-// chunks from the long-term storage.
-func (o *Overrides) MaxChunksPerQueryFromStore(userID string) int {
-	// If the new config option is set, then it should take precedence.
-	if value := o.getOverridesForUser(userID).MaxChunksPerQuery; value > 0 {
-		return value
-	}
-
-	// Fallback to the deprecated config option.
-	return o.getOverridesForUser(userID).MaxChunksPerQueryFromStore
-}
-
 func (o *Overrides) MaxChunksPerQuery(userID string) int {
 	return o.getOverridesForUser(userID).MaxChunksPerQuery
 }
diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go
index 7fb52129b05..2affa9e089e 100644
--- a/pkg/util/validation/limits_test.go
+++ b/pkg/util/validation/limits_test.go
@@ -12,7 +12,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/grafana/dskit/flagext"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/stretchr/testify/assert"
@@ -76,43 +75,6 @@ func TestLimits_Validate(t *testing.T) {
 	}
 }
 
-func TestOverrides_MaxChunksPerQueryFromStore(t *testing.T) {
-	tests := map[string]struct {
-		setup    func(limits *Limits)
-		expected int
-	}{
-		"should return the default legacy setting with the default config": {
-			setup:    func(limits *Limits) {},
-			expected: 2000000,
-		},
-		"the new config option should take precedence over the deprecated one": {
-			setup: func(limits *Limits) {
-				limits.MaxChunksPerQueryFromStore = 10
-				limits.MaxChunksPerQuery = 20
-			},
-			expected: 20,
-		},
-		"the deprecated config option should be used if the new config option is unset": {
-			setup: func(limits *Limits) {
-				limits.MaxChunksPerQueryFromStore = 10
-			},
-			expected: 10,
-		},
-	}
-
-	for testName, testData := range tests {
-		t.Run(testName, func(t *testing.T) {
-			limits := Limits{}
-			flagext.DefaultValues(&limits)
-			testData.setup(&limits)
-
-			overrides, err := NewOverrides(limits, nil)
-			require.NoError(t, err)
-			assert.Equal(t, testData.expected, overrides.MaxChunksPerQueryFromStore("test"))
-		})
-	}
-}
-
 func TestOverridesManager_GetOverrides(t *testing.T) {
 	tenantLimits := map[string]*Limits{}