-
Notifications
You must be signed in to change notification settings - Fork 839
Add support for Max Series Per Query for block storage and streaming ingesters #4179
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 11 commits
12a12b7
9148855
60571d2
ad00485
01e59ea
4f31df8
f62d32d
e2e1c6c
04d51cd
0d9af4f
38005dd
6ec0b09
f4d0d4c
2b8cb1d
076571c
29a0fcf
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -40,6 +40,7 @@ import ( | |||||
| "github.com/cortexproject/cortex/pkg/util" | ||||||
| "github.com/cortexproject/cortex/pkg/util/chunkcompat" | ||||||
| "github.com/cortexproject/cortex/pkg/util/flagext" | ||||||
| "github.com/cortexproject/cortex/pkg/util/limiter" | ||||||
| util_math "github.com/cortexproject/cortex/pkg/util/math" | ||||||
| "github.com/cortexproject/cortex/pkg/util/services" | ||||||
| "github.com/cortexproject/cortex/pkg/util/test" | ||||||
|
|
@@ -945,6 +946,56 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReac | |||||
| assert.Contains(t, err.Error(), "the query hit the max number of chunks limit") | ||||||
| } | ||||||
|
|
||||||
| func TestDistributor_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReached(t *testing.T) { | ||||||
| const maxSeriesLimit = 10 | ||||||
|
|
||||||
| limits := &validation.Limits{} | ||||||
| flagext.DefaultValues(limits) | ||||||
| ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(maxSeriesLimit)) | ||||||
| // Prepare distributors. | ||||||
| ds, _, r, _ := prepare(t, prepConfig{ | ||||||
| numIngesters: 3, | ||||||
| happyIngesters: 3, | ||||||
| numDistributors: 1, | ||||||
| shardByAllLabels: true, | ||||||
| limits: limits, | ||||||
| }) | ||||||
| defer stopAll(ds, r) | ||||||
|
|
||||||
| // Push a number of series below the max series limit. Each series has 1 sample. | ||||||
|
||||||
| // Push a number of series below the max series limit. Each series has 1 sample. | |
| // Push a number of series below the max series limit. |
pracucci marked this conversation as resolved.
Show resolved
Hide resolved
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -19,6 +19,7 @@ import ( | |
| "github.com/cortexproject/cortex/pkg/util" | ||
| "github.com/cortexproject/cortex/pkg/util/extract" | ||
| grpc_util "github.com/cortexproject/cortex/pkg/util/grpc" | ||
| "github.com/cortexproject/cortex/pkg/util/limiter" | ||
| "github.com/cortexproject/cortex/pkg/util/validation" | ||
| ) | ||
|
|
||
|
|
@@ -187,8 +188,10 @@ func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.Re | |
| // queryIngesterStream queries the ingesters using the new streaming API. | ||
| func (d *Distributor) queryIngesterStream(ctx context.Context, userID string, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (*ingester_client.QueryStreamResponse, error) { | ||
| var ( | ||
| chunksLimit = d.limits.MaxChunksPerQueryFromIngesters(userID) | ||
| chunksCount = atomic.Int32{} | ||
| chunksLimit = d.limits.MaxChunksPerQueryFromIngesters(userID) | ||
| chunksCount = atomic.Int32{} | ||
| queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) | ||
| matchers, _ = ingester_client.FromLabelMatchers(req.Matchers) | ||
|
||
| ) | ||
|
|
||
| // Fetch samples from multiple ingesters | ||
|
|
@@ -226,10 +229,19 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, userID string, re | |
| // We expect to be always able to convert the label matchers back to Prometheus ones. | ||
| // In case we fail (unexpected) the error will not include the matchers, but the core | ||
| // logic doesn't break. | ||
| matchers, _ := ingester_client.FromLabelMatchers(req.Matchers) | ||
| return nil, validation.LimitError(fmt.Sprintf(errMaxChunksPerQueryLimit, util.LabelMatchersToString(matchers), chunksLimit)) | ||
| } | ||
| } | ||
| for _, series := range resp.Chunkseries { | ||
pracucci marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| if limitErr := queryLimiter.AddSeries(series.Labels); limitErr != nil { | ||
| return nil, limitErr | ||
| } | ||
| } | ||
| for _, series := range resp.Timeseries { | ||
| if limitErr := queryLimiter.AddSeries(series.Labels); limitErr != nil { | ||
| return nil, limitErr | ||
| } | ||
| } | ||
|
|
||
| result.Chunkseries = append(result.Chunkseries, resp.Chunkseries...) | ||
| result.Timeseries = append(result.Timeseries, resp.Timeseries...) | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is no longer true. Please update the doc accordingly.