diff --git a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc b/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc index 36201e4973547..090ab42948b32 100644 --- a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc +++ b/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc @@ -16,7 +16,7 @@ The default for the `action.destructive_requires_name` setting changes from `fal to `true` in {es} 8.0.0. Previously, defaulting to `false` allowed users to use wildcard -patterns to delete, close, or change index blocks on indices. +patterns to delete, close, or change index blocks on indices. To prevent the accidental deletion of indices that happen to match a wildcard pattern, we now default to requiring that destructive operations explicitly name the indices to be modified. @@ -44,19 +44,28 @@ non-frozen node will result in an error on startup. ==== [[max_clause_count_change]] -.The `indices.query.bool.max_clause_count` setting now limits all query clauses. +.The `indices.query.bool.max_clause_count` setting has been deprecated and no longer has any effect. [%collapsible] ==== *Details* + -Previously, the `indices.query.bool.max_clause_count` would apply to the number -of clauses of a single `bool` query. It now applies to the total number of -clauses of the rewritten query. To reduce chances of breaks, its -default value has been bumped from 1024 to 4096. +{es} now dynamically sets the maximum number of allowed clauses +in a query, using a heuristic based on the size of the search thread pool and +the size of the heap allocated to the JVM. This limit has a minimum value of +1024 and will in most cases be larger (for example, a node with a 30GB heap and +48 CPUs will have a maximum clause count of around 27,000). Larger heaps lead +to higher values, and larger thread pools result in lower values. *Impact* + -Queries with many clauses should be avoided whenever possible. -If you previously bumped this setting to accommodate heavy queries, -you might need to increase it further. +Queries with many clauses should be avoided whenever possible. +If you previously bumped this setting to accommodate heavy queries, +you might need to increase the amount of memory available to {es}, +or reduce the size of your search thread pool so that more memory is +available to each concurrent search. + +In previous versions of Lucene you could get around this limit by nesting +boolean queries within each other, but the limit is now based on the total +number of leaf queries within the query as a whole, and this workaround will +no longer help. ==== [[ilm-poll-interval-limit]] @@ -224,7 +233,7 @@ Remove the `http.content_type.required` setting from `elasticsearch.yml`. Specif The `http.tcp_no_delay` setting was deprecated in 7.x and has been removed in 8.0. Use`http.tcp.no_delay` instead. *Impact* + -Replace the `http.tcp_no_delay` setting with `http.tcp.no_delay`. +Replace the `http.tcp_no_delay` setting with `http.tcp.no_delay`. Specifying `http.tcp_no_delay` in `elasticsearch.yml` will result in an error on startup. ==== @@ -237,7 +246,7 @@ The `network.tcp.connect_timeout` setting was deprecated in 7.x and has been rem was a fallback setting for `transport.connect_timeout`. *Impact* + -Remove the`network.tcp.connect_timeout` setting. +Remove the `network.tcp.connect_timeout` setting. Use the `transport.connect_timeout` setting to change the default connection timeout for client connections.
Specifying `network.tcp.connect_timeout` in `elasticsearch.yml` will result in an error on startup. ==== @@ -282,7 +291,7 @@ since the 5.2 release of {es}. *Impact* + Remove the `xpack.security.authz.store.roles.index.cache.max_size` -and `xpack.security.authz.store.roles.index.cache.ttl` settings from `elasticsearch.yml` . +and `xpack.security.authz.store.roles.index.cache.ttl` settings from `elasticsearch.yml`. Specifying these settings will result in an error on startup. ==== diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java index 1a95ec98b0087..45750663c3346 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java @@ -8,14 +8,13 @@ package org.elasticsearch.search.aggregations.bucket; +import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrix; import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrix.Bucket; @@ -276,20 +275,28 @@ public void testWithSubAggregation() throws Exception { } - public void testTooLargeMatrix() throws Exception { + public void testTooLargeMatrix() { - // Create more filters than is permitted by Lucene Bool clause settings. - MapBuilder<String, QueryBuilder> filtersMap = new MapBuilder<>(); - int maxFilters = SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.get(Settings.EMPTY); - for (int i = 0; i <= maxFilters; i++) { - filtersMap.add("tag" + i, termQuery("tag", "tag" + i)); - } + int originalMaxClauses = IndexSearcher.getMaxClauseCount(); try { - client().prepareSearch("idx").addAggregation(adjacencyMatrix("tags", "\t", filtersMap)).get(); - fail("SearchPhaseExecutionException should have been thrown"); - } catch (SearchPhaseExecutionException ex) { - assertThat(ex.getCause().getMessage(), containsString("Number of filters is too large")); + // Create more filters than is permitted by Lucene Bool clause settings.
+ MapBuilder<String, QueryBuilder> filtersMap = new MapBuilder<>(); + int maxFilters = randomIntBetween(50, 100); + IndexSearcher.setMaxClauseCount(maxFilters); + for (int i = 0; i <= maxFilters; i++) { + filtersMap.add("tag" + i, termQuery("tag", "tag" + i)); + } + + try { + client().prepareSearch("idx").addAggregation(adjacencyMatrix("tags", "\t", filtersMap)).get(); + fail("SearchPhaseExecutionException should have been thrown"); + } catch (SearchPhaseExecutionException ex) { + assertThat(ex.getCause().getMessage(), containsString("Number of filters is too large")); + } + + } finally { + IndexSearcher.setMaxClauseCount(originalMaxClauses); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java index af70787278b85..412fda78dfb16 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.query; +import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -17,12 +18,10 @@ import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.junit.Before; -import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; @@ -42,13 +41,6 @@ public class QueryStringIT extends ESIntegTestCase { - private static int CLUSTER_MAX_CLAUSE_COUNT; - - @BeforeClass - public static void createRandomClusterSetting() { - CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(50, 100); - } - @Before public void setup() throws Exception { String indexBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-query-index.json"); @@ -56,14 +48,6 @@ public void setup() throws Exception { ensureGreen("test"); } - @Override - protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal, otherSettings)) - .put(SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT) - .build(); - } - public void testBasicAllQuery() throws Exception { List<IndexRequestBuilder> reqs = new ArrayList<>(); reqs.add(client().prepareIndex("test").setId("1").setSource("f1", "foo bar baz")); @@ -250,32 +234,10 @@ public void testAllFieldsWithSpecifiedLeniency() throws IOException { assertThat(e.getCause().getMessage(), containsString("unit [D] not supported for date math [-2D]")); } - // The only expectation for this test is to not throw exception - public void testLimitOnExpandedFieldsButIgnoreUnmappedFields() throws Exception { - XContentBuilder builder = jsonBuilder(); - builder.startObject(); - builder.startObject("_doc"); - builder.startObject("properties"); - for (int i = 0; i < CLUSTER_MAX_CLAUSE_COUNT; i++) { - builder.startObject("field" + i).field("type", "text").endObject(); - } - builder.endObject(); // properties - builder.endObject(); // type1 - builder.endObject(); - - assertAcked(prepareCreate("ignoreunmappedfields").setMapping(builder)); - -
client().prepareIndex("ignoreunmappedfields").setId("1").setSource("field1", "foo bar baz").get(); - refresh(); + public void testLimitOnExpandedFields() throws Exception { - QueryStringQueryBuilder qb = queryStringQuery("bar"); - if (randomBoolean()) { - qb.field("*").field("unmappedField1").field("unmappedField2").field("unmappedField3").field("unmappedField4"); - } - client().prepareSearch("ignoreunmappedfields").setQuery(qb).get(); - } + final int maxClauseCount = randomIntBetween(50, 100); - public void testLimitOnExpandedFields() throws Exception { XContentBuilder builder = jsonBuilder(); builder.startObject(); { @@ -283,7 +245,7 @@ public void testLimitOnExpandedFields() throws Exception { { builder.startObject("properties"); { - for (int i = 0; i < CLUSTER_MAX_CLAUSE_COUNT; i++) { + for (int i = 0; i < maxClauseCount; i++) { builder.startObject("field_A" + i).field("type", "text").endObject(); builder.startObject("field_B" + i).field("type", "text").endObject(); } @@ -296,25 +258,34 @@ public void testLimitOnExpandedFields() throws Exception { assertAcked( prepareCreate("testindex").setSettings( - Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT + 100) + Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), maxClauseCount + 100) ).setMapping(builder) ); client().prepareIndex("testindex").setId("1").setSource("field_A0", "foo bar baz").get(); refresh(); - // single field shouldn't trigger the limit - doAssertOneHitForQueryString("field_A0:foo"); - // expanding to the limit should work - doAssertOneHitForQueryString("field_A\\*:foo"); + int originalMaxClauses = IndexSearcher.getMaxClauseCount(); + try { + + IndexSearcher.setMaxClauseCount(maxClauseCount); + + // single field shouldn't trigger the limit + doAssertOneHitForQueryString("field_A0:foo"); + // expanding to the limit should work + doAssertOneHitForQueryString("field_A\\*:foo"); - // adding a non-existing field on top shouldn't overshoot the limit - doAssertOneHitForQueryString("field_A\\*:foo unmapped:something"); + // adding a non-existing field on top shouldn't overshoot the limit + doAssertOneHitForQueryString("field_A\\*:foo unmapped:something"); - // the following should exceed the limit - doAssertLimitExceededException("foo", CLUSTER_MAX_CLAUSE_COUNT * 2, "*"); - doAssertLimitExceededException("*:foo", CLUSTER_MAX_CLAUSE_COUNT * 2, "*"); - doAssertLimitExceededException("field_\\*:foo", CLUSTER_MAX_CLAUSE_COUNT * 2, "field_*"); + // the following should exceed the limit + doAssertLimitExceededException("foo", IndexSearcher.getMaxClauseCount() * 2, "*"); + doAssertLimitExceededException("*:foo", IndexSearcher.getMaxClauseCount() * 2, "*"); + doAssertLimitExceededException("field_\\*:foo", IndexSearcher.getMaxClauseCount() * 2, "field_*"); + + } finally { + IndexSearcher.setMaxClauseCount(originalMaxClauses); + } } private void doAssertOneHitForQueryString(String queryString) { @@ -340,7 +311,7 @@ private void doAssertLimitExceededException(String queryString, int exceedingFie "field expansion for [" + inputFieldPattern + "] matches too many fields, limit: " - + CLUSTER_MAX_CLAUSE_COUNT + + IndexSearcher.getMaxClauseCount() + ", got: " + exceedingFieldCount ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java index 0c746cedf301d..3ff122300f5cd 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java @@ -11,6 +11,7 @@ import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -29,13 +30,11 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; -import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; @@ -69,21 +68,6 @@ */ public class SimpleQueryStringIT extends ESIntegTestCase { - private static int CLUSTER_MAX_CLAUSE_COUNT; - - @BeforeClass - public static void createRandomClusterSetting() { - CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(60, 100); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal, otherSettings)) - .put(SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT) - .build(); - } - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singletonList(MockAnalysisPlugin.class); @@ -578,11 +562,14 @@ public void testAllFieldsWithSpecifiedLeniency() throws IOException { } public void testLimitOnExpandedFields() throws Exception { + + final int maxClauseCount = randomIntBetween(50, 100); + XContentBuilder builder = jsonBuilder(); builder.startObject(); builder.startObject("_doc"); builder.startObject("properties"); - for (int i = 0; i < CLUSTER_MAX_CLAUSE_COUNT + 1; i++) { + for (int i = 0; i < maxClauseCount + 1; i++) { builder.startObject("field" + i).field("type", "text").endObject(); } builder.endObject(); // properties @@ -591,15 +578,21 @@ public void testLimitOnExpandedFields() throws Exception { assertAcked( prepareCreate("toomanyfields").setSettings( - Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT + 100) + Settings.builder().put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), maxClauseCount + 100) ).setMapping(builder) ); client().prepareIndex("toomanyfields").setId("1").setSource("field1", "foo bar baz").get(); refresh(); - doAssertLimitExceededException("*", CLUSTER_MAX_CLAUSE_COUNT + 1); - doAssertLimitExceededException("field*", CLUSTER_MAX_CLAUSE_COUNT + 1); + int originalMaxClauses = IndexSearcher.getMaxClauseCount(); + try { + IndexSearcher.setMaxClauseCount(maxClauseCount); + doAssertLimitExceededException("*", maxClauseCount + 1); + doAssertLimitExceededException("field*", maxClauseCount + 1); + } finally { + IndexSearcher.setMaxClauseCount(originalMaxClauses); + } } private void doAssertLimitExceededException(String field, int exceedingFieldCount) { @@ -610,7 +603,9 @@ private void doAssertLimitExceededException(String field, int exceedingFieldCoun }); assertThat( ExceptionsHelper.unwrap(e,
IllegalArgumentException.class).getMessage(), - containsString("field expansion matches too many fields, limit: " + CLUSTER_MAX_CLAUSE_COUNT + ", got: " + exceedingFieldCount) + containsString( + "field expansion matches too many fields, limit: " + IndexSearcher.getMaxClauseCount() + ", got: " + exceedingFieldCount + ) ); } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index 6679af1cfdb5a..72e87f0e573c4 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -8,12 +8,12 @@ package org.elasticsearch.index.search; +import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.search.SearchModule; import java.util.Collection; import java.util.HashMap; @@ -94,7 +94,7 @@ static Map<String, Float> resolveMappingFields( resolvedFields.put(field.getKey(), boost); } } - checkForTooManyFields(resolvedFields.size(), context, null); + checkForTooManyFields(resolvedFields.size(), null); return resolvedFields; } @@ -151,8 +151,8 @@ static Map<String, Float> resolveMappingField( return fields; } - static void checkForTooManyFields(int numberOfFields, SearchExecutionContext context, @Nullable String inputPattern) { - Integer limit = SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.get(context.getIndexSettings().getSettings()); + static void checkForTooManyFields(int numberOfFields, @Nullable String inputPattern) { + int limit = IndexSearcher.getMaxClauseCount(); if (numberOfFields > limit) { StringBuilder errorMsg = new StringBuilder("field expansion "); if (inputPattern != null) { diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 5cbe111ae63af..e3dbf6ed67ec5 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -285,7 +285,7 @@ private Map<String, Float> extractMultiFields(String field, boolean quoted) { } else { extractedFields = fieldsAndWeights; } - checkForTooManyFields(extractedFields.size(), this.context, field); + checkForTooManyFields(extractedFields.size(), field); return extractedFields; } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 64687160ba4fd..47a9203ab976f 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.Constants; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Assertions; @@ -156,6 +157,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.SearchUtils; import org.elasticsearch.search.aggregations.support.AggregationUsageService; import org.elasticsearch.search.fetch.FetchPhase; import
org.elasticsearch.shutdown.PluginShutdownService; @@ -472,6 +474,7 @@ protected Node( final UsageService usageService = new UsageService(); SearchModule searchModule = new SearchModule(settings, pluginsService.filterPlugins(SearchPlugin.class)); + IndexSearcher.setMaxClauseCount(SearchUtils.calculateMaxClauseValue(threadPool)); List<NamedWriteableRegistry.Entry> namedWriteables = Stream.of( NetworkModule.getNamedWriteables().stream(), IndicesModule.getNamedWriteables().stream(), diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 22e5214785ba5..7b7fa2e8b4abf 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -8,7 +8,6 @@ package org.elasticsearch.search; -import org.apache.lucene.search.BooleanQuery; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.NamedRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -268,7 +267,8 @@ public class SearchModule { 4096, 1, Integer.MAX_VALUE, - Setting.Property.NodeScope + Setting.Property.NodeScope, + Setting.Property.DeprecatedWarning ); public static final Setting<Integer> INDICES_MAX_NESTED_DEPTH_SETTING = Setting.intSetting( @@ -292,8 +292,6 @@ public class SearchModule { /** * Constructs a new SearchModule object * - * NOTE: This constructor should not be called in production unless an accurate {@link Settings} object is provided. - * When constructed, a static flag is set in Lucene {@link BooleanQuery#setMaxClauseCount} according to the settings. * @param settings Current settings * @param plugins List of included {@link SearchPlugin} objects. */ @@ -1057,7 +1055,6 @@ private void registerQueryParsers(List<SearchPlugin> plugins) { registerQuery(new QuerySpec<>(MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new, MatchAllQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(QueryStringQueryBuilder.NAME, QueryStringQueryBuilder::new, QueryStringQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(BoostingQueryBuilder.NAME, BoostingQueryBuilder::new, BoostingQueryBuilder::fromXContent)); - BooleanQuery.setMaxClauseCount(INDICES_MAX_CLAUSE_COUNT_SETTING.get(settings)); registerBoolQuery(new ParseField(BoolQueryBuilder.NAME), BoolQueryBuilder::new); BoolQueryBuilder.setMaxNestedDepth(INDICES_MAX_NESTED_DEPTH_SETTING.get(settings)); registerQuery(new QuerySpec<>(TermQueryBuilder.NAME, TermQueryBuilder::new, TermQueryBuilder::fromXContent)); diff --git a/server/src/main/java/org/elasticsearch/search/SearchUtils.java b/server/src/main/java/org/elasticsearch/search/SearchUtils.java new file mode 100644 index 0000000000000..75575cd9b5875 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/SearchUtils.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.search; + +import org.elasticsearch.monitor.jvm.JvmStats; +import org.elasticsearch.threadpool.ThreadPool; + +public final class SearchUtils { + + public static final int DEFAULT_MAX_CLAUSE_COUNT = 1024; + + public static int calculateMaxClauseValue(ThreadPool threadPool) { + int searchThreadPoolSize = threadPool.info(ThreadPool.Names.SEARCH).getMax(); + long heapSize = JvmStats.jvmStats().getMem().getHeapMax().getGb(); + return calculateMaxClauseValue(searchThreadPoolSize, heapSize); + } + + static int calculateMaxClauseValue(long threadPoolSize, double heapInGb) { + if (threadPoolSize <= 0 || heapInGb <= 0) { + return DEFAULT_MAX_CLAUSE_COUNT; + } + // In a worst-case scenario, each clause may end up using up to 16k of memory + // to load postings, positions, offsets, impacts, etc. So we calculate the + // maximum number of clauses we can support in a single thread pool by + // dividing the heap by 16k (or the equivalent, multiplying the heap in GB by + // 64k), and then divide that by the number of possible concurrent search + // threads. + int maxClauseCount = (int) (heapInGb * 65_536 / threadPoolSize); + return Math.max(DEFAULT_MAX_CLAUSE_COUNT, maxClauseCount); + } + + private SearchUtils() {} +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java index 527d40143850c..dad9a816705b3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java @@ -8,13 +8,12 @@ package org.elasticsearch.search.aggregations.bucket.adjacency; -import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -204,17 +203,15 @@ protected AdjacencyMatrixAggregationBuilder doRewrite(QueryRewriteContext queryR @Override protected AggregatorFactory doBuild(AggregationContext context, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - int maxFilters = BooleanQuery.getMaxClauseCount(); + int maxFilters = IndexSearcher.getMaxClauseCount(); if (filters.size() > maxFilters) { throw new IllegalArgumentException( "Number of filters is too large, must be less than or equal to: [" + maxFilters + "] but was [" + filters.size() - + "]." - + "This limit can be set by changing the [" - + SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey() - + "] setting." + + "]. " + + "You can increase this limit by scaling up your Java heap" ); } return new AdjacencyMatrixAggregatorFactory(name, filters, separator, context, parent, subFactoriesBuilder, metadata); diff --git a/server/src/test/java/org/elasticsearch/index/search/QueryParserHelperTests.java b/server/src/test/java/org/elasticsearch/index/search/QueryParserHelperTests.java new file mode 100644 index 0000000000000..4255b735c0587 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/search/QueryParserHelperTests.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.search; + +import org.apache.lucene.search.IndexSearcher; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.index.query.SearchExecutionContext; + +import java.io.IOException; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; + +public class QueryParserHelperTests extends MapperServiceTestCase { + + public void testUnmappedFieldsDoNotContributeToFieldCount() throws IOException { + + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field1").field("type", "text").endObject(); + b.startObject("field2").field("type", "text").endObject(); + })); + + SearchExecutionContext context = createSearchExecutionContext(mapperService); + { + Map<String, Float> resolvedFields = QueryParserHelper.resolveMappingFields(context, Map.of("*", 1.0f)); + assertThat(resolvedFields.keySet(), containsInAnyOrder("field1", "field2")); + } + + { + Map<String, Float> resolvedFields = QueryParserHelper.resolveMappingFields(context, Map.of("*", 1.0f, "unmapped", 2.0f)); + assertThat(resolvedFields.keySet(), containsInAnyOrder("field1", "field2")); + assertFalse(resolvedFields.containsKey("unmapped")); + } + + { + Map<String, Float> resolvedFields = QueryParserHelper.resolveMappingFields(context, Map.of("unmapped", 1.0f)); + assertTrue(resolvedFields.isEmpty()); + } + } + + public void testFieldExpansionAboveLimitThrowsException() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + for (int i = 0; i < 10; i++) { + b.startObject("field" + i).field("type", "long").endObject(); + } + })); + SearchExecutionContext context = createSearchExecutionContext(mapperService); + + int originalMaxClauseCount = IndexSearcher.getMaxClauseCount(); + try { + IndexSearcher.setMaxClauseCount(4); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> QueryParserHelper.resolveMappingFields(context, Map.of("field*", 1.0f)) + ); + assertThat(e.getMessage(), containsString("field expansion matches too many fields")); + + IndexSearcher.setMaxClauseCount(10); + assertThat(QueryParserHelper.resolveMappingFields(context, Map.of("field*", 1.0f)).keySet(), hasSize(10)); + } finally { + IndexSearcher.setMaxClauseCount(originalMaxClauseCount); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/SearchUtilsTests.java b/server/src/test/java/org/elasticsearch/search/SearchUtilsTests.java new file mode 100644 index 0000000000000..a1f21af4b9ffa --- /dev/null +++
b/server/src/test/java/org/elasticsearch/search/SearchUtilsTests.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search; + +import org.elasticsearch.test.ESTestCase; + +public class SearchUtilsTests extends ESTestCase { + + public void testConfigureMaxClauses() { + + // Heap below 1 Gb + assertEquals(8192, SearchUtils.calculateMaxClauseValue(4, 0.5)); + + // Number of processors not available + assertEquals(1024, SearchUtils.calculateMaxClauseValue(-1, 1)); + + // Insanely high configured search thread pool size + assertEquals(1024, SearchUtils.calculateMaxClauseValue(1024, 1)); + + // 1Gb heap, 8 processors + assertEquals(5041, SearchUtils.calculateMaxClauseValue(13, 1)); + + // 30Gb heap, 48 processors + assertEquals(26932, SearchUtils.calculateMaxClauseValue(73, 30)); + } + } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorTests.java index b27c4cfaa228c..93624110407f3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorTests.java @@ -8,11 +8,10 @@ package org.elasticsearch.search.aggregations.bucket.adjacency; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregatorTestCase; import java.io.IOException; @@ -22,10 +21,11 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.startsWith; public class AdjacencyMatrixAggregatorTests extends AggregatorTestCase { - public void testTooManyFilters() throws Exception { - int maxFilters = SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.get(Settings.EMPTY); + public void testTooManyFilters() { + int maxFilters = IndexSearcher.getMaxClauseCount(); int maxFiltersPlusOne = maxFilters + 1; Map<String, QueryBuilder> filters = new HashMap<>(maxFilters); @@ -39,15 +39,8 @@ public void testTooManyFilters() throws Exception { ); assertThat( ex.getMessage(), - equalTo( - "Number of filters is too large, must be less than or equal to: [" - + maxFilters - + "] but was [" - + maxFiltersPlusOne - + "]." - + "This limit can be set by changing the [" - + SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey() - + "] setting." + startsWith( + "Number of filters is too large, must be less than or equal to: [" + maxFilters + "] but was [" + maxFiltersPlusOne + "]." )
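
For reference, the standalone sketch below (not part of this patch; the class and method names are illustrative only) mirrors the heuristic introduced in SearchUtils.calculateMaxClauseValue above. It shows how the "around 27,000" figure quoted in the migration note falls out of a 30GB heap, assuming the default search thread pool sizing of ((allocatedProcessors * 3) / 2) + 1 threads, which gives 73 threads on a 48-CPU node (the same inputs asserted in SearchUtilsTests).

public final class MaxClauseCountSketch {

    static final int DEFAULT_MAX_CLAUSE_COUNT = 1024;

    // Mirrors SearchUtils.calculateMaxClauseValue(long, double) from the patch: each clause may
    // need up to ~16k of memory for postings, positions, offsets and impacts, so roughly
    // heapInGb * 65_536 clauses fit in the heap; that budget is split across the threads that
    // can search concurrently, with 1024 as the floor.
    static int calculateMaxClauseValue(long searchThreadPoolSize, double heapInGb) {
        if (searchThreadPoolSize <= 0 || heapInGb <= 0) {
            return DEFAULT_MAX_CLAUSE_COUNT;
        }
        int maxClauseCount = (int) (heapInGb * 65_536 / searchThreadPoolSize);
        return Math.max(DEFAULT_MAX_CLAUSE_COUNT, maxClauseCount);
    }

    public static void main(String[] args) {
        System.out.println(calculateMaxClauseValue(73, 30)); // 26932: 30GB heap, 73 search threads (48 CPUs)
        System.out.println(calculateMaxClauseValue(13, 1));  // 5041: 1GB heap, 13 search threads (8 CPUs)
        System.out.println(calculateMaxClauseValue(-1, 1));  // 1024: pool size unknown, fall back to the old default
    }
}

Because larger heaps raise the limit and larger search thread pools lower it, the migration note's Impact section recommends adding heap or shrinking the search thread pool rather than tuning the now-deprecated indices.query.bool.max_clause_count setting.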