From c51600937771367ff082886936b6099add248a09 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 5 Feb 2023 09:05:15 +0000 Subject: [PATCH] Add setting to limit total number of shards on a cluster (#6143) Signed-off-by: Rishav Sagar (cherry picked from commit e42b76f74a706157b510ec9cecae93c7fdf5f715) Signed-off-by: github-actions[bot] --- CHANGELOG.md | 1 + .../cluster/shards/ClusterShardLimitIT.java | 77 ++++++++----- .../common/settings/ClusterSettings.java | 1 + .../indices/ShardLimitValidator.java | 102 +++++++++++++++++- .../indices/ShardLimitValidatorTests.java | 102 +++++++++++++++++- 5 files changed, 248 insertions(+), 35 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5afcaced03447..d32ba1cee2d42 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add support to disallow search request with preference parameter with strict weighted shard routing([#5874](https://github.com/opensearch-project/OpenSearch/pull/5874)) - Added support to apply index create block ([#4603](https://github.com/opensearch-project/OpenSearch/issues/4603)) - Adds support for minimum compatible version for extensions ([#6003](https://github.com/opensearch-project/OpenSearch/pull/6003)) +- Add a guardrail to limit maximum number of shard on the cluster ([#6143](https://github.com/opensearch-project/OpenSearch/pull/6143)) ### Dependencies - Update nebula-publishing-plugin to 19.2.0 ([#5704](https://github.com/opensearch-project/OpenSearch/pull/5704)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index a88d42c07f8d6..8059266916f3f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -41,6 +41,7 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Priority; import org.opensearch.common.network.NetworkModule; @@ -68,6 +69,8 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; +import static org.opensearch.indices.ShardLimitValidator.SETTING_MAX_SHARDS_PER_CLUSTER_KEY; import static org.opensearch.test.NodeRoles.dataNode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -75,12 +78,12 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class ClusterShardLimitIT extends OpenSearchIntegTestCase { - private static final String shardsPerNodeKey = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(); + private static final String shardsPerNodeKey = SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(); private static final String ignoreDotIndexKey = ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(); public void testSettingClusterMaxShards() { int shardsPerNode = between(1, 500_000); - setShardsPerNode(shardsPerNode); + setMaxShardLimit(shardsPerNode, shardsPerNodeKey); } public void testSettingIgnoreDotIndexes() { @@ -118,7 +121,7 @@ public void testIndexCreationOverLimit() { ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); - setShardsPerNode(counts.getShardsPerNode()); + setMaxShardLimit(counts.getShardsPerNode(), shardsPerNodeKey); // Create an index that will bring us up 
to the limit createIndex( "test", @@ -155,7 +158,7 @@ public void testIndexCreationOverLimitForDotIndexesSucceeds() { int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); // Setting the cluster.max_shards_per_node setting according to the data node count. - setShardsPerNode(dataNodes); + setMaxShardLimit(dataNodes, shardsPerNodeKey); setIgnoreDotIndex(true); /* @@ -176,9 +179,7 @@ public void testIndexCreationOverLimitForDotIndexesSucceeds() { // Getting cluster.max_shards_per_node setting ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - String maxShardsPerNode = clusterState.getMetadata() - .settings() - .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + String maxShardsPerNode = clusterState.getMetadata().settings().get(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); @@ -203,7 +204,7 @@ public void testIndexCreationOverLimitForDotIndexesFail() { int maxAllowedShards = dataNodes * dataNodes; // Setting the cluster.max_shards_per_node setting according to the data node count. - setShardsPerNode(dataNodes); + setMaxShardLimit(dataNodes, shardsPerNodeKey); /* Create an index that will bring us up to the limit. 
It would create index with primary equal to the @@ -223,9 +224,7 @@ public void testIndexCreationOverLimitForDotIndexesFail() { // Getting cluster.max_shards_per_node setting ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - String maxShardsPerNode = clusterState.getMetadata() - .settings() - .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + String maxShardsPerNode = clusterState.getMetadata().settings().get(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); @@ -247,6 +246,27 @@ public void testIndexCreationOverLimitForDotIndexesFail() { assertFalse(clusterState.getMetadata().hasIndex(".test-index")); } + public void testCreateIndexWithMaxClusterShardSetting() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + setMaxShardLimit(dataNodes, shardsPerNodeKey); + + int maxAllowedShards = dataNodes + 1; + int extraShardCount = maxAllowedShards + 1; + // Getting total active shards in the cluster. 
+ int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + try { + setMaxShardLimit(maxAllowedShards, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); + prepareCreate("test_index_with_cluster_shard_limit").setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, extraShardCount).put(SETTING_NUMBER_OF_REPLICAS, 0).build() + ).get(); + } catch (final IllegalArgumentException ex) { + verifyException(Math.min(maxAllowedShards, dataNodes * dataNodes), currentActiveShards, extraShardCount, ex); + } finally { + setMaxShardLimit(-1, SETTING_MAX_SHARDS_PER_CLUSTER_KEY); + } + } + /** * The test checks if the index starting with the .ds- can be created if the node has * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes @@ -258,7 +278,7 @@ public void testIndexCreationOverLimitForDataStreamIndexes() { int maxAllowedShards = dataNodes * dataNodes; // Setting the cluster.max_shards_per_node setting according to the data node count. 
- setShardsPerNode(dataNodes); + setMaxShardLimit(dataNodes, shardsPerNodeKey); setIgnoreDotIndex(true); /* @@ -279,9 +299,7 @@ public void testIndexCreationOverLimitForDataStreamIndexes() { // Getting cluster.max_shards_per_node setting ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - String maxShardsPerNode = clusterState.getMetadata() - .settings() - .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + String maxShardsPerNode = clusterState.getMetadata().settings().get(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); @@ -308,7 +326,7 @@ public void testIndexCreationOverLimitFromTemplate() { final ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); - setShardsPerNode(counts.getShardsPerNode()); + setMaxShardLimit(counts.getShardsPerNode(), shardsPerNodeKey); if (counts.getFirstIndexShards() > 0) { createIndex( @@ -351,7 +369,7 @@ public void testIncreaseReplicasOverLimit() { int firstShardCount = between(2, 10); int shardsPerNode = firstShardCount - 1; - setShardsPerNode(shardsPerNode); + setMaxShardLimit(shardsPerNode, shardsPerNodeKey); prepareCreate( "growing-should-fail", @@ -397,7 +415,7 @@ public void testChangingMultipleIndicesOverLimit() { int secondIndexReplicas = dataNodes; int shardsPerNode = firstIndexFactor + (secondIndexFactor * (1 + secondIndexReplicas)); - setShardsPerNode(shardsPerNode); + setMaxShardLimit(shardsPerNode, shardsPerNodeKey); createIndex( "test-1-index", @@ -448,7 +466,7 @@ public void testPreserveExistingSkipsCheck() { int firstShardCount = between(2, 10); int shardsPerNode = firstShardCount - 1; - setShardsPerNode(shardsPerNode); + setMaxShardLimit(shardsPerNode, shardsPerNodeKey); prepareCreate( "test-index", @@ -521,7 +539,7 @@ public void testRestoreSnapshotOverLimit() { 
cluster().wipeIndices("snapshot-index"); // Reduce the shard limit and fill it up - setShardsPerNode(counts.getShardsPerNode()); + setMaxShardLimit(counts.getShardsPerNode(), shardsPerNodeKey); createIndex( "test-fill", Settings.builder() @@ -570,7 +588,7 @@ public void testOpenIndexOverLimit() { assertTrue(closeIndexResponse.isAcknowledged()); // Fill up the cluster - setShardsPerNode(counts.getShardsPerNode()); + setMaxShardLimit(counts.getShardsPerNode(), shardsPerNodeKey); createIndex( "test-fill", Settings.builder() @@ -704,27 +722,34 @@ private int ensureMultipleDataNodes(int dataNodes) { return dataNodes; } - private void setShardsPerNode(int shardsPerNode) { + /** + * Set max shard limit on either per node level or on cluster level. + * + * @param limit the limit value to set. + * @param key node level or cluster level setting key. + */ + private void setMaxShardLimit(int limit, String key) { try { ClusterUpdateSettingsResponse response; if (frequently()) { response = client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, shardsPerNode).build()) + .setPersistentSettings(Settings.builder().put(key, limit).build()) .get(); - assertEquals(shardsPerNode, response.getPersistentSettings().getAsInt(shardsPerNodeKey, -1).intValue()); + assertEquals(limit, response.getPersistentSettings().getAsInt(key, -1).intValue()); } else { response = client().admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(shardsPerNodeKey, shardsPerNode).build()) + .setTransientSettings(Settings.builder().put(key, limit).build()) .get(); - assertEquals(shardsPerNode, response.getTransientSettings().getAsInt(shardsPerNodeKey, -1).intValue()); + assertEquals(limit, response.getTransientSettings().getAsInt(key, -1).intValue()); } } catch (IllegalArgumentException ex) { fail(ex.getMessage()); } + } private void setIgnoreDotIndex(boolean ignoreDotIndex) { diff --git 
a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 426395c48d331..8a9bed758dc68 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -263,6 +263,7 @@ public void apply(Settings value, Settings current, Settings previous) { Metadata.DEFAULT_REPLICA_COUNT_SETTING, Metadata.SETTING_CREATE_INDEX_BLOCK_SETTING, ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE, + ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_CLUSTER, ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES, RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, diff --git a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java index e803e387448bc..63a0cad402061 100644 --- a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java +++ b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java @@ -43,6 +43,10 @@ import java.util.Arrays; import java.util.Optional; +import java.util.Map; +import java.util.List; +import java.util.Collections; +import java.util.Iterator; import java.util.concurrent.atomic.AtomicInteger; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; @@ -58,10 +62,22 @@ * @opensearch.internal */ public class ShardLimitValidator { + + public static final String SETTING_MAX_SHARDS_PER_CLUSTER_KEY = "cluster.routing.allocation.total_shards_limit"; public static final Setting SETTING_CLUSTER_MAX_SHARDS_PER_NODE = Setting.intSetting( "cluster.max_shards_per_node", 1000, 1, + new MaxShardPerNodeLimitValidator(), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting SETTING_CLUSTER_MAX_SHARDS_PER_CLUSTER = 
Setting.intSetting( + SETTING_MAX_SHARDS_PER_CLUSTER_KEY, + -1, + -1, + new MaxShardPerClusterLimitValidator(), Setting.Property.Dynamic, Setting.Property.NodeScope ); @@ -74,13 +90,17 @@ public class ShardLimitValidator { ); protected final AtomicInteger shardLimitPerNode = new AtomicInteger(); + protected final AtomicInteger shardLimitPerCluster = new AtomicInteger(); private final SystemIndices systemIndices; private volatile boolean ignoreDotIndexes; public ShardLimitValidator(final Settings settings, ClusterService clusterService, SystemIndices systemIndices) { this.shardLimitPerNode.set(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(settings)); + this.shardLimitPerCluster.set(SETTING_CLUSTER_MAX_SHARDS_PER_CLUSTER.get(settings)); this.ignoreDotIndexes = SETTING_CLUSTER_IGNORE_DOT_INDEXES.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(SETTING_CLUSTER_MAX_SHARDS_PER_NODE, this::setShardLimitPerNode); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(SETTING_CLUSTER_MAX_SHARDS_PER_CLUSTER, this::setShardLimitPerCluster); clusterService.getClusterSettings().addSettingsUpdateConsumer(SETTING_CLUSTER_IGNORE_DOT_INDEXES, this::setIgnoreDotIndexes); this.systemIndices = systemIndices; } @@ -89,6 +109,10 @@ private void setShardLimitPerNode(int newValue) { this.shardLimitPerNode.set(newValue); } + private void setShardLimitPerCluster(int newValue) { + this.shardLimitPerCluster.set(newValue); + } + /** * Gets the currently configured value of the {@link ShardLimitValidator#SETTING_CLUSTER_MAX_SHARDS_PER_NODE} setting. * @return the current value of the setting @@ -97,6 +121,14 @@ public int getShardLimitPerNode() { return shardLimitPerNode.get(); } + /** + * Gets the currently configured value of the {@link ShardLimitValidator#SETTING_CLUSTER_MAX_SHARDS_PER_CLUSTER} setting. + * @return the current value of the setting. 
+ */ + public int getShardLimitPerCluster() { + return shardLimitPerCluster.get(); + } + private void setIgnoreDotIndexes(boolean newValue) { this.ignoreDotIndexes = newValue; } @@ -211,11 +243,16 @@ private boolean isDataStreamIndex(String indexName) { * an operation. If empty, a sign that the operation is valid. */ public Optional checkShardLimit(int newShards, ClusterState state) { - return checkShardLimit(newShards, state, getShardLimitPerNode()); + return checkShardLimit(newShards, state, getShardLimitPerNode(), getShardLimitPerCluster()); } // package-private for testing - static Optional checkShardLimit(int newShards, ClusterState state, int maxShardsPerNodeSetting) { + static Optional checkShardLimit( + int newShards, + ClusterState state, + int maxShardsPerNodeSetting, + int maxShardsPerClusterSetting + ) { int nodeCount = state.getNodes().getDataNodes().size(); // Only enforce the shard limit if we have at least one data node, so that we don't block @@ -223,10 +260,15 @@ static Optional checkShardLimit(int newShards, ClusterState state, int m if (nodeCount == 0 || newShards < 0) { return Optional.empty(); } - int maxShardsPerNode = maxShardsPerNodeSetting; - int maxShardsInCluster = maxShardsPerNode * nodeCount; - int currentOpenShards = state.getMetadata().getTotalOpenIndexShards(); + int maxShardsInCluster = maxShardsPerClusterSetting; + if (maxShardsInCluster == -1) { + maxShardsInCluster = maxShardsPerNodeSetting * nodeCount; + } else { + maxShardsInCluster = Math.min(maxShardsInCluster, maxShardsPerNodeSetting * nodeCount); + } + + int currentOpenShards = state.getMetadata().getTotalOpenIndexShards(); if ((currentOpenShards + newShards) > maxShardsInCluster) { String errorMessage = "this action would add [" + newShards @@ -239,4 +281,54 @@ static Optional checkShardLimit(int newShards, ClusterState state, int m return Optional.empty(); } + + /** + * Validates the MaxShardPerCluster threshold. 
+ */ + static final class MaxShardPerClusterLimitValidator implements Setting.Validator { + + @Override + public void validate(Integer value) {} + + @Override + public void validate(Integer maxShardPerCluster, Map, Object> settings) { + final int maxShardPerNode = (int) settings.get(SETTING_CLUSTER_MAX_SHARDS_PER_NODE); + doValidate(maxShardPerCluster, maxShardPerNode); + } + + @Override + public Iterator> settings() { + final List> settings = Collections.singletonList(SETTING_CLUSTER_MAX_SHARDS_PER_NODE); + return settings.iterator(); + } + } + + /** + * Validates the MaxShardPerNode threshold. + */ + static final class MaxShardPerNodeLimitValidator implements Setting.Validator { + + @Override + public void validate(Integer value) {} + + @Override + public void validate(Integer maxShardPerNode, Map, Object> settings) { + final int maxShardPerCluster = (int) settings.get(SETTING_CLUSTER_MAX_SHARDS_PER_CLUSTER); + doValidate(maxShardPerCluster, maxShardPerNode); + } + + @Override + public Iterator> settings() { + final List> settings = Collections.singletonList(SETTING_CLUSTER_MAX_SHARDS_PER_CLUSTER); + return settings.iterator(); + } + } + + private static void doValidate(final int maxShardPerCluster, final int maxShardPerNode) { + if (maxShardPerCluster != -1 && maxShardPerCluster < maxShardPerNode) { + throw new IllegalArgumentException( + "MaxShardPerCluster " + maxShardPerCluster + " should be greater than or equal to MaxShardPerNode " + maxShardPerNode + ); + } + } } diff --git a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java index f19689565dd92..8edd62438fb1d 100644 --- a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java @@ -59,10 +59,11 @@ import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addClosedIndex; import static 
org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addOpenedIndex; import static org.opensearch.cluster.shards.ShardCounts.forDataNodeCount; -import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES; -import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; +import static org.opensearch.indices.ShardLimitValidator.SETTING_MAX_SHARDS_PER_CLUSTER_KEY; public class ShardLimitValidatorTests extends OpenSearchTestCase { @@ -75,7 +76,7 @@ public void testOverShardLimit() { ClusterState state = createClusterForShardLimitTest(nodesInCluster, counts.getFirstIndexShards(), counts.getFirstIndexReplicas()); int shardsToAdd = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); - Optional errorMessage = ShardLimitValidator.checkShardLimit(shardsToAdd, state, counts.getShardsPerNode()); + Optional errorMessage = ShardLimitValidator.checkShardLimit(shardsToAdd, state, counts.getShardsPerNode(), -1); int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); @@ -93,6 +94,36 @@ public void testOverShardLimit() { ); } + public void testOverShardLimitWithMaxShardCountLimit() { + int nodesInCluster = randomIntBetween(1, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + + ClusterState state = createClusterForShardLimitTest(nodesInCluster, counts.getFirstIndexShards(), counts.getFirstIndexReplicas()); + int shardsToAdd = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int maxShardLimitOnCluster = shardsToAdd - 1; + Optional errorMessage = 
ShardLimitValidator.checkShardLimit( + shardsToAdd, + state, + counts.getShardsPerNode(), + maxShardLimitOnCluster + ); + + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int maxShards = Math.min(counts.getShardsPerNode() * nodesInCluster, maxShardLimitOnCluster); + assertTrue(errorMessage.isPresent()); + assertEquals( + "this action would add [" + + totalShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open", + errorMessage.get() + ); + } + public void testUnderShardLimit() { int nodesInCluster = randomIntBetween(2, 90); // Calculate the counts for a cluster 1 node smaller than we have to ensure we have headroom @@ -104,7 +135,7 @@ public void testUnderShardLimit() { int existingShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); int shardsToAdd = randomIntBetween(1, (counts.getShardsPerNode() * nodesInCluster) - existingShards); - Optional errorMessage = ShardLimitValidator.checkShardLimit(shardsToAdd, state, counts.getShardsPerNode()); + Optional errorMessage = ShardLimitValidator.checkShardLimit(shardsToAdd, state, counts.getShardsPerNode(), -1); assertFalse(errorMessage.isPresent()); } @@ -152,6 +183,53 @@ public void testNonSystemIndexCreationFails() { ); } + public void testNonSystemIndexCreationFailsWithMaxShardLimitOnCluster() { + final int maxShardLimitOnCluster = 1; + Settings limitOnlySettings = Settings.builder() + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 1) + .put(SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(), false) + .put(SETTING_MAX_SHARDS_PER_CLUSTER_KEY, maxShardLimitOnCluster) + .build(); + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(limitOnlySettings); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + 
.put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + final ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit("abc", settings, state) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + 2 + + "] total shards, but this cluster currently has [" + + 1 + + "]/[" + + maxShardLimitOnCluster + + "] maximum shards open;", + exception.getMessage() + ); + } + + public void testNonSystemIndexCreationPassesWithMaxShardLimitOnCluster() { + final int maxShardLimitOnCluster = 5; + Settings limitOnlySettings = Settings.builder() + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 3) + .put(SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(), false) + .put(SETTING_MAX_SHARDS_PER_CLUSTER_KEY, maxShardLimitOnCluster) + .build(); + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(limitOnlySettings); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + shardLimitValidator.validateShardLimit("abc", settings, state); + } + /** * This test validates that index starting with dot creation Succeeds * when the setting cluster.ignore_dot_indexes is set to true. @@ -489,6 +567,22 @@ public static ClusterState createClusterForShardLimitTest( ); } + /** + * Creates a {@link ShardLimitValidator} for testing with the given setting and a mocked cluster service. + * + * @param limitOnlySettings the setting used for creating ShardLimitValidator. + * @return a test instance + */ + private static ShardLimitValidator createTestShardLimitService(final Settings limitOnlySettings) { + // Use a mocked clusterService - for unit tests we won't be updating the setting anyway. 
+ ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(limitOnlySettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + return new ShardLimitValidator(limitOnlySettings, clusterService, new SystemIndices(emptyMap())); + } + /** * Creates a {@link ShardLimitValidator} for testing with the given setting and a mocked cluster service. *