Commit d15445e

Remove usage of RAM accounting of segments (#75674)

This is a prerequisite for the upgrade to Lucene 9, which removes the ability to estimate the RAM usage of segments.
1 parent b7fc0ac commit d15445e
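
For context on why the estimate goes away: the per-segment figure came from Lucene readers exposing RAM accounting (Accountable), which Lucene 9 drops. Below is a minimal, hypothetical sketch of the kind of estimate that is no longer possible; the class and method names are illustrative and not part of this commit, and the instanceof guard is exactly what stops matching once segment readers no longer implement Accountable.

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Accountable;

// Hypothetical helper: sums ramBytesUsed() across the segment readers of an
// open index reader. On Lucene 8.x the codec readers implement Accountable,
// so this yields a real estimate; on Lucene 9 they no longer do, so the guard
// never matches and no per-segment RAM figure can be computed.
final class SegmentRamEstimator {
    private SegmentRamEstimator() {}

    static long estimate(DirectoryReader reader) {
        long total = 0;
        for (LeafReaderContext ctx : reader.leaves()) {
            if (ctx.reader() instanceof Accountable) {
                total += ((Accountable) ctx.reader()).ramBytesUsed();
            }
        }
        return total;
    }
}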

36 files changed, +154 -744 lines changed

docs/reference/cat/segments.asciidoc

Lines changed: 2 additions & 2 deletions
@@ -127,7 +127,7 @@ The API returns the following response:
 ["source","txt",subs="attributes,callouts"]
 --------------------------------------------------
 index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound
-test 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true
-test1 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true
+test 0 p 127.0.0.1 _0 0 1 0 3kb 0 false true {lucene_version} true
+test1 0 p 127.0.0.1 _0 0 1 0 3kb 0 false true {lucene_version} true
 --------------------------------------------------
 // TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ non_json]

docs/reference/cluster/stats.asciidoc

Lines changed: 9 additions & 9 deletions
@@ -1317,19 +1317,19 @@ The API returns the following response:
 "segments": {
 "count": 4,
 "memory": "8.6kb",
-"memory_in_bytes": 8898,
-"terms_memory": "6.3kb",
-"terms_memory_in_bytes": 6522,
-"stored_fields_memory": "1.2kb",
-"stored_fields_memory_in_bytes": 1248,
+"memory_in_bytes": 0,
+"terms_memory": "0b",
+"terms_memory_in_bytes": 0,
+"stored_fields_memory": "0b",
+"stored_fields_memory_in_bytes": 0,
 "term_vectors_memory": "0b",
 "term_vectors_memory_in_bytes": 0,
-"norms_memory": "384b",
-"norms_memory_in_bytes": 384,
+"norms_memory": "0b",
+"norms_memory_in_bytes": 0,
 "points_memory" : "0b",
 "points_memory_in_bytes" : 0,
-"doc_values_memory": "744b",
-"doc_values_memory_in_bytes": 744,
+"doc_values_memory": "0b",
+"doc_values_memory_in_bytes": 0,
 "index_writer_memory": "0b",
 "index_writer_memory_in_bytes": 0,
 "version_map_memory": "0b",

docs/reference/indices/segments.asciidoc

Lines changed: 1 addition & 1 deletion
@@ -161,7 +161,7 @@ The API returns the following response:
 "num_docs": 1,
 "deleted_docs": 0,
 "size_in_bytes": 3800,
-"memory_in_bytes": 1410,
+"memory_in_bytes": 0,
 "committed": false,
 "search": true,
 "version": "7.0.0",

server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java

Lines changed: 0 additions & 72 deletions
@@ -12,8 +12,6 @@
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
-import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
@@ -33,7 +31,6 @@
 import org.elasticsearch.core.CheckedRunnable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
@@ -53,7 +50,6 @@
 import org.elasticsearch.index.engine.CommitStats;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.NoOpEngine;
-import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
@@ -63,10 +59,8 @@
 import org.elasticsearch.index.translog.TranslogStats;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
-import org.elasticsearch.indices.breaker.CircuitBreakerStats;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.DummyShardLock;
 import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -114,7 +108,6 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.instanceOf;
-import static org.hamcrest.Matchers.notNullValue;

 public class IndexShardIT extends ESSingleNodeTestCase {

@@ -551,71 +544,6 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul
 }
 }

-/** Check that the accounting breaker correctly matches the segments API for memory usage */
-private void checkAccountingBreaker() {
-CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class);
-CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING);
-long usedMem = acctBreaker.getUsed();
-assertThat(usedMem, greaterThan(0L));
-NodesStatsResponse response = client().admin().cluster().prepareNodesStats().setIndices(true).setBreaker(true).get();
-NodeStats stats = response.getNodes().get(0);
-assertNotNull(stats);
-SegmentsStats segmentsStats = stats.getIndices().getSegments();
-CircuitBreakerStats breakerStats = stats.getBreaker().getStats(CircuitBreaker.ACCOUNTING);
-assertEquals(usedMem, segmentsStats.getMemoryInBytes());
-assertEquals(usedMem, breakerStats.getEstimated());
-}
-
-public void testCircuitBreakerIncrementedByIndexShard() throws Exception {
-client().admin().cluster().prepareUpdateSettings()
-.setTransientSettings(Settings.builder().put("network.breaker.inflight_requests.overhead", 0.0)).get();
-
-// Generate a couple of segments
-client().prepareIndex("test").setId("1")
-.setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
-.setRefreshPolicy(IMMEDIATE).get();
-// Use routing so 2 documents are guaranteed to be on the same shard
-String routing = randomAlphaOfLength(5);
-client().prepareIndex("test").setId("2")
-.setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
-.setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
-client().prepareIndex("test").setId("3")
-.setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
-.setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
-
-checkAccountingBreaker();
-// Test that force merging causes the breaker to be correctly adjusted
-logger.info("--> force merging to a single segment");
-client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).setFlush(randomBoolean()).get();
-client().admin().indices().prepareRefresh().get();
-checkAccountingBreaker();
-
-client().admin().cluster().prepareUpdateSettings()
-.setTransientSettings(Settings.builder().put("indices.breaker.total.limit", "1kb")).get();
-
-// Test that we're now above the parent limit due to the segments
-Exception e = expectThrows(Exception.class,
-() -> client().prepareSearch("test")
-.addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get());
-logger.info("--> got an expected exception", e);
-assertThat(e.getCause(), notNullValue());
-assertThat(e.getCause().getMessage(), containsString("[parent] Data too large, data for [preallocate[aggregations]]"));
-
-client().admin().cluster().prepareUpdateSettings()
-.setTransientSettings(Settings.builder()
-.putNull("indices.breaker.total.limit")
-.putNull("network.breaker.inflight_requests.overhead")).get();
-
-// Test that deleting the index causes the breaker to correctly be decremented
-logger.info("--> deleting index");
-client().admin().indices().prepareDelete("test").get();
-
-// Accounting breaker should now be 0
-CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class);
-CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING);
-assertThat(acctBreaker.getUsed(), equalTo(0L));
-}
-
 public static final IndexShard recoverShard(IndexShard newShard) throws IOException {
 DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
 newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));

server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java

Lines changed: 0 additions & 2 deletions
@@ -75,8 +75,6 @@ private void reset() {
 HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
 HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
 HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
-HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING,
-HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING,
 HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING,
 HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING,
 HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING).forEach(s -> resetSettings.putNull(s.getKey()));

server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java

Lines changed: 0 additions & 1 deletion
@@ -615,7 +615,6 @@ public void testSegmentsStats() {

 assertThat(stats.getTotal().getSegments(), notNullValue());
 assertThat(stats.getTotal().getSegments().getCount(), equalTo((long) test1.totalNumShards));
-assertThat(stats.getTotal().getSegments().getMemoryInBytes(), greaterThan(0L));
 if (includeSegmentFileSizes) {
 assertThat(stats.getTotal().getSegments().getFiles().size(), greaterThan(0));
 for (ObjectObjectCursor<String, SegmentsStats.FileStats> cursor : stats.getTotal().getSegments().getFiles()) {

server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t
 builder.field(Fields.NUM_DOCS, segment.getNumDocs());
 builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs());
 builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSize());
-builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, new ByteSizeValue(segment.getMemoryInBytes()));
+builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, new ByteSizeValue(0));
 builder.field(Fields.COMMITTED, segment.isCommitted());
 builder.field(Fields.SEARCH, segment.isSearch());
 if (segment.getVersion() != null) {

server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java

Lines changed: 2 additions & 3 deletions
@@ -529,7 +529,7 @@ public ShardCountStats getShards() {

 /**
 * Utility method which computes total memory by adding
-* FieldData, PercolatorCache, Segments (memory, index writer, version map)
+* FieldData, PercolatorCache, Segments (index writer, version map)
 */
 public ByteSizeValue getTotalMemory() {
 long size = 0;
@@ -540,8 +540,7 @@ public ByteSizeValue getTotalMemory() {
 size += this.getQueryCache().getMemorySizeInBytes();
 }
 if (this.getSegments() != null) {
-size += this.getSegments().getMemoryInBytes() +
-this.getSegments().getIndexWriterMemoryInBytes() +
+size += this.getSegments().getIndexWriterMemoryInBytes() +
 this.getSegments().getVersionMapMemoryInBytes();
 }

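
Net effect of the two hunks above, as a sketch only (the field data and query cache contributions earlier in the method, and the return statement, are assumed unchanged by this commit):

    /**
     * Utility method which computes total memory by adding
     * FieldData, PercolatorCache, Segments (index writer, version map)
     */
    public ByteSizeValue getTotalMemory() {
        long size = 0;
        // ... field data and query cache sizes are added to `size` here, as before ...
        if (this.getSegments() != null) {
            // Segment memory itself no longer contributes; only the live
            // indexing buffers (index writer and version map) are counted.
            size += this.getSegments().getIndexWriterMemoryInBytes() +
                this.getSegments().getVersionMapMemoryInBytes();
        }
        return new ByteSizeValue(size);
    }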

server/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java

Lines changed: 0 additions & 6 deletions
@@ -43,12 +43,6 @@ public interface CircuitBreaker {
 * writing requests on the network layer.
 */
 String IN_FLIGHT_REQUESTS = "inflight_requests";
-/**
-* The accounting breaker tracks things held in memory that is independent
-* of the request lifecycle. This includes memory used by Lucene for
-* segments.
-*/
-String ACCOUNTING = "accounting";

 enum Type {
 // A regular or ChildMemoryCircuitBreaker
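
With the ACCOUNTING constant gone, lookups such as breakerService.getBreaker(CircuitBreaker.ACCOUNTING) in the removed test above no longer compile, while the remaining built-in breakers are still fetched the same way. A small, hypothetical helper as an illustration (the class and method are mine, not from this commit; getBreaker and getUsed are the same calls the removed test relied on):

import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.indices.breaker.CircuitBreakerService;

// Hypothetical example: read the current usage of a surviving breaker, here the
// in-flight requests breaker whose name constant appears in the hunk above.
final class BreakerUsage {
    private BreakerUsage() {}

    static long inFlightRequestBytes(CircuitBreakerService breakerService) {
        CircuitBreaker breaker = breakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS);
        return breaker.getUsed();
    }
}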

server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java

Lines changed: 0 additions & 2 deletions
@@ -281,8 +281,6 @@ public void apply(Settings value, Settings current, Settings previous) {
 HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING,
 HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
 HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
-HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING,
-HierarchyCircuitBreakerService.ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING,
 IndexModule.NODE_STORE_ALLOW_MMAP,
 ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
 ClusterService.USER_DEFINED_METADATA,
