 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Predicates;
-import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
-import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.Map;
 import java.util.Set;
@@ -72,38 +69,6 @@ public class IndicesQueryCache implements QueryCache, Closeable {
     private final Map<ShardId, Stats> shardStats = new ConcurrentHashMap<>();
     private volatile long sharedRamBytesUsed;
 
-    /**
-     * Calculates a map of {@link ShardId} to {@link Long} which contains the calculated share of the {@link IndicesQueryCache} shared ram
-     * size for a given shard (that is, the sum of all the longs is the size of the indices query cache). Since many shards will not
-     * participate in the cache, shards whose calculated share is zero will not be contained in the map at all. As a consequence, the
-     * correct pattern for using the returned map will be via {@link Map#getOrDefault(Object, Object)} with a {@code defaultValue} of
-     * {@code 0L}.
-     */
-    public static Map<ShardId, Long> getSharedRamSizeForAllShards(IndicesService indicesService) {
-        Map<ShardId, Long> shardIdToSharedRam = new HashMap<>();
-        IndicesQueryCache.CacheTotals cacheTotals = IndicesQueryCache.getCacheTotalsForAllShards(indicesService);
-        for (IndexService indexService : indicesService) {
-            for (IndexShard indexShard : indexService) {
-                final var queryCache = indicesService.getIndicesQueryCache();
-                long sharedRam = (queryCache == null) ? 0L : queryCache.getSharedRamSizeForShard(indexShard.shardId(), cacheTotals);
-                // as a size optimization, only store non-zero values in the map
-                if (sharedRam > 0L) {
-                    shardIdToSharedRam.put(indexShard.shardId(), sharedRam);
-                }
-            }
-        }
-        return Collections.unmodifiableMap(shardIdToSharedRam);
-    }
-
-    public long getCacheSizeForShard(ShardId shardId) {
-        Stats stats = shardStats.get(shardId);
-        return stats != null ? stats.cacheSize : 0L;
-    }
-
-    public long getSharedRamBytesUsed() {
-        return sharedRamBytesUsed;
-    }
-
     // This is a hack for the fact that the close listener for the
     // ShardCoreKeyMap will be called before onDocIdSetEviction
     // See onDocIdSetEviction for more info
@@ -126,58 +91,40 @@ private static QueryCacheStats toQueryCacheStatsSafe(@Nullable Stats stats) {
         return stats == null ? new QueryCacheStats() : stats.toQueryCacheStats();
     }
 
-    /**
-     * This computes the total cache size in bytes, and the total shard count in the cache for all shards.
-     * @param indicesService
-     * @return A CacheTotals object containing the computed total number of items in the cache and the number of shards seen in the cache
-     */
-    private static CacheTotals getCacheTotalsForAllShards(IndicesService indicesService) {
-        IndicesQueryCache queryCache = indicesService.getIndicesQueryCache();
-        boolean hasQueryCache = queryCache != null;
+    private long getShareOfAdditionalRamBytesUsed(long itemsInCacheForShard) {
+        if (sharedRamBytesUsed == 0L) {
+            return 0L;
+        }
+
+        /*
+         * We have some shared ram usage that we try to distribute proportionally to the number of segment-requests in the cache for each
+         * shard.
+         */
+        // TODO avoid looping over all local shards here - see https://github.com/elastic/elasticsearch/issues/97222
         long totalItemsInCache = 0L;
         int shardCount = 0;
-        for (final IndexService indexService : indicesService) {
-            for (final IndexShard indexShard : indexService) {
-                final var shardId = indexShard.shardId();
-                long cacheSize = hasQueryCache ? queryCache.getCacheSizeForShard(shardId) : 0L;
-                shardCount++;
-                assert cacheSize >= 0 : "Unexpected cache size of " + cacheSize + " for shard " + shardId;
-                totalItemsInCache += cacheSize;
+        if (itemsInCacheForShard == 0L) {
+            for (final var stats : shardStats.values()) {
+                shardCount += 1;
+                if (stats.cacheSize > 0L) {
+                    // some shard has nonzero cache footprint, so we apportion the shared size by cache footprint, and this shard has none
+                    return 0L;
+                }
+            }
+        } else {
+            // branchless loop for the common case
+            for (final var stats : shardStats.values()) {
+                shardCount += 1;
+                totalItemsInCache += stats.cacheSize;
             }
-        }
-        return new CacheTotals(totalItemsInCache, shardCount);
-    }
-
-    public static long getSharedRamSizeForShard(IndicesService indicesService, ShardId shardId) {
-        IndicesQueryCache.CacheTotals cacheTotals = IndicesQueryCache.getCacheTotalsForAllShards(indicesService);
-        final var queryCache = indicesService.getIndicesQueryCache();
-        return (queryCache == null) ? 0L : queryCache.getSharedRamSizeForShard(shardId, cacheTotals);
-    }
-
-    /**
-     * This method computes the shared RAM size in bytes for the given indexShard.
-     * @param shardId The shard to compute the shared RAM size for
-     * @param cacheTotals Shard totals computed in getCacheTotalsForAllShards()
-     * @return the shared RAM size in bytes allocated to the given shard, or 0 if unavailable
-     */
-    private long getSharedRamSizeForShard(ShardId shardId, CacheTotals cacheTotals) {
-        long sharedRamBytesUsed = getSharedRamBytesUsed();
-        if (sharedRamBytesUsed == 0L) {
-            return 0L;
         }
 
-        int shardCount = cacheTotals.shardCount();
         if (shardCount == 0) {
             // Sometimes it's not possible to do this when there are no shard entries at all, which can happen as the shared ram usage can
             // extend beyond the closing of all shards.
             return 0L;
         }
-        /*
-         * We have some shared ram usage that we try to distribute proportionally to the number of segment-requests in the cache for each
-         * shard.
-         */
-        long totalItemsInCache = cacheTotals.totalItemsInCache();
-        long itemsInCacheForShard = getCacheSizeForShard(shardId);
+
         final long additionalRamBytesUsed;
         if (totalItemsInCache == 0) {
             // all shards have zero cache footprint, so we apportion the size of the shared bytes equally across all shards
@@ -198,12 +145,10 @@ private long getSharedRamSizeForShard(ShardId shardId, CacheTotals cacheTotals)
         return additionalRamBytesUsed;
     }
 
-    private record CacheTotals(long totalItemsInCache, int shardCount) {}
-
     /** Get usage statistics for the given shard. */
-    public QueryCacheStats getStats(ShardId shard, long precomputedSharedRamBytesUsed) {
+    public QueryCacheStats getStats(ShardId shard) {
         final QueryCacheStats queryCacheStats = toQueryCacheStatsSafe(shardStats.get(shard));
-        queryCacheStats.addRamBytesUsed(precomputedSharedRamBytesUsed);
+        queryCacheStats.addRamBytesUsed(getShareOfAdditionalRamBytesUsed(queryCacheStats.getCacheSize()));
         return queryCacheStats;
     }
 
@@ -312,7 +257,7 @@ QueryCacheStats toQueryCacheStats() {
         public String toString() {
             return "{shardId="
                 + shardId
-                + ", ramBytesUsed="
+                + ", ramBytedUsed="
                 + ramBytesUsed
                 + ", hitCount="
                 + hitCount
@@ -409,7 +354,11 @@ protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
             shardStats.cacheCount += 1;
             shardStats.ramBytesUsed += ramBytesUsed;
 
-            StatsAndCount statsAndCount = stats2.computeIfAbsent(readerCoreKey, ignored -> new StatsAndCount(shardStats));
+            StatsAndCount statsAndCount = stats2.get(readerCoreKey);
+            if (statsAndCount == null) {
+                statsAndCount = new StatsAndCount(shardStats);
+                stats2.put(readerCoreKey, statsAndCount);
+            }
             statsAndCount.count += 1;
         }
 
@@ -422,7 +371,7 @@ protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sum
             if (numEntries > 0) {
                 // We can't use ShardCoreKeyMap here because its core closed
                 // listener is called before the listener of the cache which
-                // triggers this eviction. So instead we use stats2 that
+                // triggers this eviction. So instead we use use stats2 that
                 // we only evict when nothing is cached anymore on the segment
                 // instead of relying on close listeners
                 final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
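This revert folds the shared-RAM accounting back into `getStats(ShardId)` via `getShareOfAdditionalRamBytesUsed`: the cache's `sharedRamBytesUsed` is split across shards in proportion to each shard's cached item count, or evenly across shards when nothing is cached anywhere. Below is a minimal standalone sketch of that apportionment rule; the class and method names (`SharedRamApportionmentSketch`, `shareOfSharedRam`) are illustrative and not part of the Elasticsearch API.

```java
// Minimal sketch (illustrative, not the Elasticsearch class itself) of how shared
// query-cache RAM can be apportioned to one shard, mirroring the rule restored above.
import java.util.Map;

final class SharedRamApportionmentSketch {

    /**
     * @param sharedRamBytesUsed   total RAM attributed to the shared LRU query cache
     * @param itemsInCacheForShard cached item count of the shard being asked about
     * @param itemsPerShard        cached item counts of all shards currently tracked
     * @return the slice of sharedRamBytesUsed attributed to the given shard
     */
    static long shareOfSharedRam(long sharedRamBytesUsed, long itemsInCacheForShard, Map<String, Long> itemsPerShard) {
        if (sharedRamBytesUsed == 0L || itemsPerShard.isEmpty()) {
            return 0L; // nothing shared, or no shard entries left to apportion to
        }
        long totalItemsInCache = itemsPerShard.values().stream().mapToLong(Long::longValue).sum();
        if (totalItemsInCache == 0L) {
            // no shard has any cached items: split the shared bytes evenly across shards
            return Math.round((double) sharedRamBytesUsed / itemsPerShard.size());
        }
        // otherwise apportion proportionally to this shard's cache footprint
        return Math.round((double) sharedRamBytesUsed * itemsInCacheForShard / totalItemsInCache);
    }

    public static void main(String[] args) {
        Map<String, Long> items = Map.of("shard-0", 3L, "shard-1", 1L);
        System.out.println(shareOfSharedRam(100L, 3L, items)); // 75
        System.out.println(shareOfSharedRam(100L, 1L, items)); // 25
    }
}
```

For example, with 100 shared bytes and cached item counts of 3 and 1, the two shards are attributed 75 and 25 bytes respectively, matching the proportional split described by the comments around `additionalRamBytesUsed` in the restored method.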