diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index f321d034bc6b..472e352cda61 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -221,7 +221,7 @@ public class BucketCache implements BlockCache, HeapSize {
    */
   transient final IdReadWriteLock<Long> offsetLock;
 
-  private final NavigableSet<BlockCacheKey> blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> {
+  final NavigableSet<BlockCacheKey> blocksByHFile = new ConcurrentSkipListSet<>((a, b) -> {
     int nameComparison = a.getHfileName().compareTo(b.getHfileName());
     if (nameComparison != 0) {
       return nameComparison;
@@ -643,12 +643,14 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decre
     blocksByHFile.remove(cacheKey);
     if (decrementBlockNumber) {
       this.blockNumber.decrement();
+      if (ioEngine.isPersistent()) {
+        removeFileFromPrefetch(cacheKey.getHfileName());
+      }
     }
     if (evictedByEvictionProcess) {
       cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
     }
     if (ioEngine.isPersistent()) {
-      removeFileFromPrefetch(cacheKey.getHfileName());
       setCacheInconsistent(true);
     }
   }
@@ -1083,6 +1085,7 @@ public void run() {
    */
   protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) {
     BucketEntry previousEntry = backingMap.put(key, bucketEntry);
+    blocksByHFile.add(key);
     if (previousEntry != null && previousEntry != bucketEntry) {
       previousEntry.withWriteLock(offsetLock, () -> {
         blockEvicted(key, previousEntry, false, false);
@@ -1163,10 +1166,6 @@ void doDrain(final List<RAMQueueEntry> entries, ByteBuffer metaBuff) throws Inte
           index++;
           continue;
         }
-        BlockCacheKey cacheKey = re.getKey();
-        if (ramCache.containsKey(cacheKey)) {
-          blocksByHFile.add(cacheKey);
-        }
         // Reset the position for reuse.
         // It should be guaranteed that the data in the metaBuff has been transferred to the
         // ioEngine safely. Otherwise, this reuse is problematic. Fortunately, the data is already
@@ -1517,6 +1516,7 @@ private void disableCache() {
     if (!ioEngine.isPersistent() || persistencePath == null) {
       // If persistent ioengine and a path, we will serialize out the backingMap.
       this.backingMap.clear();
+      this.blocksByHFile.clear();
       this.fullyCachedFiles.clear();
       this.regionCachedSizeMap.clear();
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java
index 4b729f334116..429fffa38f6c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java
@@ -122,6 +122,8 @@ public void testTooBigEntry() throws InterruptedException {
     Mockito.when(tooBigCacheable.getSerializedLength()).thenReturn(Integer.MAX_VALUE);
     this.bc.cacheBlock(this.plainKey, tooBigCacheable);
     doDrainOfOneEntry(this.bc, this.wt, this.q);
+    assertTrue(bc.blocksByHFile.isEmpty());
+    assertTrue(bc.getBackingMap().isEmpty());
   }
 
   /**
@@ -138,6 +140,8 @@ public void testIOE() throws IOException, InterruptedException {
       Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any());
     this.q.add(spiedRqe);
     doDrainOfOneEntry(bc, wt, q);
+    assertTrue(bc.blocksByHFile.isEmpty());
+    assertTrue(bc.getBackingMap().isEmpty());
     // Cache disabled when ioes w/o ever healing.
     assertTrue(!bc.isCacheEnabled());
   }