diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index bb57fbe06b60..fa394ee4e4e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -81,6 +81,13 @@ public class CacheConfig {
    */
   public static final String PREFETCH_BLOCKS_ON_OPEN_KEY = "hbase.rs.prefetchblocksonopen";
 
+  /**
+   * Configuration key to cache blocks when a compacted file is written, predicated on prefetching
+   * being enabled for the column family.
+   */
+  public static final String PREFETCH_COMPACTED_BLOCKS_ON_WRITE_KEY =
+    "hbase.rs.prefetchcompactedblocksonwrite";
+
   public static final String DROP_BEHIND_CACHE_COMPACTION_KEY =
     "hbase.hfile.drop.behind.compaction";
 
@@ -93,6 +100,7 @@ public class CacheConfig {
   public static final boolean DEFAULT_EVICT_ON_CLOSE = false;
   public static final boolean DEFAULT_CACHE_DATA_COMPRESSED = false;
   public static final boolean DEFAULT_PREFETCH_ON_OPEN = false;
+  public static final boolean DEFAULT_PREFETCH_COMPACTED_BLOCKS_ON_WRITE = false;
   public static final boolean DROP_BEHIND_CACHE_COMPACTION_DEFAULT = true;
 
   /**
@@ -124,6 +132,12 @@ public class CacheConfig {
   /** Whether data blocks should be prefetched into the cache */
   private final boolean prefetchOnOpen;
 
+  /**
+   * Whether data blocks should be cached when compacted file is written for column families with
+   * prefetching
+   */
+  private final boolean prefetchCompactedDataOnWrite;
+
   private final boolean dropBehindCompaction;
 
   // Local reference to the block cache
@@ -174,6 +188,8 @@ public CacheConfig(Configuration conf, ColumnFamilyDescriptor family, BlockCache
         (family == null ? false : family.isEvictBlocksOnClose());
     this.prefetchOnOpen = conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) ||
         (family == null ? false : family.isPrefetchBlocksOnOpen());
+    this.prefetchCompactedDataOnWrite = conf.getBoolean(PREFETCH_COMPACTED_BLOCKS_ON_WRITE_KEY,
+      DEFAULT_PREFETCH_COMPACTED_BLOCKS_ON_WRITE);
     this.blockCache = blockCache;
     this.byteBuffAllocator = byteBuffAllocator;
     LOG.info("Created cacheConfig: " + this + (family == null ? "" : " for family " + family) +
@@ -193,6 +209,7 @@ public CacheConfig(CacheConfig cacheConf) {
     this.evictOnClose = cacheConf.evictOnClose;
     this.cacheDataCompressed = cacheConf.cacheDataCompressed;
     this.prefetchOnOpen = cacheConf.prefetchOnOpen;
+    this.prefetchCompactedDataOnWrite = cacheConf.prefetchCompactedDataOnWrite;
     this.dropBehindCompaction = cacheConf.dropBehindCompaction;
     this.blockCache = cacheConf.blockCache;
     this.byteBuffAllocator = cacheConf.byteBuffAllocator;
@@ -207,6 +224,7 @@ private CacheConfig() {
     this.evictOnClose = false;
     this.cacheDataCompressed = false;
     this.prefetchOnOpen = false;
+    this.prefetchCompactedDataOnWrite = false;
     this.dropBehindCompaction = false;
     this.blockCache = null;
     this.byteBuffAllocator = ByteBuffAllocator.HEAP;
@@ -319,6 +337,13 @@ public boolean shouldPrefetchOnOpen() {
     return this.prefetchOnOpen;
   }
 
+  /**
+   * @return true if blocks should be cached while writing during compaction, false if not
+   */
+  public boolean shouldCacheCompactedBlocksOnWrite() {
+    return this.prefetchCompactedDataOnWrite && this.prefetchOnOpen;
+  }
+
   /**
    * Return true if we may find this type of block in block cache.
    *
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 447bc736789a..e6935673421a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1118,9 +1118,9 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm
       boolean shouldDropBehind) throws IOException {
     final CacheConfig writerCacheConf;
     if (isCompaction) {
-      // Don't cache data on write on compactions.
+      // Don't cache data on write on compactions, unless specifically configured to do so
       writerCacheConf = new CacheConfig(cacheConf);
-      writerCacheConf.setCacheDataOnWrite(false);
+      writerCacheConf.setCacheDataOnWrite(cacheConf.shouldCacheCompactedBlocksOnWrite());
     } else {
       writerCacheConf = cacheConf;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index 3a769b09ba9f..5e76b2dbbd5d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -53,6 +53,8 @@
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -405,8 +407,14 @@ private void writeStoreFile(boolean useTags) throws IOException {
     storeFilePath = sfw.getPath();
   }
 
-  private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
+  private void testCachingDataBlocksDuringCompactionInternals(boolean useTags,
+      boolean prefetchCompactedBlocksOnWrite)
       throws IOException, InterruptedException {
+
+    // Set the conf if testing caching compacted blocks on write
+    conf.setBoolean(CacheConfig.PREFETCH_COMPACTED_BLOCKS_ON_WRITE_KEY,
+      prefetchCompactedBlocksOnWrite);
+
     // TODO: need to change this test if we add a cache size threshold for
     // compactions, or if we implement some other kind of intelligent logic for
     // deciding what blocks to cache-on-write on compaction.
@@ -415,7 +423,8 @@ private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
     final byte[] cfBytes = Bytes.toBytes(cf);
     final int maxVersions = 3;
     ColumnFamilyDescriptor cfd =
-        ColumnFamilyDescriptorBuilder.newBuilder(cfBytes).setCompressionType(compress)
+        ColumnFamilyDescriptorBuilder.newBuilder(cfBytes)
+            .setPrefetchBlocksOnOpen(prefetchCompactedBlocksOnWrite).setCompressionType(compress)
             .setBloomFilterType(BLOOM_TYPE).setMaxVersions(maxVersions)
             .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()).build();
     HRegion region = TEST_UTIL.createTestRegion(table, cfd, blockCache);
@@ -448,16 +457,42 @@ private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags)
       }
       region.flush(true);
     }
+
+    // If prefetching is enabled, cancel all prefetches before clearing block cache
+    if (prefetchCompactedBlocksOnWrite) {
+      for (HStore store : region.getStores()) {
+        for (HStoreFile storeFile : store.getStorefiles()) {
+          PrefetchExecutor.cancel(storeFile.getPath());
+        }
+      }
+    }
     clearBlockCache(blockCache);
     assertEquals(0, blockCache.getBlockCount());
+
     region.compact(false);
     LOG.debug("compactStores() returned");
 
+    boolean dataBlockCached = false;
     for (CachedBlock block: blockCache) {
-      assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType());
-      assertNotEquals(BlockType.DATA, block.getBlockType());
+      if (BlockType.ENCODED_DATA.equals(block.getBlockType())
+          || BlockType.DATA.equals(block.getBlockType())) {
+        dataBlockCached = true;
+        break;
+      }
     }
+
+    // Data blocks should be cached in instances where we are caching blocks on write. In the
+    // case of testing BucketCache, we cannot verify block type as it is not stored in the
+    // cache.
+    assertTrue(
+      "\nTest description: " + testDescription + "\nprefetchCompactedBlocksOnWrite: "
+        + prefetchCompactedBlocksOnWrite + "\n",
+      (prefetchCompactedBlocksOnWrite && !(blockCache instanceof BucketCache)) == dataBlockCached);
+
     region.close();
+
+    // Clear the cache on write setting
+    conf.setBoolean(CacheConfig.PREFETCH_COMPACTED_BLOCKS_ON_WRITE_KEY, false);
   }
 
   @Test
@@ -467,8 +502,9 @@ public void testStoreFileCacheOnWrite() throws IOException {
   }
 
   @Test
-  public void testNotCachingDataBlocksDuringCompaction() throws IOException, InterruptedException {
-    testNotCachingDataBlocksDuringCompactionInternals(false);
-    testNotCachingDataBlocksDuringCompactionInternals(true);
+  public void testCachingDataBlocksDuringCompaction() throws IOException, InterruptedException {
+    testCachingDataBlocksDuringCompactionInternals(false, false);
+    testCachingDataBlocksDuringCompactionInternals(true, false);
+    testCachingDataBlocksDuringCompactionInternals(true, true);
   }
 }
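As a usage sketch (not part of the patch), the snippet below shows the predicate the new key implements: shouldCacheCompactedBlocksOnWrite() returns true only when hbase.rs.prefetchcompactedblocksonwrite is set and the column family also has prefetch-on-open enabled, so compaction writers keep cache-on-write off unless both are true. It reuses only APIs visible in the diff above (the four-argument CacheConfig constructor, setPrefetchBlocksOnOpen, ByteBuffAllocator.HEAP); the null block cache, the "cf" family name, and the class/main wrapper are illustrative stand-ins, not part of HBase.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefetchCompactedBlocksOnWriteSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Enable caching of compacted blocks on write via the key added by this patch.
    conf.setBoolean(CacheConfig.PREFETCH_COMPACTED_BLOCKS_ON_WRITE_KEY, true);

    // The setting is predicated on prefetch-on-open being enabled for the column family.
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setPrefetchBlocksOnOpen(true).build();

    // Illustrative wiring only: no block cache is supplied and the heap allocator is used.
    CacheConfig cacheConf = new CacheConfig(conf, family, null, ByteBuffAllocator.HEAP);

    // Prints "true" only because both the new key and prefetch-on-open are set;
    // with either one disabled, createWriterInTmp() leaves cache-on-write off for compactions.
    System.out.println(cacheConf.shouldCacheCompactedBlocksOnWrite());
  }
}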