diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
index bf22d38e373b..b769ea4fa615 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
@@ -39,24 +39,16 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable {
    * @param hfileName The name of the HFile this block belongs to.
    * @param offset    Offset of the block into the file
    */
-  public BlockCacheKey(String hfileName, long offset) {
-    this(hfileName, offset, true, BlockType.DATA);
-  }
-
-  public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica,
-    BlockType blockType) {
-    this.isPrimaryReplicaBlock = isPrimaryReplica;
-    this.hfileName = hfileName;
-    this.offset = offset;
-    this.blockType = blockType;
+  public BlockCacheKey(Path hfilePath, long offset) {
+    this(hfilePath, offset, true, BlockType.DATA);
   }
 
   public BlockCacheKey(Path hfilePath, long offset, boolean isPrimaryReplica,
     BlockType blockType) {
-    this.filePath = hfilePath;
     this.isPrimaryReplicaBlock = isPrimaryReplica;
     this.hfileName = hfilePath.getName();
     this.offset = offset;
     this.blockType = blockType;
+    this.filePath = hfilePath;
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 592c19c866cf..d6ff22e17a7a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -30,6 +30,7 @@
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
@@ -978,7 +979,7 @@ public static class BlockIndexWriter implements InlineBlockWriter {
     private CacheConfig cacheConf;
 
     /** Name to use for computing cache keys */
-    private String nameForCaching;
+    private Path pathForCaching;
 
     /** Type of encoding used for index blocks in HFile */
     private HFileIndexBlockEncoder indexBlockEncoder;
@@ -995,15 +996,15 @@ public BlockIndexWriter() {
      * @param cacheConf used to determine when and how a block should be cached-on-write.
      */
     public BlockIndexWriter(HFileBlock.Writer blockWriter, CacheConfig cacheConf,
-      String nameForCaching, HFileIndexBlockEncoder indexBlockEncoder) {
-      if ((cacheConf == null) != (nameForCaching == null)) {
+      Path pathForCaching, HFileIndexBlockEncoder indexBlockEncoder) {
+      if ((cacheConf == null) != (pathForCaching == null)) {
         throw new IllegalArgumentException(
           "Block cache and file name for " + "caching must be both specified or both null");
       }
       this.blockWriter = blockWriter;
       this.cacheConf = cacheConf;
-      this.nameForCaching = nameForCaching;
+      this.pathForCaching = pathForCaching;
       this.maxChunkSize = HFileBlockIndex.DEFAULT_MAX_CHUNK_SIZE;
       this.minIndexNumEntries = HFileBlockIndex.DEFAULT_MIN_INDEX_NUM_ENTRIES;
       this.indexBlockEncoder =
@@ -1070,7 +1071,7 @@ public long writeIndexBlocks(FSDataOutputStream out) throws IOException {
       if (cacheConf != null) {
         cacheConf.getBlockCache().ifPresent(cache -> {
           HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
-          cache.cacheBlock(new BlockCacheKey(nameForCaching, rootLevelIndexPos, true,
+          cache.cacheBlock(new BlockCacheKey(pathForCaching, rootLevelIndexPos, true,
             blockForCaching.getBlockType()), blockForCaching);
         });
       }
@@ -1162,7 +1163,7 @@ private void writeIntermediateBlock(FSDataOutputStream out, BlockIndexChunk pare
         cacheConf.getBlockCache().ifPresent(cache -> {
           HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
           cache.cacheBlock(
-            new BlockCacheKey(nameForCaching, beginOffset, true, blockForCaching.getBlockType()),
+            new BlockCacheKey(pathForCaching, beginOffset, true, blockForCaching.getBlockType()),
             blockForCaching);
         });
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
index 6063ffe68891..debaab9e1f41 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
@@ -79,7 +79,7 @@ public void run() {
           // so we check first if the block exists on its in-memory index, if so, we just
           // update the offset and move on to the next block without actually going read all
           // the way to the cache.
-          BlockCacheKey cacheKey = new BlockCacheKey(name, offset);
+          BlockCacheKey cacheKey = new BlockCacheKey(path, offset);
           if (cache.isAlreadyCached(cacheKey).orElse(false)) {
             // Right now, isAlreadyCached is only supported by BucketCache, which should
             // always cache data blocks.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index e0f27af71458..e2eeb7a281ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1201,7 +1201,7 @@ public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws
     // Check cache for block. If found return.
     long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block);
     BlockCacheKey cacheKey =
-      new BlockCacheKey(name, metaBlockOffset, this.isPrimaryReplicaReader(), BlockType.META);
+      new BlockCacheKey(path, metaBlockOffset, this.isPrimaryReplicaReader(), BlockType.META);
 
     cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory());
     HFileBlock cachedBlock =
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index d2dfaf62106a..786797c43908 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -315,7 +315,7 @@ protected void finishInit(final Configuration conf) {
     // Data block index writer
     boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite();
     dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(blockWriter,
-      cacheIndexesOnWrite ? cacheConf : null, cacheIndexesOnWrite ? name : null, indexBlockEncoder);
+      cacheIndexesOnWrite ? cacheConf : null, cacheIndexesOnWrite ? path : null, indexBlockEncoder);
     dataBlockIndexWriter.setMaxChunkSize(HFileBlockIndex.getMaxChunkSize(conf));
     dataBlockIndexWriter.setMinIndexNumEntries(HFileBlockIndex.getMinIndexNumEntries(conf));
     inlineBlockWriters.add(dataBlockIndexWriter);
@@ -556,7 +556,7 @@ private void doCacheOnWrite(long offset) {
     cacheConf.getBlockCache().ifPresent(cache -> {
       HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf);
       try {
-        cache.cacheBlock(new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()),
+        cache.cacheBlock(new BlockCacheKey(path, offset, true, cacheFormatBlock.getBlockType()),
           cacheFormatBlock, cacheConf.isInMemory(), true);
       } finally {
         // refCnt will auto increase when block add to Cache, see RAMCache#putIfAbsent
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 71bfc757e51e..fbe9105a822d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -27,6 +27,7 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -141,8 +142,8 @@ public class BucketCache implements BlockCache, HeapSize {
   /** Statistics thread */
   private static final int statThreadPeriod = 5 * 60;
 
-  final static int DEFAULT_WRITER_THREADS = 3;
-  final static int DEFAULT_WRITER_QUEUE_ITEMS = 64;
+  public final static int DEFAULT_WRITER_THREADS = 3;
+  public final static int DEFAULT_WRITER_QUEUE_ITEMS = 64;
 
   // Store/read block data
   transient final IOEngine ioEngine;
@@ -682,7 +683,7 @@ public void fileCacheCompleted(Path filePath, long size) {
   }
 
   private void updateRegionCachedSize(Path filePath, long cachedSize) {
-    if (filePath != null) {
+    if (filePath != null && filePath.getParent() != null && filePath.getParent().getParent() != null) {
       String regionName = filePath.getParent().getParent().getName();
       regionCachedSize.merge(regionName, cachedSize,
         (previousSize, newBlockSize) -> previousSize + newBlockSize);
@@ -1670,8 +1671,8 @@ public int evictBlocksByHfileName(String hfileName) {
   }
 
   private Set<BlockCacheKey> getAllCacheKeysForFile(String hfileName) {
-    return blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), true,
-      new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
+    return blocksByHFile.subSet(new BlockCacheKey(new Path(hfileName), Long.MIN_VALUE, true, BlockType.DATA), true,
+      new BlockCacheKey(new Path(hfileName), Long.MAX_VALUE, true, BlockType.DATA), true);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
index 4b42414fb9c5..8aa1a1143ccc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
@@ -25,6 +25,7 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.function.Function;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator.Recycler;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
@@ -32,15 +33,20 @@
 import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+import org.apache.hadoop.hbase.regionserver.DataTieringManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 final class BucketProtoUtils {
+  private static final Logger LOG = LoggerFactory.getLogger(BucketProtoUtils.class);
+
   private BucketProtoUtils() {
 
   }
@@ -130,10 +136,30 @@ static Pair<ConcurrentHashMap<BlockCacheKey, BucketEntry>, NavigableSet<BlockCacheKey>>
     ConcurrentHashMap<BlockCacheKey, BucketEntry> result = new ConcurrentHashMap<>();
     NavigableSet<BlockCacheKey> resultSet = new ConcurrentSkipListSet<>(Comparator
      .comparing(BlockCacheKey::getHfileName).thenComparingLong(BlockCacheKey::getOffset));
+
+    Map<String, Path> allFilePaths = null;
+    DataTieringManager dataTieringManager;
+    try {
+      dataTieringManager = DataTieringManager.getInstance();
+      allFilePaths = dataTieringManager.getAllFilesList();
+    } catch (IllegalStateException e) {
+      // Data-Tiering manager has not been set up.
+      // Ignore the error and proceed with the normal flow.
+      LOG.warn("Error while getting DataTieringManager instance: {}", e.getMessage());
+    }
+
     for (BucketCacheProtos.BackingMapEntry entry : backingMap.getEntryList()) {
       BucketCacheProtos.BlockCacheKey protoKey = entry.getKey();
-      BlockCacheKey key = new BlockCacheKey(protoKey.getHfilename(), protoKey.getOffset(),
-        protoKey.getPrimaryReplicaBlock(), fromPb(protoKey.getBlockType()));
+
+      BlockCacheKey key = null;
+      if (allFilePaths != null) {
+        key = new BlockCacheKey(allFilePaths.get(protoKey.getHfilename()), protoKey.getOffset(),
+          protoKey.getPrimaryReplicaBlock(), fromPb(protoKey.getBlockType()));
+      } else {
+        key = new BlockCacheKey(new Path(protoKey.getHfilename()), protoKey.getOffset(),
+          protoKey.getPrimaryReplicaBlock(), fromPb(protoKey.getBlockType()));
+      }
+
       BucketCacheProtos.BucketEntry protoValue = entry.getValue();
       // TODO:We use ByteBuffAllocator.HEAP here, because we could not get the ByteBuffAllocator
       // which created by RpcServer elegantly.
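Note on the key change above: the reworked BlockCacheKey always derives its hfileName from the supplied Path, so path-aware callers gain getFilePath() while name-based lookups keep working. A minimal sketch of the resulting behaviour (the path below is purely illustrative):

    Path hfilePath = new Path("/hbase/data/default/t1/region1/cf/hfile1"); // illustrative layout
    BlockCacheKey key = new BlockCacheKey(hfilePath, 0);
    key.getHfileName(); // "hfile1", via Path#getName(), same value as before this change
    key.getFilePath();  // the full Path, now available to DataTieringManager and region accounting

    // Callers that only have a file name (e.g. eviction by hfile name) wrap it in a Path:
    BlockCacheKey nameOnly = new BlockCacheKey(new Path("hfile1"), 0);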
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
new file mode 100644
index 000000000000..8d356422f6e0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringException.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class DataTieringException extends Exception {
+  DataTieringException(String reason) {
+    super(reason);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
new file mode 100644
index 000000000000..28f0ddeebca2
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.OptionalLong;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The DataTieringManager class categorizes data into hot data and cold data based on the specified
+ * {@link DataTieringType} when DataTiering is enabled. DataTiering is disabled by default with
+ * {@link DataTieringType} set to {@link DataTieringType#NONE}. The {@link DataTieringType}
+ * determines the logic for distinguishing data into hot or cold. By default, all data is considered
By default, all data is considered + * as hot. + */ +@InterfaceAudience.Private +public class DataTieringManager { + private static final Logger LOG = LoggerFactory.getLogger(DataTieringManager.class); + public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type"; + public static final String DATATIERING_HOT_DATA_AGE_KEY = + "hbase.hstore.datatiering.hot.age.millis"; + public static final DataTieringType DEFAULT_DATATIERING = DataTieringType.NONE; + public static final long DEFAULT_DATATIERING_HOT_DATA_AGE = 7 * 24 * 60 * 60 * 1000; // 7 Days + private static DataTieringManager instance; + private final Map onlineRegions; + + private DataTieringManager(Map onlineRegions) { + this.onlineRegions = onlineRegions; + } + + /** + * Initializes the DataTieringManager instance with the provided map of online regions. + * @param onlineRegions A map containing online regions. + */ + public static synchronized void instantiate(Map onlineRegions) { + if (instance == null) { + instance = new DataTieringManager(onlineRegions); + LOG.info("DataTieringManager instantiated successfully."); + } else { + LOG.warn("DataTieringManager is already instantiated."); + } + } + + /** + * Retrieves the instance of DataTieringManager. + * @return The instance of DataTieringManager. + * @throws IllegalStateException if DataTieringManager has not been instantiated. + */ + public static synchronized DataTieringManager getInstance() { + if (instance == null) { + throw new IllegalStateException( + "DataTieringManager has not been instantiated. Call instantiate() first."); + } + return instance; + } + + /** + * Determines whether data tiering is enabled for the given block cache key. + * @param key the block cache key + * @return {@code true} if data tiering is enabled for the HFile associated with the key, + * {@code false} otherwise + * @throws DataTieringException if there is an error retrieving the HFile path or configuration + */ + public boolean isDataTieringEnabled(BlockCacheKey key) throws DataTieringException { + Path hFilePath = key.getFilePath(); + if (hFilePath == null) { + throw new DataTieringException("BlockCacheKey Doesn't Contain HFile Path"); + } + return isDataTieringEnabled(hFilePath); + } + + /** + * Determines whether data tiering is enabled for the given HFile path. + * @param hFilePath the path to the HFile + * @return {@code true} if data tiering is enabled, {@code false} otherwise + * @throws DataTieringException if there is an error retrieving the configuration + */ + public boolean isDataTieringEnabled(Path hFilePath) throws DataTieringException { + Configuration configuration = getConfiguration(hFilePath); + DataTieringType dataTieringType = getDataTieringType(configuration); + return !dataTieringType.equals(DataTieringType.NONE); + } + + /** + * Determines whether the data associated with the given block cache key is considered hot. + * @param key the block cache key + * @return {@code true} if the data is hot, {@code false} otherwise + * @throws DataTieringException if there is an error retrieving data tiering information or the + * HFile maximum timestamp + */ + public boolean isHotData(BlockCacheKey key) throws DataTieringException { + Path hFilePath = key.getFilePath(); + if (hFilePath == null) { + throw new DataTieringException("BlockCacheKey Doesn't Contain HFile Path"); + } + return isHotData(hFilePath); + } + + /** + * Determines whether the data in the HFile at the given path is considered hot based on the + * configured data tiering type and hot data age. 
+   * @param hFilePath the path to the HFile
+   * @return {@code true} if the data is hot, {@code false} otherwise
+   * @throws DataTieringException if there is an error retrieving data tiering information or the
+   *                              HFile maximum timestamp
+   */
+  public boolean isHotData(Path hFilePath) throws DataTieringException {
+    Configuration configuration = getConfiguration(hFilePath);
+    DataTieringType dataTieringType = getDataTieringType(configuration);
+
+    if (dataTieringType.equals(DataTieringType.TIME_RANGE)) {
+      long hotDataAge = getDataTieringHotDataAge(configuration);
+
+      HStoreFile hStoreFile = getHStoreFile(hFilePath);
+      if (hStoreFile == null) {
+        LOG.error("HStoreFile corresponding to " + hFilePath + " doesn't exist");
+        return false;
+      }
+      OptionalLong maxTimestamp = hStoreFile.getMaximumTimestamp();
+      if (!maxTimestamp.isPresent()) {
+        throw new DataTieringException("Maximum timestamp not present for " + hFilePath);
+      }
+
+      long currentTimestamp = EnvironmentEdgeManager.getDelegate().currentTime();
+      long diff = currentTimestamp - maxTimestamp.getAsLong();
+      return diff <= hotDataAge;
+    }
+    // DataTieringType.NONE or other types are considered hot by default
+    return true;
+  }
+
+  /**
+   * Returns a set of cold data filenames from the given set of cached blocks. Cold data is
+   * determined by the configured data tiering type and hot data age.
+   * @param allCachedBlocks a set of all cached block cache keys
+   * @return a set of cold data filenames
+   * @throws DataTieringException if there is an error determining whether a block is hot
+   */
+  public Set<String> getColdDataFiles(Set<BlockCacheKey> allCachedBlocks)
+    throws DataTieringException {
+    Set<String> coldHFiles = new HashSet<>();
+    for (BlockCacheKey key : allCachedBlocks) {
+      if (coldHFiles.contains(key.getHfileName())) {
+        continue;
+      }
+      if (!isHotData(key)) {
+        coldHFiles.add(key.getHfileName());
+      }
+    }
+    return coldHFiles;
+  }
+
+  private HRegion getHRegion(Path hFilePath) throws DataTieringException {
+    if (hFilePath.getParent() == null || hFilePath.getParent().getParent() == null) {
+      throw new DataTieringException("Incorrect HFile Path: " + hFilePath);
+    }
+    String regionId = hFilePath.getParent().getParent().getName();
+    HRegion hRegion = this.onlineRegions.get(regionId);
+    if (hRegion == null) {
+      throw new DataTieringException("HRegion corresponding to " + hFilePath + " doesn't exist");
+    }
+    return hRegion;
+  }
+
+  private HStore getHStore(Path hFilePath) throws DataTieringException {
+    HRegion hRegion = getHRegion(hFilePath);
+    String columnFamily = hFilePath.getParent().getName();
+    HStore hStore = hRegion.getStore(Bytes.toBytes(columnFamily));
+    if (hStore == null) {
+      throw new DataTieringException("HStore corresponding to " + hFilePath + " doesn't exist");
+    }
+    return hStore;
+  }
+
+  private HStoreFile getHStoreFile(Path hFilePath) throws DataTieringException {
+    HStore hStore = getHStore(hFilePath);
+    for (HStoreFile file : hStore.getStorefiles()) {
+      if (file.getPath().equals(hFilePath)) {
+        return file;
+      }
+    }
+    return null;
+  }
+
+  private Configuration getConfiguration(Path hFilePath) throws DataTieringException {
+    HStore hStore = getHStore(hFilePath);
+    return hStore.getReadOnlyConfiguration();
+  }
+
+  private DataTieringType getDataTieringType(Configuration conf) {
+    return DataTieringType.valueOf(conf.get(DATATIERING_KEY, DEFAULT_DATATIERING.name()));
+  }
+
+  private long getDataTieringHotDataAge(Configuration conf) {
+    return Long.parseLong(
+      conf.get(DATATIERING_HOT_DATA_AGE_KEY, String.valueOf(DEFAULT_DATATIERING_HOT_DATA_AGE)));
+  }
+
+  /*
+   * This API browses through all the regions and returns a map of all file names
+   * pointing to their paths.
+   * @return Map with entries containing a mapping from filename to filepath
+   */
+  public Map<String, Path> getAllFilesList() {
+    Map<String, Path> allFileList = new HashMap<>();
+    for (HRegion r : this.onlineRegions.values()) {
+      for (HStore hStore : r.getStores()) {
+        Configuration conf = hStore.getReadOnlyConfiguration();
+        for (HStoreFile hStoreFile : hStore.getStorefiles()) {
+          String hFileName =
+            hStoreFile.getFileInfo().getHFileInfo().getHFileContext().getHFileName();
+          allFileList.put(hFileName, hStoreFile.getPath());
+        }
+      }
+    }
+    return allFileList;
+  }
+}
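A rough usage sketch for the manager added above (the region map, path, and logger names are placeholders; the real wiring happens in HRegionServer and the bucket cache code elsewhere in this patch):

    // once per region server, before the block cache is created
    DataTieringManager.instantiate(onlineRegions);

    // later, when deciding how to treat the blocks of a store file
    try {
      DataTieringManager manager = DataTieringManager.getInstance();
      if (manager.isDataTieringEnabled(hFilePath) && !manager.isHotData(hFilePath)) {
        // blocks of this file are cold under the TIME_RANGE policy
      }
      Set<String> coldFiles = manager.getColdDataFiles(allCachedBlockKeys);
    } catch (DataTieringException e) {
      LOG.warn("Unable to determine data temperature: {}", e.getMessage());
    }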
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringType.java
new file mode 100644
index 000000000000..ee54576a6487
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringType.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Public
+public enum DataTieringType {
+  NONE,
+  TIME_RANGE
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index d15a6c92ef0b..8fdbb6035ae2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -41,6 +41,9 @@
 @InterfaceAudience.Private
 public class DateTieredStoreEngine extends StoreEngine<DefaultStoreFlusher,
   DateTieredCompactionPolicy, DateTieredCompactor, DefaultStoreFileManager> {
+
+  public static final String DATE_TIERED_STORE_ENGINE = DateTieredStoreEngine.class.getName();
+
   @Override
   public boolean needsCompaction(List<HStoreFile> filesCompacting) {
     return compactionPolicy.needsCompaction(storeFileManager.getStorefiles(), filesCompacting);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 88863c06e4bd..f55d35e69cac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -530,6 +530,7 @@ public HRegionServer(final Configuration conf) throws IOException {
 
     regionServerAccounting = new RegionServerAccounting(conf);
 
+    DataTieringManager.instantiate(onlineRegions);
     blockCache = BlockCacheFactory.createBlockCache(conf);
     mobFileCache = new MobFileCache(conf);
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
index 94e2e4bbfa08..471583b32b7e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/TableDescriptorChecker.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine.DATE_TIERED_STORE_ENGINE;
+
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompoundConfiguration;
@@ -28,10 +30,13 @@
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
+import org.apache.hadoop.hbase.regionserver.DataTieringManager;
+import org.apache.hadoop.hbase.regionserver.DataTieringType;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -191,6 +196,8 @@ public static void sanityCheck(final Configuration c, final TableDescriptor td)
 
       // check in-memory compaction
       warnOrThrowExceptionForFailure(logWarn, hcd::getInMemoryCompaction);
+
+      checkDateTieredCompactionForTimeRangeDataTiering(conf, td);
     }
   }
 
@@ -210,6 +217,35 @@ private static void checkReplicationScope(final Configuration conf, final TableD
     });
   }
 
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final Configuration conf,
+    final TableDescriptor td) throws IOException {
+    // Table level configurations
+    checkDateTieredCompactionForTimeRangeDataTiering(conf);
+    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
+      // Column family level configurations
+      Configuration cfdConf =
+        new CompoundConfiguration().add(conf).addStringMap(cfd.getConfiguration());
+      checkDateTieredCompactionForTimeRangeDataTiering(cfdConf);
+    }
+  }
+
+  private static void checkDateTieredCompactionForTimeRangeDataTiering(final Configuration conf)
+    throws IOException {
+    final String errorMessage =
+      "Time Range Data Tiering should be enabled with Date Tiered Compaction.";
+
+    warnOrThrowExceptionForFailure(false, () -> {
+
+      // Determine whether Date Tiered Compaction will be enabled when Time Range Data Tiering is
+      // enabled after the configuration change.
+      if (DataTieringType.TIME_RANGE.name().equals(conf.get(DataTieringManager.DATATIERING_KEY))) {
+        if (!DATE_TIERED_STORE_ENGINE.equals(conf.get(StoreEngine.STORE_ENGINE_CLASS_KEY))) {
+          throw new IllegalArgumentException(errorMessage);
+        }
+      }
+    });
+  }
+
   private static void checkCompactionPolicy(final Configuration conf, final TableDescriptor td)
     throws IOException {
     warnOrThrowExceptionForFailure(false, () -> {
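The new check only accepts TIME_RANGE data tiering when the date-tiered store engine is configured. A sketch of a descriptor that passes the validation (table and family names are placeholders; the test changes further below exercise the same combination):

    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .setValue(DataTieringManager.DATATIERING_KEY, DataTieringType.TIME_RANGE.name())
      .setValue(StoreEngine.STORE_ENGINE_CLASS_KEY,
        "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine")
      .build();
    // Leaving the store engine at DefaultStoreEngine instead would make sanityCheck() fail with
    // "Time Range Data Tiering should be enabled with Date Tiered Compaction."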
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
index 520d200f28a0..f3f45dfd718a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java
@@ -28,6 +28,7 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -238,7 +239,7 @@ public void run() {
       // evict all the blocks
       while (iterator.hasNext()) {
         CachedBlock next = iterator.next();
-        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+        BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
         cacheList.add(cacheKey);
         // evict what ever is available
         cache.evictBlock(cacheKey);
@@ -373,7 +374,7 @@ public void run() {
       // evict all the blocks
       while (iterator.hasNext()) {
         CachedBlock next = iterator.next();
-        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+        BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
         cacheList.add(cacheKey);
         /**
          * There is only one Block referenced by rpc,here we evict blocks which have no rpc
@@ -412,7 +413,7 @@ public void run() {
       iterator = cache.iterator();
       while (iterator.hasNext()) {
         CachedBlock next = iterator.next();
-        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+        BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
         newCacheList.add(cacheKey);
       }
       for (BlockCacheKey key : cacheList) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
index 86df2bab8d6a..dc0d595b210a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
@@ -32,6 +32,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
@@ -434,7 +435,7 @@ public void testGetsWithMultiColumnsAndExplicitTracker()
     int noOfBlocksWithRef = 0;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -528,7 +529,7 @@ public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedE
     int noOfBlocksWithRef = 0;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -675,7 +676,7 @@ public void testMultiGets() throws IOException, InterruptedException {
     boolean foundNonZeroBlock = false;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -763,7 +764,7 @@ public void testScanWithMultipleColumnFamilies() throws IOException, Interrupted
     int noOfBlocksWithRef = 0;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -928,7 +929,7 @@ private void testScanWithCompactionInternals(String tableNameStr, boolean revers
     boolean usedBlocksFound = false;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -955,7 +956,7 @@ private void testScanWithCompactionInternals(String tableNameStr, boolean revers
     iterator = cache.iterator();
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -1045,7 +1046,7 @@ public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush()
     boolean usedBlocksFound = false;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -1081,7 +1082,7 @@ public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush()
     iterator = cache.iterator();
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -1160,7 +1161,7 @@ public void testScanWithException() throws IOException, InterruptedException {
     int refCount = 0;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -1186,7 +1187,7 @@ public void testScanWithException() throws IOException, InterruptedException {
     refCount = 0;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
@@ -1214,7 +1215,7 @@ private void iterateBlockCache(BlockCache cache, Iterator<CachedBlock> iterator)
     int refCount;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
         LOG.info("BucketCache {} {}", cacheKey, refCount);
@@ -1294,7 +1295,7 @@ private void checkForBlockEviction(BlockCache cache, boolean getClosed, boolean
     int refCount = 0;
     while (iterator.hasNext()) {
       CachedBlock next = iterator.next();
-      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+      BlockCacheKey cacheKey = new BlockCacheKey(new Path(next.getFilename()), next.getOffset());
       if (cache instanceof BucketCache) {
         refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
       } else if (cache instanceof CombinedBlockCache) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
index a67c43d31fa7..666cfbdf54a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIllegalTableDescriptor.java
@@ -31,6 +31,9 @@
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.DataTieringManager;
+import org.apache.hadoop.hbase.regionserver.DataTieringType;
+import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -187,6 +190,48 @@ public void testIllegalTableDescriptor() throws Exception {
       + "cause very frequent flushing."));
   }
 
+  @Test
+  public void testIllegalTableDescriptorWithDataTiering() throws IOException {
+    // table level configuration changes
+    TableDescriptorBuilder builder =
+      TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()));
+    ColumnFamilyDescriptorBuilder cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY);
+    builder.setColumnFamily(cfBuilder.build());
+
+    // First scenario: DataTieringType set to TIME_RANGE without DateTieredStoreEngine
+    builder.setValue(DataTieringManager.DATATIERING_KEY, DataTieringType.TIME_RANGE.name());
+    checkTableIsIllegal(builder.build());
+
+    // Second scenario: DataTieringType set to TIME_RANGE with DateTieredStoreEngine
+    builder.setValue(StoreEngine.STORE_ENGINE_CLASS_KEY,
+      "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine");
+    checkTableIsLegal(builder.build());
+
+    // Third scenario: Disabling DateTieredStoreEngine while Time Range DataTiering is active
+    builder.setValue(StoreEngine.STORE_ENGINE_CLASS_KEY,
+      "org.apache.hadoop.hbase.regionserver.DefaultStoreEngine");
+    checkTableIsIllegal(builder.build());
+
+    // column family level configuration changes
+    builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()));
+    cfBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY);
+
+    // First scenario: DataTieringType set to TIME_RANGE without DateTieredStoreEngine
+    cfBuilder.setConfiguration(DataTieringManager.DATATIERING_KEY,
+      DataTieringType.TIME_RANGE.name());
+    checkTableIsIllegal(builder.setColumnFamily(cfBuilder.build()).build());
+
+    // Second scenario: DataTieringType set to TIME_RANGE with DateTieredStoreEngine
+    cfBuilder.setConfiguration(StoreEngine.STORE_ENGINE_CLASS_KEY,
+      "org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine");
+    checkTableIsLegal(builder.modifyColumnFamily(cfBuilder.build()).build());
+
+    // Third scenario: Disabling DateTieredStoreEngine while Time Range DataTiering is active
+    cfBuilder.setConfiguration(StoreEngine.STORE_ENGINE_CLASS_KEY,
+      "org.apache.hadoop.hbase.regionserver.DefaultStoreEngine");
+    checkTableIsIllegal(builder.modifyColumnFamily(cfBuilder.build()).build());
+  }
+
   private void checkTableIsLegal(TableDescriptor tableDescriptor) throws IOException {
     Admin admin = TEST_UTIL.getAdmin();
     admin.createTable(tableDescriptor);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
index 262408e91a82..2b1c174824ee 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java
@@ -32,6 +32,7 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MultithreadedTestUtil;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
@@ -164,7 +165,7 @@ public static void testCacheSimple(BlockCache toBeTested, int blockSize, int num
 
   public static void hammerSingleKey(final BlockCache toBeTested, int numThreads, int numQueries)
     throws Exception {
-    final BlockCacheKey key = new BlockCacheKey("key", 0);
+    final BlockCacheKey key = new BlockCacheKey(new Path("key"), 0);
     final byte[] buf = new byte[5 * 1024];
     Arrays.fill(buf, (byte) 5);
 
@@ -308,7 +309,7 @@ public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks)
       }
 
       returnedBlocks[i] = new HFileBlockPair();
-      returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0);
+      returnedBlocks[i].blockName = new BlockCacheKey(new Path(strKey), 0);
       returnedBlocks[i].block = generated;
     }
     return returnedBlocks;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
index cda6c3c13723..9c100a07d4d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
@@ -25,6 +25,7 @@
 import java.util.NavigableSet;
 import java.util.Objects;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -58,8 +59,8 @@ private void addDataAndHits(final BlockCache bc, final int count) {
     Cacheable dce = new DataCacheEntry();
     Cacheable ice = new IndexCacheEntry();
     for (int i = 0; i < count; i++) {
-      BlockCacheKey bckd = new BlockCacheKey("f", i);
-      BlockCacheKey bcki = new BlockCacheKey("f", i + count);
+      BlockCacheKey bckd = new BlockCacheKey(new Path("f"), i);
+      BlockCacheKey bcki = new BlockCacheKey(new Path("f"), i + count);
       bc.getBlock(bckd, true, false, true);
       bc.cacheBlock(bckd, dce);
       bc.cacheBlock(bcki, ice);
@@ -67,8 +68,8 @@ private void addDataAndHits(final BlockCache bc, final int count) {
       bc.getBlock(bcki, true, false, true);
     }
     assertEquals(2 * count /* Data and Index blocks */, bc.getStats().getHitCount());
-    BlockCacheKey bckd = new BlockCacheKey("f", 0);
-    BlockCacheKey bcki = new BlockCacheKey("f", 0 + count);
+    BlockCacheKey bckd = new BlockCacheKey(new Path("f"), 0);
+    BlockCacheKey bcki = new BlockCacheKey(new Path("f"), 0 + count);
     bc.evictBlock(bckd);
     bc.evictBlock(bcki);
     bc.getStats().getEvictedCount();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index 48dfe6caebce..beaa45c3dee9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -168,7 +168,7 @@ public void setUp() throws Exception {
   void basicBlockCacheOps(final BlockCache bc, final CacheConfig cc, final boolean doubling,
     final boolean sizing) {
     assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
-    BlockCacheKey bck = new BlockCacheKey("f", 0);
+    BlockCacheKey bck = new BlockCacheKey(new Path("f"), 0);
     Cacheable c = new DataCacheEntry();
     // Do asserts on block counting.
     long initialBlockCount = bc.getBlockCount();
@@ -336,13 +336,13 @@ public void testBucketCacheConfigL1L2Setup() {
     long initialL1BlockCount = lbc.getBlockCount();
     long initialL2BlockCount = bc.getBlockCount();
     Cacheable c = new DataCacheEntry();
-    BlockCacheKey bck = new BlockCacheKey("bck", 0);
+    BlockCacheKey bck = new BlockCacheKey(new Path("bck"), 0);
     lbc.cacheBlock(bck, c, false);
     assertEquals(initialL1BlockCount + 1, lbc.getBlockCount());
     assertEquals(initialL2BlockCount, bc.getBlockCount());
     // Force evictions by putting in a block too big.
     final long justTooBigSize = ((LruBlockCache) lbc).acceptableSize() + 1;
-    lbc.cacheBlock(new BlockCacheKey("bck2", 0), new DataCacheEntry() {
+    lbc.cacheBlock(new BlockCacheKey(new Path("bck2"), 0), new DataCacheEntry() {
       @Override
       public long heapSize() {
         return justTooBigSize;
@@ -380,7 +380,7 @@ public void testIndexOnlyLruBlockCache() {
     assertTrue(blockCache instanceof IndexOnlyLruBlockCache);
     // reject data block
     long initialBlockCount = blockCache.getBlockCount();
-    BlockCacheKey bck = new BlockCacheKey("bck", 0);
+    BlockCacheKey bck = new BlockCacheKey(new Path("bck"), 0);
     Cacheable c = new DataCacheEntry();
     blockCache.cacheBlock(bck, c, true);
     // accept index block
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index 6a03cfcad3e7..e89e5ac53152 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -215,7 +215,7 @@ private void clearBlockCache(BlockCache blockCache) throws InterruptedException
       Thread.sleep(10);
     }
     for (CachedBlock block : Lists.newArrayList(blockCache)) {
-      BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
+      BlockCacheKey key = new BlockCacheKey(new Path(block.getFilename()), block.getOffset());
       // CombinedBucketCache may need evict two times.
       for (int evictCount = 0; blockCache.evictBlock(key); evictCount++) {
         if (evictCount > 1) {
@@ -284,7 +284,7 @@ private void readStoreFile(boolean useTags) throws IOException {
       // Also, pass null for expected block type to avoid checking it.
      HFileBlock block =
        reader.readBlock(offset, -1, false, true, false, true, null, encodingInCache);
-      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
+      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getPath(), offset);
       HFileBlock fromCache = (HFileBlock) blockCache.getBlock(blockCacheKey, true, false, true);
       boolean isCached = fromCache != null;
       cachedBlocksOffset.add(offset);
@@ -338,7 +338,7 @@ private void readStoreFile(boolean useTags) throws IOException {
     Iterator<Long> iterator = cachedBlocksOffset.iterator();
     while (iterator.hasNext()) {
       Long entry = iterator.next();
-      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), entry);
+      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getPath(), entry);
       Pair blockPair = cachedBlocks.get(entry);
       if (blockPair != null) {
         // Call return twice because for the isCache cased the counter would have got incremented
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
index a821b78ae647..400fc9671c65 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
@@ -20,6 +20,7 @@
 import static org.junit.Assert.assertEquals;
 
 import java.nio.ByteBuffer;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -117,7 +118,7 @@ public void testQueueSmallBlockEdgeCase() throws Exception {
   private static class CachedBlock extends org.apache.hadoop.hbase.io.hfile.LruCachedBlock {
     public CachedBlock(final long heapSize, String name, long accessTime) {
-      super(new BlockCacheKey(name, 0), new Cacheable() {
+      super(new BlockCacheKey(new Path(name), 0), new Cacheable() {
         @Override
         public long heapSize() {
           return ((int) (heapSize - CachedBlock.PER_BLOCK_OVERHEAD));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java
index b9bca1ba6b4e..413e3607345c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCombinedBlockCache.java
@@ -24,6 +24,7 @@
 
 import java.nio.ByteBuffer;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
@@ -145,7 +146,7 @@ private CombinedBlockCache createCombinedBlockCache() {
   public void testCombinedBlockCacheStats(BlockType type, int expectedL1Miss, int expectedL2Miss)
     throws Exception {
     CombinedBlockCache blockCache = createCombinedBlockCache();
-    BlockCacheKey key = new BlockCacheKey("key1", 0, false, type);
+    BlockCacheKey key = new BlockCacheKey(new Path("key1"), 0, false, type);
     int size = 100;
     int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
     byte[] byteArr = new byte[length];
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index 7624e2197914..7ca425f1638d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -183,7 +183,7 @@ public void testReaderWithLRUBlockCache() throws Exception {
     HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
     long offset = 0;
     while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
-      BlockCacheKey key = new BlockCacheKey(storeFilePath.getName(), offset);
+      BlockCacheKey key = new BlockCacheKey(storeFilePath, offset);
       HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null, null);
       offset += block.getOnDiskSizeWithHeader();
       // Ensure the block is an heap one.
@@ -228,7 +228,7 @@ public void testReaderWithCombinedBlockCache() throws Exception {
     HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
     long offset = 0;
     while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
-      BlockCacheKey key = new BlockCacheKey(storeFilePath.getName(), offset);
+      BlockCacheKey key = new BlockCacheKey(storeFilePath, offset);
       HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null, null);
       offset += block.getOnDiskSizeWithHeader();
       // Read the cached block.
@@ -1028,7 +1028,7 @@ private void testReaderCombinedCache(final String l1CachePolicy) throws Exceptio
     long offset = 0;
     Cacheable cachedBlock = null;
     while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
-      BlockCacheKey key = new BlockCacheKey(storeFilePath.getName(), offset);
+      BlockCacheKey key = new BlockCacheKey(storeFilePath, offset);
       HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null, null);
       offset += block.getOnDiskSizeWithHeader();
       // Read the cached block.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
index 7134b19ccec3..8a33f24061e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileDataBlockEncoder.java
@@ -28,6 +28,7 @@
 import java.util.Collection;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -91,7 +92,7 @@ private void testEncodingWithCacheInternals(boolean useTag) throws IOException {
     HFileBlock cacheBlock = createBlockOnDisk(conf, kvs, block, useTag);
 
     LruBlockCache blockCache = new LruBlockCache(8 * 1024 * 1024, 32 * 1024);
-    BlockCacheKey cacheKey = new BlockCacheKey("test", 0);
+    BlockCacheKey cacheKey = new BlockCacheKey(new Path("test"), 0);
     blockCache.cacheBlock(cacheKey, cacheBlock);
 
     HeapSize heapSize = blockCache.getBlock(cacheKey, false, false, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java
index c87897de8187..e063201a5623 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileReaderImpl.java
@@ -147,7 +147,7 @@ public void testSeekBefore() throws Exception {
     for (CachedBlock cachedBlock : Lists.newArrayList(bucketcache)) {
       BlockCacheKey cacheKey =
-        new BlockCacheKey(cachedBlock.getFilename(), cachedBlock.getOffset());
+        new BlockCacheKey(new Path(cachedBlock.getFilename()), cachedBlock.getOffset());
       int refCount = bucketcache.getRpcRefCount(cacheKey);
       assertEquals(0, refCount);
     }
@@ -160,7 +160,7 @@ public void testSeekBefore() throws Exception {
     scanner.close();
     for (CachedBlock cachedBlock : Lists.newArrayList(bucketcache)) {
       BlockCacheKey cacheKey =
-        new BlockCacheKey(cachedBlock.getFilename(), cachedBlock.getOffset());
+        new BlockCacheKey(new Path(cachedBlock.getFilename()), cachedBlock.getOffset());
       int refCount = bucketcache.getRpcRefCount(cacheKey);
       assertEquals(0, refCount);
     }
@@ -170,7 +170,7 @@ public void testSeekBefore() throws Exception {
     // clear bucketcache
     for (CachedBlock cachedBlock : Lists.newArrayList(bucketcache)) {
       BlockCacheKey cacheKey =
-        new BlockCacheKey(cachedBlock.getFilename(), cachedBlock.getOffset());
+        new BlockCacheKey(new Path(cachedBlock.getFilename()), cachedBlock.getOffset());
       bucketcache.evictBlock(cacheKey);
     }
     bucketcache.shutdown();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruAdaptiveBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruAdaptiveBlockCache.java
index b065c7c6374a..5eb2581ed65b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruAdaptiveBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruAdaptiveBlockCache.java
@@ -32,6 +32,7 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -810,7 +811,7 @@ public void testCacheBlockNextBlockMetadataMissing() {
       1.2f, // limit
       false, 1024, 10, 500, 0.01f);
 
-    BlockCacheKey key = new BlockCacheKey("key1", 0);
+    BlockCacheKey key = new BlockCacheKey(new Path("key1"), 0);
     ByteBuffer actualBuffer = ByteBuffer.allocate(length);
     ByteBuffer block1Buffer = ByteBuffer.allocate(length);
     ByteBuffer block2Buffer = ByteBuffer.allocate(length);
@@ -885,12 +886,12 @@ private static class CachedItem implements Cacheable {
     int size;
 
     CachedItem(String blockName, int size, int offset) {
-      this.cacheKey = new BlockCacheKey(blockName, offset);
+      this.cacheKey = new BlockCacheKey(new Path(blockName), offset);
       this.size = size;
     }
 
     CachedItem(String blockName, int size) {
-      this.cacheKey = new BlockCacheKey(blockName, 0);
+      this.cacheKey = new BlockCacheKey(new Path(blockName), 0);
       this.size = size;
     }
 
@@ -931,7 +932,7 @@ static void testMultiThreadGetAndEvictBlockInternal(BlockCache cache) throws Exc
     int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
     byte[] byteArr = new byte[length];
     HFileContext meta = new HFileContextBuilder().build();
-    BlockCacheKey key = new BlockCacheKey("key1", 0);
+    BlockCacheKey key = new BlockCacheKey(new Path("key1"), 0);
     HFileBlock blk = new HFileBlock(BlockType.DATA, size, size, -1,
       ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, 52, -1, meta,
       HEAP);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index ecd5fa2dd9a4..9c1830b0e922 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -32,6 +32,7 @@
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -812,7 +813,7 @@ public void testCacheBlockNextBlockMetadataMissing() { 1.2f, // limit false, 1024); - BlockCacheKey key = new BlockCacheKey("key1", 0); + BlockCacheKey key = new BlockCacheKey(new Path("key1"), 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); ByteBuffer block1Buffer = ByteBuffer.allocate(length); ByteBuffer block2Buffer = ByteBuffer.allocate(length); @@ -887,12 +888,12 @@ private static class CachedItem implements Cacheable { int size; CachedItem(String blockName, int size, int offset) { - this.cacheKey = new BlockCacheKey(blockName, offset); + this.cacheKey = new BlockCacheKey(new Path(blockName), offset); this.size = size; } CachedItem(String blockName, int size) { - this.cacheKey = new BlockCacheKey(blockName, 0); + this.cacheKey = new BlockCacheKey(new Path(blockName), 0); this.size = size; } @@ -933,7 +934,7 @@ static void testMultiThreadGetAndEvictBlockInternal(BlockCache cache) throws Exc int length = HConstants.HFILEBLOCK_HEADER_SIZE + size; byte[] byteArr = new byte[length]; HFileContext meta = new HFileContextBuilder().build(); - BlockCacheKey key = new BlockCacheKey("key1", 0); + BlockCacheKey key = new BlockCacheKey(new Path("key1"), 0); HFileBlock blk = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(ByteBuffer.wrap(byteArr, 0, size)), HFileBlock.FILL_HEADER, -1, 52, -1, meta, HEAP); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruCachedBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruCachedBlock.java index eb57b0acd652..ccc598724f57 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruCachedBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruCachedBlock.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -42,8 +43,8 @@ public class TestLruCachedBlock { @Before public void setUp() throws Exception { - BlockCacheKey cacheKey = new BlockCacheKey("name", 0); - BlockCacheKey otherKey = new BlockCacheKey("name2", 1); + BlockCacheKey cacheKey = new BlockCacheKey(new Path("name"), 0); + BlockCacheKey otherKey = new BlockCacheKey(new Path("name2"), 1); Cacheable cacheable = Mockito.mock(Cacheable.class); Cacheable otheCacheable = Mockito.mock(Cacheable.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 85b9199638c0..0864fe84ca66 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -303,7 +303,7 @@ private void readStoreFile(Path storeFilePath, long offset = 0; while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { HFileBlock block = readFunction.apply(reader, offset); - BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); + BlockCacheKey blockCacheKey = new 
BlockCacheKey(reader.getPath(), offset); validationFunction.accept(blockCacheKey, block); offset += block.getOnDiskSizeWithHeader(); } @@ -371,7 +371,7 @@ private void testPrefetchWhenRefs(boolean compactionEnabled, Consumer long offset = 0; while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null, true); - BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); + BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getPath(), offset); if (block.getBlockType() == BlockType.DATA) { test.accept(blockCache.getBlock(blockCacheKey, true, false, true)); } @@ -417,7 +417,7 @@ private void testPrefetchWhenHFileLink(Consumer test) throws Exceptio long offset = 0; while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null, true); - BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); + BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getPath(), offset); if (block.getBlockType() == BlockType.DATA) { test.accept(blockCache.getBlock(blockCacheKey, true, false, true)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java index 2d0a85962ef9..cf4f5ffc16ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.io.hfile.bucket.BucketEntry; +import org.apache.hadoop.hbase.regionserver.DataTieringManager; import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -244,7 +245,7 @@ private void readStoreFile(Path storeFilePath, long sizeForDataBlocks = 0; while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { HFileBlock block = readFunction.apply(reader, offset); - BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); + BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getPath(), offset); validationFunction.accept(blockCacheKey, block); offset += block.getOnDiskSizeWithHeader(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java index 31166bd5fa45..e7c9810bbcef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestTinyLfuBlockCache.java @@ -25,6 +25,7 @@ import java.nio.ByteBuffer; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.testclassification.IOTests; @@ -266,7 +267,7 @@ private static class CachedItem implements Cacheable { int size; CachedItem(String blockName, int size) { - this.cacheKey = new BlockCacheKey(blockName, 0); + this.cacheKey = new BlockCacheKey(new Path(blockName), 0); this.size = size; } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java index 6a9b5bf382a6..158a8e87b777 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java @@ -249,7 +249,7 @@ private void cacheAndWaitUntilFlushedToBucket(BucketCache cache, BlockCacheKey c @Test public void testMemoryLeak() throws Exception { - final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L); + final BlockCacheKey cacheKey = new BlockCacheKey(new Path("dummy"), 1L); cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(new byte[10]), true); long lockId = cache.backingMap.get(cacheKey).offset(); @@ -609,7 +609,7 @@ public void testEvictionCount() throws InterruptedException { HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf2), HFileBlock.FILL_HEADER, -1, -1, -1, meta, allocator); - BlockCacheKey key = new BlockCacheKey("testEvictionCount", 0); + BlockCacheKey key = new BlockCacheKey(new Path("testEvictionCount"), 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); ByteBuffer block1Buffer = ByteBuffer.allocate(length); ByteBuffer block2Buffer = ByteBuffer.allocate(length); @@ -659,7 +659,7 @@ public void testCacheBlockNextBlockMetadataMissing() throws Exception { HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf2), HFileBlock.FILL_HEADER, -1, -1, -1, meta, allocator); - BlockCacheKey key = new BlockCacheKey("testCacheBlockNextBlockMetadataMissing", 0); + BlockCacheKey key = new BlockCacheKey(new Path("testCacheBlockNextBlockMetadataMissing"), 0); ByteBuffer actualBuffer = ByteBuffer.allocate(length); ByteBuffer block1Buffer = ByteBuffer.allocate(length); ByteBuffer block2Buffer = ByteBuffer.allocate(length); @@ -714,8 +714,8 @@ public void testRAMCache() { HFileContext meta = new HFileContextBuilder().build(); RAMCache cache = new RAMCache(); - BlockCacheKey key1 = new BlockCacheKey("file-1", 1); - BlockCacheKey key2 = new BlockCacheKey("file-2", 2); + BlockCacheKey key1 = new BlockCacheKey(new Path("file-1"), 1); + BlockCacheKey key2 = new BlockCacheKey(new Path("file-2"), 2); HFileBlock blk1 = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52, -1, meta, ByteBuffAllocator.HEAP); HFileBlock blk2 = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf), @@ -767,7 +767,7 @@ public void testFreeBlockWhenIOEngineWriteFailure() throws IOException { long availableSpace = 1024 * 1024 * 1024L; BucketAllocator allocator = new BucketAllocator(availableSpace, null); - BlockCacheKey key = new BlockCacheKey("dummy", 1L); + BlockCacheKey key = new BlockCacheKey(new Path("dummy"), 1L); RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true, false); Assert.assertEquals(0, allocator.getUsedSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java index d60d2c53ef6d..528c98eb20c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCachePersister.java @@ -193,7 +193,7 @@ 
public void readStoreFile(Path storeFilePath, long offset, FileSystem fs, CacheC Thread.sleep(1000); } HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null); - BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); + BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getPath(), offset); BucketEntry be = bucketCache.backingMap.get(blockCacheKey); boolean isCached = bucketCache.getBlock(blockCacheKey, true, false, true) != null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java index e71817e6a3f7..4519dcc9c3e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCacheRefCnt.java @@ -30,6 +30,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.ByteBuffAllocator; @@ -93,7 +94,7 @@ private static HFileBlock createBlock(int offset, int size, ByteBuffAllocator al } private static BlockCacheKey createKey(String hfileName, long offset) { - return new BlockCacheKey(hfileName, offset); + return new BlockCacheKey(new Path(hfileName), offset); } private void disableWriter() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java index 429fffa38f6c..9dea31ecc1fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.BlockingQueue; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; @@ -91,7 +92,7 @@ public void setUp() throws Exception { this.q = bc.writerQueues.get(0); wt.disableWriter(); - this.plainKey = new BlockCacheKey("f", 0); + this.plainKey = new BlockCacheKey(new Path("f"), 0); this.plainCacheable = Mockito.mock(Cacheable.class); assertThat(bc.ramCache.isEmpty(), is(true)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java index 58d9385f57e9..69da6ab887aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestRAMCache.java @@ -20,6 +20,7 @@ import java.nio.ByteBuffer; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.ByteBuffAllocator; @@ -86,7 +87,7 @@ public void testAtomicRAMCache() throws Exception { byte[] byteArr = new byte[length]; RAMCache cache = new RAMCache(); - BlockCacheKey key = new 
BlockCacheKey("file-1", 1); + BlockCacheKey key = new BlockCacheKey(new Path("file-1"), 1); MockHFileBlock blk = new MockHFileBlock(BlockType.DATA, size, size, -1, ByteBuffer.wrap(byteArr, 0, size), HFileBlock.FILL_HEADER, -1, 52, -1, new HFileContextBuilder().build(), ByteBuffAllocator.HEAP); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 85cee077dcb6..d480ec41ae86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -233,7 +233,7 @@ private void readStoreFile(Path path) throws IOException { // Also, pass null for expected block type to avoid checking it. HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE); - BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); + BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getPath(), offset); boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null; boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType()); final BlockType blockType = block.getBlockType(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java new file mode 100644 index 000000000000..91f1b948c7de --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java @@ -0,0 +1,455 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_ERROR_TOLERATION_DURATION; +import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_WRITER_THREADS; +import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_WRITER_QUEUE_ITEMS; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.junit.Assert.assertNotNull; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.io.hfile.BlockCache; +import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; +import org.apache.hadoop.hbase.io.hfile.BlockType; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.CacheTestUtils; +import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; +import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * This class is used to test the functionality of the DataTieringManager. + * + * The mock online regions are stored in {@link TestDataTieringManager#testOnlineRegions}. + * For all tests, the setup of {@link TestDataTieringManager#testOnlineRegions} occurs only once. + * Please refer to {@link TestDataTieringManager#setupOnlineRegions()} for the structure. + * Additionally, a list of all store files is maintained in {@link TestDataTieringManager#hStoreFiles}. 
+ * The characteristics of these store files are listed below: + * @formatter:off ## HStoreFile Information + * + * | HStoreFile | Region | Store | DataTiering | isHot | + * |------------------|--------------------|---------------------|-----------------------|-------| + * | hStoreFile0 | region1 | hStore11 | TIME_RANGE | true | + * | hStoreFile1 | region1 | hStore12 | NONE | true | + * | hStoreFile2 | region2 | hStore21 | TIME_RANGE | true | + * | hStoreFile3 | region2 | hStore22 | TIME_RANGE | false | + * @formatter:on + */ + +@Category({ RegionServerTests.class, SmallTests.class }) +public class TestDataTieringManager { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestDataTieringManager.class); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private static Configuration defaultConf; + private static FileSystem fs; + private static CacheConfig cacheConf; + private static Path testDir; + private static Map<String, HRegion> testOnlineRegions; + + private static DataTieringManager dataTieringManager; + private static List<HStoreFile> hStoreFiles; + + static final long capacitySize = 32 * 1024 * 1024; + static final int writeThreads = DEFAULT_WRITER_THREADS; + static final int writerQLen = DEFAULT_WRITER_QUEUE_ITEMS; + static final int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; + + @BeforeClass + public static void setupBeforeClass() throws Exception { + testDir = TEST_UTIL.getDataTestDir(TestDataTieringManager.class.getSimpleName()); + defaultConf = TEST_UTIL.getConfiguration(); + fs = HFileSystem.get(defaultConf); + fs.mkdirs(testDir); + + BlockCache blockCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", + DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); + + cacheConf = new CacheConfig(defaultConf, blockCache); + setupOnlineRegions(); + DataTieringManager.instantiate(testOnlineRegions); + dataTieringManager = DataTieringManager.getInstance(); + } + + @FunctionalInterface + interface DataTieringMethodCallerWithPath { + boolean call(DataTieringManager manager, Path path) throws DataTieringException; + } + + @FunctionalInterface + interface DataTieringMethodCallerWithKey { + boolean call(DataTieringManager manager, BlockCacheKey key) throws DataTieringException; + } + + @Test + public void testDataTieringEnabledWithKey() { + DataTieringMethodCallerWithKey methodCallerWithKey = DataTieringManager::isDataTieringEnabled; + + // Test with valid key + BlockCacheKey key = new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA); + testDataTieringMethodWithKeyNoException(methodCallerWithKey, key, true); + + // Test with another valid key + key = new BlockCacheKey(hStoreFiles.get(1).getPath(), 0, true, BlockType.DATA); + testDataTieringMethodWithKeyNoException(methodCallerWithKey, key, false); + } + + @Test + public void testDataTieringEnabledWithPath() { + DataTieringMethodCallerWithPath methodCallerWithPath = DataTieringManager::isDataTieringEnabled; + + // Test with valid path + Path hFilePath = hStoreFiles.get(1).getPath(); + testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, false); + + // Test with another valid path + hFilePath = hStoreFiles.get(3).getPath(); + testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, true); + + // Test with an incorrect path + hFilePath = new Path("incorrectPath"); + testDataTieringMethodWithPathExpectingException(methodCallerWithPath,
hFilePath, + new DataTieringException("Incorrect HFile Path: " + hFilePath)); + + // Test with a non-existing HRegion path + Path basePath = hStoreFiles.get(0).getPath().getParent().getParent().getParent(); + hFilePath = new Path(basePath, "incorrectRegion/cf1/filename"); + testDataTieringMethodWithPathExpectingException(methodCallerWithPath, hFilePath, + new DataTieringException("HRegion corresponding to " + hFilePath + " doesn't exist")); + + // Test with a non-existing HStore path + basePath = hStoreFiles.get(0).getPath().getParent().getParent(); + hFilePath = new Path(basePath, "incorrectCf/filename"); + testDataTieringMethodWithPathExpectingException(methodCallerWithPath, hFilePath, + new DataTieringException("HStore corresponding to " + hFilePath + " doesn't exist")); + } + + @Test + public void testHotDataWithKey() { + DataTieringMethodCallerWithKey methodCallerWithKey = DataTieringManager::isHotData; + + // Test with valid key + BlockCacheKey key = new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA); + testDataTieringMethodWithKeyNoException(methodCallerWithKey, key, true); + + // Test with another valid key + key = new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA); + testDataTieringMethodWithKeyNoException(methodCallerWithKey, key, false); + } + + @Test + public void testHotDataWithPath() { + DataTieringMethodCallerWithPath methodCallerWithPath = DataTieringManager::isHotData; + + // Test with valid path + Path hFilePath = hStoreFiles.get(2).getPath(); + testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, true); + + // Test with another valid path + hFilePath = hStoreFiles.get(3).getPath(); + testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, false); + + // Test with a filename where the corresponding HStoreFile is not present + hFilePath = new Path(hStoreFiles.get(0).getPath().getParent(), "incorrectFileName"); + testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, false); + } + + @Test + public void testColdDataFiles() { + Set<BlockCacheKey> allCachedBlocks = new HashSet<>(); + for (HStoreFile file : hStoreFiles) { + allCachedBlocks.add(new BlockCacheKey(file.getPath(), 0, true, BlockType.DATA)); + } + + // Verify hStoreFile3 is identified as cold data + DataTieringMethodCallerWithPath methodCallerWithPath = DataTieringManager::isHotData; + Path hFilePath = hStoreFiles.get(3).getPath(); + testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, false); + + // Verify all the other files in hStoreFiles are hot data + for (int i = 0; i < hStoreFiles.size() - 1; i++) { + hFilePath = hStoreFiles.get(i).getPath(); + testDataTieringMethodWithPathNoException(methodCallerWithPath, hFilePath, true); + } + + try { + Set coldFilePaths = dataTieringManager.getColdDataFiles(allCachedBlocks); + assertEquals(1, coldFilePaths.size()); + } catch (DataTieringException e) { + fail("Unexpected DataTieringException: " + e.getMessage()); + } + } + + @Test + public void testAllDataFiles() { + Set<BlockCacheKey> allCachedBlocks = new HashSet<>(); + for (HStoreFile file : hStoreFiles) { + allCachedBlocks.add(new BlockCacheKey(file.getPath(), 0, true, BlockType.DATA)); + } + Map allFilePaths = dataTieringManager.getAllFilesList(); + assertEquals(hStoreFiles.size(), allFilePaths.size()); + } + + @Test + public void testAllDataFilesAfterRestart() throws Exception { + Set<BlockCacheKey> cacheKeys = new HashSet<>(); + // Create cache keys + for (HStoreFile file : hStoreFiles) { + cacheKeys.add(new BlockCacheKey(file.getPath(), 0,
true, BlockType.DATA)); + } + // Create dummy data to be cached. + CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 4); + BucketCache cache = (BucketCache) cacheConf.getBlockCache().get(); + int blocksIter = 0; + for (BlockCacheKey key : cacheKeys) { + cache.cacheBlock(key, blocks[blocksIter++].getBlock()); + // Ensure that the block is persisted to the file. + while (!cache.getBackingMap().containsKey(key)) { + Thread.sleep(100); + } + } + + // Shutting down the cache persists the backing map to disk. + cache.shutdown(); + + // Create a new cache populated from disk, which simulates a server restart. + BucketCache newBucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", + DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); + + Set<BlockCacheKey> keySet = newBucketCache.getBackingMap().keySet(); + assertEquals(hStoreFiles.size(), keySet.size()); + for (BlockCacheKey key : keySet) { + assertNotNull(key.getFilePath()); + assert(filePathExists(key)); + } + } + + boolean filePathExists(BlockCacheKey key) { + for (HStoreFile file : hStoreFiles) { + if (file.getPath().equals(key.getFilePath())) { + return true; + } + } + return false; + } + + private void testDataTieringMethodWithPath(DataTieringMethodCallerWithPath caller, Path path, + boolean expectedResult, DataTieringException exception) { + try { + boolean value = caller.call(dataTieringManager, path); + if (exception != null) { + fail("Expected DataTieringException to be thrown"); + } + assertEquals(expectedResult, value); + } catch (DataTieringException e) { + if (exception == null) { + fail("Unexpected DataTieringException: " + e.getMessage()); + } + assertEquals(exception.getMessage(), e.getMessage()); + } + } + + private void testDataTieringMethodWithKey(DataTieringMethodCallerWithKey caller, + BlockCacheKey key, boolean expectedResult, DataTieringException exception) { + try { + boolean value = caller.call(dataTieringManager, key); + if (exception != null) { + fail("Expected DataTieringException to be thrown"); + } + assertEquals(expectedResult, value); + } catch (DataTieringException e) { + if (exception == null) { + fail("Unexpected DataTieringException: " + e.getMessage()); + } + assertEquals(exception.getMessage(), e.getMessage()); + } + } + + private void testDataTieringMethodWithPathExpectingException( + DataTieringMethodCallerWithPath caller, Path path, DataTieringException exception) { + testDataTieringMethodWithPath(caller, path, false, exception); + } + + private void testDataTieringMethodWithPathNoException(DataTieringMethodCallerWithPath caller, + Path path, boolean expectedResult) { + testDataTieringMethodWithPath(caller, path, expectedResult, null); + } + + private void testDataTieringMethodWithKeyExpectingException(DataTieringMethodCallerWithKey caller, + BlockCacheKey key, DataTieringException exception) { + testDataTieringMethodWithKey(caller, key, false, exception); + } + + private void testDataTieringMethodWithKeyNoException(DataTieringMethodCallerWithKey caller, + BlockCacheKey key, boolean expectedResult) { + testDataTieringMethodWithKey(caller, key, expectedResult, null); + } + + private static void setupOnlineRegions() throws IOException { + testOnlineRegions = new HashMap<>(); + hStoreFiles = new ArrayList<>(); + + long day = 24 * 60 * 60 * 1000; + long currentTime = System.currentTimeMillis(); + + HRegion region1 = createHRegion("table1"); + + HStore hStore11 =
createHStore(region1, "cf1", getConfWithTimeRangeDataTieringEnabled(day)); + hStoreFiles + .add(createHStoreFile(hStore11.getStoreContext().getFamilyStoreDirectoryPath(), currentTime)); + hStore11.refreshStoreFiles(); + HStore hStore12 = createHStore(region1, "cf2"); + hStoreFiles.add(createHStoreFile(hStore12.getStoreContext().getFamilyStoreDirectoryPath(), + currentTime - day)); + hStore12.refreshStoreFiles(); + + region1.stores.put(Bytes.toBytes("cf1"), hStore11); + region1.stores.put(Bytes.toBytes("cf2"), hStore12); + + HRegion region2 = + createHRegion("table2", getConfWithTimeRangeDataTieringEnabled((long) (2.5 * day))); + + HStore hStore21 = createHStore(region2, "cf1"); + hStoreFiles.add(createHStoreFile(hStore21.getStoreContext().getFamilyStoreDirectoryPath(), + currentTime - 2 * day)); + hStore21.refreshStoreFiles(); + HStore hStore22 = createHStore(region2, "cf2"); + hStoreFiles.add(createHStoreFile(hStore22.getStoreContext().getFamilyStoreDirectoryPath(), + currentTime - 3 * day)); + hStore22.refreshStoreFiles(); + + region2.stores.put(Bytes.toBytes("cf1"), hStore21); + region2.stores.put(Bytes.toBytes("cf2"), hStore22); + + for (HStoreFile file : hStoreFiles) { + file.initReader(); + } + + testOnlineRegions.put(region1.getRegionInfo().getEncodedName(), region1); + testOnlineRegions.put(region2.getRegionInfo().getEncodedName(), region2); + } + + private static HRegion createHRegion(String table) throws IOException { + return createHRegion(table, defaultConf); + } + + private static HRegion createHRegion(String table, Configuration conf) throws IOException { + TableName tableName = TableName.valueOf(table); + + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .setValue(DataTieringManager.DATATIERING_KEY, conf.get(DataTieringManager.DATATIERING_KEY)) + .setValue(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY, + conf.get(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY)) + .build(); + RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build(); + + Configuration testConf = new Configuration(conf); + CommonFSUtils.setRootDir(testConf, testDir); + HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(testConf, fs, + CommonFSUtils.getTableDir(testDir, hri.getTable()), hri); + + return new HRegion(regionFs, null, conf, htd, null); + } + + private static HStore createHStore(HRegion region, String columnFamily) throws IOException { + return createHStore(region, columnFamily, defaultConf); + } + + private static HStore createHStore(HRegion region, String columnFamily, Configuration conf) + throws IOException { + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(columnFamily)) + .setValue(DataTieringManager.DATATIERING_KEY, conf.get(DataTieringManager.DATATIERING_KEY)) + .setValue(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY, + conf.get(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY)) + .build(); + + return new HStore(region, columnFamilyDescriptor, conf, false); + } + + private static Configuration getConfWithTimeRangeDataTieringEnabled(long hotDataAge) { + Configuration conf = new Configuration(defaultConf); + conf.set(DataTieringManager.DATATIERING_KEY, DataTieringType.TIME_RANGE.name()); + conf.set(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY, String.valueOf(hotDataAge)); + return conf; + } + + private static HStoreFile createHStoreFile(Path storeDir, long timestamp) throws IOException { + String columnFamily = storeDir.getName(); + + StoreFileWriter storeFileWriter = new 
StoreFileWriter.Builder(defaultConf, cacheConf, fs) + .withOutputDir(storeDir).withFileContext(new HFileContextBuilder().build()).build(); + + writeStoreFileRandomData(storeFileWriter, Bytes.toBytes(columnFamily), Bytes.toBytes("random"), + timestamp); + + return new HStoreFile(fs, storeFileWriter.getPath(), defaultConf, cacheConf, BloomType.NONE, + true); + } + + private static void writeStoreFileRandomData(final StoreFileWriter writer, byte[] columnFamily, + byte[] qualifier, long timestamp) throws IOException { + try { + for (char d = 'a'; d <= 'z'; d++) { + for (char e = 'a'; e <= 'z'; e++) { + byte[] b = new byte[] { (byte) d, (byte) e }; + writer.append(new KeyValue(b, columnFamily, qualifier, timestamp, b)); + } + } + } finally { + writer.appendTrackedTimestampsToMetadata(); + writer.close(); + } + } +}
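Note (not part of the patch): the tests above drive DataTieringManager through both Path- and BlockCacheKey-based lookups. The short sketch below illustrates how a cache-side caller might combine those same calls once BlockCacheKey carries the file Path; the DataTieringCacheGate class and shouldCacheBlock method are hypothetical names used only for illustration, and the sketch assumes DataTieringManager.instantiate(...) has already been invoked with the server's online regions.

// Illustrative sketch only: decide whether a block should be admitted to the cache
// based on the hot/cold status of its backing HFile.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.regionserver.DataTieringException;
import org.apache.hadoop.hbase.regionserver.DataTieringManager;

public final class DataTieringCacheGate {

  private DataTieringCacheGate() {
  }

  /**
   * Returns true when the block's backing file is still hot according to the
   * DataTieringManager, or when no tiering decision can be made (fail open).
   */
  static boolean shouldCacheBlock(Path hFilePath, long offset) {
    BlockCacheKey key = new BlockCacheKey(hFilePath, offset, true, BlockType.DATA);
    DataTieringManager manager = DataTieringManager.getInstance();
    try {
      if (!manager.isDataTieringEnabled(key)) {
        // No time-range tiering configured for this file's store: cache as usual.
        return true;
      }
      return manager.isHotData(key);
    } catch (DataTieringException e) {
      // Unknown region or store layout for this path: fall back to default caching.
      return true;
    }
  }
}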