diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index 19937ecdf00f..429fdd66be1b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -56,6 +56,7 @@
 import org.apache.hadoop.hbase.mob.MobFileName;
 import org.apache.hadoop.hbase.mob.MobStoreEngine;
 import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.IdLock;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -108,6 +109,7 @@ public HMobStore(final HRegion region, final ColumnFamilyDescriptor family,
     this.homePath = MobUtils.getMobHome(conf);
     this.mobFamilyPath =
       MobUtils.getMobFamilyPath(conf, this.getTableName(), getColumnFamilyName());
+    CommonFSUtils.setStoragePolicy(this.getFileSystem(), mobFamilyPath, this.policyName);
     List<Path> locations = new ArrayList<>(2);
     locations.add(mobFamilyPath);
     TableName tn = region.getTableDescriptor().getTableName();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 814a57a63302..6b420f3f1fa9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -212,6 +212,7 @@ public class HStore
   private AtomicLong majorCompactedCellsSize = new AtomicLong();
 
   private final StoreContext storeContext;
+  protected String policyName;
 
   // Used to track the store files which are currently being written. For compaction, if we want to
   // compact store file [a, b, c] to [d], then here we will record 'd'. And we will also use it to
@@ -264,11 +265,10 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
     region.getRegionFileSystem().createStoreDir(family.getNameAsString());
 
     // set block storage policy for store directory
-    String policyName = family.getStoragePolicy();
-    if (null == policyName) {
-      policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY);
-    }
-    region.getRegionFileSystem().setStoragePolicy(family.getNameAsString(), policyName.trim());
+    this.policyName = Optional.ofNullable(family.getStoragePolicy())
+      .orElseGet(() -> conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY))
+      .trim();
+    region.getRegionFileSystem().setStoragePolicy(family.getNameAsString(), policyName);
 
     this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
index d3a67b64f6a6..72908637bcc6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -184,6 +185,62 @@ public void testBlockStoragePolicy() throws Exception {
       TEST_UTIL.shutdownMiniCluster();
     }
   }
+  @Test
+  public void testMobStoreStoragePolicy() throws Exception {
+    TEST_UTIL = new HBaseTestingUtil();
+    Configuration conf = TEST_UTIL.getConfiguration();
+    TEST_UTIL.startMiniCluster();
+    Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES);
+    assertEquals("Should start with empty table", 0, TEST_UTIL.countRows(table));
+    HRegionFileSystem regionFs = getHRegionFS(TEST_UTIL.getConnection(), table, conf);
+    try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
+      ColumnFamilyDescriptorBuilder cfdA = ColumnFamilyDescriptorBuilder.newBuilder(FAMILIES[0]);
+      cfdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD");
+      cfdA.setMobEnabled(true);
+      cfdA.setMobThreshold(2L);
+      admin.modifyColumnFamily(TABLE_NAME, cfdA.build());
+      while (
+        TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates()
+          .hasRegionsInTransition()
+      ) {
+        Thread.sleep(200);
+        LOG.debug("Waiting on table to finish schema altering");
+      }
+
+      // flush memstore snapshot into 3 files
+      for (long i = 0; i < 3; i++) {
+        Put put = new Put(Bytes.toBytes(i));
+        put.addColumn(FAMILIES[0], Bytes.toBytes(i), Bytes.toBytes(i));
+        put.addColumn(FAMILIES[0], Bytes.toBytes(i + "qf"), Bytes.toBytes(i + "value"));
+        table.put(put);
+        admin.flush(TABLE_NAME);
+      }
+      // there should be 3 files in store dir
+      FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
+      Path storePath = regionFs.getStoreDir(Bytes.toString(FAMILIES[0]));
+      Path mobStorePath = MobUtils.getMobFamilyPath(conf, TABLE_NAME, Bytes.toString(FAMILIES[0]));
+
+      FileStatus[] storeFiles = CommonFSUtils.listStatus(fs, storePath);
+      FileStatus[] mobStoreFiles = CommonFSUtils.listStatus(fs, mobStorePath);
+      assertNotNull(storeFiles);
+      assertEquals(3, storeFiles.length);
+      assertNotNull(mobStoreFiles);
+      assertEquals(3, mobStoreFiles.length);
+
+      for (FileStatus status : storeFiles) {
+        assertEquals("ONE_SSD",
+          ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
+      }
+      for (FileStatus status : mobStoreFiles) {
+        assertEquals("ONE_SSD",
+          ((HFileSystem) regionFs.getFileSystem()).getStoragePolicyName(status.getPath()));
+      }
+    } finally {
+      table.close();
+      TEST_UTIL.deleteTable(TABLE_NAME);
+      TEST_UTIL.shutdownMiniCluster();
+    }
+  }
 
   private HRegionFileSystem getHRegionFS(Connection conn, Table table, Configuration conf)
     throws IOException {
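
Not part of the patch: a minimal client-side sketch of how the changed behavior could be exercised, assuming a reachable cluster and hypothetical table/family names ("mobTable", "f"). It declares a MOB-enabled column family carrying an explicit storage policy via ColumnFamilyDescriptorBuilder.setStoragePolicy; with this change, HMobStore should apply that policy to the MOB family directory as well as the per-region store directory. The test above drives the same code path through the hbase.hregion.block.storage.policy family value instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobStoragePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
      // Hypothetical names, for illustration only.
      TableName tableName = TableName.valueOf("mobTable");
      byte[] family = Bytes.toBytes("f");
      // MOB-enabled family with an explicit storage policy; after this patch the policy
      // should also be set on the MOB family directory, not only the store directory.
      admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
          .setMobEnabled(true)
          .setMobThreshold(100L) // cells larger than 100 bytes are written as MOB
          .setStoragePolicy("ONE_SSD")
          .build())
        .build());
    }
  }
}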