Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -45,17 +45,17 @@ public class CloseRegionProcedure extends RegionRemoteProcedureBase {
// wrong(but do not make it wrong intentionally). The client can handle this error.
private ServerName assignCandidate;

// True when this close is part of a split/merge. Rather than the master
// deciding cache-eviction behaviour, only the intent is shipped to the RS,
// which derives eviction from its own configuration (see HBASE-28811 note
// elsewhere in this change).
private boolean isSplit;

/**
 * No-arg constructor. NOTE(review): presumably required by the procedure
 * framework for deserialization — confirm against ProcedureExecutor usage.
 */
public CloseRegionProcedure() {
  super();
}

/**
 * @param parent          the transit procedure that spawned this close
 * @param region          the region to close
 * @param targetServer    the server currently hosting the region
 * @param assignCandidate candidate server for a subsequent assign; may be null
 * @param isSplit         whether this close is triggered by a split/merge
 */
public CloseRegionProcedure(TransitRegionStateProcedure parent, RegionInfo region,
  ServerName targetServer, ServerName assignCandidate, boolean isSplit) {
  super(parent, region, targetServer);
  this.assignCandidate = assignCandidate;
  this.isSplit = isSplit;
}

@Override
Expand All @@ -65,7 +65,7 @@ public TableOperationType getTableOperationType() {

@Override
public RemoteOperation newRemoteOperation(MasterProcedureEnv env) {
return new RegionCloseOperation(this, region, getProcId(), assignCandidate, evictCache,
return new RegionCloseOperation(this, region, getProcId(), assignCandidate, isSplit,
env.getMasterServices().getMasterActiveTime());
}

Expand All @@ -76,7 +76,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) throws IO
if (assignCandidate != null) {
builder.setAssignCandidate(ProtobufUtil.toServerName(assignCandidate));
}
builder.setEvictCache(evictCache);
builder.setEvictCache(isSplit);
serializer.serialize(builder.build());
}

Expand All @@ -88,7 +88,7 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws
if (data.hasAssignCandidate()) {
assignCandidate = ProtobufUtil.toServerName(data.getAssignCandidate());
}
evictCache = data.getEvictCache();
isSplit = data.getEvictCache();
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -335,13 +335,15 @@ private void closeRegion(MasterProcedureEnv env, RegionStateNode regionNode) thr
if (regionNode.isInState(State.OPEN, State.CLOSING, State.MERGING, State.SPLITTING)) {
// this is the normal case
env.getAssignmentManager().regionClosing(regionNode);
CloseRegionProcedure closeProc = isSplit
? new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(),
assignCandidate,
env.getMasterConfiguration().getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY,
DEFAULT_EVICT_ON_SPLIT))
: new CloseRegionProcedure(this, getRegion(), regionNode.getRegionLocation(),
assignCandidate, evictCache);
LOG.debug("Close region: isSplit: {}: evictOnSplit: {}: evictOnClose: {}", isSplit,
env.getMasterConfiguration().getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, DEFAULT_EVICT_ON_SPLIT),
evictCache);
// Splits/Merges are special cases, rather than deciding on the cache eviction behaviour here
// at
// Master, we just need to tell this close is for a split/merge and let RSes decide on the
// eviction. See HBASE-28811 for more context.
CloseRegionProcedure closeProc = new CloseRegionProcedure(this, getRegion(),
regionNode.getRegionLocation(), assignCandidate, isSplit);
addChildProcedure(closeProc);
setNextState(RegionStateTransitionState.REGION_STATE_TRANSITION_CONFIRM_CLOSED);
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,11 @@
*/
package org.apache.hadoop.hbase.regionserver.handler;

import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_CLOSE;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.DEFAULT_EVICT_ON_SPLIT;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY;

import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
Expand Down Expand Up @@ -63,21 +68,21 @@ public class UnassignRegionHandler extends EventHandler {

private final RetryCounter retryCounter;

// Whether this unassign is part of a split/merge; the actual block-eviction
// behaviour is derived from this server's configuration in process().
private boolean isSplit;

// active time of the master that sent this unassign request, used for fencing
private final long initiatingMasterActiveTime;

/**
 * @param server                     the region server executing the close
 * @param encodedName                encoded name of the region to close
 * @param closeProcId                id of the master-side close procedure
 * @param abort                      whether to abort (rather than cleanly close) the region
 * @param destination                where the region is headed next; may be null
 * @param eventType                  executor event type for this handler
 * @param initiatingMasterActiveTime active time of the initiating master, for fencing
 * @param isSplit                    whether this unassign is part of a split/merge
 */
public UnassignRegionHandler(HRegionServer server, String encodedName, long closeProcId,
  boolean abort, @Nullable ServerName destination, EventType eventType,
  long initiatingMasterActiveTime, boolean isSplit) {
  super(server, eventType);
  this.encodedName = encodedName;
  this.closeProcId = closeProcId;
  this.abort = abort;
  this.destination = destination;
  this.retryCounter = HandlerUtil.getRetryCounter();
  this.isSplit = isSplit;
  this.initiatingMasterActiveTime = initiatingMasterActiveTime;
}

Expand Down Expand Up @@ -127,7 +132,11 @@ public void process() throws IOException {
}
// This should be true only in the case of splits/merges closing the parent regions, as
// there's no point on keep blocks for those region files.
region.getStores().forEach(s -> s.getCacheConfig().setEvictOnClose(evictCache));
final boolean evictCacheOnClose = isSplit
? server.getConfiguration().getBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, DEFAULT_EVICT_ON_SPLIT)
: server.getConfiguration().getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE);
LOG.debug("Unassign region: split region: {}: evictCache: {}", isSplit, evictCacheOnClose);
region.getStores().forEach(s -> s.getCacheConfig().setEvictOnClose(evictCacheOnClose));

if (region.close(abort) == null) {
// XXX: Is this still possible? The old comment says about split, but now split is done at
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,12 @@
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_SPLIT_KEY;
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
Expand All @@ -47,13 +49,13 @@
import org.slf4j.LoggerFactory;

@Category({ MiscTests.class, MediumTests.class })
public class TestSplitWithCache {
public class TestCacheEviction {

@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestSplitWithCache.class);
HBaseClassTestRule.forClass(TestCacheEviction.class);

private static final Logger LOG = LoggerFactory.getLogger(TestSplitWithCache.class);
private static final Logger LOG = LoggerFactory.getLogger(TestCacheEviction.class);

private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

Expand All @@ -69,42 +71,44 @@ public static void setUp() throws Exception {

@Test
public void testEvictOnSplit() throws Exception {
doTest("testEvictOnSplit", true,
doTestEvictOnSplit("testEvictOnSplit", true,
(f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
(f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) == null));
}

@Test
public void testDoesntEvictOnSplit() throws Exception {
doTest("testDoesntEvictOnSplit", false,
doTestEvictOnSplit("testDoesntEvictOnSplit", false,
(f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null),
(f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null));
}

private void doTest(String table, boolean evictOnSplit,
@Test
public void testEvictOnClose() throws Exception {
  // Blocks should be cached while the region is open and gone from the cache
  // once it closes with eviction-on-close enabled.
  BiConsumer<String, Map<String, Pair<String, Long>>> expectCached =
    (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null);
  BiConsumer<String, Map<String, Pair<String, Long>>> expectEvicted =
    (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) == null);
  doTestEvictOnClose("testEvictOnClose", true, expectCached, expectEvicted);
}

@Test
public void testDoesntEvictOnClose() throws Exception {
  // With eviction-on-close disabled, blocks must stay cached both before and
  // after the region closes.
  BiConsumer<String, Map<String, Pair<String, Long>>> expectCached =
    (f, m) -> Waiter.waitFor(UTIL.getConfiguration(), 1000, () -> m.get(f) != null);
  doTestEvictOnClose("testDoesntEvictOnClose", false, expectCached, expectCached);
}

private void doTestEvictOnSplit(String table, boolean evictOnSplit,
BiConsumer<String, Map<String, Pair<String, Long>>> predicateBeforeSplit,
BiConsumer<String, Map<String, Pair<String, Long>>> predicateAfterSplit) throws Exception {
UTIL.getConfiguration().setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, evictOnSplit);
UTIL.startMiniCluster(1);
try {
TableName tableName = TableName.valueOf(table);
byte[] family = Bytes.toBytes("CF");
TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
UTIL.getAdmin().createTable(td);
UTIL.waitTableAvailable(tableName);
Table tbl = UTIL.getConnection().getTable(tableName);
List<Put> puts = new ArrayList<>();
for (int i = 0; i < 1000; i++) {
Put p = new Put(Bytes.toBytes("row-" + i));
p.addColumn(family, Bytes.toBytes(1), Bytes.toBytes("val-" + i));
puts.add(p);
}
tbl.put(puts);
UTIL.getAdmin().flush(tableName);
createAndCacheTable(tableName);
Collection<HStoreFile> files =
UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles();
checkCacheForBlocks(tableName, files, predicateBeforeSplit);
UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration()
.setBoolean(EVICT_BLOCKS_ON_SPLIT_KEY, evictOnSplit);
UTIL.getAdmin().split(tableName, Bytes.toBytes("row-500"));
Waiter.waitFor(UTIL.getConfiguration(), 30000,
() -> UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 2);
Expand All @@ -113,7 +117,43 @@ private void doTest(String table, boolean evictOnSplit,
} finally {
UTIL.shutdownMiniCluster();
}
}

/**
 * Drives a cache-eviction-on-close scenario: creates and caches a table,
 * verifies the pre-close cache state, sets the RS-side evict-on-close flag,
 * disables the table (closing its regions) and verifies the post-close state.
 * Statement order matters: the RS configuration must be set before the table
 * is disabled so the close path observes the intended flag.
 * @param table                name of the table to create (also used as row source)
 * @param evictOnClose         value for EVICT_BLOCKS_ON_CLOSE_KEY on the region server
 * @param predicateBeforeClose asserts cache state (file name -> cache entry) before close
 * @param predicateAfterClose  asserts cache state after the regions have closed
 */
private void doTestEvictOnClose(String table, boolean evictOnClose,
BiConsumer<String, Map<String, Pair<String, Long>>> predicateBeforeClose,
BiConsumer<String, Map<String, Pair<String, Long>>> predicateAfterClose) throws Exception {
UTIL.startMiniCluster(1);
try {
TableName tableName = TableName.valueOf(table);
createAndCacheTable(tableName);
// Snapshot the store files now; after disable the region objects are gone,
// but the file names are still what the cache keys are checked against.
Collection<HStoreFile> files =
UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles();
checkCacheForBlocks(tableName, files, predicateBeforeClose);
// Set the flag on the RS configuration (not the master's): the RS decides
// eviction at close time from its own config.
UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration()
.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, evictOnClose);
UTIL.getAdmin().disableTable(tableName);
UTIL.waitUntilNoRegionsInTransition();
checkCacheForBlocks(tableName, files, predicateAfterClose);
} finally {
// Always tear the mini cluster down, even if an assertion above failed.
UTIL.shutdownMiniCluster();
}
}

/**
 * Creates {@code tableName} with a single "CF" family, loads 1000 small rows
 * and flushes them so the data lands in a store file. NOTE(review): the test
 * setup presumably enables prefetch-on-open so these blocks end up cached —
 * confirm against the class setUp.
 * @param tableName table to create and populate
 * @throws IOException          on admin/table operation failure
 * @throws InterruptedException if waiting for table availability is interrupted
 */
private void createAndCacheTable(TableName tableName) throws IOException, InterruptedException {
  byte[] family = Bytes.toBytes("CF");
  TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
  UTIL.getAdmin().createTable(td);
  UTIL.waitTableAvailable(tableName);
  // try-with-resources: the Table handle was previously leaked (never closed).
  try (Table tbl = UTIL.getConnection().getTable(tableName)) {
    List<Put> puts = new ArrayList<>(1000);
    for (int i = 0; i < 1000; i++) {
      Put p = new Put(Bytes.toBytes("row-" + i));
      p.addColumn(family, Bytes.toBytes(1), Bytes.toBytes("val-" + i));
      puts.add(p);
    }
    tbl.put(puts);
  }
  // Flush so the rows are persisted into an HFile the split/close tests can
  // find via getStorefiles().
  UTIL.getAdmin().flush(tableName);
}

private void checkCacheForBlocks(TableName tableName, Collection<HStoreFile> files,
Expand Down