@@ -550,11 +550,11 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
           progress.cancel();
           return false;
         }
-        if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
-          ((ShipperListener) writer).beforeShipped();
-          kvs.shipped();
-          bytesWrittenProgressForShippedCall = 0;
-        }
       }
+      if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
+        ((ShipperListener) writer).beforeShipped();
+        kvs.shipped();
+        bytesWrittenProgressForShippedCall = 0;
+      }
       // Log the progress of long running compactions every minute if
       // logging at DEBUG level
@@ -323,11 +323,11 @@ protected boolean performCompaction(FileDetails fd, InternalScanner scanner, Cel
           progress.cancel();
           return false;
         }
-        if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
-          ((ShipperListener) writer).beforeShipped();
-          kvs.shipped();
-          bytesWrittenProgressForShippedCall = 0;
-        }
       }
+      if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
+        ((ShipperListener) writer).beforeShipped();
+        kvs.shipped();
+        bytesWrittenProgressForShippedCall = 0;
+      }
       // Log the progress of long running compactions every minute if
       // logging at DEBUG level
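
Both compactor hunks make the same change: the shipped-size check moves out of the per-cell loop and now runs once per scanner batch. Below is a minimal, self-contained sketch of that pattern, not HBase code; every name in it (BatchProcessor, Writer, Batch) is made up for illustration.

// Illustrative sketch only, not the HBase implementation: the release hook
// runs once per batch, after every element has been appended, instead of
// inside the per-element loop.
import java.util.List;

final class BatchProcessor {
  interface Writer {
    void append(String cell);
    void beforeShipped();
  }

  interface Batch {
    List<String> cells();
    void shipped(); // releases the buffers backing this batch's cells
  }

  static void process(Writer writer, Iterable<Batch> batches, long shippedCallSizeLimit) {
    long bytesSinceShipped = 0;
    for (Batch batch : batches) {
      for (String cell : batch.cells()) {
        writer.append(cell);
        bytesSinceShipped += cell.length();
        // Old placement: shipping here could release buffers that the
        // remaining cells of this batch still reference.
      }
      // New placement: only after the whole batch has been appended.
      if (bytesSinceShipped > shippedCallSizeLimit) {
        writer.beforeShipped();
        batch.shipped();
        bytesSinceShipped = 0;
      }
    }
  }
}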
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.ByteBuffAllocator;
+import org.apache.hadoop.hbase.io.DeallocateRewriteByteBuffAllocator;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
@@ -64,6 +65,8 @@ public class TestCompactionWithByteBuff {
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
     conf.setBoolean(ByteBuffAllocator.ALLOCATOR_POOL_ENABLED_KEY, true);
+    conf.set(ByteBuffAllocator.BYTEBUFF_ALLOCATOR_CLASS,
+      DeallocateRewriteByteBuffAllocator.class.getName());
     conf.setInt(ByteBuffAllocator.BUFFER_SIZE_KEY, 1024 * 5);
     conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, REGION_COUNT * 2);
     conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, REGION_COUNT * 2);
@@ -78,11 +81,9 @@ public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }
 
-  @Test
-  public void testCompaction() throws Exception {
-    TableName table = TableName.valueOf("t1");
+  private void testCompaction(TableName table, boolean isMob) throws Exception {
     admin.compactionSwitch(false, new ArrayList<>(0));
-    try (Table t = createTable(TEST_UTIL, table)) {
+    try (Table t = createTable(TEST_UTIL, table, isMob)) {
       for (int i = 0; i < 2; i++) {
         put(t);
         admin.flush(table);
@@ -108,9 +109,22 @@ public void testCompaction() throws Exception {
     }
   }
 
-  private Table createTable(HBaseTestingUtil util, TableName tableName) throws IOException {
-    TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(
-      ColumnFamilyDescriptorBuilder.newBuilder(COLUMN).setBlocksize(1024 * 4).build()).build();
+  @Test
+  public void testCompaction() throws Exception {
+    testCompaction(TableName.valueOf(name.getMethodName()), false);
+  }
+
+  @Test
+  public void testCompactionForMobTable() throws Exception {
+    testCompaction(TableName.valueOf(name.getMethodName()), true);
+  }
+
+  private Table createTable(HBaseTestingUtil util, TableName tableName, boolean isMob)
+    throws IOException {
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(COLUMN).setBlocksize(1024 * 4)
+        .setMobEnabled(isMob).setMobThreshold(10240).build())
+      .build();
     byte[][] splits = new byte[REGION_COUNT - 1][];
     for (int i = 1; i < REGION_COUNT; i++) {
       splits[i - 1] = Bytes.toBytes(buildRow((int) (ROW_COUNT / REGION_COUNT * i)));
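
The test now installs DeallocateRewriteByteBuffAllocator, whose name suggests it rewrites a pooled buffer's contents when the buffer is freed, so a compaction path that still reads a cell after its backing buffer was released shows up as bad data instead of passing silently. The sketch below is purely illustrative of that idea; it is not the real allocator, and the pool class shown is made up.

import java.nio.ByteBuffer;
import java.util.ArrayDeque;

// Illustrative "rewrite on deallocate" pool: freed buffers are scribbled over
// so that any caller still holding a reference reads obviously bad bytes.
final class RewritingBufferPool {
  private final ArrayDeque<ByteBuffer> free = new ArrayDeque<>();
  private final int bufSize;

  RewritingBufferPool(int bufSize) {
    this.bufSize = bufSize;
  }

  ByteBuffer allocate() {
    ByteBuffer b = free.poll();
    return b != null ? b : ByteBuffer.allocate(bufSize);
  }

  void deallocate(ByteBuffer b) {
    b.clear();
    while (b.hasRemaining()) {
      b.put((byte) 0xAB); // overwrite so use-after-release reads are detectable
    }
    b.clear();
    free.offer(b);
  }
}

The new MOB variant of the test presumably exercises the second compactor hunk above, since MOB-enabled column families are compacted through their own compactor rather than the default one.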