@@ -35,9 +35,6 @@
 import java.time.Duration;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
@@ -109,8 +106,7 @@ public void testReplicasAreReportedForClosedContainerAfterRestart()
       throws Exception {
     // Create some keys to write data into the open containers
     for (int i = 0; i < 10; i++) {
-      TestDataUtil.createKey(bucket, "key" + i, ReplicationConfig
-          .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+      TestDataUtil.createKey(bucket, "key" + i,
           "this is the content".getBytes(StandardCharsets.UTF_8));
     }
     StorageContainerManager scm = cluster.getStorageContainerManager();
@@ -155,8 +151,7 @@ public void testCloseClosedContainer()
       throws Exception {
     // Create some keys to write data into the open containers
    for (int i = 0; i < 10; i++) {
-      TestDataUtil.createKey(bucket, "key" + i, ReplicationConfig
-          .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
+      TestDataUtil.createKey(bucket, "key" + i,
           "this is the content".getBytes(StandardCharsets.UTF_8));
     }
     StorageContainerManager scm = cluster.getStorageContainerManager();
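Note: with the explicit RATIS/THREE argument removed, the keys written by these tests take whatever replication the bucket (or the cluster default) prescribes. A minimal sketch of how a caller could confirm the replication actually applied, assuming the MiniOzoneCluster setup used in these tests and the OzoneKey#getReplicationConfig accessor available in recent Ozone versions (key name illustrative):

    // Write through the simplified overload; replication now follows the bucket.
    TestDataUtil.createKey(bucket, "key0",
        "this is the content".getBytes(StandardCharsets.UTF_8));
    // Read back the replication the cluster actually applied (e.g. RATIS/THREE).
    ReplicationConfig applied = bucket.getKey("key0").getReplicationConfig();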
@@ -106,18 +106,23 @@ public static OzoneVolume createVolume(OzoneClient client,
 
   public static void createKey(OzoneBucket bucket, String keyName,
       byte[] content) throws IOException {
-    ReplicationConfig replicationConfig = ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE);
-    createKey(bucket, keyName, replicationConfig, content);
+    createKey(bucket, keyName, null, content);
 
   }
 
+  public static OutputStream createOutputStream(OzoneBucket bucket, String keyName,
+      ReplicationConfig repConfig, byte[] content)
+      throws IOException {
+    return repConfig == null
+        ? bucket.createKey(keyName, content.length)
+        : bucket.createKey(keyName, content.length, repConfig, new HashMap<>());
+  }
+
   public static void createKey(OzoneBucket bucket, String keyName,
       ReplicationConfig repConfig, byte[] content)
       throws IOException {
-    try (OutputStream stream = bucket
-        .createKey(keyName, content.length, repConfig,
-            new HashMap<>())) {
+    try (OutputStream stream = createOutputStream(bucket, keyName,
+        repConfig, content)) {
       stream.write(content);
     }
   }
@@ -214,7 +219,9 @@ public static Map<String, OmKeyInfo> createKeys(MiniOzoneCluster cluster, int numOfKeys)
       OzoneBucket bucket = createVolumeAndBucket(client);
       for (int i = 0; i < numOfKeys; i++) {
         String keyName = RandomStringUtils.randomAlphabetic(5) + i;
-        createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5).getBytes(UTF_8));
+        createKey(bucket, keyName, ReplicationConfig
+            .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE),
+            RandomStringUtils.randomAlphabetic(5).getBytes(UTF_8));
         keyLocationMap.put(keyName, lookupOmKeyInfo(cluster, bucket, keyName));
       }
     }
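Taken together, the TestDataUtil changes make the ReplicationConfig argument optional rather than hard-coded: a null config falls through to the two-argument bucket.createKey(keyName, size) overload, which applies the bucket's own replication settings, while a non-null config still pins replication at the call site. A minimal sketch of both paths (bucket and key names illustrative, assuming the test-cluster setup used above):

    byte[] content = "this is the content".getBytes(StandardCharsets.UTF_8);
    // Null/omitted config: the key inherits the bucket or cluster default.
    TestDataUtil.createKey(bucket, "defaultKey", content);
    // Explicit config: RATIS/THREE regardless of the bucket default.
    TestDataUtil.createKey(bucket, "pinnedKey",
        ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
        content);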
@@ -34,8 +34,6 @@
 import java.util.Optional;
 import java.util.stream.Stream;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.ozone.TestDataUtil;
@@ -377,8 +375,7 @@ private static void createAndAssertKeys(OzoneBucket ozoneBucket, List<String> keys)
     byte[] input = new byte[length];
     Arrays.fill(input, (byte) 96);
     for (String key : keys) {
-      createKey(ozoneBucket, key,
-          ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), input);
+      createKey(ozoneBucket, key, input);
       // Read the key with given key name.
       readkey(ozoneBucket, key, length, input);
     }
@@ -31,8 +31,6 @@
 import java.util.Optional;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.utils.IOUtils;
@@ -649,8 +647,7 @@ private static void createAndAssertKeys(OzoneBucket ozoneBucket, List<String> keys)
     byte[] input = new byte[length];
     Arrays.fill(input, (byte) 96);
     for (String key : keys) {
-      createKey(ozoneBucket, key, ReplicationConfig
-          .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), input);
+      createKey(ozoneBucket, key, input);
       // Read the key with given key name.
       readkey(ozoneBucket, key, length, input);
     }
@@ -98,6 +98,9 @@
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
@@ -676,9 +679,11 @@ private void prepSnapshotData() throws Exception {
         .createVolumeAndBucket(client);
 
     // Create dummy keys for snapshotting.
-    TestDataUtil.createKey(bucket, UUID.randomUUID().toString(),
+    TestDataUtil.createKey(bucket, UUID.randomUUID().toString(), ReplicationConfig
+        .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE),
         "content".getBytes(StandardCharsets.UTF_8));
-    TestDataUtil.createKey(bucket, UUID.randomUUID().toString(),
+    TestDataUtil.createKey(bucket, UUID.randomUUID().toString(), ReplicationConfig
+        .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE),
         "content".getBytes(StandardCharsets.UTF_8));
 
     snapshotDirName =
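This file moves in the opposite direction from the others: prepSnapshotData() previously relied on the RATIS/ONE default that TestDataUtil.createKey hard-coded, so it now pins that config explicitly to keep its behavior unchanged. A sketch of the pattern, using the same config the removed helper body used to build (key name illustrative):

    // Equivalent to the pre-refactor default: RATIS replication, factor ONE.
    TestDataUtil.createKey(bucket, UUID.randomUUID().toString(),
        ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE),
        "content".getBytes(StandardCharsets.UTF_8));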
@@ -49,9 +49,6 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
 import org.apache.commons.compress.utils.Lists;
-import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.utils.IOUtils;
@@ -202,10 +199,8 @@ public void testMultipleSnapshotKeyReclaim() throws Exception {
     OzoneBucket bucket2 = TestDataUtil.createBucket(
         client, VOLUME_NAME, bucketArgs, BUCKET_NAME_TWO);
     // Create key1 and key2
-    TestDataUtil.createKey(bucket2, "bucket2key1", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
-    TestDataUtil.createKey(bucket2, "bucket2key2", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket2, "bucket2key1", CONTENT.array());
+    TestDataUtil.createKey(bucket2, "bucket2key2", CONTENT.array());
 
     // Create Snapshot
     client.getObjectStore().createSnapshot(VOLUME_NAME, BUCKET_NAME_TWO,
@@ -265,16 +260,12 @@ public void testSnapshotWithFSO() throws Exception {
 
     // Create 10 keys
     for (int i = 1; i <= 10; i++) {
-      TestDataUtil.createKey(bucket2, "key" + i, ReplicationConfig.
-          fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
-          CONTENT.array());
+      TestDataUtil.createKey(bucket2, "key" + i, CONTENT.array());
     }
 
     // Create 5 keys to overwrite
     for (int i = 11; i <= 15; i++) {
-      TestDataUtil.createKey(bucket2, "key" + i, ReplicationConfig.
-          fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
-          CONTENT.array());
+      TestDataUtil.createKey(bucket2, "key" + i, CONTENT.array());
     }
 
     // Create Directory and Sub
@@ -287,8 +278,7 @@
         String childDir = "/childDir" + j;
         client.getProxy().createDirectory(VOLUME_NAME,
             BUCKET_NAME_FSO, parent + childDir);
-        TestDataUtil.createKey(bucket2, parent + childFile, ReplicationConfig.
-            fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+        TestDataUtil.createKey(bucket2, parent + childFile, CONTENT.array());
       }
     }
 
@@ -304,8 +294,7 @@
 
     // Overwrite 3 keys -> Moves previous version to deletedTable
     for (int i = 11; i <= 13; i++) {
-      TestDataUtil.createKey(bucket2, "key" + i, ReplicationConfig.
-          fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+      TestDataUtil.createKey(bucket2, "key" + i, CONTENT.array());
     }
     assertTableRowCount(keyTable, 24);
 
@@ -369,9 +358,7 @@ public void testSnapshotWithFSO() throws Exception {
 
     // Overwrite 2 keys
     for (int i = 14; i <= 15; i++) {
-      TestDataUtil.createKey(bucket2, "key" + i, ReplicationConfig.
-          fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
-          CONTENT.array());
+      TestDataUtil.createKey(bucket2, "key" + i, CONTENT.array());
     }
 
     // Delete 2 more keys
@@ -727,12 +714,8 @@ private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws
     OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
         om.getMetadataManager();
 
-    TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
-        CONTENT.array());
-    TestDataUtil.createKey(bucket, bucket.getName() + "key1", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
-        CONTENT.array());
+    TestDataUtil.createKey(bucket, bucket.getName() + "key0", CONTENT.array());
+    TestDataUtil.createKey(bucket, bucket.getName() + "key1", CONTENT.array());
     assertTableRowCount(keyTable, 2);
 
     // Create Snapshot 1.
@@ -742,10 +725,8 @@ private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws
 
     // Overwrite bucket1key0, This is a newer version of the key which should
     // reclaimed as this is a different version of the key.
-    TestDataUtil.createKey(bucket, bucket.getName() + "key0", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
-    TestDataUtil.createKey(bucket, bucket.getName() + "key2", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket, bucket.getName() + "key0", CONTENT.array());
+    TestDataUtil.createKey(bucket, bucket.getName() + "key2", CONTENT.array());
 
     // Key 1 cannot be reclaimed as it is still referenced by Snapshot 1.
     client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(),
@@ -769,10 +750,8 @@ private synchronized void createSnapshotDataForBucket(OzoneBucket bucket) throws
     // deletedTable when Snapshot 2 is taken.
     assertTableRowCount(deletedTable, 0);
 
-    TestDataUtil.createKey(bucket, bucket.getName() + "key3", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
-    TestDataUtil.createKey(bucket, bucket.getName() + "key4", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket, bucket.getName() + "key3", CONTENT.array());
+    TestDataUtil.createKey(bucket, bucket.getName() + "key4", CONTENT.array());
     client.getProxy().deleteKey(bucket.getVolumeName(), bucket.getName(),
         bucket.getName() + "key4", false);
     assertTableRowCount(keyTable, 1);
@@ -832,19 +811,15 @@ private synchronized void createSnapshotFSODataForBucket(OzoneBucket bucket) throws
           throw new RuntimeException(ex);
         }
       }));
-    TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
-    TestDataUtil.createKey(bucket, "dir1/" + bucket.getName() + "key1", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", CONTENT.array());
+    TestDataUtil.createKey(bucket, "dir1/" + bucket.getName() + "key1", CONTENT.array());
     assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 2);
     assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 2);
 
     // Overwrite bucket1key0, This is a newer version of the key which should
     // reclaimed as this is a different version of the key.
-    TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
-    TestDataUtil.createKey(bucket, "dir2/" + bucket.getName() + "key2", ReplicationConfig.
-        fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), CONTENT.array());
+    TestDataUtil.createKey(bucket, "dir0/" + bucket.getName() + "key0", CONTENT.array());
+    TestDataUtil.createKey(bucket, "dir2/" + bucket.getName() + "key2", CONTENT.array());
     assertTableRowCount(keyTable, countMap.get(keyTable.getName()) + 3);
     assertTableRowCount(dirTable, countMap.get(dirTable.getName()) + 3);
     assertTableRowCount(deletedTable, countMap.get(deletedTable.getName()) + 1);
@@ -42,9 +42,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.stream.Collectors;
-import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -183,8 +180,7 @@ private static void writeKey(String keyName) throws IOException {
       TestDataUtil.createVolumeAndBucket(client, VOLUME_NAME, BUCKET_NAME);
       TestDataUtil.createKey(
           client.getObjectStore().getVolume(VOLUME_NAME).getBucket(BUCKET_NAME),
-          keyName, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
-          "test".getBytes(StandardCharsets.UTF_8));
+          keyName, "test".getBytes(StandardCharsets.UTF_8));
     }
   }
