Merged
Changes from all commits
@@ -374,7 +374,7 @@ public void testMakeDirsWithAnFakeDirectory() throws Exception {
String fakeGrandpaKey = "dir1";
String fakeParentKey = fakeGrandpaKey + "/dir2";
String fullKeyName = fakeParentKey + "/key1";
TestDataUtil.createKey(ozoneBucket, fullKeyName, "");
TestDataUtil.createKey(ozoneBucket, fullKeyName, new byte[0]);

// /dir1/dir2 should not exist
assertFalse(fs.exists(new Path(fakeParentKey)));
@@ -888,7 +888,7 @@ public void testListStatusOnKeyNameContainDelimiter() throws Exception {
* the "/dir1", "/dir1/dir2/" are fake directory
* */
String keyName = "dir1/dir2/key1";
TestDataUtil.createKey(ozoneBucket, keyName, "");
TestDataUtil.createKey(ozoneBucket, keyName, new byte[0]);
FileStatus[] fileStatuses;

fileStatuses = fs.listStatus(ROOT, EXCLUDE_TRASH);
@@ -1396,7 +1396,7 @@ public void testRenameContainDelimiterFile() throws Exception {
String fakeParentKey = fakeGrandpaKey + "/dir2";
String sourceKeyName = fakeParentKey + "/key1";
String targetKeyName = fakeParentKey + "/key2";
TestDataUtil.createKey(ozoneBucket, sourceKeyName, "");
TestDataUtil.createKey(ozoneBucket, sourceKeyName, new byte[0]);

Path sourcePath = new Path(fs.getUri().toString() + "/" + sourceKeyName);
Path targetPath = new Path(fs.getUri().toString() + "/" + targetKeyName);
@@ -1894,8 +1894,7 @@ public void testProcessingDetails() throws IOException, InterruptedException {
GenericTestUtils.LogCapturer logCapturer =
GenericTestUtils.LogCapturer.captureLogs(log);
int keySize = 1024;
TestDataUtil.createKey(ozoneBucket, "key1", new String(new byte[keySize],
UTF_8));
TestDataUtil.createKey(ozoneBucket, "key1", new byte[keySize]);
logCapturer.stopCapturing();
String logContent = logCapturer.getOutput();

@@ -31,9 +31,11 @@
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -107,8 +109,9 @@ public void testReplicasAreReportedForClosedContainerAfterRestart()
throws Exception {
// Create some keys to write data into the open containers
for (int i = 0; i < 10; i++) {
TestDataUtil.createKey(bucket, "key" + i, ReplicationFactor.THREE,
ReplicationType.RATIS, "this is the content");
TestDataUtil.createKey(bucket, "key" + i, ReplicationConfig
.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
"this is the content".getBytes(StandardCharsets.UTF_8));
}
StorageContainerManager scm = cluster.getStorageContainerManager();

@@ -152,8 +155,9 @@ public void testCloseClosedContainer()
throws Exception {
// Create some keys to write data into the open containers
for (int i = 0; i < 10; i++) {
TestDataUtil.createKey(bucket, "key" + i, ReplicationFactor.THREE,
ReplicationType.RATIS, "this is the content");
TestDataUtil.createKey(bucket, "key" + i, ReplicationConfig
.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE),
"this is the content".getBytes(StandardCharsets.UTF_8));
}
StorageContainerManager scm = cluster.getStorageContainerManager();
// Pick any container on the cluster and close it via client
@@ -44,6 +44,7 @@
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
@@ -831,7 +832,7 @@ private void generateData(int keyCount, String keyPrefix,
ReplicationConfig replicationConfig) throws IOException {
for (int i = 0; i < keyCount; i++) {
TestDataUtil.createKey(bucket, keyPrefix + i, replicationConfig,
"this is the content");
"this is the content".getBytes(StandardCharsets.UTF_8));
}
}

@@ -25,7 +25,6 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -105,31 +104,14 @@ public static OzoneVolume createVolume(OzoneClient client,

}

public static void createKey(OzoneBucket bucket, String keyName,
String content) throws IOException {
createKey(bucket, keyName, ReplicationFactor.ONE,
ReplicationType.RATIS, content.getBytes(UTF_8));
}

public static void createKey(OzoneBucket bucket, String keyName,
byte[] content) throws IOException {
createKey(bucket, keyName, ReplicationFactor.ONE,
ReplicationType.RATIS, content);
ReplicationConfig replicationConfig = ReplicationConfig.
fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE);
createKey(bucket, keyName, replicationConfig, content);

}

public static void createKey(OzoneBucket bucket, String keyName,
ReplicationFactor repFactor, ReplicationType repType, byte[] content)
throws IOException {
ReplicationConfig repConfig = ReplicationConfig
.fromTypeAndFactor(repType, repFactor);
try (OutputStream stream = bucket
.createKey(keyName, content.length, repConfig,
new HashMap<>())) {
stream.write(content);
}
}

public static void createKey(OzoneBucket bucket, String keyName,
ReplicationConfig repConfig, byte[] content)
throws IOException {
@@ -140,32 +122,6 @@ public static void createKey(OzoneBucket bucket, String keyName,
}
}

public static void createKey(OzoneBucket bucket, String keyName,
ReplicationFactor repFactor, ReplicationType repType, String content)
throws IOException {
ReplicationConfig repConfig = ReplicationConfig
.fromTypeAndFactor(repType, repFactor);
createKey(bucket, keyName, repConfig, content.getBytes(UTF_8));
}

public static void createKey(OzoneBucket bucket, String keyName,
ReplicationConfig repConfig, String content)
throws IOException {
createKey(bucket, keyName, repConfig, content.getBytes(UTF_8));
}
Comment on lines -151 to -155
Contributor

I think we should also remove this one:

public static void createKey(OzoneBucket bucket, String keyName,
ReplicationFactor repFactor, ReplicationType repType, byte[] content)
throws IOException {
ReplicationConfig repConfig = ReplicationConfig
.fromTypeAndFactor(repType, repFactor);
try (OutputStream stream = bucket
.createKey(keyName, content.length, repConfig,
new HashMap<>())) {
stream.write(content);
}
}

OzoneBucket#createKey with separate ReplicationFactor and ReplicationType parameters is deprecated:

@Deprecated
public OzoneOutputStream createKey(String key, long size,
ReplicationType type,
ReplicationFactor factor,
Map<String, String> keyMetadata)
throws IOException {
return proxy
.createKey(volumeName, name, key, size, type, factor, keyMetadata);
}

Callers can be changed to use the variant that accepts a ReplicationConfig by using:

/**
* Get an instance of Ratis Replication Config with the requested factor.
* The same static instance will be returned for all requests for the same
* factor.
* @param factor Replication Factor requested
* @return RatisReplicationConfig object of the requested factor
*/
public static RatisReplicationConfig getInstance(ReplicationFactor factor) {
if (factor == ONE) {
return RATIS_ONE_CONFIG;
} else if (factor == THREE) {
return RATIS_THREE_CONFIG;
}
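
To make the migration concrete, here is a minimal sketch of a migrated call site (the class and method names are hypothetical, not code from this PR); it builds the ReplicationConfig once and goes through the ReplicationConfig-based OzoneBucket#createKey, which is the same pattern the retained TestDataUtil helper uses:

import java.io.IOException;
import java.io.OutputStream;
import java.util.HashMap;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.OzoneBucket;

final class CreateKeyMigrationSketch {

  // Writes 'content' under 'keyName' with RATIS/THREE replication via the
  // non-deprecated, ReplicationConfig-based createKey overload.
  static void writeKey(OzoneBucket bucket, String keyName, byte[] content)
      throws IOException {
    ReplicationConfig repConfig = ReplicationConfig
        .fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE);
    // RatisReplicationConfig.getInstance(...) shown above is another way to
    // obtain an equivalent replication config.
    try (OutputStream stream = bucket.createKey(
        keyName, content.length, repConfig, new HashMap<>())) {
      stream.write(content);
    }
  }
}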

And this one can also be removed:

public static void createKey(OzoneBucket bucket, String keyName,
ReplicationFactor repFactor, ReplicationType repType,
ByteBuffer data) throws IOException {
ReplicationConfig repConfig = ReplicationConfig
.fromTypeAndFactor(repType, repFactor);
try (OutputStream stream = bucket
.createKey(keyName, data.capacity(), repConfig,
new HashMap<>())) {
stream.write(data.array());
}
}

A ByteBuffer can be backed by an array (HeapByteBuffer) or by native memory (DirectByteBuffer). This method only works with the former, so callers can be changed to simply pass buffer.array(); besides, the method is used by only a single test class.
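
As a small illustration of that caller-side change (the helper below is hypothetical, not code from this PR), the unwrap step can live next to the test; array() is only valid when hasArray() is true, so a direct buffer needs an explicit copy:

import java.nio.ByteBuffer;

final class BufferUnwrapSketch {
  // Returns the bytes of 'data' without consuming it: the backing array for a
  // heap buffer, or a copy of the remaining bytes for a direct buffer.
  static byte[] contentOf(ByteBuffer data) {
    if (data.hasArray()) {
      return data.array();
    }
    byte[] copy = new byte[data.remaining()];
    data.duplicate().get(copy);  // duplicate() keeps the caller's position intact
    return copy;
  }
}

A caller of the removed overload would then pass contentOf(data) (or simply data.array() for a heap buffer) to the byte[] variant of TestDataUtil.createKey.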


public static void createKey(OzoneBucket bucket, String keyName,
ReplicationFactor repFactor, ReplicationType repType,
ByteBuffer data) throws IOException {
ReplicationConfig repConfig = ReplicationConfig
.fromTypeAndFactor(repType, repFactor);
try (OutputStream stream = bucket
.createKey(keyName, data.capacity(), repConfig,
new HashMap<>())) {
stream.write(data.array());
}
}

public static String getKey(OzoneBucket bucket, String keyName)
throws IOException {
try (InputStream stream = bucket.readKey(keyName)) {
@@ -258,7 +214,7 @@ public static Map<String, OmKeyInfo> createKeys(MiniOzoneCluster cluster, int nu
OzoneBucket bucket = createVolumeAndBucket(client);
for (int i = 0; i < numOfKeys; i++) {
String keyName = RandomStringUtils.randomAlphabetic(5) + i;
createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5));
createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5).getBytes(UTF_8));
keyLocationMap.put(keyName, lookupOmKeyInfo(cluster, bucket, keyName));
}
}
@@ -1083,16 +1083,17 @@ public void testDeleteAuditLog() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();

String value = "sample value";
int valueLength = value.getBytes(UTF_8).length;
byte[] value = "sample value".getBytes(UTF_8);
int valueLength = value.length;
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);

// create a three replica file
String keyName1 = "key1";
TestDataUtil.createKey(bucket, keyName1, THREE, RATIS, value);
TestDataUtil.createKey(bucket, keyName1, ReplicationConfig
.fromTypeAndFactor(RATIS, THREE), value);

// create a EC replica file
String keyName2 = "key2";
@@ -1103,7 +1104,8 @@ public void testDeleteAuditLog() throws Exception {
String dirName = "dir1";
bucket.createDirectory(dirName);
String keyName3 = "key3";
TestDataUtil.createKey(bucket, keyName3, THREE, RATIS, value);
TestDataUtil.createKey(bucket, keyName3, ReplicationConfig
.fromTypeAndFactor(RATIS, THREE), value);

// delete files and directory
output.reset();
@@ -1113,7 +1115,8 @@ public void testDeleteAuditLog() throws Exception {

// create keys for deleteKeys case
String keyName4 = "key4";
TestDataUtil.createKey(bucket, dirName + "/" + keyName4, THREE, RATIS, value);
TestDataUtil.createKey(bucket, dirName + "/" + keyName4,
ReplicationConfig.fromTypeAndFactor(RATIS, THREE), value);

String keyName5 = "key5";
TestDataUtil.createKey(bucket, dirName + "/" + keyName5, replicationConfig, value);
@@ -377,7 +377,8 @@ private static void createAndAssertKeys(OzoneBucket ozoneBucket, List<String> ke
byte[] input = new byte[length];
Arrays.fill(input, (byte) 96);
for (String key : keys) {
createKey(ozoneBucket, key, ReplicationFactor.THREE, ReplicationType.RATIS, input);
createKey(ozoneBucket, key,
ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), input);
// Read the key with given key name.
readkey(ozoneBucket, key, length, input);
}
@@ -649,8 +649,8 @@ private static void createAndAssertKeys(OzoneBucket ozoneBucket, List<String> ke
byte[] input = new byte[length];
Arrays.fill(input, (byte) 96);
for (String key : keys) {
createKey(ozoneBucket, key, ReplicationFactor.THREE,
ReplicationType.RATIS, input);
createKey(ozoneBucket, key, ReplicationConfig
.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.THREE), input);
// Read the key with given key name.
readkey(ozoneBucket, key, length, input);
}
@@ -677,9 +677,9 @@ private void prepSnapshotData() throws Exception {

// Create dummy keys for snapshotting.
TestDataUtil.createKey(bucket, UUID.randomUUID().toString(),
"content");
"content".getBytes(StandardCharsets.UTF_8));
TestDataUtil.createKey(bucket, UUID.randomUUID().toString(),
"content");
"content".getBytes(StandardCharsets.UTF_8));

snapshotDirName =
createSnapshot(bucket.getVolumeName(), bucket.getName());
@@ -27,6 +27,7 @@
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
@@ -172,7 +173,7 @@ public void testCreateKeyPermissionDenied() throws Exception {
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);

OMException exception = assertThrows(OMException.class,
() -> TestDataUtil.createKey(bucket, "testKey", "testcontent"));
() -> TestDataUtil.createKey(bucket, "testKey", "testcontent".getBytes(StandardCharsets.UTF_8)));
assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult());
assertThat(logCapturer.getOutput()).contains("doesn't have CREATE " +
"permission to access key");
@@ -181,7 +182,7 @@ public void testCreateKeyPermissionDenied() throws Exception {
@Test
public void testReadKeyPermissionDenied() throws Exception {
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
TestDataUtil.createKey(bucket, "testKey", "testcontent");
TestDataUtil.createKey(bucket, "testKey", "testcontent".getBytes(StandardCharsets.UTF_8));

TestOmAcls.keyAclAllow = false;
OMException exception = assertThrows(OMException.class,
@@ -20,6 +20,7 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang3.RandomStringUtils;
@@ -166,7 +167,7 @@ public void testReadLatestVersion() throws Exception {

String dataString = RandomStringUtils.randomAlphabetic(100);

TestDataUtil.createKey(bucket, keyName, dataString);
TestDataUtil.createKey(bucket, keyName, dataString.getBytes(StandardCharsets.UTF_8));
assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs);
assertEquals(0, keyInfo.getLatestVersionLocations().getVersion());
@@ -175,7 +176,7 @@ public void testReadLatestVersion() throws Exception {

// When bucket versioning is disabled, overwriting a key doesn't increment
// its version count. Rather it always resets the version to 0
TestDataUtil.createKey(bucket, keyName, dataString);
TestDataUtil.createKey(bucket, keyName, dataString.getBytes(StandardCharsets.UTF_8));

keyInfo = ozoneManager.lookupKey(omKeyArgs);
assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
@@ -184,7 +185,7 @@ public void testReadLatestVersion() throws Exception {
keyInfo.getLatestVersionLocations().getLocationList().size());

dataString = RandomStringUtils.randomAlphabetic(200);
TestDataUtil.createKey(bucket, keyName, dataString);
TestDataUtil.createKey(bucket, keyName, dataString.getBytes(StandardCharsets.UTF_8));

keyInfo = ozoneManager.lookupKey(omKeyArgs);
assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));