diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
index 027afa3965ba..29832701f6f9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
@@ -83,7 +83,7 @@ public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster,
   public static void createKey(OzoneBucket bucket, String keyName,
       String content) throws IOException {
     createKey(bucket, keyName, ReplicationFactor.ONE,
-        ReplicationType.STAND_ALONE, content);
+        ReplicationType.RATIS, content);
   }
 
   public static void createKey(OzoneBucket bucket, String keyName,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 6116212deadc..add89d5919f0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -369,6 +369,7 @@ public void testBlockDeletingThrottling() throws Exception {
         .setNumDatanodes(1)
         .build();
     cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
 
     try {
       DeletedBlockLog delLog = cluster.getStorageContainerManager()
@@ -746,6 +747,7 @@ public void testCloseContainerCommandOnRestart() throws Exception {
         .setNumDatanodes(1)
         .build();
     cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
 
     try {
       TestStorageContainerManagerHelper helper =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index d5e60d2fd73a..9a4d69151adb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -74,7 +74,7 @@
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -235,7 +235,7 @@ private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception {
     String value = "sample value";
     try (OzoneOutputStream out = bucket.createKey(keyName,
         value.getBytes(StandardCharsets.UTF_8).length,
-        ReplicationType.STAND_ALONE,
+        ReplicationType.RATIS,
         ReplicationFactor.ONE, new HashMap<>())) {
       out.write(value.getBytes(StandardCharsets.UTF_8));
     }
@@ -259,7 +259,7 @@ private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception {
     Assert.assertEquals(len, value.length());
 
     Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(),
-        bucket.getName(), keyName, ReplicationType.STAND_ALONE,
+        bucket.getName(), keyName, ReplicationType.RATIS,
         ReplicationFactor.ONE));
     Assert.assertEquals(value, new String(fileContent,
         StandardCharsets.UTF_8));
     Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
@@ -323,7 +323,7 @@ public void testKeyWithEncryptionAndGdpr() throws Exception {
     keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
     try (OzoneOutputStream out = bucket.createKey(keyName,
         value.getBytes(StandardCharsets.UTF_8).length,
-        ReplicationType.STAND_ALONE,
+        ReplicationType.RATIS,
         ReplicationFactor.ONE, keyMetadata)) {
       out.write(value.getBytes(StandardCharsets.UTF_8));
     }
@@ -340,7 +340,7 @@ public void testKeyWithEncryptionAndGdpr() throws Exception {
     Assert.assertEquals(len, value.length());
 
     Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-        keyName, ReplicationType.STAND_ALONE,
+        keyName, ReplicationType.RATIS,
         ReplicationFactor.ONE));
     Assert.assertEquals(value, new String(fileContent,
         StandardCharsets.UTF_8));
     Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
@@ -463,7 +463,7 @@ public void testMultipartUploadWithEncryption(OzoneBucket bucket,
     String keyName = "mpu_test_key_" + numParts;
 
     // Initiate multipart upload
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
 
     // Upload Parts
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index d772a3f20f84..e44cf2d809a7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -74,7 +74,7 @@
 import java.util.UUID;
 
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -161,7 +161,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
@@ -173,7 +173,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws
     // Call initiate multipart upload for the same key again, this should
     // generate a new uploadID.
     multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     Assert.assertNotNull(multipartInfo);
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
@@ -227,7 +227,7 @@ public void testUploadPartWithNoOverride() throws IOException {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     Assert.assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
@@ -321,7 +321,7 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     // Initiate multipart upload
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
 
     // Upload Parts
@@ -352,7 +352,7 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
 
     // We have not uploaded any parts, but passing some list it should throw
@@ -376,7 +376,7 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
 
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -400,7 +400,7 @@ public void testMultipartUploadWithMissingParts() throws Exception {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
 
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -426,7 +426,7 @@ public void testCommitPartAfterCompleteUpload() throws Exception {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -505,7 +505,7 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -540,7 +540,7 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     bucket.abortMultipartUpload(keyName, uploadID);
   }
@@ -563,7 +563,7 @@ public void testAbortUploadSuccessWithParts() throws Exception {
         ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
     BucketLayout bucketLayout = buckInfo.getBucketLayout();
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     String partName = uploadPart(bucket, keyName, uploadID, 1,
         "data".getBytes(UTF_8));
@@ -603,7 +603,7 @@ public void testListMultipartUploadParts()
       throws Exception {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     Map<Integer, String> partsMap = new TreeMap<>();
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     String partName1 = uploadPart(bucket, keyName, uploadID, 1,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -620,7 +620,7 @@ public void testListMultipartUploadParts() throws Exception {
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 3);
 
-    Assert.assertEquals(STAND_ALONE,
+    Assert.assertEquals(RATIS,
         ozoneMultipartUploadPartListParts.getReplicationType());
     Assert.assertEquals(3,
         ozoneMultipartUploadPartListParts.getPartInfoList().size());
@@ -705,7 +705,7 @@ public void testListMultipartUploadPartsWithContinuation()
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     Map<Integer, String> partsMap = new TreeMap<>();
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     String partName1 = uploadPart(bucket, keyName, uploadID, 1,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -722,7 +722,7 @@ public void testListMultipartUploadPartsWithContinuation()
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 2);
 
-    Assert.assertEquals(STAND_ALONE,
+    Assert.assertEquals(RATIS,
         ozoneMultipartUploadPartListParts.getReplicationType());
 
     Assert.assertEquals(2,
@@ -808,7 +808,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount()
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     uploadPart(bucket, keyName, uploadID, 1,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -821,7 +821,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount()
     Assert.assertEquals(0,
         ozoneMultipartUploadPartListParts.getPartInfoList().size());
 
-    Assert.assertEquals(STAND_ALONE,
+    Assert.assertEquals(RATIS,
         ozoneMultipartUploadPartListParts.getReplicationType());
 
     // As we don't have any parts with greater than partNumberMarker and list
@@ -866,11 +866,11 @@ public void testListMultipartUpload() throws Exception {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     // Initiate multipart upload
-    String uploadID1 = initiateMultipartUpload(bucket, key1, STAND_ALONE,
+    String uploadID1 = initiateMultipartUpload(bucket, key1, RATIS,
         ONE);
-    String uploadID2 = initiateMultipartUpload(bucket, key2, STAND_ALONE,
+    String uploadID2 = initiateMultipartUpload(bucket, key2, RATIS,
         ONE);
-    String uploadID3 = initiateMultipartUpload(bucket, key3, STAND_ALONE,
+    String uploadID3 = initiateMultipartUpload(bucket, key3, RATIS,
         ONE);
 
     // Upload Parts
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 897ad2295f5d..e70087a088d4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -116,8 +116,7 @@
 import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID;
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
 import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
@@ -860,7 +859,7 @@ public void testPutKey() throws IOException {
       String keyName = UUID.randomUUID().toString();
 
       OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes(UTF_8).length, STAND_ALONE,
+          value.getBytes(UTF_8).length, RATIS,
           ONE, new HashMap<>());
       out.write(value.getBytes(UTF_8));
       out.close();
@@ -870,7 +869,7 @@ public void testPutKey() throws IOException {
       byte[] fileContent = new byte[value.getBytes(UTF_8).length];
       is.read(fileContent);
       Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-          keyName, STAND_ALONE,
+          keyName, RATIS,
           ONE));
       Assert.assertEquals(value, new String(fileContent, UTF_8));
       Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
@@ -934,7 +933,7 @@ public void testCheckUsedBytesQuota() throws IOException {
     try {
       OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(),
-          valueLength, STAND_ALONE, ONE, new HashMap<>());
+          valueLength, RATIS, ONE, new HashMap<>());
       for (int i = 0; i <= (4 * blockSize) / value.length(); i++) {
         out.write(value.getBytes(UTF_8));
       }
@@ -959,7 +958,7 @@ public void testCheckUsedBytesQuota() throws IOException {
     bucket.setQuota(OzoneQuota.parseQuota(
         5 * blockSize + " B", "100"));
     OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(),
-        valueLength, STAND_ALONE, ONE, new HashMap<>());
+        valueLength, RATIS, ONE, new HashMap<>());
     out.close();
     Assert.assertEquals(4 * blockSize,
         store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
@@ -1093,7 +1092,7 @@ public void testBucketUsedNamespace() throws IOException {
   private void writeKey(OzoneBucket bucket, String keyName,
       ReplicationFactor replication, String value, int valueLength)
       throws IOException{
-    OzoneOutputStream out = bucket.createKey(keyName, valueLength, STAND_ALONE,
+    OzoneOutputStream out = bucket.createKey(keyName, valueLength, RATIS,
         replication, new HashMap<>());
     out.write(value.getBytes(UTF_8));
     out.close();
@@ -1102,7 +1101,7 @@ private void writeKey(OzoneBucket bucket, String keyName,
   private void writeFile(OzoneBucket bucket, String keyName,
       ReplicationFactor replication, String value, int valueLength)
       throws IOException{
-    OzoneOutputStream out = bucket.createFile(keyName, valueLength, STAND_ALONE,
+    OzoneOutputStream out = bucket.createFile(keyName, valueLength, RATIS,
         replication, true, true);
     out.write(value.getBytes(UTF_8));
     out.close();
@@ -1124,7 +1123,7 @@ public void testUsedBytesWithUploadPart() throws IOException {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
@@ -1162,7 +1161,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException {
     // create the initial key with size 0, write will allocate the first block.
     OzoneOutputStream out = bucket.createKey(keyName, 0,
-        STAND_ALONE, ONE, new HashMap<>());
+        RATIS, ONE, new HashMap<>());
     out.write(value.getBytes(UTF_8));
     out.close();
     OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
@@ -1428,7 +1427,7 @@ public void testGetKeyDetails() throws IOException {
     //String keyValue = "this is a test value.glx";
     // create the initial key with size 0, write will allocate the first block.
     OzoneOutputStream out = bucket.createKey(keyName,
-        keyValue.getBytes(UTF_8).length, STAND_ALONE,
+        keyValue.getBytes(UTF_8).length, RATIS,
         ONE, new HashMap<>());
     out.write(keyValue.getBytes(UTF_8));
     out.close();
@@ -1780,7 +1779,7 @@ public void testDeleteKey()
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes(UTF_8).length, STAND_ALONE,
+        value.getBytes(UTF_8).length, RATIS,
         ONE, new HashMap<>());
     out.write(value.getBytes(UTF_8));
     out.close();
@@ -2057,25 +2056,25 @@ public void testListKey()
       byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8);
       OzoneOutputStream one = volAbucketA.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
+          value.length, RATIS, ONE,
           new HashMap<>());
       one.write(value);
       one.close();
       OzoneOutputStream two = volAbucketB.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
+          value.length, RATIS, ONE,
           new HashMap<>());
       two.write(value);
       two.close();
       OzoneOutputStream three = volBbucketA.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
+          value.length, RATIS, ONE,
           new HashMap<>());
       three.write(value);
       three.close();
       OzoneOutputStream four = volBbucketB.createKey(
           keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
+          value.length, RATIS, ONE,
           new HashMap<>());
       four.write(value);
       four.close();
@@ -2090,25 +2089,25 @@ public void testListKey()
       byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8);
       OzoneOutputStream one = volAbucketA.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
+          value.length, RATIS, ONE,
           new HashMap<>());
       one.write(value);
       one.close();
       OzoneOutputStream two = volAbucketB.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
+          value.length, RATIS, ONE,
           new HashMap<>());
       two.write(value);
       two.close();
       OzoneOutputStream three = volBbucketA.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
+          value.length, RATIS, ONE,
           new HashMap<>());
       three.write(value);
       three.close();
       OzoneOutputStream four = volBbucketB.createKey(
           keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
-          value.length, STAND_ALONE, ONE,
+          value.length, RATIS, ONE,
           new HashMap<>());
       four.write(value);
       four.close();
@@ -2189,7 +2188,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
@@ -2201,7 +2200,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws
     // Call initiate multipart upload for the same key again, this should
     // generate a new uploadID.
     multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     assertNotNull(multipartInfo);
     Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
@@ -2256,7 +2255,7 @@ public void testUploadPartWithNoOverride() throws IOException {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
@@ -2293,7 +2292,7 @@ public void testUploadPartOverrideWithStandAlone() throws IOException {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
     OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     assertNotNull(multipartInfo);
     String uploadID = multipartInfo.getUploadID();
@@ -2562,7 +2561,7 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     // Initiate multipart upload
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
 
     // Upload Parts
@@ -2595,7 +2594,7 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
 
     // We have not uploaded any parts, but passing some list it should throw
@@ -2620,7 +2619,7 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
 
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -2645,7 +2644,7 @@ public void testMultipartUploadWithMissingParts() throws Exception {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
 
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
@@ -2685,7 +2684,7 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -2720,7 +2719,7 @@ public void testCommitPartAfterCompleteUpload() throws Exception {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName,
-        STAND_ALONE, ONE);
+        RATIS, ONE);
 
     Assert.assertNotNull(omMultipartInfo.getUploadID());
@@ -2782,7 +2781,7 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     bucket.abortMultipartUpload(keyName, uploadID);
   }
@@ -2798,7 +2797,7 @@ public void testAbortUploadSuccessWithParts() throws Exception {
     volume.createBucket(bucketName);
     OzoneBucket bucket = volume.getBucket(bucketName);
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
     bucket.abortMultipartUpload(keyName, uploadID);
@@ -2816,7 +2815,7 @@ public void testListMultipartUploadParts() throws Exception {
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     Map<Integer, String> partsMap = new TreeMap<>();
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     String partName1 = uploadPart(bucket, keyName, uploadID, 1,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -2833,7 +2832,7 @@ public void testListMultipartUploadParts() throws Exception {
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 3);
 
-    Assert.assertEquals(STAND_ALONE,
+    Assert.assertEquals(RATIS,
         ozoneMultipartUploadPartListParts.getReplicationType());
     Assert.assertEquals(3,
         ozoneMultipartUploadPartListParts.getPartInfoList().size());
@@ -2867,7 +2866,7 @@ public void testListMultipartUploadPartsWithContinuation()
     OzoneBucket bucket = volume.getBucket(bucketName);
 
     Map<Integer, String> partsMap = new TreeMap<>();
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     String partName1 = uploadPart(bucket, keyName, uploadID, 1,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -2884,7 +2883,7 @@ public void testListMultipartUploadPartsWithContinuation()
     OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
         bucket.listParts(keyName, uploadID, 0, 2);
 
-    Assert.assertEquals(STAND_ALONE,
+    Assert.assertEquals(RATIS,
         ozoneMultipartUploadPartListParts.getReplicationType());
 
     Assert.assertEquals(2,
@@ -2972,7 +2971,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount()
     OzoneBucket bucket = volume.getBucket(bucketName);
 
-    String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+    String uploadID = initiateMultipartUpload(bucket, keyName, RATIS,
         ONE);
     uploadPart(bucket, keyName, uploadID, 1,
         generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97));
@@ -2985,7 +2984,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount()
     Assert.assertEquals(0,
         ozoneMultipartUploadPartListParts.getPartInfoList().size());
 
-    Assert.assertEquals(STAND_ALONE,
+    Assert.assertEquals(RATIS,
         ozoneMultipartUploadPartListParts.getReplicationType());
 
     // As we don't have any parts with greater than partNumberMarker and list
@@ -3331,7 +3330,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException {
   }
 
   private void writeKey(String key1, OzoneBucket bucket) throws IOException {
-    OzoneOutputStream out = bucket.createKey(key1, 1024, STAND_ALONE,
+    OzoneOutputStream out = bucket.createKey(key1, 1024, RATIS,
         ONE, new HashMap<>());
     out.write(RandomStringUtils.random(1024).getBytes(UTF_8));
     out.close();
@@ -3453,7 +3452,7 @@ private void completeMultipartUpload(OzoneBucket bucket, String keyName,
   private void createTestKey(OzoneBucket bucket, String keyName,
       String keyValue) throws IOException {
     OzoneOutputStream out = bucket.createKey(keyName,
-        keyValue.getBytes(UTF_8).length, STAND_ALONE,
+        keyValue.getBytes(UTF_8).length, RATIS,
         ONE, new HashMap<>());
     out.write(keyValue.getBytes(UTF_8));
     out.close();
@@ -3512,7 +3511,7 @@ public void testKeyReadWriteForGDPR() throws Exception {
     Map<String, String> keyMetadata = new HashMap<>();
     keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
     OzoneOutputStream out = bucket.createKey(keyName,
-        text.getBytes(UTF_8).length, STAND_ALONE, ONE, keyMetadata);
+        text.getBytes(UTF_8).length, RATIS, ONE, keyMetadata);
     out.write(text.getBytes(UTF_8));
     out.close();
     Assert.assertNull(keyMetadata.get(OzoneConsts.GDPR_SECRET));
@@ -3530,7 +3529,7 @@ public void testKeyReadWriteForGDPR() throws Exception {
     byte[] fileContent = new byte[text.getBytes(UTF_8).length];
     is.read(fileContent);
     Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-        keyName, STAND_ALONE,
+        keyName, RATIS,
         ONE));
     Assert.assertEquals(text, new String(fileContent, UTF_8));
@@ -3593,7 +3592,7 @@ public void testDeletedKeyForGDPR() throws Exception {
     Map<String, String> keyMetadata = new HashMap<>();
     keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
     OzoneOutputStream out = bucket.createKey(keyName,
-        text.getBytes(UTF_8).length, STAND_ALONE, ONE, keyMetadata);
+        text.getBytes(UTF_8).length, RATIS, ONE, keyMetadata);
     out.write(text.getBytes(UTF_8));
     out.close();
@@ -3610,7 +3609,7 @@ public void testDeletedKeyForGDPR() throws Exception {
     byte[] fileContent = new byte[text.getBytes(UTF_8).length];
     is.read(fileContent);
     Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-        keyName, STAND_ALONE,
+        keyName, RATIS,
         ONE));
     Assert.assertEquals(text, new String(fileContent, UTF_8));
@@ -3658,7 +3657,8 @@ public void testHeadObject() throws IOException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     ReplicationConfig replicationConfig = ReplicationConfig
-        .fromProtoTypeAndFactor(RATIS, HddsProtos.ReplicationFactor.THREE);
+        .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE);
     String value = "sample value";
     store.createVolume(volumeName);
@@ -3700,7 +3700,8 @@ private void createRequiredForVersioningTest(String volumeName,
       String bucketName, String keyName, boolean versioning) throws Exception {
     ReplicationConfig replicationConfig = ReplicationConfig
-        .fromProtoTypeAndFactor(RATIS, HddsProtos.ReplicationFactor.THREE);
+        .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE);
     String value = "sample value";
     store.createVolume(volumeName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index b44afe37d7e5..a52549956287 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -160,7 +160,7 @@ public void testPutKeySuccessWithBlockToken() throws Exception {
       String keyName = UUID.randomUUID().toString();
 
       try (OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
+          value.getBytes(UTF_8).length, ReplicationType.RATIS,
           ReplicationFactor.ONE, new HashMap<>())) {
         out.write(value.getBytes(UTF_8));
       }
@@ -174,7 +174,7 @@ public void testPutKeySuccessWithBlockToken() throws Exception {
       }
 
       Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-          keyName, ReplicationType.STAND_ALONE,
+          keyName, ReplicationType.RATIS,
           ReplicationFactor.ONE));
       Assert.assertEquals(value, new String(fileContent, UTF_8));
       Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
@@ -203,7 +203,7 @@ public void testKeyOpFailureWithoutBlockToken() throws Exception {
       String keyName = UUID.randomUUID().toString();
 
       try (OzoneOutputStream out = bucket.createKey(keyName,
-          value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
+          value.getBytes(UTF_8).length, ReplicationType.RATIS,
           ReplicationFactor.ONE, new HashMap<>())) {
         LambdaTestUtils.intercept(IOException.class, "UNAUTHENTICATED: Fail " +
                 "to find any token ",
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index 85d46ca4d801..cf81bd981a15 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -119,9 +119,12 @@ public static int countReplicas(long containerID,
   public static OzoneOutputStream createKey(String keyName,
       ReplicationType type, long size, ObjectStore objectStore,
       String volumeName, String bucketName) throws Exception {
+    if (type == ReplicationType.STAND_ALONE) {
+      throw new IllegalArgumentException(ReplicationType.STAND_ALONE
+          + " replication type should not be used in tests to write keys anymore."
+      );
+    }
     org.apache.hadoop.hdds.client.ReplicationFactor factor =
-        type == ReplicationType.STAND_ALONE ?
-            org.apache.hadoop.hdds.client.ReplicationFactor.ONE :
             org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
     return objectStore.getVolume(volumeName).getBucket(bucketName)
         .createKey(keyName, size, type, factor, new HashMap<>());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index cb85161eec06..9b0ccd585670 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -77,6 +77,8 @@ public void setup() throws Exception {
     conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(1).build();
+    cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(ONE, 30000);
   }
 
   @After
@@ -96,7 +98,7 @@ public void test() throws Exception {
     objectStore.createVolume("test");
     objectStore.getVolume("test").createBucket("test");
     OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
-        .createKey("test", 1024, ReplicationType.STAND_ALONE,
+        .createKey("test", 1024, ReplicationType.RATIS,
             ReplicationFactor.ONE, new HashMap<>());
     key.write("test".getBytes(UTF_8));
     key.close();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index e3eccb5ff634..0a0e0fa08786 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -87,6 +87,7 @@ public static void setup() throws Exception {
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(ONE, 30000);
 
     OzoneClient client = OzoneClientFactory.getRpcClient(conf);
     objectStore = client.getObjectStore();
@@ -243,7 +244,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer()
   private void createKey(String keyName) throws IOException {
     OzoneOutputStream key = objectStore.getVolume(volumeName)
         .getBucket(bucketName)
-        .createKey(keyName, 1024, ReplicationType.STAND_ALONE,
+        .createKey(keyName, 1024, ReplicationType.RATIS,
             ReplicationFactor.ONE, new HashMap<>());
     key.write("test".getBytes(UTF_8));
     key.close();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
index 658746a5c0cb..898119f2ef13 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
@@ -70,7 +70,7 @@
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 
 /**
  * This class tests the data scrubber functionality.
@@ -101,6 +101,7 @@ public static void init() throws Exception {
     cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1)
         .build();
     cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
     ozClient = OzoneClientFactory.getRpcClient(ozoneConfig);
     store = ozClient.getObjectStore();
     ozoneManager = cluster.getOzoneManager();
@@ -137,7 +138,7 @@ public void testOpenContainerIntegrity() throws Exception {
     String keyName = UUID.randomUUID().toString();
 
     OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes(UTF_8).length, STAND_ALONE,
+        value.getBytes(UTF_8).length, RATIS,
         ONE, new HashMap<>());
     out.write(value.getBytes(UTF_8));
     out.close();
@@ -147,7 +148,7 @@ public void testOpenContainerIntegrity() throws Exception {
     byte[] fileContent = new byte[value.getBytes(UTF_8).length];
     is.read(fileContent);
     Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
-        keyName, STAND_ALONE,
+        keyName, RATIS,
         ONE));
     Assert.assertEquals(value, new String(fileContent, UTF_8));
     Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
index 2a7fbe826de4..af39055ad264 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java
@@ -62,7 +62,7 @@
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE;
@@ -109,6 +109,7 @@ public void init() throws Exception {
         .setNumDataVolumes(1)
         .build();
     cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
 
     ozClient = OzoneClientFactory.getRpcClient(ozoneConfig);
     store = ozClient.getObjectStore();
@@ -141,7 +142,7 @@ public void testHddsVolumeFailureOnChunkFileCorrupt() throws Exception {
     String keyName = UUID.randomUUID().toString();
     String value = "sample value";
     OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes(UTF_8).length, STAND_ALONE,
+        value.getBytes(UTF_8).length, RATIS,
         ONE, new HashMap<>());
     out.write(value.getBytes(UTF_8));
     out.close();
@@ -243,7 +244,7 @@ public void testHddsVolumeFailureOnDbFileCorrupt() throws Exception {
     String keyName = UUID.randomUUID().toString();
     String value = "sample value";
     OzoneOutputStream out = bucket.createKey(keyName,
-        value.getBytes(UTF_8).length, STAND_ALONE,
+        value.getBytes(UTF_8).length, RATIS,
         ONE, new HashMap<>());
     out.write(value.getBytes(UTF_8));
     out.close();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
index 467817676840..9104d987c2fd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -107,7 +107,7 @@ public void testContainerReportKeyWrite() throws Exception {
     objectStore.getVolume(volumeName).createBucket(bucketName);
     OzoneOutputStream key =
         objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
+            .createKey(keyName, keySize, ReplicationType.RATIS,
                 ReplicationFactor.ONE, new HashMap<>());
     String dataString = RandomStringUtils.randomAlphabetic(keySize);
     key.write(dataString.getBytes(UTF_8));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
index 232dfab55b43..88f0dca71ab3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
@@ -112,7 +112,7 @@ public void testKeysPurgingByKeyDeletingService() throws Exception {
       String keyName = keyBase + "-" + i;
       keys.add(keyName);
       OzoneOutputStream keyStream = TestHelper.createKey(
-          keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
+          keyName, ReplicationType.RATIS, ReplicationFactor.ONE,
           KEY_SIZE, store, volumeName, bucketName);
       keyStream.write(data);
       keyStream.close();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
index b8f37ce4ac8c..54b0b256fc3b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
@@ -68,7 +68,7 @@
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
@@ -693,7 +693,7 @@ private void assertKeyRenamedEx(OzoneBucket bucket, String keyName)
   private void createTestKey(OzoneBucket bucket, String keyName,
       String keyValue) throws IOException {
     OzoneOutputStream out = bucket.createKey(keyName,
-        keyValue.getBytes(StandardCharsets.UTF_8).length, STAND_ALONE,
+        keyValue.getBytes(StandardCharsets.UTF_8).length, RATIS,
         ONE, new HashMap<>());
     out.write(keyValue.getBytes(StandardCharsets.UTF_8));
     out.close();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
index b3af2f990c84..26523171fde4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
@@ -217,7 +217,7 @@ public static String createKey(OzoneBucket ozoneBucket) throws IOException {
     String keyName = "key" + RandomStringUtils.randomNumeric(5);
     String data = "data" + RandomStringUtils.randomNumeric(5);
"data" + RandomStringUtils.randomNumeric(5); OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, - data.length(), ReplicationType.STAND_ALONE, + data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); ozoneOutputStream.close(); @@ -380,7 +380,7 @@ protected void createKeyTest(boolean checkSuccess) throws Exception { String value = "random data"; OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, - value.length(), ReplicationType.STAND_ALONE, + value.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); ozoneOutputStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java index fe4429eecba6..003f970b4e39 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java @@ -373,7 +373,7 @@ private void writeTestData(String volumeName, byte[] data = ContainerTestHelper.getFixedLengthString( keyString, 100).getBytes(UTF_8); OzoneOutputStream keyStream = TestHelper.createKey( - keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + keyName, ReplicationType.RATIS, ReplicationFactor.ONE, 100, store, volumeName, bucketName); keyStream.write(data); keyStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java index f3dc4b66ca56..c16583cce285 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.ObjectStore; @@ -73,8 +74,7 @@ public static void init() throws Exception { .includeRecon(true) .build(); cluster.waitForClusterToBeReady(); - - cluster.getStorageContainerManager().exitSafeMode(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); store = cluster.getClient().getObjectStore(); } @@ -94,7 +94,7 @@ private void writeTestData(String volumeName, byte[] data = ContainerTestHelper.getFixedLengthString( keyString, 100).getBytes(UTF_8); OzoneOutputStream keyStream = TestHelper.createKey( - keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + keyName, ReplicationType.RATIS, ReplicationFactor.ONE, 100, store, volumeName, bucketName); keyStream.write(data); keyStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java index a5d55051d7a5..f09a150cc7ce 100644 --- 
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java
@@ -58,7 +58,7 @@
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 
 /**
  * Base class for Ozone Manager HA tests.
@@ -155,7 +155,7 @@ public void testPutKey() throws Exception {
     String keyName = UUID.randomUUID().toString();
 
     OzoneOutputStream out = bucket
-        .createKey(keyName, value.getBytes(UTF_8).length, STAND_ALONE, ONE,
+        .createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE,
             new HashMap<>());
     out.write(value.getBytes(UTF_8));
     out.close();
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index cf0d4c325dd8..6a0d42872561 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -87,7 +87,7 @@ public OzoneBucketStub(
   @Override
   public OzoneOutputStream createKey(String key, long size)
       throws IOException {
-    return createKey(key, size, ReplicationType.STAND_ALONE,
+    return createKey(key, size, ReplicationType.RATIS,
         ReplicationFactor.ONE, new HashMap<>());
   }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
index 66c745628695..3c9a17cb5f7c 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
@@ -69,7 +69,7 @@ public void testHeadObject() throws Exception {
     //GIVEN
     String value = RandomStringUtils.randomAlphanumeric(32);
     OzoneOutputStream out = bucket.createKey("key1",
-        value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE,
+        value.getBytes(UTF_8).length, ReplicationType.RATIS,
         ReplicationFactor.ONE, new HashMap<>());
     out.write(value.getBytes(UTF_8));
     out.close();