diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index f212570fc54e..26f896663b81 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -232,7 +232,7 @@ public Builder addFailures(Class<? extends Failures> clazz) {
protected void initializeConfiguration() throws IOException {
super.initializeConfiguration();
- OzoneClientConfig clientConfig = new OzoneClientConfig();
+ OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
clientConfig.setStreamBufferFlushSize(8 * 1024 * 1024);
clientConfig.setStreamBufferMaxSize(16 * 1024 * 1024);
clientConfig.setStreamBufferSize(4 * 1024);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
index 559b8da4982e..05d297d38ed8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
@@ -27,9 +27,6 @@
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicReference;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoOutputStream;
@@ -37,15 +34,19 @@
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -116,17 +117,19 @@ public static void init() throws Exception {
CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false);
CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name());
CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
- cluster = MiniOzoneCluster.newBuilder(CONF)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(10)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setDataStreamBufferFlushize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .setDataStreamBufferFlushSize(maxFlushSize)
.setDataStreamMinPacketSize(chunkSize)
- .setDataStreamStreamWindowSize(5 * chunkSize)
+ .setDataStreamWindowSize(5 * chunkSize)
+ .applyTo(CONF);
+
+ cluster = MiniOzoneCluster.newBuilder(CONF)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(10)
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
index 1e61c9026b15..ca68aad45515 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java
@@ -17,14 +17,15 @@
*/
package org.apache.hadoop.fs.ozone;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -91,17 +92,19 @@ public void init() throws IOException, InterruptedException,
conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false);
conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true);
conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name());
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
+ .setStreamBufferMaxSize(maxFlushSize)
+ .setDataStreamBufferFlushSize(maxFlushSize)
+ .setDataStreamMinPacketSize(chunkSize)
+ .setDataStreamWindowSize(5 * chunkSize)
+ .applyTo(conf);
+
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(5)
.setTotalPipelineNumLimit(10)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
- .setDataStreamBufferFlushize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
- .setDataStreamMinPacketSize(chunkSize)
- .setDataStreamStreamWindowSize(5 * chunkSize)
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
index 2e00e8c2c622..c6893c57e969 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
@@ -35,9 +35,11 @@
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -83,12 +85,16 @@ public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
BucketLayout.LEGACY.name());
+
+ ClientConfigForTesting.newBuilder(StorageUnit.MB)
+ .setChunkSize(2)
+ .setBlockSize(8)
+ .setStreamBufferFlushSize(2)
+ .setStreamBufferMaxSize(4)
+ .applyTo(conf);
+
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(5)
- .setChunkSize(2) // MB
- .setBlockSize(8) // MB
- .setStreamBufferFlushSize(2) // MB
- .setStreamBufferMaxSize(4) // MB
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
index d11ea5376d51..5aba83bd412e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs.ozone;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -25,6 +26,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -72,12 +74,16 @@ public static void init() throws Exception {
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
BucketLayout.LEGACY.name());
conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
+
+ ClientConfigForTesting.newBuilder(StorageUnit.MB)
+ .setChunkSize(2)
+ .setBlockSize(8)
+ .setStreamBufferFlushSize(2)
+ .setStreamBufferMaxSize(4)
+ .applyTo(conf);
+
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
- .setChunkSize(2) // MB
- .setBlockSize(8) // MB
- .setStreamBufferFlushSize(2) // MB
- .setStreamBufferMaxSize(4) // MB
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
index 9d5009eeb30c..2a6c8c456b9c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
@@ -22,14 +22,15 @@
import java.io.OutputStream;
import java.util.concurrent.ThreadLocalRandom;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -86,17 +87,20 @@ public static void init() throws Exception {
CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B");
CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true);
CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name());
- cluster = MiniOzoneCluster.newBuilder(CONF)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(10)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setDataStreamBufferFlushize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .setDataStreamBufferFlushSize(maxFlushSize)
.setDataStreamMinPacketSize(chunkSize)
- .setDataStreamStreamWindowSize(5 * chunkSize)
+ .setDataStreamWindowSize(5 * chunkSize)
+ .applyTo(CONF);
+
+ cluster = MiniOzoneCluster.newBuilder(CONF)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(10)
.build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
index d4581d1ad4b2..563e0162acc6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
@@ -25,10 +25,10 @@
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
@@ -45,6 +45,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -127,21 +128,23 @@ public void init() throws Exception {
ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(10));
conf.setFromObject(ratisClientConfig);
- OzoneClientConfig clientConfig = new OzoneClientConfig();
+ OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
clientConfig.setChecksumType(ChecksumType.NONE);
conf.setFromObject(clientConfig);
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
+ .setStreamBufferMaxSize(maxFlushSize)
+ .applyTo(conf);
+
conf.setQuietMode(false);
conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
StorageUnit.MB);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(5)
.setTotalPipelineNumLimit(3)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
.build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ClientConfigForTesting.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ClientConfigForTesting.java
new file mode 100644
index 000000000000..d436a65dab29
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ClientConfigForTesting.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
+import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE;
+
+/**
+ * Helper for tests that want to set client stream properties.
+ */
+public final class ClientConfigForTesting {
+
+ private int chunkSize = 1024 * 1024;
+ private Long blockSize;
+ private Integer streamBufferSize;
+ private Long streamBufferFlushSize;
+ private Long dataStreamBufferFlushSize;
+ private Long dataStreamWindowSize;
+ private Long streamBufferMaxSize;
+ private Integer dataStreamMinPacketSize;
+ private final StorageUnit unit;
+
+ /**
+ * @param unit Defines the unit in which size properties will be passed to the builder.
+ * All sizes are stored internally converted to {@link StorageUnit#BYTES}.
+ */
+ public static ClientConfigForTesting newBuilder(StorageUnit unit) {
+ return new ClientConfigForTesting(unit);
+ }
+
+ private ClientConfigForTesting(StorageUnit unit) {
+ this.unit = unit;
+ }
+
+ public ClientConfigForTesting setChunkSize(int size) {
+ chunkSize = (int) toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setBlockSize(long size) {
+ blockSize = toBytes(size);
+ return this;
+ }
+
+ @SuppressWarnings("unused") // kept for completeness
+ public ClientConfigForTesting setStreamBufferSize(int size) {
+ streamBufferSize = (int) toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setStreamBufferFlushSize(long size) {
+ streamBufferFlushSize = toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setStreamBufferMaxSize(long size) {
+ streamBufferMaxSize = toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setDataStreamMinPacketSize(int size) {
+ dataStreamMinPacketSize = (int) toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setDataStreamBufferFlushSize(long size) {
+ dataStreamBufferFlushSize = toBytes(size);
+ return this;
+ }
+
+ public ClientConfigForTesting setDataStreamWindowSize(long size) {
+ dataStreamWindowSize = toBytes(size);
+ return this;
+ }
+
+ public void applyTo(MutableConfigurationSource conf) {
+ if (streamBufferSize == null) {
+ streamBufferSize = chunkSize;
+ }
+ if (streamBufferFlushSize == null) {
+ streamBufferFlushSize = (long) chunkSize;
+ }
+ if (streamBufferMaxSize == null) {
+ streamBufferMaxSize = 2 * streamBufferFlushSize;
+ }
+ if (dataStreamBufferFlushSize == null) {
+ dataStreamBufferFlushSize = 4L * chunkSize;
+ }
+ if (dataStreamMinPacketSize == null) {
+ dataStreamMinPacketSize = chunkSize / 4;
+ }
+ if (dataStreamWindowSize == null) {
+ dataStreamWindowSize = 8L * chunkSize;
+ }
+ if (blockSize == null) {
+ blockSize = 2 * streamBufferMaxSize;
+ }
+
+ OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
+ clientConfig.setStreamBufferSize(streamBufferSize);
+ clientConfig.setStreamBufferMaxSize(streamBufferMaxSize);
+ clientConfig.setStreamBufferFlushSize(streamBufferFlushSize);
+ clientConfig.setDataStreamBufferFlushSize(dataStreamBufferFlushSize);
+ clientConfig.setDataStreamMinPacketSize(dataStreamMinPacketSize);
+ clientConfig.setStreamWindowSize(dataStreamWindowSize);
+
+ conf.setFromObject(clientConfig);
+ conf.setStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, chunkSize, StorageUnit.BYTES);
+ conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, blockSize, StorageUnit.BYTES);
+ }
+
+ private long toBytes(long value) {
+ return Math.round(unit.toBytes(value));
+ }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 9afc8ce63497..e864cae00b37 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -20,11 +20,9 @@
import java.io.IOException;
import java.util.List;
import java.util.Optional;
-import java.util.OptionalInt;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -325,15 +323,6 @@ abstract class Builder {
protected String omId = UUID.randomUUID().toString();
 protected Optional<String> datanodeReservedSpace = Optional.empty();
- protected Optional<Integer> chunkSize = Optional.empty();
- protected OptionalInt streamBufferSize = OptionalInt.empty();
- protected Optional<Long> streamBufferFlushSize = Optional.empty();
- protected Optional<Long> dataStreamBufferFlushSize = Optional.empty();
- protected Optional<Long> datastreamWindowSize = Optional.empty();
- protected Optional<Long> streamBufferMaxSize = Optional.empty();
- protected OptionalInt dataStreamMinPacketSize = OptionalInt.empty();
- protected Optional<Long> blockSize = Optional.empty();
- protected Optional<StorageUnit> streamBufferSizeUnit = Optional.empty();
protected boolean includeRecon = false;
@@ -489,61 +478,6 @@ public Builder setDatanodeReservedSpace(String reservedSpace) {
return this;
}
- /**
- * Sets the chunk size.
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setChunkSize(int size) {
- chunkSize = Optional.of(size);
- return this;
- }
-
- /**
- * Sets the flush size for stream buffer.
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setStreamBufferFlushSize(long size) {
- streamBufferFlushSize = Optional.of(size);
- return this;
- }
-
- /**
- * Sets the max size for stream buffer.
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setStreamBufferMaxSize(long size) {
- streamBufferMaxSize = Optional.of(size);
- return this;
- }
-
- public Builder setDataStreamBufferFlushize(long size) {
- dataStreamBufferFlushSize = Optional.of(size);
- return this;
- }
-
- public Builder setDataStreamMinPacketSize(int size) {
- dataStreamMinPacketSize = OptionalInt.of(size);
- return this;
- }
-
- public Builder setDataStreamStreamWindowSize(long size) {
- datastreamWindowSize = Optional.of(size);
- return this;
- }
-
- /**
- * Sets the block size for stream buffer.
- *
- * @return MiniOzoneCluster.Builder
- */
- public Builder setBlockSize(long size) {
- blockSize = Optional.of(size);
- return this;
- }
-
public Builder setNumOfOzoneManagers(int numOMs) {
this.numOfOMs = numOMs;
return this;
@@ -554,11 +488,6 @@ public Builder setNumOfActiveOMs(int numActiveOMs) {
return this;
}
- public Builder setStreamBufferSizeUnit(StorageUnit unit) {
- this.streamBufferSizeUnit = Optional.of(unit);
- return this;
- }
-
public Builder setOMServiceId(String serviceId) {
this.omServiceId = serviceId;
return this;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index ec1b118330a9..400ae3ee2cc8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -26,8 +26,6 @@
import java.util.Collection;
import java.util.Collections;
import java.util.List;
-import java.util.Optional;
-import java.util.OptionalInt;
import java.util.Set;
import java.util.UUID;
import java.util.Iterator;
@@ -36,7 +34,6 @@
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
@@ -45,7 +42,6 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails;
@@ -655,58 +651,7 @@ protected void initializeConfiguration() throws IOException {
Files.createDirectories(metaDir);
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString());
// conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
- if (!chunkSize.isPresent()) {
- //set it to 1MB by default in tests
- chunkSize = Optional.of(1);
- }
- if (!streamBufferSize.isPresent()) {
- streamBufferSize = OptionalInt.of(chunkSize.get());
- }
- if (!streamBufferFlushSize.isPresent()) {
- streamBufferFlushSize = Optional.of((long) chunkSize.get());
- }
- if (!streamBufferMaxSize.isPresent()) {
- streamBufferMaxSize = Optional.of(2 * streamBufferFlushSize.get());
- }
- if (!dataStreamBufferFlushSize.isPresent()) {
- dataStreamBufferFlushSize = Optional.of((long) 4 * chunkSize.get());
- }
- if (!dataStreamMinPacketSize.isPresent()) {
- dataStreamMinPacketSize = OptionalInt.of(chunkSize.get() / 4);
- }
- if (!datastreamWindowSize.isPresent()) {
- datastreamWindowSize = Optional.of((long) 8 * chunkSize.get());
- }
- if (!blockSize.isPresent()) {
- blockSize = Optional.of(2 * streamBufferMaxSize.get());
- }
-
- if (!streamBufferSizeUnit.isPresent()) {
- streamBufferSizeUnit = Optional.of(StorageUnit.MB);
- }
- OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
- clientConfig.setStreamBufferSize(
- (int) Math.round(
- streamBufferSizeUnit.get().toBytes(streamBufferSize.getAsInt())));
- clientConfig.setStreamBufferMaxSize(Math.round(
- streamBufferSizeUnit.get().toBytes(streamBufferMaxSize.get())));
- clientConfig.setStreamBufferFlushSize(Math.round(
- streamBufferSizeUnit.get().toBytes(streamBufferFlushSize.get())));
- clientConfig.setDataStreamBufferFlushSize(Math.round(
- streamBufferSizeUnit.get().toBytes(dataStreamBufferFlushSize.get())));
- clientConfig.setDataStreamMinPacketSize((int) Math.round(
- streamBufferSizeUnit.get()
- .toBytes(dataStreamMinPacketSize.getAsInt())));
- clientConfig.setStreamWindowSize(Math.round(
- streamBufferSizeUnit.get().toBytes(datastreamWindowSize.get())));
- conf.setFromObject(clientConfig);
-
- conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
- chunkSize.get(), streamBufferSizeUnit.get());
-
- conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, blockSize.get(),
- streamBufferSizeUnit.get());
// MiniOzoneCluster should have global pipeline upper limit.
conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT,
pipelineNumLimit >= DEFAULT_PIPELINE_LIMIT ?
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
index 3478489edd66..0b0b2586c9e2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
@@ -16,13 +16,13 @@
*/
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -31,6 +31,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.BucketArgs;
@@ -120,11 +121,16 @@ protected static void init(boolean zeroCopyEnabled) throws Exception {
TimeUnit.SECONDS);
conf.setBoolean(OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED,
zeroCopyEnabled);
- cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
- .setTotalPipelineNumLimit(10).setBlockSize(blockSize)
- .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
+ .setTotalPipelineNumLimit(10).build();
cluster.waitForClusterToBeReady();
client = OzoneClientFactory.getRpcClient(conf);
objectStore = client.getObjectStore();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
index ce89e679df47..e7c8be170ca1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
@@ -17,10 +17,10 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -32,6 +32,7 @@
import org.apache.hadoop.hdds.scm.protocolPB.
StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -100,14 +101,16 @@ private void startCluster(OzoneConfiguration conf) throws Exception {
raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
conf.setFromObject(raftClientConfig);
- conf.setQuietMode(false);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(3)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ conf.setQuietMode(false);
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(3)
.build();
cluster.waitForClusterToBeReady();
// the easiest way to create an open container is creating a key
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index d3caf6238737..8bb791bb103e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -17,9 +17,9 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -27,6 +27,7 @@
import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -92,17 +93,19 @@ public static void init() throws Exception {
conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
StorageUnit.MB);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(3)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setDataStreamBufferFlushize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .setDataStreamBufferFlushSize(maxFlushSize)
.setDataStreamMinPacketSize(chunkSize)
- .setDataStreamStreamWindowSize(5 * chunkSize)
+ .setDataStreamWindowSize(5 * chunkSize)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(3)
.build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
index 74686d363c82..1e9cefbaa481 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java
@@ -24,12 +24,12 @@
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
@@ -38,6 +38,7 @@
import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
import org.apache.hadoop.hdds.scm.storage.BufferPool;
import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
@@ -105,14 +106,16 @@ static MiniOzoneCluster createCluster() throws IOException,
ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30));
conf.setFromObject(ratisClientConfig);
- MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(3)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(BLOCK_SIZE)
.setChunkSize(CHUNK_SIZE)
.setStreamBufferFlushSize(FLUSH_SIZE)
.setStreamBufferMaxSize(MAX_FLUSH_SIZE)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(3)
.build();
cluster.waitForClusterToBeReady();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index f2a5748bffd1..9609dea04819 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -89,7 +89,7 @@ public static void init() throws Exception {
chunkSize = (int) OzoneConsts.MB;
blockSize = 4 * chunkSize;
- OzoneClientConfig config = new OzoneClientConfig();
+ OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
config.setChecksumType(ChecksumType.NONE);
conf.setFromObject(config);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
index 32fc9ba5c939..23ab89b80c65 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
@@ -17,16 +17,17 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -108,13 +109,16 @@ public void setup() throws Exception {
conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
+ .setStreamBufferMaxSize(maxFlushSize)
+ .applyTo(conf);
+
cluster =
MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1)
- .setBlockSize(blockSize)
- .setChunkSize(chunkSize)
- .setStreamBufferFlushSize(flushSize)
- .setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
.setHbInterval(200)
.setCertificateClient(new CertificateClientTestImpl(conf))
.setSecretKeyClient(new SecretKeyTestClient())
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
index ab2fbeadb610..97a3047bfdb0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
@@ -17,14 +17,15 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -112,18 +113,20 @@ public void setup() throws Exception {
raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
conf.setFromObject(raftClientConfig);
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setDataStreamMinPacketSize(1024)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferFlushSize(FLUSH_SIZE)
+ .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
+ .applyTo(conf);
+
conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
conf.setQuietMode(false);
cluster =
MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.setHbInterval(200)
- .setDataStreamMinPacketSize(1024)
- .setBlockSize(BLOCK_SIZE)
- .setChunkSize(CHUNK_SIZE)
- .setStreamBufferFlushSize(FLUSH_SIZE)
- .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
.build();
cluster.waitForClusterToBeReady();
cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
index c689a692ae79..478915868271 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
@@ -85,7 +85,7 @@ public static void init() throws Exception {
chunkSize = (int) OzoneConsts.MB;
blockSize = 4 * chunkSize;
- OzoneClientConfig config = new OzoneClientConfig();
+ OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
config.setChecksumType(ChecksumType.NONE);
conf.setFromObject(config);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index 41438996c279..fadc06bd57bd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -17,13 +17,13 @@
package org.apache.hadoop.ozone.client.rpc;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
@@ -35,6 +35,7 @@
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.StaticMapping;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -129,14 +130,18 @@ private void init() throws Exception {
StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
Collections.singleton(HddsUtils.getHostName(conf))).get(0),
"/rack1");
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(10)
- .setTotalPipelineNumLimit(15)
- .setChunkSize(chunkSize)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(10)
+ .setTotalPipelineNumLimit(15)
+ .build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
client = OzoneClientFactory.getRpcClient(conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index bc328d531535..919654d82a9b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -34,7 +34,6 @@
import java.util.UUID;
import com.google.common.cache.Cache;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
@@ -44,11 +43,13 @@
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.BucketArgs;
@@ -140,11 +141,14 @@ static void init() throws Exception {
conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
CertificateClientTestImpl certificateClientTest =
new CertificateClientTestImpl(conf);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(10)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(BLOCK_SIZE)
.setChunkSize(CHUNK_SIZE)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(10)
.setCertificateClient(certificateClientTest)
.setSecretKeyClient(new SecretKeyTestClient())
.build();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
index 925cfd9d9543..c3e8a8d461b8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
@@ -20,10 +20,10 @@
import java.io.OutputStream;
import java.util.UUID;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -35,6 +35,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -88,7 +89,7 @@ public void init() throws Exception {
maxFlushSize = 2 * flushSize;
blockSize = 2 * maxFlushSize;
- OzoneClientConfig config = new OzoneClientConfig();
+ OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
config.setChecksumType(ChecksumType.NONE);
config.setMaxRetryCount(3);
conf.setFromObject(config);
@@ -98,14 +99,17 @@ public void init() throws Exception {
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
conf.setQuietMode(false);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(3)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(3)
.build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
index 72b6880c1732..cd99382f300b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
@@ -22,10 +22,10 @@
import java.util.List;
import java.util.UUID;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -38,6 +38,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.client.ObjectStore;
@@ -108,14 +109,16 @@ public void init() throws Exception {
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
conf.setQuietMode(false);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(3)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(3)
.build();
cluster.waitForClusterToBeReady();
//the easiest way to create an open container is creating a key
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 2adb31055238..a87d05321e27 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -38,7 +38,6 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec;
@@ -49,6 +48,7 @@
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -62,6 +62,7 @@
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OmUtils;
@@ -202,10 +203,14 @@ static void startCluster(OzoneConfiguration conf) throws Exception {
// Reduce long wait time in MiniOzoneClusterImpl#waitForHddsDatanodesStop
// for testZReadKeyWithUnhealthyContainerReplica.
conf.set("ozone.scm.stale.node.interval", "10s");
+
+ ClientConfigForTesting.newBuilder(StorageUnit.MB)
+ .setDataStreamMinPacketSize(1)
+ .applyTo(conf);
+
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(14)
.setTotalPipelineNumLimit(10)
- .setDataStreamMinPacketSize(1) // 1MB
.build();
cluster.waitForClusterToBeReady();
ozClient = OzoneClientFactory.getRpcClient(conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index 69a103201561..f8e9b552e3ea 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -28,10 +28,10 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
@@ -48,6 +48,7 @@
import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -140,13 +141,16 @@ public void init() throws Exception {
conf.setFromObject(raftClientConfig);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(9)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(blockSize)
.setChunkSize(chunkSize)
.setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(9)
.build();
cluster.waitForClusterToBeReady();
cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, 60000);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
index 22ad4f036cfd..9f5d04c56f94 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java
@@ -20,11 +20,12 @@
import java.util.UUID;
import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
@@ -45,7 +46,7 @@ protected static MiniOzoneCluster newCluster(
ContainerLayoutVersion containerLayout) throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
- OzoneClientConfig config = new OzoneClientConfig();
+ OzoneClientConfig config = conf.getObject(OzoneClientConfig.class);
config.setBytesPerChecksum(BYTES_PER_CHECKSUM);
conf.setFromObject(config);
@@ -63,14 +64,16 @@ protected static MiniOzoneCluster newCluster(
repConf.setInterval(Duration.ofSeconds(1));
conf.setFromObject(repConf);
- return MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .setTotalPipelineNumLimit(5)
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
.setBlockSize(BLOCK_SIZE)
.setChunkSize(CHUNK_SIZE)
.setStreamBufferFlushSize(FLUSH_SIZE)
.setStreamBufferMaxSize(MAX_FLUSH_SIZE)
- .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .applyTo(conf);
+
+ return MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(5)
+ .setTotalPipelineNumLimit(5)
.build();
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
index 5a7e404f507b..e045b48bda96 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
@@ -16,11 +16,11 @@
*/
package org.apache.hadoop.ozone.container;
-import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
@@ -34,6 +34,7 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -139,11 +140,17 @@ public static void init() throws Exception {
TimeUnit.MILLISECONDS);
conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1,
TimeUnit.SECONDS);
- cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
- .setTotalPipelineNumLimit(10).setBlockSize(blockSize)
- .setChunkSize(chunkSize).setStreamBufferFlushSize(flushSize)
+
+ ClientConfigForTesting.newBuilder(StorageUnit.BYTES)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
.setStreamBufferMaxSize(maxFlushSize)
- .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
+ .applyTo(conf);
+
+ cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
+ .setTotalPipelineNumLimit(10)
+ .build();
cluster.waitForClusterToBeReady();
client = OzoneClientFactory.getRpcClient(conf);
objectStore = client.getObjectStore();
diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml
index 0d63a3405724..4e79ae97fc24 100644
--- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml
+++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml
@@ -78,4 +78,38 @@
ozone.scm.ha.ratis.log.appender.queue.byte-limit
4MB
+
+
+ ozone.scm.chunk.size
+ 1MB
+
+
+ ozone.scm.block.size
+ 4MB
+
+
+ ozone.client.stream.buffer.flush.size
+ 1MB
+
+
+ ozone.client.stream.buffer.max.size
+ 2MB
+
+
+ ozone.client.stream.buffer.size
+ 1MB
+
+
+ ozone.client.datastream.buffer.flush.size
+ 4MB
+
+
+ ozone.client.datastream.min.packet.size
+ 256KB
+
+
+ ozone.client.datastream.window.size
+ 8MB
+
+