+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.client.io;
+
+import io.netty.buffer.ByteBuf;
+import org.apache.hadoop.hdds.scm.storage.ByteBufStreamOutput;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
+
+import java.io.IOException;
+
+/**
+ * OzoneDataStreamOutput is used to write data into Ozone.
+ * It uses {@link KeyDataStreamOutput} for writing the data.
+ */
+public class OzoneDataStreamOutput implements ByteBufStreamOutput {
+
+ private final ByteBufStreamOutput byteBufStreamOutput;
+
+ /**
+ * Constructs an OzoneDataStreamOutput that delegates all writes to the
+ * given stream, typically a {@link KeyDataStreamOutput}.
+ *
+ * @param byteBufStreamOutput the underlying stream the data is written to
+ */
+ public OzoneDataStreamOutput(ByteBufStreamOutput byteBufStreamOutput) {
+ this.byteBufStreamOutput = byteBufStreamOutput;
+ }
+
+ @Override
+ public void write(ByteBuf b) throws IOException {
+ byteBufStreamOutput.write(b);
+ }
+
+ @Override
+ public synchronized void flush() throws IOException {
+ byteBufStreamOutput.flush();
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ // commitKey can be done here, if needed.
+ byteBufStreamOutput.close();
+ }
+
+ public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
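+ // Multipart-upload commit info is only tracked by KeyDataStreamOutput;
+ // other stream implementations have nothing to report.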
+ if (byteBufStreamOutput instanceof KeyDataStreamOutput) {
+ return ((KeyDataStreamOutput)
+ byteBufStreamOutput).getCommitUploadPartInfo();
+ }
+ // Otherwise return null.
+ return null;
+ }
+
+ public ByteBufStreamOutput getByteBufStreamOutput() {
+ return byteBufStreamOutput;
+ }
+}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index c4f98e8a633a..0b4301106a51 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -291,6 +292,20 @@ OzoneOutputStream createKey(String volumeName, String bucketName,
 Map<String, String> metadata)
 throws IOException;

+ /**
+ * Writes a key in an existing bucket.
+ * @param volumeName Name of the Volume
+ * @param bucketName Name of the Bucket
+ * @param keyName Name of the Key
+ * @param size Size of the data
+ * @param replicationConfig replication configuration for the key
+ * @param metadata custom key value metadata
+ * @return {@link OzoneDataStreamOutput}
+ */
+ OzoneDataStreamOutput createStreamKey(String volumeName, String bucketName,
+ String keyName, long size, ReplicationConfig replicationConfig,
+ Map<String, String> metadata)
+ throws IOException;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
new file mode 100644
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import io.netty.buffer.Unpooled;
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.TestHelper;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+
+/**
+ * Tests BlockDataStreamOutput class.
+ */
+public class TestBlockDataStreamOutput {
+
+ /**
+ * Set a timeout for each test.
+ */
+ @Rule
+ public Timeout timeout = Timeout.seconds(300);
+ private static MiniOzoneCluster cluster;
+ private static OzoneConfiguration conf = new OzoneConfiguration();
+ private static OzoneClient client;
+ private static ObjectStore objectStore;
+ private static int chunkSize;
+ private static int flushSize;
+ private static int maxFlushSize;
+ private static int blockSize;
+ private static String volumeName;
+ private static String bucketName;
+ private static String keyString;
+
+ /**
+ * Create a MiniOzoneCluster for testing.
+ *
+ * @throws IOException
+ */
+ @BeforeClass
+ public static void init() throws Exception {
+ chunkSize = 100;
+ flushSize = 2 * chunkSize;
+ maxFlushSize = 2 * flushSize;
+ blockSize = 2 * maxFlushSize;
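+ // Sizes are chained 1:2:4:8 (chunk : flush : maxFlush : block) so the
+ // tests below exercise multi-chunk and multi-block writes.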
+
+ OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
+ clientConfig.setChecksumType(ChecksumType.NONE);
+ clientConfig.setStreamBufferFlushDelay(false);
+ conf.setFromObject(clientConfig);
+
+ conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
+ conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
+ conf.setQuietMode(false);
+ conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
+ StorageUnit.MB);
+
+ cluster = MiniOzoneCluster.newBuilder(conf)
+ .setNumDatanodes(7)
+ .setTotalPipelineNumLimit(10)
+ .setBlockSize(blockSize)
+ .setChunkSize(chunkSize)
+ .setStreamBufferFlushSize(flushSize)
+ .setStreamBufferMaxSize(maxFlushSize)
+ .setStreamBufferSizeUnit(StorageUnit.BYTES)
+ .build();
+ cluster.waitForClusterToBeReady();
+ // the easiest way to create an open container is to create a key
+ client = OzoneClientFactory.getRpcClient(conf);
+ objectStore = client.getObjectStore();
+ keyString = UUID.randomUUID().toString();
+ volumeName = "testblockoutputstream";
+ bucketName = volumeName;
+ objectStore.createVolume(volumeName);
+ objectStore.getVolume(volumeName).createBucket(bucketName);
+ }
+
+ private String getKeyName() {
+ return UUID.randomUUID().toString();
+ }
+
+ /**
+ * Shutdown the MiniOzoneCluster.
+ */
+ @AfterClass
+ public static void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test
+ public void testMultiChunkWrite() throws Exception {
+ // Write less than one chunk of data through the streaming API.
+ String keyName1 = getKeyName();
+ OzoneDataStreamOutput key1 = createKey(
+ keyName1, ReplicationType.RATIS, 0);
+ int dataLength1 = chunkSize / 2;
+ byte[] data1 =
+ ContainerTestHelper.getFixedLengthString(keyString, dataLength1)
+ .getBytes(UTF_8);
+ key1.write(Unpooled.copiedBuffer(data1));
+ // Close the stream; this commits the key and updates its length.
+ key1.close();
+ validateData(keyName1, data1);
+
+ // Write more than one chunk of data through the streaming API.
+ String keyName2 = getKeyName();
+ OzoneDataStreamOutput key2 = createKey(
+ keyName2, ReplicationType.RATIS, 0);
+ int dataLength2 = chunkSize + 50;
+ byte[] data2 =
+ ContainerTestHelper.getFixedLengthString(keyString, dataLength2)
+ .getBytes(UTF_8);
+ key2.write(Unpooled.copiedBuffer(data2));
+ // Close the stream; this commits the key and updates its length.
+ key2.close();
+ validateData(keyName2, data2);
+
+ // Write more than one block of data through the streaming API.
+ String keyName3 = getKeyName();
+ OzoneDataStreamOutput key3 = createKey(
+ keyName3, ReplicationType.RATIS, 0);
+ int dataLength3 = blockSize + 50;
+ byte[] data3 =
+ ContainerTestHelper.getFixedLengthString(keyString, dataLength3)
+ .getBytes(UTF_8);
+ key3.write(Unpooled.copiedBuffer(data3));
+ // Close the stream; this commits the key and updates its length.
+ key3.close();
+ validateData(keyName3, data3);
+ }
+
+ private OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
+ long size) throws Exception {
+ return TestHelper.createStreamKey(
+ keyName, type, size, objectStore, volumeName, bucketName);
+ }
+
+ private void validateData(String keyName, byte[] data) throws Exception {
+ TestHelper
+ .validateData(keyName, data, objectStore, volumeName, bucketName);
+ }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index aa502346b78f..a269a3cbcf9e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -22,6 +22,8 @@
import java.security.MessageDigest;
import java.util.*;
import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -40,6 +42,7 @@
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry;
import org.apache.hadoop.ozone.client.io.KeyOutputStream;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -123,8 +126,23 @@ public static OzoneOutputStream createKey(String keyName,
type == ReplicationType.STAND_ALONE ?
org.apache.hadoop.hdds.client.ReplicationFactor.ONE :
org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+ ReplicationConfig config =
+ ReplicationConfig.fromTypeAndFactor(type, factor);
+ return objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey(keyName, size, config, new HashMap<>());
+ }
+
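+ /**
+ * Variant of createKey that opens the key through the streaming API and
+ * returns an OzoneDataStreamOutput.
+ */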
+ public static OzoneDataStreamOutput createStreamKey(String keyName,
+ ReplicationType type, long size, ObjectStore objectStore,
+ String volumeName, String bucketName) throws Exception {
+ org.apache.hadoop.hdds.client.ReplicationFactor factor =
+ type == ReplicationType.STAND_ALONE ?
+ org.apache.hadoop.hdds.client.ReplicationFactor.ONE :
+ org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+ ReplicationConfig config =
+ ReplicationConfig.fromTypeAndFactor(type, factor);
return objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey(keyName, size, type, factor, new HashMap<>());
+ .createStreamKey(keyName, size, config, new HashMap<>());
}
public static OzoneOutputStream createKey(String keyName,
@@ -132,8 +150,10 @@ public static OzoneOutputStream createKey(String keyName,
org.apache.hadoop.hdds.client.ReplicationFactor factor, long size,
ObjectStore objectStore, String volumeName, String bucketName)
throws Exception {
+ ReplicationConfig config =
+ ReplicationConfig.fromTypeAndFactor(type, factor);
return objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey(keyName, size, type, factor, new HashMap<>());
+ .createKey(keyName, size, config, new HashMap<>());
}
public static void validateData(String keyName, byte[] data,
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
index c575b6e8e507..56bc834511a7 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
@@ -23,9 +23,14 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
import java.util.HashMap;
import java.util.Map;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfigValidator;
@@ -36,6 +41,7 @@
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientException;
import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.shell.OzoneAddress;
import org.apache.commons.codec.digest.DigestUtils;
@@ -113,10 +119,36 @@ protected void execute(OzoneClient client, OzoneAddress address)
int chunkSize = (int) getConf().getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
- try (InputStream input = new FileInputStream(dataFile);
- OutputStream output = bucket.createKey(keyName, dataFile.length(),
- replicationConfig, keyMetadata)) {
- IOUtils.copyBytes(input, output, chunkSize);
+
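+ // Keys no larger than one chunk use the existing async write path;
+ // larger keys go through the new streaming path below.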
+ if (dataFile.length() <= chunkSize) {
+ if (isVerbose()) {
+ out().println("API: async");
+ }
+ try (InputStream input = new FileInputStream(dataFile);
+ OutputStream output = bucket.createKey(keyName, dataFile.length(),
+ replicationConfig, keyMetadata)) {
+ IOUtils.copyBytes(input, output, chunkSize);
+ }
+ } else {
+ if (isVerbose()) {
+ out().println("API: streaming");
+ }
+ try (RandomAccessFile raf = new RandomAccessFile(dataFile, "r");
+ OzoneDataStreamOutput out = bucket.createStreamKey(keyName,
+ dataFile.length(), replicationConfig, keyMetadata)) {
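+ // Map the file read-only in chunk-sized segments and hand each mapped
+ // segment to the stream, avoiding a copy onto the Java heap.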
+ FileChannel ch = raf.getChannel();
+ long len = raf.length();
+ long off = 0;
+ while (len > 0) {
+ long writeLen = Math.min(len, chunkSize);
+ ByteBuffer segment =
+ ch.map(FileChannel.MapMode.READ_ONLY, off, writeLen);
+ ByteBuf buf = Unpooled.wrappedBuffer(segment);
+ out.write(buf);
+ off += writeLen;
+ len -= writeLen;
+ }
+ }
}
}