diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 8dd9e6b50e57..8b3e32cf41a4 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -283,7 +283,7 @@ public void write(ByteBuffer b, int off, int len) throws IOException {
   }
 
   private void writeChunkIfNeeded() throws IOException {
-    if (currentBuffer.length()==0) {
+    if (currentBuffer.length() == 0) {
       writeChunk(currentBuffer);
       currentBuffer = null;
     }
@@ -302,7 +302,7 @@ private void writeChunk(StreamBuffer sb) throws IOException {
   }
 
   private void allocateNewBufferIfNeeded() {
-    if (currentBuffer==null) {
+    if (currentBuffer == null) {
       currentBuffer =
           StreamBuffer.allocate(config.getDataStreamMinPacketSize());
     }
@@ -323,7 +323,7 @@ private void doFlushIfNeeded() throws IOException {
       updateFlushLength();
       executePutBlock(false, false);
     }
-    if (bufferList.size()==streamWindow){
+    if (bufferList.size() == streamWindow) {
       try {
         checkOpen();
         if (!putBlockFutures.isEmpty()) {
@@ -514,7 +514,7 @@ private void handleFlush(boolean close)
 
     // here, we just limit this buffer to the current position. So that next
     // write will happen in new buffer
-    if (currentBuffer!=null) {
+    if (currentBuffer != null) {
       writeChunk(currentBuffer);
       currentBuffer = null;
     }
@@ -693,7 +693,7 @@ private void handleInterruptedException(Exception ex,
       boolean processExecutionException)
       throws IOException {
     LOG.error("Command execution was interrupted.");
-    if(processExecutionException) {
+    if (processExecutionException) {
       handleExecutionException(ex);
     } else {
       throw new IOException(EXCEPTION_MSG + ex.toString(), ex);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
index 5118ea5ead3a..d34e4dca9483 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
@@ -48,11 +48,11 @@ public int position() {
   }
 
 
-  public void put(StreamBuffer sb){
+  public void put(StreamBuffer sb) {
     buffer.put(sb.buffer);
   }
 
-  public static StreamBuffer allocate(int size){
+  public static StreamBuffer allocate(int size) {
     return new StreamBuffer(ByteBuffer.allocate(size));
   }
 
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index 24a046f62395..00cda7844ae9 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -307,8 +307,8 @@ boolean isEmpty() {
   }
 
   long computeBufferData() {
-    long totalDataLen =0;
-    for (StreamBuffer b : bufferList){
+    long totalDataLen = 0;
+    for (StreamBuffer b : bufferList) {
       totalDataLen += b.position();
     }
     return totalDataLen;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 492b37850094..b5b83d0cd658 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -900,7 +900,7 @@ public OzoneDataStreamOutput createStreamKey(
         .setAcls(getAclList());
 
     if (Boolean.parseBoolean(metadata.get(OzoneConsts.GDPR_FLAG))) {
-      try{
+      try {
         GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom());
         builder.addAllMetadata(gKey.getKeyDetails());
       } catch (Exception e) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 058ba6af2c24..8ad37f1ffdda 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -321,8 +321,8 @@ abstract class Builder {
     protected Optional<Integer> chunkSize = Optional.empty();
     protected OptionalInt streamBufferSize = OptionalInt.empty();
     protected Optional<Long> streamBufferFlushSize = Optional.empty();
-    protected Optional<Long> dataStreamBufferFlushSize= Optional.empty();
-    protected Optional<Long> datastreamWindowSize= Optional.empty();
+    protected Optional<Long> dataStreamBufferFlushSize = Optional.empty();
+    protected Optional<Long> datastreamWindowSize = Optional.empty();
     protected Optional<Long> streamBufferMaxSize = Optional.empty();
     protected OptionalInt dataStreamMinPacketSize = OptionalInt.empty();
     protected Optional<Long> blockSize = Optional.empty();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 5c5a9c8a1b64..40de6418d0b3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -646,7 +646,7 @@ protected void initializeConfiguration() throws IOException {
         dataStreamBufferFlushSize = Optional.of((long) 4 * chunkSize.get());
       }
       if (!dataStreamMinPacketSize.isPresent()) {
-        dataStreamMinPacketSize = OptionalInt.of(chunkSize.get()/4);
+        dataStreamMinPacketSize = OptionalInt.of(chunkSize.get() / 4);
       }
       if (!datastreamWindowSize.isPresent()) {
         datastreamWindowSize = Optional.of((long) 8 * chunkSize.get());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 21003374d7a7..6225e2526845 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -107,7 +107,7 @@ public static void init() throws Exception {
         .setDataStreamBufferFlushize(maxFlushSize)
         .setStreamBufferSizeUnit(StorageUnit.BYTES)
         .setDataStreamMinPacketSize(chunkSize)
-        .setDataStreamStreamWindowSize(5*chunkSize)
+        .setDataStreamStreamWindowSize(5 * chunkSize)
         .build();
     cluster.waitForClusterToBeReady();
     //the easiest way to create an open container is creating a key
@@ -137,7 +137,7 @@ public static void shutdown() {
   @Test
   public void testHalfChunkWrite() throws Exception {
     testWrite(chunkSize / 2);
-    testWriteWithFailure(chunkSize/2);
+    testWriteWithFailure(chunkSize / 2);
   }
 
   @Test
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index e669f1c5cced..3ac013a837e3 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -263,7 +263,7 @@ private FSDataOutputStream createOutputStream(String key, short replication,
     boolean isRatisStreamingEnabled = getConf().getBoolean(
         OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE,
         OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE_DEFAULT);
-    if (isRatisStreamingEnabled){
+    if (isRatisStreamingEnabled) {
       return new FSDataOutputStream(adapter.createStreamFile(key,
           replication, overwrite, recursive), statistics);
     }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index 26561edfdb2d..a55aee4a0d1f 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -239,7 +239,7 @@ private FSDataOutputStream createOutputStream(String key, short replication,
     boolean isRatisStreamingEnabled = getConf().getBoolean(
         OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE,
         OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE_DEFAULT);
-    if (isRatisStreamingEnabled){
+    if (isRatisStreamingEnabled) {
       return new FSDataOutputStream(adapter.createStreamFile(key,
           replication, overwrite, recursive), statistics);
     }