diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index f19853c1aa49..a2684955460b 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -159,7 +159,7 @@ public void connect(String encodedToken) throws Exception {
 
   private synchronized void connectToDatanode(DatanodeDetails dn)
       throws IOException {
-    if (isConnected(dn)){
+    if (isConnected(dn)) {
      return;
    }
    // read port from the data node, on failure use default configured
@@ -269,10 +269,10 @@ public ContainerCommandResponseProto sendCommand(
        Thread.currentThread().interrupt();
      }
    }
-    try{
+    try {
      for (Map.Entry >
-          entry : futureHashMap.entrySet()){
+          entry : futureHashMap.entrySet()) {
        responseProtoHashMap.put(entry.getKey(), entry.getValue().get());
      }
    } catch (InterruptedException e) {
@@ -538,7 +538,7 @@ public void onCompleted() {
  }
 
   private synchronized void checkOpen(DatanodeDetails dn)
-      throws IOException{
+      throws IOException {
    if (closed) {
      throw new IOException("This channel is not connected.");
    }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 6b74adb07f80..07fd0a8c2d48 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -206,8 +206,8 @@ public static void verifyKeyName(String keyName) {
    if (keyName == null) {
      throw new IllegalArgumentException("Key name is null");
    }
-    if(!OzoneConsts.KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX
-        .matcher(keyName).matches()){
+    if (!OzoneConsts.KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX
+        .matcher(keyName).matches()) {
      throw new IllegalArgumentException("Invalid key name: " + keyName);
    }
  }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
index e4500bce10d0..bd97cf248dad 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
@@ -152,7 +152,7 @@ public synchronized void initialize() throws IOException {
      // retry according to retry policy.
      chunks = getChunkInfos();
      break;
-    } catch(SCMSecurityException ex) {
+    } catch (SCMSecurityException ex) {
      throw ex;
    } catch (StorageContainerException ex) {
      refreshPipeline(ex);
@@ -340,9 +340,9 @@ synchronized int readWithStrategy(ByteReaderStrategy strategy) throws
      } else {
        throw e;
      }
-    } catch(SCMSecurityException ex) {
+    } catch (SCMSecurityException ex) {
      throw ex;
-    } catch(IOException ex) {
+    } catch (IOException ex) {
      // We got a IOException which might be due
      // to DN down or connectivity issue.
      if (shouldRetryRead(ex)) {
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 150c418a85c3..8b3f817a2e46 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -708,7 +708,7 @@ private void handleInterruptedException(Exception ex,
      boolean processExecutionException)
      throws IOException {
    LOG.error("Command execution was interrupted.");
-    if(processExecutionException) {
+    if (processExecutionException) {
      handleExecutionException(ex);
    } else {
      throw new IOException(EXCEPTION_MSG + ex.toString(), ex);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
index 94fa87a71e2d..a520f8a6a5a7 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java
@@ -46,7 +46,7 @@ public BufferPool(int bufferSize, int capacity) {
  }
 
  public BufferPool(int bufferSize, int capacity,
-      Function byteStringConversion){
+      Function byteStringConversion) {
    this.capacity = capacity;
    this.bufferSize = bufferSize;
    bufferList = new ArrayList<>(capacity);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java
index 7238f2a2a06b..802adc11f5ea 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java
@@ -97,7 +97,7 @@ void releaseBuffersOnException() {
  @Override
  XceiverClientReply sendWatchForCommit(boolean bufferFull)
      throws IOException {
-    return bufferFull? commitWatcher.watchOnFirstIndex()
+    return bufferFull ? commitWatcher.watchOnFirstIndex()
        : commitWatcher.watchOnLastIndex();
  }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index f538595db501..d1e3c192824e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -151,7 +151,7 @@ public final class HddsConfigKeys {
   */
  public static final String HDDS_X509_MAX_DURATION =
      "hdds.x509.max.duration";
  // Limit Certificate duration to a max value of 5 years.
-  public static final String HDDS_X509_MAX_DURATION_DEFAULT= "P1865D";
+  public static final String HDDS_X509_MAX_DURATION_DEFAULT = "P1865D";
  public static final String HDDS_X509_SIGNATURE_ALGO =
      "hdds.x509.signature.algorithm";
  public static final String HDDS_X509_SIGNATURE_ALGO_DEFAULT = "SHA256withRSA";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 5abe8fbb3188..ffbb3e334016 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -632,7 +632,7 @@ public static long getShutDownTimeOut(ConfigurationSource conf) {
   * Utility method to round up bytes into the nearest MB.
   */
  public static int roundupMb(long bytes) {
-    return (int)Math.ceil((double) bytes/(double) ONE_MB);
+    return (int)Math.ceil((double) bytes / (double) ONE_MB);
  }
 
  /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
index 77d193035f5b..792a9d0d8407 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
@@ -151,6 +151,6 @@ public static String createStartupShutdownMessage(VersionInfo versionInfo,
  public static String appendIfNotPresent(String str, char c) {
    Preconditions.checkNotNull(str, "Input string is null");
-    return str.isEmpty() || str.charAt(str.length() - 1) != c ? str + c: str;
+    return str.isEmpty() || str.charAt(str.length() - 1) != c ? str + c : str;
  }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java
index 37da0a3b2706..03dc00518d26 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java
@@ -50,7 +50,7 @@ public final class InterfaceAudience {
   */
  @Documented
  @Retention(RetentionPolicy.RUNTIME)
-  public @interface Public {};
+  public @interface Public { };
 
  /**
   * Intended only for the project(s) specified in the annotation.
@@ -67,7 +67,7 @@ public final class InterfaceAudience {
   */
  @Documented
  @Retention(RetentionPolicy.RUNTIME)
-  public @interface Private {};
+  public @interface Private { };
 
-  private InterfaceAudience() {} // Audience can't exist on its own
+  private InterfaceAudience() { } // Audience can't exist on its own
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
index 9945690a9b0a..794ebd2d7409 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java
@@ -43,7 +43,7 @@ public final class OzoneQuota {
  public static final String OZONE_QUOTA_TB = "TB";
 
  /** Quota Units.*/
-  public enum Units {B, KB, MB, GB, TB}
+  public enum Units { B, KB, MB, GB, TB }
 
  // Quota to decide how many buckets can be created.
  private long quotaInNamespace;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java
index 205cca1100c6..5403469fa768 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java
@@ -28,13 +28,14 @@ public class QuotaList {
  private ArrayList unitQuota;
  private ArrayList sizeQuota;
 
-  public QuotaList(){
+  public QuotaList() {
    ozoneQuota = new ArrayList();
    unitQuota = new ArrayList();
    sizeQuota = new ArrayList();
  }
 
-  public void addQuotaList(String oQuota, OzoneQuota.Units uQuota, Long sQuota){
+  public void addQuotaList(
+      String oQuota, OzoneQuota.Units uQuota, Long sQuota) {
    ozoneQuota.add(oQuota);
    unitQuota.add(uQuota);
    sizeQuota.add(sQuota);
@@ -52,15 +53,15 @@ public ArrayList getUnitQuotaArray() {
    return this.unitQuota;
  }
 
-  public OzoneQuota.Units getUnits(String oQuota){
+  public OzoneQuota.Units getUnits(String oQuota) {
    return unitQuota.get(ozoneQuota.indexOf(oQuota));
  }
 
-  public Long getQuotaSize(OzoneQuota.Units uQuota){
+  public Long getQuotaSize(OzoneQuota.Units uQuota) {
    return sizeQuota.get(unitQuota.indexOf(uQuota));
  }
 
-  public OzoneQuota.Units getQuotaUnit(Long sQuota){
+  public OzoneQuota.Units getQuotaUnit(Long sQuota) {
    return unitQuota.get(sizeQuota.indexOf(sQuota));
  }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
index 044bd6f8334c..8623a0e7f34c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java
@@ -46,7 +46,7 @@ public enum ReplicationFactor {
   * @return ReplicationFactor
   */
  public static ReplicationFactor valueOf(int value) {
-    if(value == 1) {
+    if (value == 1) {
      return ONE;
    }
    if (value == 3) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index c259464bb3ac..6b7e7c63848c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -296,7 +296,7 @@ public Map getPropsWithPrefix(String confPrefix) {
    return configMap;
  }
 
-  private static void addDeprecatedKeys(){
+  private static void addDeprecatedKeys() {
    Configuration.addDeprecations(new DeprecationDelta[]{
        new DeprecationDelta("ozone.datanode.pipeline.limit",
            ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT),
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
index 782a3e18a43a..319fefdf4e72 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
@@ -127,7 +127,7 @@ private void loadInitialValue() {
 
  private void refresh() {
    //only one `refresh` can be running at a certain moment
-    if(isRefreshRunning.compareAndSet(false, true)) {
+    if (isRefreshRunning.compareAndSet(false, true)) {
      try {
        cachedValue.set(source.getUsedSpace());
      } catch (RuntimeException e) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index aef3c298afcc..01bd0f482afe 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -713,7 +713,7 @@ public Builder setSetupTime(long time) {
     *
     * @return DatanodeDetails.Builder
     */
-    public Builder setPersistedOpState(HddsProtos.NodeOperationalState state){
+    public Builder setPersistedOpState(HddsProtos.NodeOperationalState state) {
      this.persistedOpState = state;
      return this;
    }
@@ -726,7 +726,7 @@ public Builder setPersistedOpState(HddsProtos.NodeOperationalState state){
     *
     * @return DatanodeDetails.Builder
     */
-    public Builder setPersistedOpStateExpiry(long expiry){
+    public Builder setPersistedOpStateExpiry(long expiry) {
      this.persistedOpStateExpiryEpochSec = expiry;
      return this;
    }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index 50480c1dcaf8..c1cd865036fb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -136,7 +136,7 @@ private static RaftGroup emptyRaftGroup() {
  }
 
  private static RaftGroup newRaftGroup(Collection peers) {
-    return peers.isEmpty()? emptyRaftGroup()
+    return peers.isEmpty() ? emptyRaftGroup()
        : RaftGroup.valueOf(DUMMY_GROUP_ID, peers);
  }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
index c91a186b35fe..d72e27a18ae7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java
@@ -40,7 +40,7 @@ public class ReconConfig {
      type = ConfigType.STRING,
      defaultValue = "",
      tags = { ConfigTag.SECURITY, ConfigTag.RECON, ConfigTag.OZONE },
-      description = "The keytab file used by Recon daemon to login as "+
+      description = "The keytab file used by Recon daemon to login as " +
          "its service principal."
  )
  private String keytab;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
index b5f6e4812110..14a229b5e7a5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
@@ -32,7 +32,7 @@
 * Ozone configuration.
 */
 public final class ByteStringConversion {
-  private ByteStringConversion(){} // no instantiation.
+  private ByteStringConversion() { } // no instantiation.
 
  /**
   * Creates the conversion function to be used to convert ByteBuffers to
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
index baee0384fb43..ce79ec2abbf5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
@@ -43,7 +43,7 @@ public class ScmConfig {
      type = ConfigType.STRING,
      defaultValue = "",
      tags = { ConfigTag.SECURITY, ConfigTag.OZONE },
-      description = "The keytab file used by SCM daemon to login as "+
+      description = "The keytab file used by SCM daemon to login as " +
          "its service principal."
  )
  private String keytab;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index d4e56343bd62..c1f43c6eb534 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -525,7 +525,7 @@ public final class ScmConfigKeys {
 
  public static final String OZONE_SCM_HA_RAFT_LOG_PURGE_GAP =
      "ozone.scm.ha.ratis.log.purge.gap";
-  public static final int OZONE_SCM_HA_RAFT_LOG_PURGE_GAP_DEFAULT =1000000;
+  public static final int OZONE_SCM_HA_RAFT_LOG_PURGE_GAP_DEFAULT = 1000000;
 
  public static final String OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD =
      "ozone.scm.ha.ratis.snapshot.threshold";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
index 6f6caf3b3f44..2f2a7bf3e5e2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java
@@ -238,7 +238,7 @@ private void incrementAndSample(String stat, ContainerID container) {
    increment(stat);
    List list = containerSample
        .computeIfAbsent(stat, k -> new ArrayList<>());
-    synchronized(list) {
+    synchronized (list) {
      if (list.size() < SAMPLE_LIMIT) {
        list.add(container);
      }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
index 644659557af5..b2b566a7a40d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
@@ -38,7 +38,7 @@
 */
 public class InnerNodeImpl extends NodeImpl implements InnerNode {
  protected static class Factory implements InnerNode.Factory {
-    protected Factory() {}
+    protected Factory() { }
 
    @Override
    public InnerNodeImpl newInnerNode(String name, String location,
@@ -93,7 +93,7 @@ public int getNumOfNodes(int level) {
    } else {
      for (Node node: childrenMap.values()) {
        if (node instanceof InnerNode) {
-          count += ((InnerNode)node).getNumOfNodes(level -1);
+          count += ((InnerNode)node).getNumOfNodes(level - 1);
        } else {
          throw new RuntimeException("Cannot support Level:" + level +
              " on this node " + this.toString());
@@ -119,7 +119,7 @@ public List getNodes(int level) {
    } else {
      for (Node node: childrenMap.values()) {
        if (node instanceof InnerNode) {
-          result.addAll(((InnerNode)node).getNodes(level -1));
+          result.addAll(((InnerNode)node).getNodes(level - 1));
        } else {
          throw new RuntimeException("Cannot support Level:" + level +
              " on this node " + this.toString());
@@ -265,7 +265,7 @@ public Node getNode(String loc) {
      if (child == null) {
        return null;
      }
-      if (path.length == 1){
+      if (path.length == 1) {
        return child;
      }
      if (child instanceof InnerNode) {
@@ -292,7 +292,7 @@ public Node getLeaf(int leafIndex) {
      }
      return getChildNode(leafIndex);
    } else {
-      for(Node node : childrenMap.values()) {
+      for (Node node : childrenMap.values()) {
        InnerNodeImpl child = (InnerNodeImpl)node;
        int leafCount = child.getNumOfLeaves();
        if (leafIndex < leafCount) {
@@ -468,7 +468,7 @@ private Node getLeafOnLeafParent(int leafIndex, List excludedScopes,
    if (leafIndex >= getNumOfChildren()) {
      return null;
    }
-    for(Node node : childrenMap.values()) {
+    for (Node node : childrenMap.values()) {
      if (excludedNodes != null && excludedNodes.contains(node)) {
        continue;
      }
@@ -519,7 +519,7 @@ private InnerNodeImpl createChildNode(String name) {
  private Node getChildNode(int index) {
    Iterator iterator = childrenMap.values().iterator();
    Node node = null;
-    while(index >= 0 && iterator.hasNext()) {
+    while (index >= 0 && iterator.hasNext()) {
      node = (Node)iterator.next();
      index--;
    }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
index 43765a6e5a8c..206a0fd73b23 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
@@ -45,7 +45,7 @@
 * (computers) and inner nodes represent datacenter/core-switches/routers that
 * manages traffic in/out of data centers or racks.
 */
-public class NetworkTopologyImpl implements NetworkTopology{
+public class NetworkTopologyImpl implements NetworkTopology {
  public static final Logger LOG =
      LoggerFactory.getLogger(NetworkTopologyImpl.class);
 
@@ -91,7 +91,7 @@ public void add(Node node) {
    Preconditions.checkArgument(node != null, "node cannot be null");
    if (node instanceof InnerNode) {
      throw new IllegalArgumentException(
-          "Not allowed to add an inner node: "+ node.getNetworkFullPath());
+          "Not allowed to add an inner node: " + node.getNetworkFullPath());
    }
    int newDepth = NetUtils.locationToDepth(node.getNetworkLocation()) + 1;
 
@@ -104,7 +104,7 @@ public void add(Node node) {
    boolean add;
    try {
      add = clusterTree.add(node);
-    }finally {
+    } finally {
      netlock.writeLock().unlock();
    }
 
@@ -126,12 +126,12 @@ public void remove(Node node) {
    Preconditions.checkArgument(node != null, "node cannot be null");
    if (node instanceof InnerNode) {
      throw new IllegalArgumentException(
-          "Not allowed to remove an inner node: "+ node.getNetworkFullPath());
+          "Not allowed to remove an inner node: " + node.getNetworkFullPath());
    }
    netlock.writeLock().lock();
    try {
      clusterTree.remove(node);
-    }finally {
+    } finally {
      netlock.writeLock().unlock();
    }
    LOG.info("Removed a node: {}", node.getNetworkFullPath());
@@ -534,7 +534,7 @@ private Node chooseNodeInternal(String scope, int leafIndex,
            " generation " + ancestorGen);
      }
      // affinity ancestor should has overlap with scope
-      if (affinityAncestor.getNetworkFullPath().startsWith(scope)){
+      if (affinityAncestor.getNetworkFullPath().startsWith(scope)) {
        finalScope = affinityAncestor.getNetworkFullPath();
      } else if (!scope.startsWith(affinityAncestor.getNetworkFullPath())) {
        return null;
@@ -655,21 +655,21 @@ public int getDistanceCost(Node node1, Node node2) {
      if (level1 > maxLevel || level2 > maxLevel) {
        return Integer.MAX_VALUE;
      }
-      while(level1 > level2 && node1 != null) {
+      while (level1 > level2 && node1 != null) {
        node1 = node1.getParent();
        level1--;
-        cost += node1 == null? 0 : node1.getCost();
+        cost += node1 == null ? 0 : node1.getCost();
      }
-      while(level2 > level1 && node2 != null) {
+      while (level2 > level1 && node2 != null) {
        node2 = node2.getParent();
        level2--;
-        cost += node2 == null? 0 : node2.getCost();
+        cost += node2 == null ? 0 : node2.getCost();
      }
-      while(node1 != null && node2 != null && node1 != node2) {
+      while (node1 != null && node2 != null && node1 != node2) {
        node1 = node1.getParent();
        node2 = node2.getParent();
-        cost += node1 == null? 0 : node1.getCost();
-        cost += node2 == null? 0 : node2.getCost();
+        cost += node1 == null ? 0 : node1.getCost();
+        cost += node2 == null ? 0 : node2.getCost();
      }
      return cost;
    } finally {
@@ -752,7 +752,7 @@ private int getAvailableNodesCount(String scope, List excludedScopes,
      List excludedAncestorList =
          NetUtils.getAncestorList(this, mutableExcludedNodes, ancestorGen);
      for (Node ancestor : excludedAncestorList) {
-        if (scope.startsWith(ancestor.getNetworkFullPath())){
+        if (scope.startsWith(ancestor.getNetworkFullPath())) {
          return 0;
        }
      }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
index 47e5de880d6d..fc8e23ba1328 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java
@@ -28,7 +28,7 @@ public final class NodeSchema {
  /**
   * Network topology layer type enum definition.
   */
-  public enum LayerType{
+  public enum LayerType {
    ROOT("Root", NetConstants.INNER_NODE_COST_DEFAULT),
    INNER_NODE("InnerNode", NetConstants.INNER_NODE_COST_DEFAULT),
    LEAF_NODE("Leaf", NetConstants.NODE_COST_DEFAULT);
@@ -47,7 +47,7 @@ public String toString() {
      return description;
    }
 
-    public int getCost(){
+    public int getCost() {
      return cost;
    }
    public static LayerType getType(String typeStr) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
index cb9690fe37d0..289f7e6b75f9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java
@@ -68,7 +68,7 @@ public final class NodeSchemaLoader {
  private static final int LAYOUT_VERSION = 1;
  private static volatile NodeSchemaLoader instance = null;
 
-  private NodeSchemaLoader() {}
+  private NodeSchemaLoader() { }
 
  public static NodeSchemaLoader getInstance() {
    if (instance == null) {
@@ -324,7 +324,7 @@ private Map loadLayersSection(Element root) {
    // Integrity check, only one ROOT and one LEAF is allowed
    boolean foundRoot = false;
    boolean foundLeaf = false;
-    for(NodeSchema schema: schemas.values()) {
+    for (NodeSchema schema: schemas.values()) {
      if (schema.getType() == LayerType.ROOT) {
        if (foundRoot) {
          throw new IllegalArgumentException("Multiple ROOT layers are found" +
@@ -385,7 +385,7 @@ private NodeSchemaLoadResult loadTopologySection(Element root,
              + "> is null");
        }
        if (TOPOLOGY_PATH.equals(tagName)) {
-          if(value.startsWith(NetConstants.PATH_SEPARATOR_STR)) {
+          if (value.startsWith(NetConstants.PATH_SEPARATOR_STR)) {
            value = value.substring(1);
          }
          String[] layerIDs = value.split(NetConstants.PATH_SEPARATOR_STR);
@@ -403,7 +403,7 @@ private NodeSchemaLoadResult loadTopologySection(Element root,
            throw new IllegalArgumentException("Topology path doesn't start "
                + "with ROOT layer");
          }
-          if (schemas.get(layerIDs[layerIDs.length -1]).getType() !=
+          if (schemas.get(layerIDs[layerIDs.length - 1]).getType() !=
              LayerType.LEAF_NODE) {
            throw new IllegalArgumentException("Topology path doesn't end "
                + "with LEAF layer");
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 044f151868ad..f5c0b62100c9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -415,7 +415,7 @@ public static class Builder {
    private Instant creationTimestamp = null;
    private UUID suggestedLeaderId = null;
 
-    public Builder() {}
+    public Builder() { }
 
    public Builder(Pipeline pipeline) {
      this.id = pipeline.id;
@@ -486,10 +486,10 @@ public Pipeline build() {
      if (nodeOrder != null && !nodeOrder.isEmpty()) {
        // This branch is for build from ProtoBuf
        List nodesWithOrder = new ArrayList<>();
-        for(int i = 0; i < nodeOrder.size(); i++) {
+        for (int i = 0; i < nodeOrder.size(); i++) {
          int nodeIndex = nodeOrder.get(i);
          Iterator it = nodeStatus.keySet().iterator();
-          while(it.hasNext() && nodeIndex >= 0) {
+          while (it.hasNext() && nodeIndex >= 0) {
            DatanodeDetails node = it.next();
            if (nodeIndex == 0) {
              nodesWithOrder.add(node);
@@ -503,7 +503,7 @@ public Pipeline build() {
              nodesWithOrder, id);
        }
        pipeline.setNodesInOrder(nodesWithOrder);
-      } else if (nodesInOrder != null){
+      } else if (nodesInOrder != null) {
        // This branch is for pipeline clone
        pipeline.setNodesInOrder(nodesInOrder);
      }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index fcf3f130f8eb..7f2d2a8bec9e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -434,7 +434,7 @@ public static void closeContainer(XceiverClientSpi client,
    request.setContainerID(containerID);
    request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance());
    request.setDatanodeUuid(id);
-    if(encodedToken != null) {
+    if (encodedToken != null) {
      request.setEncodedToken(encodedToken);
    }
    client.sendCommand(request.build(), getValidatorList());
@@ -458,7 +458,7 @@ public static ReadContainerResponseProto readContainer(
    request.setContainerID(containerID);
    request.setReadContainer(ReadContainerRequestProto.getDefaultInstance());
    request.setDatanodeUuid(id);
-    if(encodedToken != null) {
+    if (encodedToken != null) {
      request.setEncodedToken(encodedToken);
    }
    ContainerCommandResponseProto response =
@@ -560,8 +560,8 @@ public static List getValidatorList() {
    ContainerCommandRequestProto request = builder.build();
    Map responses =
        xceiverClient.sendCommandOnAllNodes(request);
-    for(Map.Entry entry:
-        responses.entrySet()){
+    for (Map.Entry entry:
+        responses.entrySet()) {
      datanodeToResponseMap.put(entry.getKey(), entry.getValue().getGetBlock());
    }
    return datanodeToResponseMap;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
index 3195e008cc54..8cd68a012519 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java
@@ -37,7 +37,7 @@ public final class HddsVersionInfo {
  public static final VersionInfo HDDS_VERSION_INFO =
      new VersionInfo("hdds");
 
-  private HddsVersionInfo() {}
+  private HddsVersionInfo() { }
 
  public static void main(String[] args) {
    System.out.println("Using HDDS " + HDDS_VERSION_INFO.getVersion());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java
index 96d59963efc2..e1e959823e3a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java
@@ -90,7 +90,7 @@ public boolean isClosed() {
 
  @Override
  public String toString() {
-    return (isClosed()? "closed/": availablePermits() + "/") + limit;
+    return (isClosed() ? "closed/" : availablePermits() + "/") + limit;
  }
 
  /**
@@ -101,7 +101,7 @@ public static class Group {
 
    public Group(int... limits) {
      final List list = new ArrayList<>(limits.length);
-      for(int limit : limits) {
+      for (int limit : limits) {
        list.add(new ResourceSemaphore(limit));
      }
      this.resources = Collections.unmodifiableList(list);
@@ -131,7 +131,7 @@ boolean tryAcquire(int... permits) {
      }
 
      // failed at i, releasing all previous resources
-      for(i--; i >= 0; i--) {
+      for (i--; i >= 0; i--) {
        resources.get(i).release(permits[i]);
      }
      return false;
@@ -147,13 +147,13 @@ public void acquire(int... permits) throws InterruptedException {
    }
 
    protected void release(int... permits) {
-      for(int i = resources.size() - 1; i >= 0; i--) {
+      for (int i = resources.size() - 1; i >= 0; i--) {
        resources.get(i).release(permits[i]);
      }
    }
 
    public void close() {
-      for(int i = resources.size() - 1; i >= 0; i--) {
+      for (int i = resources.size() - 1; i >= 0; i--) {
        resources.get(i).close();
      }
    }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
index ba062bcae148..6fff80f6756f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java
@@ -51,7 +51,7 @@ public final class UniqueId {
  /**
   * Private constructor so that no one can instantiate this class.
   */
-  private UniqueId() {}
+  private UniqueId() { }
 
  /**
   * Calculate and returns next unique id based on System#currentTimeMillis.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index cb0362352bd0..bdc87899a454 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -128,8 +128,8 @@ public final class OzoneConsts {
  public static final String CONTAINER_DB_SUFFIX = "container.db";
  public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
  public static final String CRL_DB_SUFFIX = "crl.db";
-  public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
-  public static final String DN_CRL_DB = "dn-"+ CRL_DB_SUFFIX;
+  public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX;
+  public static final String DN_CRL_DB = "dn-" + CRL_DB_SUFFIX;
  public static final String CRL_DB_DIRECTORY_NAME = "crl";
  public static final String OM_DB_NAME = "om.db";
  public static final String SCM_DB_NAME = "scm.db";
@@ -187,7 +187,7 @@ public static Versioning getVersioning(boolean versioning) {
 
  public static final String OM_KEY_PREFIX = "/";
  public static final String OM_USER_PREFIX = "$";
-  public static final String OM_S3_PREFIX ="S3:";
+  public static final String OM_S3_PREFIX = "S3:";
  public static final String OM_S3_VOLUME_PREFIX = "s3";
  public static final String OM_S3_SECRET = "S3Secret:";
  public static final String OM_PREFIX = "Prefix:";
@@ -212,7 +212,7 @@ public static Versioning getVersioning(boolean versioning) {
  /**
   * Quota Units.
   */
-  public enum Units {TB, GB, MB, KB, B}
+  public enum Units { TB, GB, MB, KB, B }
 
  /**
   * Max number of keys returned per list buckets operation.
@@ -390,7 +390,7 @@ private OzoneConsts() {
  public static final Pattern KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX =
      Pattern.compile("^[^^{}<>^?%~#`\\[\\]\\|\\\\(\\x80-\\xff)]+$");
 
-  public static final String FS_FILE_COPYING_TEMP_SUFFIX= "._COPYING_";
+  public static final String FS_FILE_COPYING_TEMP_SUFFIX = "._COPYING_";
 
  // Transaction Info
  public static final String TRANSACTION_INFO_KEY = "#TRANSACTIONINFO";
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
index 098ab6b2f7f0..6c20968c8d56 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
@@ -26,7 +26,7 @@ public enum AuditEventStatus {
 
  private String status;
 
-  AuditEventStatus(String status){
+  AuditEventStatus(String status) {
    this.status = status;
  }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
index ee6f45dadb4c..9f1f5f0e2239 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
@@ -38,7 +38,7 @@ public class AuditLogger {
   * Parametrized Constructor to initialize logger.
   * @param type Audit Logger Type
   */
-  public AuditLogger(AuditLoggerType type){
+  public AuditLogger(AuditLoggerType type) {
    initializeLogger(type);
  }
 
@@ -46,7 +46,7 @@ public AuditLogger(AuditLoggerType type){
   * Initializes the logger with specific type.
   * @param loggerType specified one of the values from enum AuditLoggerType.
   */
-  private void initializeLogger(AuditLoggerType loggerType){
+  private void initializeLogger(AuditLoggerType loggerType) {
    this.logger = LogManager.getContext(false).getLogger(loggerType.getType());
  }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
index 18241c7712a5..dbfde9f555b7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
@@ -31,7 +31,7 @@ public String getType() {
    return type;
  }
 
-  AuditLoggerType(String type){
+  AuditLoggerType(String type) {
    this.type = type;
  }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
index 505b95807159..3414aa403bc4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
@@ -28,11 +28,11 @@ public enum AuditMarker {
 
  private Marker marker;
 
-  AuditMarker(Marker marker){
+  AuditMarker(Marker marker) {
    this.marker = marker;
  }
 
-  public Marker getMarker(){
+  public Marker getMarker() {
    return marker;
  }
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
index 6f3bbadaecbf..9d28c9f43ed2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
@@ -64,12 +64,12 @@ public static class Builder {
    private Map params;
    private String ret;
 
-    public Builder setUser(String usr){
+    public Builder setUser(String usr) {
      this.user = usr;
      return this;
    }
 
-    public Builder atIp(String ipAddr){
+    public Builder atIp(String ipAddr) {
      this.ip = ipAddr;
      return this;
    }
@@ -79,7 +79,7 @@ public Builder forOperation(AuditAction action) {
      return this;
    }
 
-    public Builder withParams(Map args){
+    public Builder withParams(Map args) {
      this.params = args;
      return this;
    }
@@ -89,12 +89,12 @@ public Builder withResult(AuditEventStatus result) {
      return this;
    }
 
-    public Builder withException(Throwable ex){
+    public Builder withException(Throwable ex) {
      this.throwable = ex;
      return this;
    }
 
-    public AuditMessage build(){
+    public AuditMessage build() {
      String message = "user=" + this.user + " | ip=" + this.ip + " | " +
          "op=" + this.op + " " + this.params + " | " + "ret=" + this.ret;
      return new AuditMessage(message, throwable);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
index 7ce643db4711..6187d6bd9cdf 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java
@@ -80,7 +80,7 @@ public final void update(ByteBuffer b) {
  }
 
  private static int update(int crc, ByteBuffer b, int[] table) {
-    for(; b.remaining() > 7;) {
+    for (; b.remaining() > 7;) {
      final int c0 = (b.get() ^ crc) & 0xff;
      final int c1 = (b.get() ^ (crc >>>= 8)) & 0xff;
      final int c2 = (b.get() ^ (crc >>>= 8)) & 0xff;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java
index 7622ffc001c3..5a63c09f1234 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java
@@ -57,8 +57,9 @@ final class IncrementalChunkBuffer implements ChunkBuffer {
    Preconditions.checkArgument(increment > 0);
    this.limit = limit;
    this.increment = increment;
-    this.limitIndex = limit/increment;
-    this.buffers = new ArrayList<>(limitIndex + (limit%increment == 0? 0: 1));
+    this.limitIndex = limit / increment;
+    this.buffers = new ArrayList<>(
+        limitIndex + (limit % increment == 0 ? 0 : 1));
    this.isDuplicated = isDuplicated;
  }
 
@@ -66,7 +67,7 @@ final class IncrementalChunkBuffer implements ChunkBuffer {
  private int getBufferCapacityAtIndex(int i) {
    Preconditions.checkArgument(i >= 0);
    Preconditions.checkArgument(i <= limitIndex);
-    return i < limitIndex? increment: limit%increment;
+    return i < limitIndex ? increment : limit % increment;
  }
 
  private void assertInt(int expected, int computed, String name, int i) {
@@ -126,7 +127,7 @@ private ByteBuffer getAndAllocateAtPosition(int position) {
    Preconditions.checkArgument(position < limit);
    final int i = position / increment;
    final ByteBuffer ith = getAndAllocateAtIndex(i);
-    assertInt(position%increment, ith.position(), "position", i);
+    assertInt(position % increment, ith.position(), "position", i);
    return ith;
  }
 
@@ -207,7 +208,7 @@ public ChunkBuffer put(ByteBuffer that) {
    }
 
    final int thatLimit = that.limit();
-    for(int p = position(); that.position() < thatLimit;) {
+    for (int p = position(); that.position() < thatLimit;) {
      final ByteBuffer b = getAndAllocateAtPosition(p);
      final int min = Math.min(b.remaining(), thatLimit - that.position());
      that.limit(that.position() + min);
@@ -229,7 +230,7 @@ public ChunkBuffer duplicate(int newPosition, int newLimit) {
    final int pr = newPosition % increment;
    final int li = newLimit / increment;
    final int lr = newLimit % increment;
-    final int newSize = lr == 0? li: li + 1;
+    final int newSize = lr == 0 ? li : li + 1;
    for (int i = 0; i < newSize; i++) {
      final int pos = i < pi ? increment : i == pi ? pr : 0;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
index e6e1df5135e1..6ba438456e16 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -109,7 +109,7 @@ public String getClusterID() {
 
  public Long getCreationTime() {
    String creationTime = properties.getProperty(CREATION_TIME);
-    if(creationTime != null) {
+    if (creationTime != null) {
      return Long.parseLong(creationTime);
    }
    return null;
@@ -117,7 +117,7 @@ public Long getCreationTime() {
 
  public int getLayoutVersion() {
    String layout = properties.getProperty(LAYOUT_VERSION);
-    if(layout != null) {
+    if (layout != null) {
      return Integer.parseInt(layout);
    }
    return 0;
@@ -166,7 +166,7 @@ private void verifyNodeType(NodeType type)
      throws InconsistentStorageStateException {
    NodeType nodeType = getNodeType();
    Preconditions.checkNotNull(nodeType);
-    if(type != nodeType) {
+    if (type != nodeType) {
      throw new InconsistentStorageStateException("Expected NodeType: " + type +
          ", but found: " + nodeType);
    }
@@ -176,7 +176,7 @@ private void verifyClusterId()
      throws InconsistentStorageStateException {
    String clusterId = getClusterID();
    Preconditions.checkNotNull(clusterId);
-    if(clusterId.isEmpty()) {
+    if (clusterId.isEmpty()) {
      throw new InconsistentStorageStateException("Cluster ID not found");
    }
  }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
index a9de8922b5b4..ebc4bba209d6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java
@@ -50,7 +50,7 @@ public void updateTermIndex(long newTerm, long newIndex) {
    this.snapshotIndex = newIndex;
  }
 
-  public RatisSnapshotInfo() {}
+  public RatisSnapshotInfo() { }
 
  public RatisSnapshotInfo(long term, long index) {
    this.term = term;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
index 8ea16897e11c..434e497e23ca 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
@@ -161,8 +161,8 @@ private List castChunkList() {
   * @return list of chunkinfo.
   */
  public List getChunks() {
-    return chunkList == null? Collections.emptyList()
-        : chunkList instanceof ContainerProtos.ChunkInfo?
+    return chunkList == null ? Collections.emptyList()
+        : chunkList instanceof ContainerProtos.ChunkInfo ?
Collections.singletonList((ContainerProtos.ChunkInfo)chunkList) : Collections.unmodifiableList(castChunkList()); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java index 7773828e2db3..a13f164eec62 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java @@ -45,7 +45,7 @@ public static Map getAuditParams( Map auditParams = new TreeMap<>(); Type cmdType = msg.getCmdType(); String containerID = String.valueOf(msg.getContainerID()); - switch(cmdType) { + switch (cmdType) { case CreateContainer: auditParams.put("containerID", containerID); auditParams.put("containerType", @@ -75,11 +75,11 @@ public static Map getAuditParams( return auditParams; case PutBlock: - try{ + try { auditParams.put("blockData", BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()) .toString()); - } catch (IOException ex){ + } catch (IOException ex) { if (LOG.isTraceEnabled()) { LOG.trace("Encountered error parsing BlockData from protobuf: " + ex.getMessage()); @@ -132,11 +132,11 @@ public static Map getAuditParams( case CompactChunk: return null; //CompactChunk operation case PutSmallFile: - try{ + try { auditParams.put("blockData", BlockData.getFromProtoBuf(msg.getPutSmallFile() .getBlock().getBlockData()).toString()); - } catch (IOException ex){ + } catch (IOException ex) { if (LOG.isTraceEnabled()) { LOG.trace("Encountered error parsing BlockData from protobuf: " + ex.getMessage()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java index e95105b0742a..2fd7a9d4940a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java @@ -90,7 +90,7 @@ public boolean hasExpired() { */ public void registerCallBack(Callable callback) throws LeaseExpiredException { - if(hasExpired()) { + if (hasExpired()) { throw new LeaseExpiredException(messageForResource(resource)); } callbacks.add(callback); @@ -104,7 +104,7 @@ public void registerCallBack(Callable callback) * If the lease has already timed out */ public long getElapsedTime() throws LeaseExpiredException { - if(hasExpired()) { + if (hasExpired()) { throw new LeaseExpiredException(messageForResource(resource)); } return Time.monotonicNow() - creationTime; @@ -129,7 +129,7 @@ public long getRemainingTime() throws LeaseExpiredException { * If the lease has already timed out */ public long getLeaseLifeTime() throws LeaseExpiredException { - if(hasExpired()) { + if (hasExpired()) { throw new LeaseExpiredException(messageForResource(resource)); } return leaseTimeout.get(); @@ -144,7 +144,7 @@ public long getLeaseLifeTime() throws LeaseExpiredException { * If the lease has already timed out */ public void renew(long timeout) throws LeaseExpiredException { - if(hasExpired()) { + if (hasExpired()) { throw new LeaseExpiredException(messageForResource(resource)); } leaseTimeout.addAndGet(timeout); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java index a79d5178e7a3..3f2d5fbe9740 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java @@ -53,7 +53,7 @@ public void run() { if (LOG.isDebugEnabled()) { LOG.debug("Executing callbacks for lease on {}", resource); } - for(Callable callback : callbacks) { + for (Callable callback : callbacks) { try { callback.call(); } catch (Exception e) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java index 45f0638b992e..b3ffe59f1d7d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java @@ -104,7 +104,7 @@ public void run() { long ended = System.currentTimeMillis(); LOG.debug(String.format( "Completed shutdown in %.3f seconds; Timeouts: %d", - (ended-started)/1000.0, timeoutCount)); + (ended - started) / 1000.0, timeoutCount)); // each of the hooks have executed; now shut down the // executor itself. shutdownExecutor(new OzoneConfiguration()); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java index ef93927ee4ce..fd8aa28e63df 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java @@ -122,7 +122,7 @@ public void testGetSCMAddresses() { assertThat(addresses.size(), is(3)); it = addresses.iterator(); HashMap expected1 = new HashMap<>(hostsAndPorts); - while(it.hasNext()) { + while (it.hasNext()) { InetSocketAddress current = it.next(); assertTrue(expected1.remove(current.getHostName(), current.getPort())); @@ -136,7 +136,7 @@ public void testGetSCMAddresses() { assertThat(addresses.size(), is(3)); it = addresses.iterator(); HashMap expected2 = new HashMap<>(hostsAndPorts); - while(it.hasNext()) { + while (it.hasNext()) { InetSocketAddress current = it.next(); assertTrue(expected2.remove(current.getHostName(), current.getPort())); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java index 9adf8f7fbf50..1315ad5ec87f 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java @@ -167,7 +167,7 @@ public void testAdjustReplication() { @Test public void testValidationBasedOnConfig() { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OZONE_REPLICATION+".allowed-configs", + conf.set(OZONE_REPLICATION + ".allowed-configs", "^STANDALONE/ONE|RATIS/THREE$"); conf.set(OZONE_REPLICATION, factor); conf.set(OZONE_REPLICATION_TYPE, type); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java index b057349ed82f..c9ed258f24f9 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java @@ -96,7 +96,7 @@ 
public void testExcludePattern() throws IOException { long usedSpace = du.getUsedSpace(); - assertFileSize(4*KB, usedSpace); + assertFileSize(4 * KB, usedSpace); } private static void assertFileSize(long expected, long actual) { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java index 469faac7444b..d3ddbe0ef32f 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java @@ -45,7 +45,7 @@ public class TestContainerCommandRequestMessage { static ByteString newData(int length) { final ByteString.Output out = ByteString.newOutput(); - for(int i = 0; i < length; i++) { + for (int i = 0; i < length; i++) { out.write(RANDOM.nextInt()); } return out.toByteString(); @@ -128,10 +128,10 @@ public void testWriteChunk() throws Exception { static void runTest( BiFunction method) throws Exception { - for(int i = 0; i < 2; i++) { + for (int i = 0; i < 2; i++) { runTest(i, method); } - for(int i = 2; i < 1 << 10;) { + for (int i = 2; i < 1 << 10;) { runTest(i + 1 + RANDOM.nextInt(i - 1), method); i <<= 1; runTest(i, method); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java index 12a024005ac7..e561bb7ccc1a 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java @@ -93,7 +93,7 @@ public void testScmHANodeInfo() { int count = 1; for (SCMNodeInfo scmNodeInfo : scmNodeInfos) { Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId()); - Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId()); + Assert.assertEquals("scm" + count++, scmNodeInfo.getNodeId()); Assert.assertEquals("localhost:" + ++port, scmNodeInfo.getBlockClientAddress()); Assert.assertEquals("localhost:" + ++port, @@ -117,7 +117,7 @@ public void testSCMHANodeInfoWithDefaultPorts() { int count = 1; for (SCMNodeInfo scmNodeInfo : scmNodeInfos) { Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId()); - Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId()); + Assert.assertEquals("scm" + count++, scmNodeInfo.getNodeId()); Assert.assertEquals("localhost:" + OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, scmNodeInfo.getBlockClientAddress()); Assert.assertEquals("localhost:" + diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java index 0008e6670a5d..e50eca2e6908 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java @@ -171,7 +171,7 @@ public static Collection setupDatanodes() { @Test public void testContains() { Node nodeNotInMap = createDatanode("8.8.8.8", "/d2/r4"); - for (int i=0; i < dataNodes.length; i++) { + for (int i = 0; i < dataNodes.length; i++) { assertTrue(cluster.contains(dataNodes[i])); } assertFalse(cluster.contains(nodeNotInMap)); @@ -238,7 +238,7 @@ public void testAncestor() { assumeTrue(cluster.getMaxLevel() > 2); int maxLevel = 
cluster.getMaxLevel(); assertTrue(cluster.isSameParent(dataNodes[0], dataNodes[1])); - while(maxLevel > 1) { + while (maxLevel > 1) { assertTrue(cluster.isSameAncestor(dataNodes[0], dataNodes[1], maxLevel - 1)); maxLevel--; @@ -262,17 +262,17 @@ public void testAncestor() { @Test public void testAddRemove() { - for(int i = 0; i < dataNodes.length; i++) { + for (int i = 0; i < dataNodes.length; i++) { cluster.remove(dataNodes[i]); } - for(int i = 0; i < dataNodes.length; i++) { + for (int i = 0; i < dataNodes.length; i++) { assertFalse(cluster.contains(dataNodes[i])); } // no leaf nodes assertEquals(0, cluster.getNumOfLeafNode(null)); // no inner nodes assertEquals(0, cluster.getNumOfNodes(2)); - for(int i = 0; i < dataNodes.length; i++) { + for (int i = 0; i < dataNodes.length; i++) { cluster.add(dataNodes[i]); } // Inner nodes are created automatically @@ -467,10 +467,10 @@ public void testChooseRandomExcludedNode() { }}; int leafNum = cluster.getNumOfLeafNode(null); Map frequency; - for(Node[] list : excludedNodeLists) { + for (Node[] list : excludedNodeLists) { List excludedList = Arrays.asList(list); int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { + while (ancestorGen < cluster.getMaxLevel()) { frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen); List ancestorList = NetUtils.getAncestorList(cluster, excludedList, ancestorGen); @@ -490,7 +490,7 @@ public void testChooseRandomExcludedNode() { // all nodes excluded, no node will be picked List excludedList = Arrays.asList(dataNodes); int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { + while (ancestorGen < cluster.getMaxLevel()) { frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen); for (Node key : dataNodes) { assertTrue(frequency.get(key) == 0); @@ -500,7 +500,7 @@ public void testChooseRandomExcludedNode() { // out scope excluded nodes, each node will be picked excludedList = Arrays.asList(createDatanode("1.1.1.1.", "/city1/rack1")); ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { + while (ancestorGen < cluster.getMaxLevel()) { frequency = pickNodes(leafNum, null, excludedList, null, ancestorGen); for (Node key : dataNodes) { assertTrue(frequency.get(key) != 0); @@ -536,7 +536,7 @@ public void testChooseRandomExcludedNodeAndScope() { while (!path.equals(ROOT)) { scope = "~" + path; int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { + while (ancestorGen < cluster.getMaxLevel()) { for (Node[] list : excludedNodeLists) { List excludedList = Arrays.asList(list); frequency = diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java index b7b3dc6340d9..00124d9cdd5d 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java @@ -51,7 +51,7 @@ public static Pipeline createSingleNodePipeline() throws IOException { public static Pipeline createPipeline(int numNodes) throws IOException { Preconditions.checkArgument(numNodes >= 1); final List ids = new ArrayList<>(numNodes); - for(int i = 0; i < numNodes; i++) { + for (int i = 0; i < numNodes; i++) { ids.add(MockDatanodeDetails.randomLocalDatanodeDetails()); } return createPipeline(ids); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java 
index fe4ccc0cb50d..ce6f58dadcb5 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java
@@ -43,7 +43,7 @@ public long write(ByteBuffer[] srcs, int offset, int length)
       throws IOException {
     checkElementIndex(offset, srcs.length, "offset");
-    checkElementIndex(offset+length-1, srcs.length, "offset+length");
+    checkElementIndex(offset + length - 1, srcs.length, "offset+length");

     long bytes = 0;
     for (ByteBuffer b : srcs) {
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java
index cbdd558cbe88..f9c194d45cf9 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java
@@ -52,18 +52,18 @@ public void testGroup() {
     try {
       g.release(1, 0);
       Assert.fail("Should have failed.");
-    } catch (IllegalStateException e){
+    } catch (IllegalStateException e) {
     }
     try {
       g.release(0, 1);
       Assert.fail("Should have failed.");
-    } catch (IllegalStateException e){
+    } catch (IllegalStateException e) {
     }
   }

   static void assertUsed(ResourceSemaphore.Group g, int... expected) {
     Assert.assertEquals(expected.length, g.resourceSize());
-    for(int i = 0; i < expected.length; i++) {
+    for (int i = 0; i < expected.length; i++) {
       Assert.assertEquals(expected[i], g.get(i).used());
     }
   }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
index 0c2d98fab295..9555225b22bb 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
@@ -27,7 +27,7 @@ public class DummyEntity implements Auditable {
   private String key1;
   private String key2;

-  public DummyEntity(){
+  public DummyEntity() {
     this.key1 = "value1";
     this.key2 = "value2";
   }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
index 41dc4f5b7e09..01fceaea88b7 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
@@ -189,7 +189,7 @@ private void verifyLog(String... expectedStrings) throws IOException {
       lines = FileUtils.readLines(file, (String)null);
       try {
         Thread.sleep(500 * (i + 1));
-      } catch(InterruptedException ie) {
+      } catch (InterruptedException ie) {
         Thread.currentThread().interrupt();
         break;
       }
@@ -212,7 +212,7 @@ private void verifyNoLog() throws IOException {
     assertEquals(0, lines.size());
   }

-  private static class TestException extends Exception{
+  private static class TestException extends Exception {
     TestException(String message) {
       super(message);
     }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
index 2e144e656991..a61ff9054bc4 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java
@@ -73,7 +73,7 @@ public void testIncorrectChecksum() throws Exception {
     // Change the data and check if new checksum matches the original checksum.
     // Modifying one byte of data should be enough for the checksum data to
     // mismatch
-    data[50] = (byte) (data[50]+1);
+    data[50] = (byte) (data[50] + 1);
     ChecksumData newChecksumData = checksum.computeChecksum(data);
     Assert.assertNotEquals("Checksums should not match for different data",
         originalChecksumData, newChecksumData);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
index 9b69fad79154..1e850991790b 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
@@ -46,7 +46,7 @@ private static int nextInt(int n) {
   public void testImplWithByteBuffer() {
     runTestImplWithByteBuffer(1);
     runTestImplWithByteBuffer(1 << 10);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       runTestImplWithByteBuffer(nextInt(100) + 1);
     }
   }
@@ -62,7 +62,7 @@ public void testIncrementalChunkBuffer() {
     runTestIncrementalChunkBuffer(1, 1);
     runTestIncrementalChunkBuffer(4, 8);
     runTestIncrementalChunkBuffer(16, 1 << 10);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       final int a = ThreadLocalRandom.current().nextInt(100) + 1;
       final int b = ThreadLocalRandom.current().nextInt(100) + 1;
       runTestIncrementalChunkBuffer(Math.min(a, b), Math.max(a, b));
@@ -80,7 +80,7 @@ private static void runTestIncrementalChunkBuffer(int increment, int n) {
   public void testImplWithList() {
     runTestImplWithList(4, 8);
     runTestImplWithList(16, 1 << 10);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       final int a = ThreadLocalRandom.current().nextInt(10) + 1;
       final int b = ThreadLocalRandom.current().nextInt(100) + 1;
       runTestImplWithList(Math.min(a, b), Math.max(a, b));
@@ -131,7 +131,7 @@ private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl) {
       assertIterate(expected, impl, bpc);
     } else if (bpc == 0) {
       for (int d = 1; d < 5; d++) {
-        final int bytesPerChecksum = n/d;
+        final int bytesPerChecksum = n / d;
         if (bytesPerChecksum > 0) {
           assertIterate(expected, impl, bytesPerChecksum);
         }
@@ -148,7 +148,7 @@ private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl) {
   private static void assertDuplicate(byte[] expected, ChunkBuffer impl) {
     final int n = expected.length;
     assertToByteString(expected, 0, n, impl);
-    for(int i = 0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       final int offset = nextInt(n);
       final int length = nextInt(n - offset + 1);
       assertToByteString(expected, offset, length, impl);
@@ -165,14 +165,14 @@ private static void assertIterate(
     final int numChecksums = (n + bpc - 1) / bpc;
     final Iterator i = duplicated.iterate(bpc).iterator();
     int count = 0;
-    for(int j = 0; j < numChecksums; j++) {
+    for (int j = 0; j < numChecksums; j++) {
       final ByteBuffer b = i.next();
-      final int expectedRemaining = j < numChecksums - 1?
-          bpc : n - bpc *(numChecksums - 1);
+      final int expectedRemaining = j < numChecksums - 1 ?
+          bpc : n - bpc * (numChecksums - 1);
       Assert.assertEquals(expectedRemaining, b.remaining());

-      final int offset = j* bpc;
-      for(int k = 0; k < expectedRemaining; k++) {
+      final int offset = j * bpc;
+      for (int k = 0; k < expectedRemaining; k++) {
         Assert.assertEquals(expected[offset + k], b.get());
         count++;
       }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
index c1470bb2efc9..be0575d9d0f9 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java
@@ -45,12 +45,12 @@ public class TestStateMachine {
   /**
    * STATES used by the test state machine.
    */
-  public enum STATES {INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL};
+  public enum STATES { INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL };

   /**
    * EVENTS used by the test state machine.
    */
-  public enum EVENTS {ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT};
+  public enum EVENTS { ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT };

   @Rule
   public ExpectedException exception = ExpectedException.none();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 95282d5f7be1..c2e4c5542a84 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -352,7 +352,7 @@ public static ContainerCommandRequestProto getCreateContainerSecureRequest(
     LOG.trace("addContainer: {}", containerID);
     Builder request = getContainerCommandRequestBuilder(containerID, pipeline);
-    if(token != null){
+    if (token != null) {
       request.setEncodedToken(token.encodeToUrlString());
     }
     return request.build();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
index a51be5ff3aa4..c9b9bf1de5f0 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
@@ -51,7 +51,7 @@ public int hashCode() {
   @Override
   public boolean equals(Object obj) {
-    if(obj instanceof DummyResource) {
+    if (obj instanceof DummyResource) {
       return name.equals(((DummyResource) obj).name);
     }
     return false;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 58c6e728460d..3b9afa9312f6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -111,12 +111,12 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin {
   private final Map ratisMetricsMap =
       new ConcurrentHashMap<>();
   private DNMXBeanImpl serviceRuntimeInfo =
-      new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) {};
+      new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) { };
   private ObjectName dnInfoBeanName;
   private DatanodeCRLStore dnCRLStore;

   //Constructor for DataNode PluginService
-  public HddsDatanodeService(){}
+  public HddsDatanodeService() { }

   public HddsDatanodeService(boolean printBanner, String[] args) {
     this.printBanner = printBanner;
@@ -376,7 +376,7 @@ private void getSCMSignedCert(OzoneConfiguration config) {
           datanodeDetails.getProtoBufMessage(), getEncodedString(csr));
       // Persist certificates.
-      if(response.hasX509CACertificate()) {
+      if (response.hasX509CACertificate()) {
         String pemEncodedCert = response.getX509Certificate();
         dnCertClient.storeCertificate(pemEncodedCert, true);
         dnCertClient.storeCertificate(response.getX509CACertificate(), true,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index 970251c67324..3d6cb3b13526 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -44,7 +44,7 @@
  *
  */
 @InterfaceAudience.Private
-@Metrics(about="Storage Container DataNode Metrics", context="dfs")
+@Metrics(about = "Storage Container DataNode Metrics", context = "dfs")
 public class ContainerMetrics {
   public static final String STORAGE_CONTAINER_METRICS =
       "StorageContainerMetrics";
@@ -106,7 +106,7 @@ public void incContainerOpsMetrics(ContainerProtos.Type type) {
     numOpsArray[type.ordinal()].incr();
   }

-  public long getContainerOpsMetrics(ContainerProtos.Type type){
+  public long getContainerOpsMetrics(ContainerProtos.Type type) {
     return numOpsArray[type.ordinal()].value();
   }

@@ -122,7 +122,7 @@ public void incContainerBytesStats(ContainerProtos.Type type, long bytes) {
     opsBytesArray[type.ordinal()].incr(bytes);
   }

-  public long getContainerBytesMetrics(ContainerProtos.Type type){
+  public long getContainerBytesMetrics(ContainerProtos.Type type) {
     return opsBytesArray[type.ordinal()].value();
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 032705d4ee77..2b6318385dca 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -189,7 +189,7 @@ public static void verifyChecksum(ContainerData containerData,
         HddsConfigKeys.HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED,
         HddsConfigKeys.
            HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED_DEFAULT);
-    if(enabled) {
+    if (enabled) {
       String storedChecksum = containerData.getChecksum();

       Yaml yaml = ContainerDataYaml.getYamlForContainerType(
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 5dbba2bc98e3..1edd046f09a6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -581,16 +581,17 @@ private EventType getEventType(ContainerCommandRequestProto msg) {
   }

   private void audit(AuditAction action, EventType eventType,
-      Map params, AuditEventStatus result, Throwable exception){
+      Map params, AuditEventStatus result,
+      Throwable exception) {
     AuditMessage amsg;
     switch (result) {
     case SUCCESS:
-      if(isAllowed(action.getAction())) {
-        if(eventType == EventType.READ &&
+      if (isAllowed(action.getAction())) {
+        if (eventType == EventType.READ &&
             AUDIT.getLogger().isInfoEnabled(AuditMarker.READ.getMarker())) {
           amsg = buildAuditMessageForSuccess(action, params);
           AUDIT.logReadSuccess(amsg);
-        } else if(eventType == EventType.WRITE &&
+        } else if (eventType == EventType.WRITE &&
             AUDIT.getLogger().isInfoEnabled(AuditMarker.WRITE.getMarker())) {
           amsg = buildAuditMessageForSuccess(action, params);
           AUDIT.logWriteSuccess(amsg);
@@ -599,11 +600,11 @@ private void audit(AuditAction action, EventType eventType,
       break;

     case FAILURE:
-      if(eventType == EventType.READ &&
+      if (eventType == EventType.READ &&
           AUDIT.getLogger().isErrorEnabled(AuditMarker.READ.getMarker())) {
         amsg = buildAuditMessageForFailure(action, params, exception);
         AUDIT.logReadFailure(amsg);
-      } else if(eventType == EventType.WRITE &&
+      } else if (eventType == EventType.WRITE &&
           AUDIT.getLogger().isErrorEnabled(AuditMarker.WRITE.getMarker())) {
         amsg = buildAuditMessageForFailure(action, params, exception);
         AUDIT.logWriteFailure(amsg);
@@ -656,7 +657,7 @@ enum EventType {
    * @return true or false accordingly.
    */
   private boolean isAllowed(String action) {
-    switch(action) {
+    switch (action) {
     case "CLOSE_CONTAINER":
     case "CREATE_CONTAINER":
     case "LIST_CONTAINER":
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index b736eb536ed5..d6ca2d120e68 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -130,7 +130,7 @@ public List getOpenBlocks(long containerId) {
   public void removeFromBlockMap(BlockID blockID) {
     Preconditions.checkNotNull(blockID);
     containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks)
-        -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks);
+        -> blocks.removeAndGetSize(blockID.getLocalID()) == 0 ?
+        null : blocks);
   }

   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index be83d9b7db15..24df9f5b1ee6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -121,7 +121,7 @@ public class DatanodeConfiguration {
       type = ConfigType.INT,
       defaultValue = "1440",
       tags = {DATANODE},
-      description = "The maximum number of block delete commands queued on "+
+      description = "The maximum number of block delete commands queued on " +
          " a datanode"
   )
   private int blockDeleteQueueLimit = 60 * 24;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index e1fc297406c5..ce79049f4fda 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -293,7 +293,7 @@ private void start() throws IOException {
         now = Time.monotonicNow();
         if (now < nextHB.get()) {
-          if(!Thread.interrupted()) {
+          if (!Thread.interrupted()) {
             try {
               Thread.sleep(nextHB.get() - now);
             } catch (InterruptedException e) {
@@ -379,7 +379,7 @@ public void close() throws IOException {
       connectionManager.close();
     }

-    if(container != null) {
+    if (container != null) {
       container.stop();
     }

@@ -637,12 +637,12 @@ public DatanodeLayoutStorage getLayoutStorage() {
   }

   public StatusAndMessages finalizeUpgrade()
-      throws IOException{
+      throws IOException {
     return upgradeFinalizer.finalize(datanodeDetails.getUuidString(), this);
   }

   public StatusAndMessages queryUpgradeStatus()
-      throws IOException{
+      throws IOException {
     return upgradeFinalizer.reportStatus(datanodeDetails.getUuidString(),
         true);
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 9eea758b0d8a..c75da0a74d6c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -184,7 +184,7 @@ public StateContext(ConfigurationSource conf,
   /**
    * init related ReportType Collections.
    */
-  private void initReportTypeCollection(){
+  private void initReportTypeCollection() {
     fullReportTypeList.add(CONTAINER_REPORTS_PROTO_NAME);
     type2Reports.put(CONTAINER_REPORTS_PROTO_NAME, containerReports);
     fullReportTypeList.add(NODE_REPORT_PROTO_NAME);
@@ -221,7 +221,7 @@ boolean isEntering() {
   */
  boolean isExiting(DatanodeStateMachine.DatanodeStates newState) {
    boolean isExiting = state != newState && stateExecutionCount.get() > 0;
-    if(isExiting) {
+    if (isExiting) {
      stateExecutionCount.set(0);
    }
    return isExiting;
@@ -344,7 +344,7 @@ public void putBackReports(List reportsToPutBack,
       Preconditions.checkState(reportType != null);
     }
     synchronized (incrementalReportsQueue) {
-      if (incrementalReportsQueue.containsKey(endpoint)){
+      if (incrementalReportsQueue.containsKey(endpoint)) {
         incrementalReportsQueue.get(endpoint).addAll(0, reportsToPutBack);
       }
     }
@@ -381,7 +381,7 @@ List getFullReports(
       InetSocketAddress endpoint) {
     Map mp = fullReportSendIndicator.get(endpoint);
     List nonIncrementalReports = new LinkedList<>();
-    if (null != mp){
+    if (null != mp) {
       for (Map.Entry kv : mp.entrySet()) {
         if (kv.getValue().get()) {
           String reportType = kv.getKey();
@@ -817,14 +817,14 @@ public Map getCommandStatusMap() {
    */
   public boolean updateCommandStatus(Long cmdId,
       Consumer cmdStatusUpdater) {
-    if(cmdStatusMap.containsKey(cmdId)) {
+    if (cmdStatusMap.containsKey(cmdId)) {
       cmdStatusUpdater.accept(cmdStatusMap.get(cmdId));
       return true;
     }
     return false;
   }

-  public void configureHeartbeatFrequency(){
+  public void configureHeartbeatFrequency() {
     heartbeatFrequency.set(getScmHeartbeatInterval(conf));
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
index a5044cb06859..7908e3d7d28d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
@@ -68,7 +68,7 @@ private CommandDispatcher(OzoneContainer container, SCMConnectionManager
     this.connectionManager = connectionManager;
     handlerMap = new HashMap<>();
     for (CommandHandler h : handlers) {
-      if(handlerMap.containsKey(h.getCommandType())){
+      if (handlerMap.containsKey(h.getCommandType())) {
        LOG.error("Duplicate handler for the same command. Exiting. Handle " +
            "key : {}", h.getCommandType().getDescriptorForType().getName());
        throw new IllegalArgumentException("Duplicate handler for the same " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
index 217592ddccd2..a766de025dae 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
@@ -81,7 +81,7 @@ public DatanodeStateMachine.DatanodeStates call() throws Exception {
     try {
       addresses = getSCMAddressForDatanodes(conf);
     } catch (IllegalArgumentException e) {
-      if(!Strings.isNullOrEmpty(e.getMessage())) {
+      if (!Strings.isNullOrEmpty(e.getMessage())) {
         LOG.error("Failed to get SCM addresses: {}", e.getMessage());
       }
       return DatanodeStateMachine.DatanodeStates.SHUTDOWN;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index fa6c937f6339..d80d1e5bca31 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -122,7 +122,7 @@ public EndpointStateMachine.EndPointStates call() throws Exception {
       }
     } catch (DiskOutOfSpaceException ex) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
-    } catch(IOException ex) {
+    } catch (IOException ex) {
       rpcEndPoint.logIfNeeded(ex);
     } finally {
       rpcEndPoint.unlock();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
index 4ecf2789a428..557473bf9cef 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
@@ -33,7 +33,7 @@
  * This class is for maintaining Container State Machine statistics.
  */
 @InterfaceAudience.Private
-@Metrics(about="Container State Machine Metrics", context="dfs")
+@Metrics(about = "Container State Machine Metrics", context = "dfs")
 public class CSMMetrics {
   public static final String SOURCE_NAME =
       CSMMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 301fc59237f2..4ef532049fb6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -689,7 +689,7 @@ public CompletableFuture read(
   private synchronized void updateLastApplied() {
     Long appliedTerm = null;
     long appliedIndex = -1;
-    for(long i = getLastAppliedTermIndex().getIndex() + 1;; i++) {
+    for (long i = getLastAppliedTermIndex().getIndex() + 1;; i++) {
       final Long removed = applyTransactionCompletionMap.remove(i);
       if (removed == null) {
         break;
@@ -740,7 +740,7 @@ private CompletableFuture submitTask(
         = queue.submit(task, executor);
     // after the task is completed, remove the queue if the queue is empty.
     f.thenAccept(dummy -> containerTaskQueues.computeIfPresent(containerId,
-        (id, q) -> q.isEmpty()? null: q));
+        (id, q) -> q.isEmpty() ? null : q));
     return f;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index c04e5e967bad..237b44868261 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -273,7 +273,7 @@ private RaftProperties newRaftProperties() {
     // Set the ratis storage directory
     Collection storageDirPaths =
         HddsServerUtil.getOzoneDatanodeRatisDirectory(conf);
-    List storageDirs= new ArrayList<>(storageDirPaths.size());
+    List storageDirs = new ArrayList<>(storageDirPaths.size());

     storageDirPaths.stream().forEach(d -> storageDirs.add(new File(d)));
     RaftServerConfigKeys.setStorageDir(properties, storageDirs);
@@ -693,7 +693,7 @@ private long calculatePipelineBytesWritten(HddsProtos.PipelineID pipelineID) {
     long bytesWritten = 0;
     Iterator> containerIt =
         containerController.getContainers();
-    while(containerIt.hasNext()) {
+    while (containerIt.hasNext()) {
       ContainerData containerData = containerIt.next().getContainerData();
       if (containerData.getOriginPipelineId()
           .compareTo(pipelineID.getId()) == 0) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
index 83b8615887cf..6a38080214a2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java
@@ -145,7 +145,7 @@ public static int getLayOutVersion(Properties props, File versionFile) throws
     String lvStr = getProperty(props, OzoneConsts.LAYOUTVERSION, versionFile);

     int lv = Integer.parseInt(lvStr);
-    if(HDDSVolumeLayoutVersion.getLatestVersion().getVersion() != lv) {
+    if (HDDSVolumeLayoutVersion.getLatestVersion().getVersion() != lv) {
       throw new InconsistentStorageStateException("Invalid layOutVersion. " +
           "Version file has layOutVersion as " + lv + " and latest Datanode " +
           "layOutVersion is " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 35ff05e70747..98e16294da16 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -290,7 +290,7 @@ public void checkVolumeAsync(StorageVolume volume) {
   }

   public void refreshAllVolumeUsage() {
-    volumeMap.forEach((k, v)-> v.refreshVolumeInfo());
+    volumeMap.forEach((k, v) -> v.refreshVolumeInfo());
   }

   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index 5f629ad464fd..715cb8400fe3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -161,7 +161,7 @@ public VolumeSet getVolumeSet() {
   }

   public StorageType getStorageType() {
-    if(this.volumeInfo != null) {
+    if (this.volumeInfo != null) {
       return this.volumeInfo.getStorageType();
     }
     return StorageType.DEFAULT;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 255e7ea82e8b..1fcac8327fab 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -87,7 +87,7 @@ public synchronized void shutdown() {
     }
   }

-  public void refreshNow(){
+  public void refreshNow() {
     source.refreshNow();
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 1284f6a102e4..e0ba37a99d6e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -382,7 +382,7 @@ private void updateContainerData(Runnable update)
   private void compactDB() throws StorageContainerException {
     try {
-      try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+      try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
         db.getStore().compactDB();
       }
     } catch (StorageContainerException ex) {
@@ -435,7 +435,7 @@ public void update(
     // holding lock and writing data to disk. We can have async implementation
     // to flush the update container data to disk.
     long containerId = containerData.getContainerID();
-    if(!containerData.isValid()) {
+    if (!containerData.isValid()) {
       LOG.debug("Invalid container data. ContainerID: {}", containerId);
       throw new StorageContainerException("Invalid container data. " +
           "ContainerID: " + containerId, INVALID_CONTAINER_STATE);
@@ -774,7 +774,7 @@ private enum ContainerCheckLevel {
    * @return
    * @throws IOException
    */
-  private File createTempFile(File file) throws IOException{
+  private File createTempFile(File file) throws IOException {
     return File.createTempFile("tmp_" + System.currentTimeMillis() + "_",
         file.getName(), file.getParentFile());
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index ab2f666ac473..40d527d464e9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -230,13 +230,13 @@ private void scanData(DataTransferThrottler throttler, Canceler canceler)
     ContainerLayoutVersion layout = onDiskContainerData.getLayoutVersion();

-    try(ReferenceCountedDB db =
+    try (ReferenceCountedDB db =
             BlockUtils.getDB(onDiskContainerData, checkConfig);
         BlockIterator kvIter = db.getStore().getBlockIterator()) {

-      while(kvIter.hasNext()) {
+      while (kvIter.hasNext()) {
         BlockData block = kvIter.nextBlock();
-        for(ContainerProtos.ChunkInfo chunk : block.getChunks()) {
+        for (ContainerProtos.ChunkInfo chunk : block.getChunks()) {
           File chunkFile = layout.getChunkFile(onDiskContainerData,
               block.getBlockID(), ChunkInfo.getFromProtoBuf(chunk));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index f7eedf7db0e6..81333073cc6f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -270,7 +270,7 @@ public ContainerDataProto getProtoBufMessage() {
       builder.setBytesUsed(this.getBytesUsed());
     }

-    if(this.getContainerType() != null) {
+    if (this.getContainerType() != null) {
       builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer);
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 476007819245..ff2d061cb724 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -194,7 +194,7 @@ static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler,
       DispatcherContext dispatcherContext) {
     Type cmdType = request.getCmdType();

-    switch(cmdType) {
+    switch (cmdType) {
     case CreateContainer:
       return handler.handleCreateContainer(request, kvContainer);
     case ReadContainer:
@@ -744,7 +744,7 @@ ContainerCommandResponseProto handleWriteChunk(
         .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);

     // We should increment stats after writeChunk
-    if (stage == WriteChunkStage.WRITE_DATA||
+    if (stage == WriteChunkStage.WRITE_DATA ||
         stage == WriteChunkStage.COMBINED) {
       metrics.incContainerBytesStats(Type.WriteChunk, writeChunk
           .getChunkData().getLen());
@@ -959,7 +959,7 @@ public Container importContainer(ContainerData originalContainerData,
   public void exportContainer(final Container container,
       final OutputStream outputStream,
       final TarContainerPacker packer)
-      throws IOException{
+      throws IOException {
     final KeyValueContainer kvc = (KeyValueContainer) container;
     kvc.exportContainerData(outputStream, packer);
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
index ad1673a02ab7..dde3e2e22dc1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
@@ -97,7 +97,7 @@ private static String getBaseContainerLocation(String hddsVolumeDir,
    * @param containerId
    * @return container sub directory
    */
-  private static String getContainerSubDirectory(long containerId){
+  private static String getContainerSubDirectory(long containerId) {
     int directory = (int) ((containerId >> 9) & 0xFF);
     return Storage.CONTAINER_DIR + directory;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 58a0dcd94920..8256d0a5b4ad 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -119,7 +119,7 @@ public static long persistPutBlock(KeyValueContainer container,
         "cannot be negative");
     // We are not locking the key manager since LevelDb serializes all actions
     // against a single DB. We rely on DB level locking to avoid conflicts.
-    try(ReferenceCountedDB db = BlockUtils.
+    try (ReferenceCountedDB db = BlockUtils.
         getDB(container.getContainerData(), config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
@@ -216,7 +216,7 @@ public BlockData getBlock(Container container, BlockID blockID)
           + containerBCSId + ".", UNKNOWN_BCSID);
     }

-    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
      // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -244,7 +244,7 @@ public long getCommittedBlockLength(Container container, BlockID blockID)
       throws IOException {
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
-    try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
+    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
@@ -276,7 +276,7 @@ public void deleteBlock(Container container, BlockID blockID) throws
     KeyValueContainerData cData = (KeyValueContainerData) container
         .getContainerData();
-    try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
+    try (ReferenceCountedDB db = BlockUtils.getDB(cData, config)) {
       // This is a post condition that acts as a hint to the user.
       // Should never fail.
       Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 905918a2f924..d40afc5f6741 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -351,7 +351,7 @@ public ContainerBackgroundTaskResult deleteViaSchema1(
       }

       // Once blocks are deleted... remove the blockID from blockDataTable.
-      try(BatchOperation batch = meta.getStore().getBatchHandler()
+      try (BatchOperation batch = meta.getStore().getBatchHandler()
           .initBatchOperation()) {
         for (String entry : succeedBlocks) {
           blockDataTable.deleteWithBatch(batch, entry);
@@ -426,7 +426,7 @@ public ContainerBackgroundTaskResult deleteViaSchema2(
       // Once blocks are deleted... remove the blockID from blockDataTable
       // and also remove the transactions from txnTable.
-      try(BatchOperation batch = meta.getStore().getBatchHandler()
+      try (BatchOperation batch = meta.getStore().getBatchHandler()
           .initBatchOperation()) {
         for (DeletedBlocksTransaction delTx : delBlocks) {
           deleteTxns.deleteWithBatch(batch, delTx.getTxID());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 15a8a9eb5b40..a3049be46742 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -276,7 +276,7 @@ public BlockData nextBlock() throws IOException, NoSuchElementException {
         nextBlock = null;
         return currentBlock;
       }
-      if(hasNext()) {
+      if (hasNext()) {
         return nextBlock();
       }
       throw new NoSuchElementException("Block Iterator reached end for " +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
index 4a20dc326a74..c92448553161 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java
@@ -32,7 +32,7 @@
  * This class captures the container data scrubber metrics on the data-node.
 **/
 @InterfaceAudience.Private
-@Metrics(about="DataNode container data scrubber metrics", context="dfs")
+@Metrics(about = "DataNode container data scrubber metrics", context = "dfs")
 public final class ContainerDataScrubberMetrics {

   private final String name;
@@ -110,8 +110,8 @@ private ContainerDataScrubberMetrics(String name, MetricsSystem ms) {
   public static ContainerDataScrubberMetrics create(final String volumeName) {
     MetricsSystem ms = DefaultMetricsSystem.instance();
-    String name = "ContainerDataScrubberMetrics-"+ (volumeName.isEmpty()
-        ? "UndefinedDataNodeVolume"+ ThreadLocalRandom.current().nextInt()
+    String name = "ContainerDataScrubberMetrics-" + (volumeName.isEmpty()
+        ? "UndefinedDataNodeVolume" + ThreadLocalRandom.current().nextInt()
         : volumeName.replace(':', '-'));

     return ms.register(name, null, new ContainerDataScrubberMetrics(name, ms));
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
index 96efcf4a146b..59657b064afb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java
@@ -90,7 +90,7 @@ void runIteration() {
         metrics.incNumContainersScanned();
       }
     }
-    long interval = System.nanoTime()-start;
+    long interval = System.nanoTime() - start;
     if (!stopping) {
       metrics.incNumScanIterations();
       LOG.info("Completed an iteration of container metadata scrubber in" +
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
index cf8e61725bed..b70a3e5ed551 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java
@@ -30,7 +30,7 @@
  * data-node.
 **/
 @InterfaceAudience.Private
-@Metrics(about="DataNode container data scrubber metrics", context="dfs")
+@Metrics(about = "DataNode container data scrubber metrics", context = "dfs")
 public final class ContainerMetadataScrubberMetrics {

   private final String name;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
index 91c74004beee..023b251a524f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
@@ -49,7 +49,7 @@
 /**
  * Client to read container data from gRPC.
  */
-public class GrpcReplicationClient implements AutoCloseable{
+public class GrpcReplicationClient implements AutoCloseable {

   private static final Logger LOG =
       LoggerFactory.getLogger(GrpcReplicationClient.class);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java
index e14a391dcb1b..fc9b44924ab2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java
@@ -116,7 +116,7 @@ public void doRead(ChannelHandlerContext ctx, ByteBuf buffer)
     }
   }

-  public boolean isAtTheEnd(){
+  public boolean isAtTheEnd() {
     return getCurrentFileName().equals(END_MARKER);
   }

   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
index 9ff4b0aa3db6..f25e13c285eb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java
@@ -46,7 +46,7 @@ public DataNodeUpgradeFinalizer(HDDSLayoutVersionManager versionManager) {
   @Override
   public void preFinalizeUpgrade(DatanodeStateMachine dsm) throws IOException {
-    if(!canFinalizeDataNode(dsm)) {
+    if (!canFinalizeDataNode(dsm)) {
       // DataNode is not yet ready to finalize.
       // Reset the Finalization state.
       getVersionManager().setUpgradeState(FINALIZATION_REQUIRED);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java
index ec8494604ac4..3653e6c9fa7c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java
@@ -120,7 +120,7 @@ public static String chooseContainerPathID(ConfigurationSource conf,
     boolean scmHAEnabled =
         conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY,
             ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT);
-    if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled){
+    if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled) {
       return clusterID;
     } else {
       return scmID;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
index e3ea4aeeaff6..6aa0554e1026 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
@@ -27,7 +27,7 @@
  * Informs a datanode to register itself with SCM again.
  */
 public class ReregisterCommand extends
-    SCMCommand{
+    SCMCommand {

   /**
    * Returns the type of this command.
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
index 08ca4c91f5b2..6deaddadc9c9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
@@ -114,7 +114,7 @@ public static void tearDown() {
   }

   @Before
-  public void setUpDNCertClient(){
+  public void setUpDNCertClient() {

     FileUtils.deleteQuietly(Paths.get(
         securityConfig.getKeyLocation(DN_COMPONENT).toString(),
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 157dee65ff87..eef66550df64 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -131,9 +131,10 @@ public int getContainerReportsCount() {
    * @return - count of reported containers.
    */
   public long getContainerCount() {
-    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
-      return containerMap.size();
-    }).sum();
+    return nodeContainers.values().parallelStream().mapToLong(
+        (containerMap) -> {
+          return containerMap.size();
+        }).sum();
   }

   /**
@@ -141,11 +142,13 @@ public long getContainerCount() {
    * @return - number of keys reported.
    */
   public long getKeyCount() {
-    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
-      return containerMap.values().parallelStream().mapToLong((container) -> {
-        return container.getKeyCount();
-      }).sum();
-    }).sum();
+    return nodeContainers.values().parallelStream().mapToLong(
+        (containerMap) -> {
+          return containerMap.values().parallelStream().mapToLong(
+              (container) -> {
+                return container.getKeyCount();
+              }).sum();
+        }).sum();
   }

   /**
@@ -153,11 +156,13 @@ public long getKeyCount() {
    * @return - number of bytes used.
    */
   public long getBytesUsed() {
-    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
-      return containerMap.values().parallelStream().mapToLong((container) -> {
-        return container.getUsed();
-      }).sum();
-    }).sum();
+    return nodeContainers.values().parallelStream().mapToLong(
+        (containerMap) -> {
+          return containerMap.values().parallelStream().mapToLong(
+              (container) -> {
+                return container.getUsed();
+              }).sum();
+        }).sum();
   }

   /**
@@ -259,7 +264,7 @@ public void updateNodeReport(DatanodeDetailsProto datanodeDetailsProto,
     List storageReports = nodeReport.getStorageReportList();
-    for(StorageReportProto report : storageReports) {
+    for (StorageReportProto report : storageReports) {
       nodeReportProto.addStorageReport(report);
     }

@@ -313,7 +318,7 @@ public int getNodeReportsCount(DatanodeDetails datanodeDetails) {
   public int getContainerCountsForDatanode(DatanodeDetails datanodeDetails) {
     Map cr = nodeContainers.get(datanodeDetails);
-    if(cr != null) {
+    if (cr != null) {
       return cr.size();
     }
     return 0;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 014913e806dd..5306eb05d92c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -356,7 +356,8 @@ private void updateMetaData(KeyValueContainerData data,
           .put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
               (long) numOfBlocksPerContainer);
     } catch (IOException exception) {
-      LOG.warn("Meta Data update was not successful for container: "+container);
+      LOG.warn("Meta Data update was not successful for container: "
+          + container);
     }
   }

@@ -427,7 +428,7 @@ public void testBlockDeletion() throws Exception {
     KeyValueContainerData data = (KeyValueContainerData) containerData.get(0);
     Assert.assertEquals(1, containerData.size());

-    try(ReferenceCountedDB meta = BlockUtils.getDB(
+    try (ReferenceCountedDB meta = BlockUtils.getDB(
           (KeyValueContainerData) containerData.get(0), conf)) {
       Map> containerMap = containerSet.getContainerMapCopy();
       // NOTE: this test assumes that all the container is KetValueContainer and
@@ -734,7 +735,7 @@ public void testBlockThrottle() throws Exception {
     // in all the containers are deleted)).
     deleteAndWait(service, 2);

-    long totalContainerBlocks = blocksPerContainer*containerCount;
+    long totalContainerBlocks = blocksPerContainer * containerCount;
     GenericTestUtils.waitFor(() ->
         totalContainerBlocks * blockSpace ==
             (totalContainerSpace -
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index 562775d263c8..e55d68cbe34d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -177,7 +177,7 @@ public void testConcurrentDBGet() throws Exception {
     for (Future future: futureList) {
       try {
         future.get();
-      } catch (InterruptedException| ExecutionException e) {
+      } catch (InterruptedException | ExecutionException e) {
         Assert.fail("Should get the DB instance");
       }
     }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index 700c6c2abef3..c8bb93b26c3e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -130,7 +130,8 @@ public void setup() throws Exception {
    */
   @Test
   public void testDirectTableIterationDisabled() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       DatanodeStore store = refCountedDB.getStore();

       assertTableIteratorUnsupported(store.getMetadataTable());
@@ -158,7 +159,8 @@ private void assertTableIteratorUnsupported(Table table) {
    */
   @Test
   public void testBlockIteration() throws IOException {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       assertEquals(TestDB.NUM_DELETED_BLOCKS, countDeletedBlocks(refCountedDB));

       assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS,
@@ -278,7 +280,8 @@ public void testDelete() throws Exception {
     final long expectedRegularBlocks =
         TestDB.KEY_COUNT - numBlocksToDelete;

-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       // Test results via block iteration.

       assertEquals(expectedDeletingBlocks,
@@ -320,7 +323,8 @@ public void testReadDeletedBlockChunkInfo() throws Exception {
         new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
             metrics, c -> {
             });
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       // Read blocks that were already deleted before the upgrade.
       List> deletedBlocks = refCountedDB.getStore()
@@ -328,13 +332,13 @@ public void testReadDeletedBlockChunkInfo() throws Exception {

       Set preUpgradeBlocks = new HashSet<>();

-      for(Table.KeyValue chunkListKV: deletedBlocks) {
+      for (Table.KeyValue chunkListKV: deletedBlocks) {
         preUpgradeBlocks.add(chunkListKV.getKey());
         try {
           chunkListKV.getValue();
           Assert.fail("No exception thrown when trying to retrieve old " +
               "deleted blocks values as chunk lists.");
-        } catch(IOException ex) {
+        } catch (IOException ex) {
           // Exception thrown as expected.
         }
       }
@@ -370,7 +374,8 @@ public void testReadDeletedBlockChunkInfo() throws Exception {

   @Test
   public void testReadBlockData() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       Table blockDataTable =
           refCountedDB.getStore().getBlockDataTable();

@@ -395,12 +400,12 @@ public void testReadBlockData() throws Exception {
       Assert.assertEquals(TestDB.BLOCK_IDS, decodedKeys);

       // Test reading blocks with block iterator.
-      try(BlockIterator iter =
+      try (BlockIterator iter =
               refCountedDB.getStore().getBlockIterator()) {

         List iteratorBlockIDs = new ArrayList<>();

-        while(iter.hasNext()) {
+        while (iter.hasNext()) {
           long localID = iter.nextBlock().getBlockID().getLocalID();
           iteratorBlockIDs.add(Long.toString(localID));
         }
@@ -412,7 +417,8 @@ public void testReadBlockData() throws Exception {

   @Test
   public void testReadDeletingBlockData() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       Table blockDataTable =
           refCountedDB.getStore().getBlockDataTable();

@@ -446,12 +452,12 @@ public void testReadDeletingBlockData() throws Exception {
       MetadataKeyFilters.KeyPrefixFilter filter =
           MetadataKeyFilters.getDeletingKeyFilter();

-      try(BlockIterator iter =
+      try (BlockIterator iter =
               refCountedDB.getStore().getBlockIterator(filter)) {

         List iteratorBlockIDs = new ArrayList<>();

-        while(iter.hasNext()) {
+        while (iter.hasNext()) {
           long localID = iter.nextBlock().getBlockID().getLocalID();
           iteratorBlockIDs.add(Long.toString(localID));
         }
@@ -463,7 +469,8 @@ public void testReadDeletingBlockData() throws Exception {

   @Test
   public void testReadMetadata() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       Table metadataTable =
           refCountedDB.getStore().getMetadataTable();

@@ -479,7 +486,8 @@ public void testReadMetadata() throws Exception {

   @Test
   public void testReadDeletedBlocks() throws Exception {
-    try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
+    try (ReferenceCountedDB refCountedDB =
+        BlockUtils.getDB(newKvData(), conf)) {
       Table deletedBlocksTable =
           refCountedDB.getStore().getDeletedBlocksTable();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
index 00f68ef3dc79..85a8bda8a6ef 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
@@ -60,11 +60,11 @@ public void testAddAndRemove() {
     assertChunks(expected, computed);
computed); long offset = 0; int n = 5; - for(int i = 0; i < n; i++) { + for (int i = 0; i < n; i++) { offset += assertAddChunk(expected, computed, offset); } - for(; !expected.isEmpty();) { + for (; !expected.isEmpty();) { removeChunk(expected, computed); } } @@ -125,7 +125,7 @@ public void testSetChunks() { assertChunks(expected, computed); long offset = 0; int n = 5; - for(int i = 0; i < n; i++) { + for (int i = 0; i < n; i++) { offset += addChunk(expected, offset).getLen(); LOG.info("setChunk: {}", toString(expected)); computed.setChunks(expected); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java index 84f50087e1fc..1ca9f9e8570c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java @@ -50,7 +50,7 @@ public class TestDatanodeVersionFile { private int lv; @Rule - public TemporaryFolder folder= new TemporaryFolder(); + public TemporaryFolder folder = new TemporaryFolder(); @Before public void setup() throws IOException { @@ -70,7 +70,7 @@ public void setup() throws IOException { } @Test - public void testCreateAndReadVersionFile() throws IOException{ + public void testCreateAndReadVersionFile() throws IOException { //Check VersionFile exists assertTrue(versionFile.exists()); @@ -88,7 +88,7 @@ public void testCreateAndReadVersionFile() throws IOException{ } @Test - public void testIncorrectClusterId() throws IOException{ + public void testIncorrectClusterId() throws IOException { try { String randomClusterID = UUID.randomUUID().toString(); HddsVolumeUtil.getClusterID(properties, versionFile, @@ -100,7 +100,7 @@ public void testIncorrectClusterId() throws IOException{ } @Test - public void testVerifyCTime() throws IOException{ + public void testVerifyCTime() throws IOException { long invalidCTime = -10; dnVersionFile = new DatanodeVersionFile( storageID, clusterID, datanodeUUID, invalidCTime, lv); @@ -117,7 +117,7 @@ public void testVerifyCTime() throws IOException{ } @Test - public void testVerifyLayOut() throws IOException{ + public void testVerifyLayOut() throws IOException { int invalidLayOutVersion = 100; dnVersionFile = new DatanodeVersionFile( storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java index 70efc4065c8b..0bfdb173a474 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java @@ -174,7 +174,7 @@ public void testCreateContainerFile() throws IOException { } @Test - public void testIncorrectContainerFile() throws IOException{ + public void testIncorrectContainerFile() throws IOException { try { String containerFile = "incorrect.container"; //Get file from resources folder diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java index 4cb3094ff589..d51d78e4adbd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java @@ -122,11 +122,11 @@ public void testIteratorsAndCount() throws StorageContainerException { Iterator> iterator = containerSet.getContainerIterator(); int count = 0; - while(iterator.hasNext()) { + while (iterator.hasNext()) { Container kv = iterator.next(); ContainerData containerData = kv.getContainerData(); long containerId = containerData.getContainerID(); - if (containerId%2 == 0) { + if (containerId % 2 == 0) { assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, containerData.getState()); } else { @@ -146,7 +146,7 @@ public void testIteratorsAndCount() throws StorageContainerException { Container kv = containerMapIterator.next().getValue(); ContainerData containerData = kv.getContainerData(); long containerId = containerData.getContainerID(); - if (containerId%2 == 0) { + if (containerId % 2 == 0) { assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, containerData.getState()); } else { @@ -167,12 +167,12 @@ public void testIteratorPerVolume() throws StorageContainerException { Mockito.when(vol2.getStorageID()).thenReturn("uuid-2"); ContainerSet containerSet = new ContainerSet(); - for (int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, layout, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), UUID.randomUUID().toString()); - if (i%2 == 0) { + if (i % 2 == 0) { kvData.setVolume(vol1); } else { kvData.setVolume(vol2); @@ -307,7 +307,7 @@ private ContainerSet createContainerSet() throws StorageContainerException { layout, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), UUID.randomUUID().toString()); - if (i%2 == 0) { + if (i % 2 == 0) { kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED); } else { kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 64813de96874..9b8da361b2d4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -83,7 +83,7 @@ public class TestHddsDispatcher { public static final Consumer NO_OP_ICR_SENDER = - c -> {}; + c -> { }; private final ContainerLayoutVersion layout; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java index f969148a1603..2b1bc3d248f7 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java @@ -82,7 +82,7 @@ public void setup() throws Exception { } @After - public void tearDown(){ + public void tearDown() { 
ContainerMetrics.remove(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java index 83e44d3adf83..f2770d2941f9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java @@ -198,7 +198,7 @@ public void testCRLStatusReportPublisher() throws IOException { GeneratedMessage report = ((CRLStatusReportPublisher) publisher).getReport(); Assert.assertNotNull(report); - for(Descriptors.FieldDescriptor descriptor : + for (Descriptors.FieldDescriptor descriptor : report.getDescriptorForType().getFields()) { if (descriptor.getNumber() == CRLStatusReport.RECEIVEDCRLID_FIELD_NUMBER) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index 791d6e079ccd..de9968128e48 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -214,7 +214,7 @@ public void closeNonExistenceContainer() { } catch (IOException e) { GenericTestUtils.assertExceptionContains("The Container " + - "is not found. ContainerID: "+containerID, e); + "is not found. ContainerID: " + containerID, e); } } @@ -227,7 +227,7 @@ public void closeMissingContainer() { } catch (IOException e) { GenericTestUtils.assertExceptionContains("The Container is in " + "the MissingContainerSet hence we can't close it. 
" + - "ContainerID: "+containerID, e); + "ContainerID: " + containerID, e); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java index 990d4c95bf3b..dfe7cb314b85 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java @@ -114,7 +114,7 @@ public void testRRPolicyExceptionMessage() throws Exception { try { policy.chooseVolume(volumes, blockSize); Assert.fail("expected to throw DiskOutOfSpaceException"); - } catch(DiskOutOfSpaceException e) { + } catch (DiskOutOfSpaceException e) { Assert.assertEquals("Not returning the expected message", "Out of space: The volume with the most available space (=" + 200 + " B) is less than the container size (=" + blockSize + " B).", diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 52bf3d320055..f0869c9c6f94 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -225,7 +225,7 @@ public void testShutdown() throws Exception { } @Test - public void testFailVolumes() throws Exception{ + public void testFailVolumes() throws Exception { MutableVolumeSet volSet = null; File readOnlyVolumePath = new File(baseDir); //Set to readonly, so that this volume will be failed diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index 81d230fcf346..fbf39f702841 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -120,7 +120,7 @@ public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception { // Default filter used is all unprefixed blocks. 
List unprefixedBlockIDs = blockIDs.get(""); - try(BlockIterator keyValueBlockIterator = + try (BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator()) { Iterator blockIDIter = unprefixedBlockIDs.iterator(); @@ -152,7 +152,7 @@ public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception { @Test public void testKeyValueBlockIteratorWithNextBlock() throws Exception { List blockIDs = createContainerWithBlocks(CONTAINER_ID, 2); - try(BlockIterator keyValueBlockIterator = + try (BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator()) { assertEquals((long)blockIDs.get(0), keyValueBlockIterator.nextBlock().getLocalID()); @@ -171,7 +171,7 @@ public void testKeyValueBlockIteratorWithNextBlock() throws Exception { @Test public void testKeyValueBlockIteratorWithHasNext() throws Exception { List blockIDs = createContainerWithBlocks(CONTAINER_ID, 2); - try(BlockIterator blockIter = + try (BlockIterator blockIter = db.getStore().getBlockIterator()) { // Even calling multiple times hasNext() should not move entry forward. @@ -209,7 +209,7 @@ public void testKeyValueBlockIteratorWithFilter() throws Exception { int deletingBlocks = 5; Map> blockIDs = createContainerWithBlocks(CONTAINER_ID, normalBlocks, deletingBlocks); - try(BlockIterator keyValueBlockIterator = + try (BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator( MetadataKeyFilters.getDeletingKeyFilter())) { List deletingBlockIDs = @@ -230,7 +230,7 @@ public void testKeyValueBlockIteratorWithFilter() throws Exception { public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws Exception { createContainerWithBlocks(CONTAINER_ID, 0, 5); - try(BlockIterator keyValueBlockIterator = + try (BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator()) { //As all blocks are deleted blocks, blocks does not match with normal key // filter. @@ -288,7 +288,7 @@ public void testKeyValueBlockIteratorWithAdvancedFilter() throws */ private void testWithFilter(MetadataKeyFilters.KeyPrefixFilter filter, List expectedIDs) throws Exception { - try(BlockIterator iterator = + try (BlockIterator iterator = db.getStore().getBlockIterator(filter)) { // Test seek. iterator.seekToFirst(); @@ -364,7 +364,7 @@ private Map> createContainerWithBlocks(long containerId, Map prefixCounts) throws Exception { // Create required block data. 
Map> blockIDs = new HashMap<>(); - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, + try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) { List chunkList = new ArrayList<>(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 98b862636ee5..161657257734 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -446,7 +446,7 @@ public void testContainerRocksDB() keyValueContainerData, CONF); keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - try(ReferenceCountedDB db = + try (ReferenceCountedDB db = BlockUtils.getDB(keyValueContainerData, CONF)) { RDBStore store = (RDBStore) db.getStore().getStore(); long defaultCacheSize = 64 * OzoneConsts.MB; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 8f1e76d37765..d50a091a6a46 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -186,7 +186,7 @@ public void testKeyValueContainerCheckCorruption() throws Exception { try (RandomAccessFile file = new RandomAccessFile(chunkFile, "rws")) { file.setLength(length / 2); } - assertEquals(length/2, chunkFile.length()); + assertEquals(length / 2, chunkFile.length()); } // metadata check should pass. 
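All of the hunks above apply the same few checkstyle whitespace rules: one space after control-flow keywords (if, for, while, try, catch), spaces around binary operators such as /, %, <, and =, and a space before an opening brace. A minimal sketch of the target style, using invented names rather than code from this patch:

    final class WhitespaceStyleSketch {
      static long halve(long total) {
        long half = 0;
        if (total % 2 == 0) {            // space after 'if', spaces around '%'
          half = total / 2;              // spaces around '/'
        }
        for (int i = 0; i < 3; i++) {    // space after 'for', spaces around '<' and '='
          half += i;
        }
        return half;
      }
    }
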
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 134477b5320f..a5d225469db1 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -261,7 +261,7 @@ public void testHandlerCommandHandling() throws Exception { } @Test - public void testVolumeSetInKeyValueHandler() throws Exception{ + public void testVolumeSetInKeyValueHandler() throws Exception { File path = GenericTestUtils.getRandomizedTestDir(); OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath()); @@ -296,7 +296,7 @@ public void testVolumeSetInKeyValueHandler() throws Exception{ try { new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), - cset, volumeSet, metrics, c->{}); + cset, volumeSet, metrics, c -> { }); } catch (RuntimeException ex) { GenericTestUtils.assertExceptionContains("class org.apache.hadoop" + ".ozone.container.common.impl.HddsDispatcher not org.apache" + diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java index 23f690eed5d9..defc02e78ecc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java @@ -214,7 +214,7 @@ public void testWriteAndReadChunkMultipleTimes() throws Exception { BlockData blockData = new BlockData(blockID); // WHEN - for (int i = 0; i< count; i++) { + for (int i = 0; i < count; i++) { ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i), i * len, len); chunkManager.writeChunk(container, blockID, info, data, context); @@ -228,7 +228,7 @@ public void testWriteAndReadChunkMultipleTimes() throws Exception { assertTrue(getHddsVolume().getVolumeIOStats().getWriteTime() > 0); // WHEN - for (int i = 0; i< count; i++) { + for (int i = 0; i < count; i++) { ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i), i * len, len); chunkManager.readChunk(container, blockID, info, context); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index d3e5757344f1..674ae2dacef6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -94,7 +94,7 @@ public void setup() throws Exception { Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) .thenReturn(hddsVolume); - for (int i=0; i<2; i++) { + for (int i = 0; i < 2; i++) { KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i, ContainerLayoutVersion.FILE_PER_BLOCK, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), @@ -124,7 +124,7 @@ 
public void setup() throws Exception { private void markBlocksForDelete(KeyValueContainer keyValueContainer, boolean setMetaData, List blockNames, int count) throws Exception { - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer + try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer .getContainerData(), conf)) { for (int i = 0; i < count; i++) { @@ -154,7 +154,7 @@ private List addBlocks(KeyValueContainer keyValueContainer, long containerId = keyValueContainer.getContainerData().getContainerID(); List blkNames = new ArrayList<>(); - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer + try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer .getContainerData(), conf)) { for (int i = 0; i < blockCount; i++) { @@ -197,7 +197,7 @@ public void testContainerReader() throws Exception { Assert.assertEquals(2, containerSet.containerCount()); - for (int i=0; i < 2; i++) { + for (int i = 0; i < 2; i++) { Container keyValueContainer = containerSet.getContainer(i); KeyValueContainerData keyValueContainerData = (KeyValueContainerData) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java index cf6ece3b4567..099ca9c29876 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java @@ -126,18 +126,18 @@ public void seriesOfArraysExactlyFillBuffer() throws IOException { public void bufferFlushedWhenFull() throws IOException { byte[] bytes = getRandomBytes(bufferSize); - subject.write(bytes, 0, bufferSize-1); - subject.write(bytes[bufferSize-1]); + subject.write(bytes, 0, bufferSize - 1); + subject.write(bytes[bufferSize - 1]); verify(observer).onNext(any()); subject.write(bytes[0]); - subject.write(bytes, 1, bufferSize-1); + subject.write(bytes, 1, bufferSize - 1); verify(observer, times(2)).onNext(any()); } @Test public void singleArraySpansMultipleResponses() throws IOException { - byte[] bytes = writeBytes(subject, 2 * bufferSize + bufferSize/2); + byte[] bytes = writeBytes(subject, 2 * bufferSize + bufferSize / 2); subject.close(); verifyResponses(bytes); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index 37c5557c7501..8078fc25c897 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -58,7 +58,7 @@ @RunWith(Parameterized.class) public class TestReplicationSupervisor { - private final ContainerReplicator noopReplicator = task -> {}; + private final ContainerReplicator noopReplicator = task -> { }; private final ContainerReplicator throwingReplicator = task -> { throw new RuntimeException("testing replication failure"); }; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index cb5257d5ea80..d882ca4ed4b0 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -144,7 +144,7 @@ public void testReadsDuringFinalization() throws Exception { ExecutorService executor = Executors.newFixedThreadPool(1); Future readFuture = executor.submit(() -> { // Layout version check should be thread safe. - while(!dsm.getLayoutVersionManager() + while (!dsm.getLayoutVersionManager() .isAllowed(HDDSLayoutFeature.SCM_HA)) { readChunk(writeChunk, pipeline); } @@ -203,7 +203,7 @@ public void testImportContainer() throws Exception { ExecutorService executor = Executors.newFixedThreadPool(1); Future importFuture = executor.submit(() -> { // Layout version check should be thread safe. - while(!dsm.getLayoutVersionManager() + while (!dsm.getLayoutVersionManager() .isAllowed(HDDSLayoutFeature.SCM_HA)) { importContainer(exportContainerID, exportedContainerFile); readChunk(exportWriteChunk, pipeline); @@ -541,7 +541,7 @@ public void restartDatanode(int expectedMlv) * Get the cluster ID and SCM ID from SCM to the datanode. */ public void callVersionEndpointTask() throws Exception { - try(EndpointStateMachine esm = ContainerTestUtils.createEndpoint(conf, + try (EndpointStateMachine esm = ContainerTestUtils.createEndpoint(conf, address, 1000)) { VersionEndpointTask vet = new VersionEndpointTask(esm, conf, dsm.getContainer()); diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml index f4bfcef323ed..2e27982d5eae 100644 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml @@ -152,6 +152,7 @@ + diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java index d89ecc68de38..14e63a1b303d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java @@ -366,7 +366,7 @@ public long revokeCertificates(List certIds, int reason, .setReason(Reason.valueOf(reason)) .setRevokeTime(revocationTime).build(); return submitRequest(Type.RevokeCertificates, - builder->builder.setRevokeCertificatesRequest(req)) + builder -> builder.setRevokeCertificatesRequest(req)) .getRevokeCertificatesResponseProto().getCrlId(); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index b26c0da9351d..77ef3f09a49b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -332,7 +332,7 @@ public List getExistContainerWithPipelinesInBatch( response = 
submitRequest(Type.GetExistContainerWithPipelinesInBatch, (builder) -> builder .setGetExistContainerWithPipelinesInBatchRequest(request)); - } catch (IOException ex){ + } catch (IOException ex) { return cps; } @@ -781,7 +781,7 @@ public boolean startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException{ + Optional maxSizeLeavingSourceInGB) throws IOException { StartContainerBalancerRequestProto.Builder builder = StartContainerBalancerRequestProto.newBuilder(); builder.setTraceID(TracingUtil.exportCurrentSpan()); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java index 6d544819d92f..72da5194c361 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java @@ -68,7 +68,7 @@ public class CRLClientUpdateHandler implements ClientUpdateHandler { this.clientStore = serviceGrpcClient.getClientCRLStore(); this.crlCheckInterval = crlCheckInterval; - LOG.info("Pending CRL check interval : {}s", crlCheckInterval/1000); + LOG.info("Pending CRL check interval : {}s", crlCheckInterval / 1000); this.executorService = Executors.newSingleThreadScheduledExecutor( new ThreadFactoryBuilder().setDaemon(true) .setNameFormat("CRLUpdateHandler Thread - %d").build()); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java index 721988ec7a02..5e326ccfea54 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java @@ -76,7 +76,7 @@ public void onRevokeCerts(CRLInfo crl) { public List getRevokedCertIds(X509CRL crl) { return Collections.unmodifiableList(crl.getRevokedCertificates().stream() - .map(cert->cert.getSerialNumber().longValue()) + .map(cert -> cert.getSerialNumber().longValue()) .collect(Collectors.toList())); } @@ -91,7 +91,7 @@ public void removePendingCrl(CRLInfo crl) { public List getPendingCrlIds() { return new ArrayList<>(pendingCrls) - .stream().map(crl->crl.getCrlSequenceID()) + .stream().map(crl -> crl.getCrlSequenceID()) .collect(Collectors.toList()); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java index 96e157711bb0..8b96d5c0a984 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java @@ -92,7 +92,7 @@ public void start() { createChannel(); } clientId = subScribeClient(); - assert(clientId != null); + assert (clientId != null); // start background thread processing pending crl ids. 
handler = new CRLClientUpdateHandler(clientId, updateClient, diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java index 3136168cc5a5..6738868942b4 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java @@ -100,7 +100,7 @@ List getExtensionsList(Attribute attribute) { Objects.requireNonNull(attribute); List extensionsList = new ArrayList<>(); for (ASN1Encodable value : attribute.getAttributeValues()) { - if(value != null) { + if (value != null) { Extensions extensions = Extensions.getInstance(value); extensionsList.add(extensions); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java index fc2a77b02bd9..83be3aaf3bae 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java @@ -228,7 +228,7 @@ public Future requestCertificate( CompletableFuture xcertHolder = approver.inspectCSR(csr); - if(xcertHolder.isCompletedExceptionally()) { + if (xcertHolder.isCompletedExceptionally()) { // This means that approver told us there are things which it disagrees // with in this Certificate Request. Since the first set of sanity // checks failed, we just return the future object right here. @@ -324,7 +324,7 @@ public Future> revokeCertificates( public List listCertificate(NodeType role, long startSerialId, int count, boolean isRevoked) throws IOException { return store.listCertificate(role, BigInteger.valueOf(startSerialId), count, - isRevoked? CertificateStore.CertType.REVOKED_CERTS : + isRevoked ? 
CertificateStore.CertType.REVOKED_CERTS : CertificateStore.CertType.VALID_CERTS); } @@ -554,7 +554,7 @@ private void generateRootCertificate(SecurityConfig securityConfig, OzoneSecurityUtil.getValidInetsForCurrentHost().forEach( ip -> { builder.addIpAddress(ip.getHostAddress()); - if(validator.isValid(ip.getCanonicalHostName())) { + if (validator.isValid(ip.getCanonicalHostName())) { builder.addDnsName(ip.getCanonicalHostName()); } }); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java index a146c738d1bc..da799d7d45f0 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java @@ -53,7 +53,7 @@ private static boolean validateBasicExtensions(Extension ext, PKIProfile pkiProfile) { BasicConstraints constraints = BasicConstraints.getInstance(ext.getParsedValue()); - if(constraints.isCA()) { + if (constraints.isCA()) { if (pkiProfile.isCA()) { return true; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index d831c834fdd1..d681806c12d3 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -516,7 +516,7 @@ public CertificateSignRequest.Builder getCSRBuilder() OzoneSecurityUtil.getValidInetsForCurrentHost().forEach( ip -> { builder.addIpAddress(ip.getHostAddress()); - if(validator.isValid(ip.getCanonicalHostName())) { + if (validator.isValid(ip.getCanonicalHostName())) { builder.addDnsName(ip.getCanonicalHostName()); } else { getLogger().error("Invalid domain {}", ip.getCanonicalHostName()); @@ -580,7 +580,7 @@ public void storeCertificate(String pemEncodedCert, boolean force, String certName = String.format(CERT_FILE_NAME_FORMAT, cert.getSerialNumber().toString()); - if(caCert) { + if (caCert) { certName = CA_CERT_PREFIX + certName; caCertId = cert.getSerialNumber().toString(); } @@ -688,17 +688,17 @@ protected enum InitCase { @Override public synchronized InitResponse init() throws CertificateException { int initCase = 0; - PrivateKey pvtKey= getPrivateKey(); + PrivateKey pvtKey = getPrivateKey(); PublicKey pubKey = getPublicKey(); X509Certificate certificate = getCertificate(); - if(pvtKey != null){ - initCase = initCase | 1<<2; + if (pvtKey != null) { + initCase = initCase | 1 << 2; } - if(pubKey != null){ - initCase = initCase | 1<<1; + if (pubKey != null) { + initCase = initCase | 1 << 1; } - if(certificate != null){ + if (certificate != null) { initCase = initCase | 1; } getLogger().info("Certificate client init case: {}", initCase); @@ -800,7 +800,7 @@ protected boolean recoverPublicKey() throws CertificateException { PublicKey pubKey = getCertificate().getPublicKey(); try { - if(validateKeyPair(pubKey)){ + if (validateKeyPair(pubKey)) { keyCodec.writePublicKey(pubKey); publicKey = pubKey; } else { @@ -922,7 +922,7 @@ public List listCA() 
throws IOException { updateCAList(); } return pemEncodedCACerts; - }finally { + } finally { lock.unlock(); } } @@ -947,7 +947,7 @@ public List updateCAList() throws IOException { } @Override - public boolean processCrl(CRLInfo crl){ + public boolean processCrl(CRLInfo crl) { List certIds2Remove = new ArrayList(); crl.getX509CRL().getRevokedCertificates().forEach( cert -> certIds2Remove.add(cert.getSerialNumber().toString())); @@ -957,15 +957,15 @@ public boolean processCrl(CRLInfo crl){ } - private boolean removeCertificates(List certIds){ + private boolean removeCertificates(List certIds) { lock.lock(); boolean reInitCert = false; try { // For now, remove self cert and ca cert is not implemented // both requires a restart of the service. - if ((certSerialId!=null && certIds.contains(certSerialId)) || - (caCertId!=null && certIds.contains(caCertId)) || - (rootCaCertId!=null && certIds.contains(rootCaCertId))) { + if ((certSerialId != null && certIds.contains(certSerialId)) || + (caCertId != null && certIds.contains(caCertId)) || + (rootCaCertId != null && certIds.contains(rootCaCertId))) { reInitCert = true; } @@ -1004,7 +1004,7 @@ public long getLocalCrlId() { * Set Local CRL id. * @param crlId */ - public void setLocalCrlId(long crlId){ + public void setLocalCrlId(long crlId) { this.localCrlId = crlId; } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java index 7aea5967df20..6143bd1030b2 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java @@ -44,8 +44,8 @@ public class OMCertificateClient extends DefaultCertificateClient { public OMCertificateClient(SecurityConfig securityConfig, String certSerialId, String localCrlId) { super(securityConfig, LOG, certSerialId, COMPONENT_NAME); - this.setLocalCrlId(localCrlId!=null ? - Long.parseLong(localCrlId): 0); + this.setLocalCrlId(localCrlId != null ? 
+ Long.parseLong(localCrlId) : 0); } public OMCertificateClient(SecurityConfig securityConfig, diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java index b8d2859eed3e..ec7b5a83f2a7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java @@ -117,7 +117,7 @@ public static String getEncodedString(PKCS10CertificationRequest request) PemObject pemObject = new PemObject("CERTIFICATE REQUEST", request.getEncoded()); StringWriter str = new StringWriter(); - try(JcaPEMWriter pemWriter = new JcaPEMWriter(str)) { + try (JcaPEMWriter pemWriter = new JcaPEMWriter(str)) { pemWriter.writeObject(pemObject); } return str.toString(); @@ -135,7 +135,7 @@ public static PKCS10CertificationRequest getCertificationRequest(String csr) throws IOException { try (PemReader reader = new PemReader(new StringReader(csr))) { PemObject pemObject = reader.readPemObject(); - if(pemObject.getContent() == null) { + if (pemObject.getContent() == null) { throw new SCMSecurityException("Invalid Certificate signing request", INVALID_CSR); } @@ -268,10 +268,10 @@ public CertificateSignRequest.Builder setCA(Boolean isCA) { private Extension getKeyUsageExtension() throws IOException { int keyUsageFlag = KeyUsage.keyAgreement; - if(digitalEncryption){ + if (digitalEncryption) { keyUsageFlag |= KeyUsage.keyEncipherment | KeyUsage.dataEncipherment; } - if(digitalSignature) { + if (digitalSignature) { keyUsageFlag |= KeyUsage.digitalSignature; } @@ -303,7 +303,7 @@ private Extensions createExtensions() throws IOException { List extensions = new ArrayList<>(); // Add basic extension - if(ca) { + if (ca) { extensions.add(getBasicExtension()); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java index 5a9fba65b13c..8aa512f69185 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java @@ -44,7 +44,7 @@ public class CRLInfo implements Comparator, private Instant revocationTime; private CRLInfo(X509CRL x509CRL, long creationTimestamp, long crlSequenceID) { - assert((x509CRL != null) && + assert ((x509CRL != null) && !x509CRL.getRevokedCertificates().isEmpty()); this.x509CRL = x509CRL; this.creationTimestamp = creationTimestamp; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java index 3178cfdc3bbf..2d53b8fb6fbd 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java @@ -44,7 +44,7 @@ public CRLInfo fromPersistedFormat(byte[] rawData) throws IOException { try { return CRLInfo.fromProtobuf( HddsProtos.CRLInfoProto.PARSER.parseFrom(rawData)); - } catch (CertificateException|CRLException e) { + } catch (CertificateException | CRLException e) { throw new 
IllegalArgumentException( "Can't encode the raw data from the byte array", e); } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java index a3f1b6bc2c01..f4f188aaf395 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java @@ -192,7 +192,7 @@ public Process runCmdAsync(List cmd) { protected static String generateFileName(Integer pid, Output output, Event event) { String outputFormat = output.name().toLowerCase(); - if(output == Output.FLAMEGRAPH) { + if (output == Output.FLAMEGRAPH) { outputFormat = "html"; } return FILE_PREFIX + pid + "-" + diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java index 87dc882a00f4..3dc176644d9d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java @@ -32,7 +32,7 @@ * This interface is for maintaining DB checkpoint statistics. */ @InterfaceAudience.Private -@Metrics(about="DB checkpoint Metrics", context="dfs") +@Metrics(about = "DB checkpoint Metrics", context = "dfs") public class DBCheckpointMetrics { private static final String SOURCE_NAME = DBCheckpointMetrics.class.getSimpleName(); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java index 78f8a80ae24c..7f2deeb09354 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java @@ -404,7 +404,7 @@ public static List buildCAList(CertificateClient certClient, return getCAListWithRetry(() -> waitForCACerts( scmSecurityProtocolClient::listCACertificate, expectedCount), waitDuration); - } else{ + } else { return scmSecurityProtocolClient.listCACertificate(); } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java index 508320e850ff..e3b91ba8ed04 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java @@ -82,7 +82,7 @@ public static class KeyPrefixFilter implements MetadataKeyFilter { private int keysScanned = 0; private int keysHinted = 0; - public KeyPrefixFilter() {} + public KeyPrefixFilter() { } /** * KeyPrefixFilter constructor. 
It is made of positive and negative prefix diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java index ec4c0e1a255a..1d1bff1bbc63 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java @@ -47,7 +47,7 @@ public final class TransactionInfo { private TransactionInfo(String transactionInfo) { String[] tInfo = transactionInfo.split(TRANSACTION_INFO_SPLIT_KEY); - Preconditions.checkState(tInfo.length==2, + Preconditions.checkState(tInfo.length == 2, "Incorrect TransactionInfo value"); term = Long.parseLong(tInfo[0]); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java index 50ac54f9211c..c9bf38504f66 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java @@ -124,8 +124,8 @@ public static DBOptions readFromFile(String dbFileName, Env env = Env.getDefault(); DBOptions options = null; File configLocation = getConfigLocation(); - if(configLocation != null && - StringUtil.isNotBlank(configLocation.toString())){ + if (configLocation != null && + StringUtil.isNotBlank(configLocation.toString())) { Path optionsFile = Paths.get(configLocation.toString(), getOptionsFileNameFromDB(dbFileName)); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index ad48a19927a7..8b07003c9cea 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -168,7 +168,7 @@ private void applyDBDefinition(DBDefinition definition) { * @return DBStore */ public DBStore build() throws IOException { - if(StringUtil.isBlank(dbname) || (dbPath == null)) { + if (StringUtil.isBlank(dbname) || (dbPath == null)) { LOG.error("Required Parameter missing."); throw new IOException("Required parameter is missing. 
Please make sure " + "Path and DB name is provided."); @@ -340,7 +340,7 @@ private DBOptions getDBOptionsFromFile(Collection tableConfigs) { try { option = DBConfigFromFile.readFromFile(dbname, columnFamilyDescriptors); - if(option != null) { + if (option != null) { LOG.info("Using RocksDB DBOptions from {}.ini file", dbname); } } catch (IOException ex) { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java index f92306ab43ce..c7f6196a6381 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java @@ -98,7 +98,7 @@ public TypedTable( if (cacheType == CacheType.FULL_CACHE) { cache = new FullTableCache<>(); //fill cache - try(TableIterator> tableIterator = + try (TableIterator> tableIterator = iterator()) { while (tableIterator.hasNext()) { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java index 7be2921b6a11..401d644bc84b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java @@ -56,7 +56,7 @@ public int hashCode() { @Override public int compareTo(Object o) { - if(Objects.equals(key, ((CacheKey)o).key)) { + if (Objects.equals(key, ((CacheKey)o).key)) { return 0; } else { return key.toString().compareTo((((CacheKey) o).key).toString()); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java index d87e90d36d34..120a08bcee80 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java @@ -63,7 +63,7 @@ public int hashCode() { @Override public int compareTo(Object o) { - if(this.epoch == ((EpochEntry)o).epoch) { + if (this.epoch == ((EpochEntry)o).epoch) { return 0; } else if (this.epoch < ((EpochEntry)o).epoch) { return -1; diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java index 39bf08290796..a2b2e775c7ca 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java @@ -92,7 +92,7 @@ public List listCertificate(NodeType role, } @Override - public void reinitialize(SCMMetadataStore metadataStore) {} + public void reinitialize(SCMMetadataStore metadataStore) { } @Override public List getCrls(List crlIds) throws IOException { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java index 3d32a3312c79..d6df77fc3072 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java +++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
@@ -138,7 +138,7 @@ public void testWriteCRL() throws IOException, OperatorCreationException {
     assertTrue(crlFile.exists());

     try (BufferedReader reader = new BufferedReader(new InputStreamReader(
-        new FileInputStream(crlFile), UTF_8))){
+        new FileInputStream(crlFile), UTF_8))) {

       // Verify contents of the file
       String header = reader.readLine();
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
index 5b1a1f032a3c..1aab7a5de47f 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java
@@ -279,8 +279,8 @@ private void verifyServiceId(Extensions extensions) {
         GeneralNames.fromExtensions(
             extensions, Extension.subjectAlternativeName);
     GeneralName[] names = gns.getNames();
-    for(int i=0; i < names.length; i++) {
-      if(names[i].getTagNo() == GeneralName.otherName) {
+    for (int i = 0; i < names.length; i++) {
+      if (names[i].getTagNo() == GeneralName.otherName) {
         ASN1Encodable asn1Encodable = names[i].getName();
         Iterator iterator = ((DLSequence) asn1Encodable).iterator();
         while (iterator.hasNext()) {
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
index 1e3a8f4610aa..776aa4af5649 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java
@@ -165,7 +165,7 @@ public void testCACert()
     OzoneSecurityUtil.getValidInetsForCurrentHost().forEach(
         ip -> {
           builder.addIpAddress(ip.getHostAddress());
-          if(validator.isValid(ip.getCanonicalHostName())) {
+          if (validator.isValid(ip.getCanonicalHostName())) {
             builder.addDnsName(ip.getCanonicalHostName());
           }
         });
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
index 9bad0f31070c..2fef2b87369c 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
@@ -79,7 +79,7 @@ public void testGenerateKeyWithSize() throws NoSuchProviderException,
     HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration());
     KeyPair keyPair = keyGen.generateKey(4096);
     PublicKey publicKey = keyPair.getPublic();
-    if(publicKey instanceof RSAPublicKey) {
+    if (publicKey instanceof RSAPublicKey) {
       Assert.assertEquals(4096,
           ((RSAPublicKey)(publicKey)).getModulus().bitLength());
     }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
index 99fcbae80452..e78bcb00855b 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
@@ -69,7 +69,7 @@ public void builderWithOneParamV1() throws IOException {
   public void builderWithOneParamV2() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     thrown.expect(IOException.class);
@@ -82,7 +82,7 @@ public void builderWithOneParamV2() throws IOException {
   public void builderWithOpenClose() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     DBStore dbStore = DBStoreBuilder.newBuilder(conf)
@@ -97,7 +97,7 @@ public void builderWithOpenClose() throws Exception {
   public void builderWithDoubleTableName() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     // Registering a new table with the same name should replace the previous
@@ -127,7 +127,7 @@ public void builderWithDoubleTableName() throws Exception {
   public void builderWithDataWrites() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     try (DBStore dbStore = DBStoreBuilder.newBuilder(conf)
@@ -156,7 +156,7 @@ public void builderWithDataWrites() throws Exception {
   public void builderWithDiskProfileWrites() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     File newFolder = folder.newFolder();
-    if(!newFolder.exists()) {
+    if (!newFolder.exists()) {
       Assert.assertTrue(newFolder.mkdirs());
     }
     try (DBStore dbStore = DBStoreBuilder.newBuilder(conf)
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index c1eafa6111a4..ed8744ceba8e 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -74,7 +74,7 @@ public void setUp() throws Exception {
     statistics.setStatsLevel(StatsLevel.ALL);
     options = options.setStatistics(statistics);
     configSet = new HashSet<>();
-    for(String name : families) {
+    for (String name : families) {
       TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
       configSet.add(newConfig);
     }
@@ -358,7 +358,7 @@ public void testDowngrade() throws Exception {
     options.setCreateMissingColumnFamilies(true);
     configSet = new HashSet<>();
     List familiesMinusOne = families.subList(0, families.size() - 1);
-    for(String name : familiesMinusOne) {
+    for (String name : familiesMinusOne) {
       TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
       configSet.add(newConfig);
     }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java
index fea40bbf30f6..b49556df9f09 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java
@@ -92,7 +92,7 @@ public void testForeachRemainingCallsConsumerWithAllElements() {
   }

   @Test
-  public void testHasNextDependsOnIsvalid(){
+  public void testHasNextDependsOnIsvalid() {
     when(rocksDBIteratorMock.isValid()).thenReturn(true, true, false);

     RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock);
@@ -169,7 +169,7 @@ public void testGettingTheKeyIfIteratorIsValid() {
     RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock);

     byte[] key = null;
-    if(iter.hasNext()) {
+    if (iter.hasNext()) {
       ByteArrayKeyValue entry = iter.next();
       key = entry.getKey();
     }
@@ -191,7 +191,7 @@ public void testGettingTheValueIfIteratorIsValid() {
     ByteArrayKeyValue entry;
     byte[] key = null;
     byte[] value = null;
-    if(iter.hasNext()) {
+    if (iter.hasNext()) {
       entry = iter.next();
       key = entry.getKey();
       value = entry.getValue();
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index 5d007630e54e..0f1858b902d9 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -63,7 +63,7 @@ private static boolean consume(Table.KeyValue keyValue) {
     count++;
     try {
       Assert.assertNotNull(keyValue.getKey());
-    } catch(IOException ex) {
+    } catch (IOException ex) {
       Assert.fail("Unexpected Exception " + ex.toString());
     }
     return true;
@@ -80,7 +80,7 @@ public void setUp() throws Exception {
     options = options.setStatistics(statistics);

     Set configSet = new HashSet<>();
-    for(String name : families) {
+    for (String name : families) {
       TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
       configSet.add(newConfig);
     }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
index 073027f2639b..837ea27e5419 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
@@ -296,7 +296,7 @@ public void testTypedTableWithCacheWithFewDeletedOperationType()
     }

     ArrayList epochs = new ArrayList<>();
-    for (long i=0; i<=5L; i++) {
+    for (long i = 0; i <= 5L; i++) {
       epochs.add(i);
     }
     testTable.cleanupCache(epochs);
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
index a1cc7ddec5ca..860a695cda4f 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java
@@ -74,13 +74,13 @@ public void create() {
   public void testPartialTableCache() {

-    for (int i = 0; i< 10; i++) {
+    for (int i = 0; i < 10; i++) {
       tableCache.put(new CacheKey<>(Integer.toString(i)),
           new CacheValue<>(Optional.of(Integer.toString(i)), i));
     }

-    for (int i=0; i < 10; i++) {
+    for (int i = 0; i < 10; i++) {
       Assert.assertEquals(Integer.toString(i),
           tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
     }
@@ -94,7 +94,7 @@ public void testPartialTableCache() {
     // On a full table cache if some one calls cleanup it is a no-op.
     tableCache.evictCache(epochs);

-    for (int i=5; i < 10; i++) {
+    for (int i = 5; i < 10; i++) {
       Assert.assertEquals(Integer.toString(i),
           tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue());
     }
@@ -109,7 +109,7 @@ public void testPartialTableCacheWithNotContinousEntries() throws Exception {
     int cleanupCount = 0;

     ArrayList epochs = new ArrayList();
-    for (long i=0; i(Integer.toString(i))).getCacheValue());
     }
@@ -357,13 +357,13 @@ public void testPartialTableCacheParallel() throws Exception {
     final int tc = totalCount;
     Assert.assertEquals(tc - deleted, tableCache.size());
     // Check if we have remaining entries.
-    for (int i=6; i <= totalCount; i++) {
+    for (int i = 6; i <= totalCount; i++) {
       Assert.assertEquals(Integer.toString(i), tableCache.get(
           new CacheKey<>(Integer.toString(i))).getCacheValue());
     }

     epochs = new ArrayList<>();
-    for (long i=6; i<= totalCount; i++) {
+    for (long i = 6; i <= totalCount; i++) {
       epochs.add(i);
     }
@@ -373,7 +373,7 @@ public void testPartialTableCacheParallel() throws Exception {
       Assert.assertEquals(0, tableCache.size());
     } else {
       ArrayList epochs = new ArrayList<>();
-      for (long i=0; i<= totalCount; i++) {
+      for (long i = 0; i <= totalCount; i++) {
         epochs.add(i);
       }
       tableCache.evictCache(epochs);
@@ -453,7 +453,7 @@ public void testTableCacheWithNonConsecutiveEpochList() {
     tableCache.evictCache(epochs);

-    if(cacheType == TableCache.CacheType.PARTIAL_CACHE) {
+    if (cacheType == TableCache.CacheType.PARTIAL_CACHE) {
       Assert.assertTrue(tableCache.size() == 0);
       Assert.assertTrue(tableCache.getEpochEntrySet().size() == 0);
     } else {
@@ -475,7 +475,7 @@ public void testTableCacheWithNonConsecutiveEpochList() {
   private int writeToCache(int count, int startVal, long sleep)
       throws InterruptedException {
     int counter = 1;
-    while (counter <= count){
+    while (counter <= count) {
       tableCache.put(new CacheKey<>(Integer.toString(startVal)),
           new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal));
       startVal++;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 3c5fdc0f2ee2..78a87b882010 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -442,7 +442,7 @@ public void onMessage(
         commitTransactions(ackProto.getResultsList(),
             UUID.fromString(ackProto.getDnId()));
         metrics.incrBlockDeletionCommandSuccess();
-      } else if (status == CommandStatus.Status.FAILED){
+      } else if (status == CommandStatus.Status.FAILED) {
         metrics.incrBlockDeletionCommandFailure();
       } else {
         LOG.error("Delete Block Command is not executed yet.");
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
index 590d1f12e47a..3dab4ad83f70 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
@@ -100,15 +100,15 @@ public Set getReplica() {

   @Override
   public String toString() {
-    return "Container State: " +container.getState()+
-        " Replica Count: "+replica.size()+
-        " Healthy Count: "+healthyCount+
-        " Decommission Count: "+decommissionCount+
-        " Maintenance Count: "+maintenanceCount+
-        " inFlightAdd Count: "+inFlightAdd+
-        " inFightDel Count: "+inFlightDel+
-        " ReplicationFactor: "+repFactor+
-        " minMaintenance Count: "+minHealthyForMaintenance;
+    return "Container State: " + container.getState() +
+        " Replica Count: " + replica.size() +
+        " Healthy Count: " + healthyCount +
+        " Decommission Count: " + decommissionCount +
+        " Maintenance Count: " + maintenanceCount +
+        " inFlightAdd Count: " + inFlightAdd +
+        " inFightDel Count: " + inFlightDel +
+        " ReplicationFactor: " + repFactor +
+        " minMaintenance Count: " + minHealthyForMaintenance;
   }

   /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 32804d7a8d23..8a50884321b6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -164,7 +164,7 @@ private void processContainerReplicas(final DatanodeDetails datanodeDetails,
       try {
         processContainerReplica(datanodeDetails, replicaProto, publisher);
       } catch (ContainerNotFoundException e) {
-        if(unknownContainerHandleAction.equals(
+        if (unknownContainerHandleAction.equals(
             UNKNOWN_CONTAINER_ACTION_WARN)) {
           LOG.error("Received container report for an unknown container"
               + " {} from datanode {}.", replicaProto.getContainerID(),
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index 7f52a06772ea..59e73a365143 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -487,7 +487,7 @@ private void processContainer(ContainerInfo container,
       updateInflightAction(container, inflightReplication,
           action -> replicas.stream()
               .anyMatch(r -> r.getDatanodeDetails().equals(action.datanode)),
-          ()-> metrics.incrNumReplicationCmdsTimeout(),
+          () -> metrics.incrNumReplicationCmdsTimeout(),
           action -> updateCompletedReplicationMetrics(container, action));

       updateInflightAction(container, inflightDeletion,
@@ -624,7 +624,7 @@ private void updateInflightAction(final ContainerInfo container,
       final List actions = inflightActions.get(id);

       Iterator iter = actions.iterator();
-      while(iter.hasNext()) {
+      while (iter.hasNext()) {
         try {
           InflightAction a = iter.next();
           NodeStatus status = nodeManager.getNodeStatus(a.datanode);
@@ -919,7 +919,7 @@ public CompletableFuture move(ContainerID cid,
    */
   private boolean isPolicySatisfiedAfterMove(ContainerInfo cif,
                     DatanodeDetails srcDn, DatanodeDetails targetDn,
-                    final List replicas){
+                    final List replicas) {
     Set movedReplicas = replicas.stream().collect(Collectors.toSet());
     movedReplicas.removeIf(r -> r.getDatanodeDetails().equals(srcDn));
@@ -1157,7 +1157,7 @@ private void handleUnderReplicatedContainer(final ContainerInfo container,
     if (replicaSet.isSufficientlyReplicated()
         && placementStatus.isPolicySatisfied()) {
-      LOG.info("The container {} with replicas {} is sufficiently "+
+      LOG.info("The container {} with replicas {} is sufficiently " +
           "replicated and is not mis-replicated",
           container.getContainerID(), replicaSet);
       return;
@@ -1348,8 +1348,8 @@ private void deleteSrcDnForMove(final ContainerInfo cif,
     ContainerReplicaCount replicaCount =
         getContainerReplicaCount(cif, replicaSet);

-    if(!replicaSet.stream()
-        .anyMatch(r -> r.getDatanodeDetails().equals(srcDn))){
+    if (!replicaSet.stream()
+        .anyMatch(r -> r.getDatanodeDetails().equals(srcDn))) {
       // if the target is present but source disappears somehow,
       // we can consider move is successful.
       compleleteMoveFutureWithResult(cid, MoveResult.COMPLETED);
@@ -1654,7 +1654,7 @@ private NodeStatus getNodeStatus(DatanodeDetails dn) {
     try {
       return nodeManager.getNodeStatus(dn);
     } catch (NodeNotFoundException e) {
-      throw new IllegalStateException("Unable to find NodeStatus for "+dn, e);
+      throw new IllegalStateException("Unable to find NodeStatus for " + dn, e);
     }
   }
@@ -1944,7 +1944,7 @@ public void startMove(HddsProtos.ContainerID contianerIDProto,
     try {
       cid = ContainerID.getFromProtobuf(contianerIDProto);
       mp = MoveDataNodePair.getFromProtobuf(mdnpp);
-      if(!inflightMove.containsKey(cid)) {
+      if (!inflightMove.containsKey(cid)) {
         transactionBuffer.addToBuffer(moveTable, cid, mp);
         inflightMove.putIfAbsent(cid, mp);
       }
@@ -2055,8 +2055,8 @@ private void onLeaderReadyAndOutOfSafeMode() {
         boolean isTgtExist = replicas.stream()
             .anyMatch(r -> r.getDatanodeDetails().equals(v.getTgt()));

-        if(isSrcExist) {
-          if(isTgtExist) {
+        if (isSrcExist) {
+          if (isTgtExist) {
            //the former scm leader may or may not send the deletion command
            //before reelection.here, we just try to send the command again.
            deleteSrcDnForMove(cif, replicas);
@@ -2081,8 +2081,8 @@ private void onLeaderReadyAndOutOfSafeMode() {
    * complete the CompletableFuture of the container in the given Map with
    * a given MoveResult.
    */
-  private void compleleteMoveFutureWithResult(ContainerID cid, MoveResult mr){
-    if(inflightMoveFuture.containsKey(cid)) {
+  private void compleleteMoveFutureWithResult(ContainerID cid, MoveResult mr) {
+    if (inflightMoveFuture.containsKey(cid)) {
       inflightMoveFuture.get(cid).complete(mr);
       inflightMoveFuture.remove(cid);
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
index a975f04cfc0b..018f0dfd2524 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java
@@ -70,7 +70,7 @@ protected void setPotentialTargets(Collection pt) {
     potentialTargets = pt;
   }

-  private void setUpperLimit(Double upperLimit){
+  private void setUpperLimit(Double upperLimit) {
     this.upperLimit = upperLimit;
   }
@@ -199,12 +199,12 @@ private boolean canSizeEnterTarget(DatanodeDetails target, long size) {
    */
   @Override
   public void increaseSizeEntering(DatanodeDetails target, long size) {
-    if(sizeEnteringNode.containsKey(target)) {
+    if (sizeEnteringNode.containsKey(target)) {
       long totalEnteringSize = sizeEnteringNode.get(target) + size;
       sizeEnteringNode.put(target, totalEnteringSize);
       potentialTargets.removeIf(
           c -> c.getDatanodeDetails().equals(target));
-      if(totalEnteringSize < config.getMaxSizeEnteringTarget()) {
+      if (totalEnteringSize < config.getMaxSizeEnteringTarget()) {
         //reorder
         potentialTargets.add(nodeManager.getUsageInfo(target));
       }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java
index ea32cfadfbbb..995a5da111ba 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java
@@ -180,7 +180,7 @@ public boolean start(ContainerBalancerConfiguration balancerConfiguration) {
    */
   private void balance() {
     this.iterations = config.getIterations();
-    if(this.iterations == -1) {
+    if (this.iterations == -1) {
       //run balancer infinitely
       this.iterations = Integer.MAX_VALUE;
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
index 858234785e69..11a8a98dbeb7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java
@@ -361,8 +361,8 @@ public String toString() {
         "%-50s %s%n" +
         "%-50s %s%n" +
         "%-50s %d%n" +
-        "%-50s %dGB%n"+
-        "%-50s %dGB%n"+
+        "%-50s %dGB%n" +
+        "%-50s %dGB%n" +
         "%-50s %dGB%n", "Key", "Value", "Threshold", threshold,
         "Max Datanodes to Involve per Iteration(percent)",
         maxDatanodesPercentageToInvolvePerIteration,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
index 591461d88750..540d26356df5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java
@@ -33,7 +33,7 @@
 * The selection criteria for selecting source datanodes , the containers of
 * which will be moved out.
 */
-public class FindSourceGreedy implements FindSourceStrategy{
+public class FindSourceGreedy implements FindSourceStrategy {
   private static final Logger LOG =
       LoggerFactory.getLogger(FindSourceGreedy.class);
   private Map sizeLeavingNode;
@@ -84,7 +84,7 @@ private void setConfiguration(ContainerBalancerConfiguration conf) {
   @Override
   public void increaseSizeLeaving(DatanodeDetails dui, long size) {
     Long currentSize = sizeLeavingNode.get(dui);
-    if(currentSize != null) {
+    if (currentSize != null) {
       sizeLeavingNode.put(dui, currentSize + size);
       //reorder according to the latest sizeLeavingNode
       potentialSources.add(nodeManager.getUsageInfo(dui));
@@ -114,7 +114,7 @@ public DatanodeDetails getNextCandidateSourceDataNode() {
   * data nodes.
   */
   @Override
-  public void removeCandidateSourceDataNode(DatanodeDetails dui){
+  public void removeCandidateSourceDataNode(DatanodeDetails dui) {
     potentialSources.removeIf(a -> a.getDatanodeDetails().equals(dui));
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
index c799b02eee18..bf0ea7cb38e6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
@@ -47,7 +47,7 @@ private ContainerPlacementPolicyFactory() {
   public static PlacementPolicy getPolicy(
       ConfigurationSource conf, final NodeManager nodeManager,
       NetworkTopology clusterMap, final boolean fallback,
-      SCMContainerPlacementMetrics metrics) throws SCMException{
+      SCMContainerPlacementMetrics metrics) throws SCMException {
     final Class placementClass = conf
         .getClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
             OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
index 1ca68bd3ebfd..22bdf21df931 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
@@ -33,7 +33,7 @@
 /**
 * This class is for maintaining Topology aware container placement statistics.
 */
-@Metrics(about="SCM Container Placement Metrics", context = OzoneConsts.OZONE)
+@Metrics(about = "SCM Container Placement Metrics", context = OzoneConsts.OZONE)
 public class SCMContainerPlacementMetrics implements MetricsSource {
   public static final String SOURCE_NAME =
       SCMContainerPlacementMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
index d46713b602c5..2631a1d951d8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
@@ -55,7 +55,7 @@ public final class SCMContainerPlacementRackAware
   private final NetworkTopology networkTopology;
   private boolean fallback;
   private static final int RACK_LEVEL = 1;
-  private static final int MAX_RETRY= 3;
+  private static final int MAX_RETRY = 3;
   private final SCMContainerPlacementMetrics metrics;
   // Used to check the placement policy is validated in the parent class
   private static final int REQUIRED_RACKS = 2;
@@ -118,7 +118,7 @@ public List chooseDatanodes(
       mutableFavoredNodes.addAll(favoredNodes);
       mutableFavoredNodes.removeAll(excludedNodes);
     }
-    int favoredNodeNum = mutableFavoredNodes == null? 0 :
+    int favoredNodeNum = mutableFavoredNodes == null ? 0 :
         mutableFavoredNodes.size();

     List chosenNodes = new ArrayList<>();
@@ -195,7 +195,7 @@ public List chooseDatanodes(
     // in the same rack, then choose nodes on different racks, otherwise,
     // choose one on the same rack as one of excluded nodes, remaining chosen
     // are on different racks.
-    for(int i = 0; i < excludedNodesCount; i++) {
+    for (int i = 0; i < excludedNodesCount; i++) {
       for (int j = i + 1; j < excludedNodesCount; j++) {
         if (networkTopology.isSameParent(
             excludedNodes.get(i), excludedNodes.get(j))) {
@@ -257,7 +257,7 @@ private DatanodeDetails chooseNode(List excludedNodes,
     int maxRetry = MAX_RETRY;
     List excludedNodesForCapacity = null;
     boolean isFallbacked = false;
-    while(true) {
+    while (true) {
       metrics.incrDatanodeChooseAttemptCount();
       DatanodeDetails node = null;
       if (affinityNodes != null) {
@@ -348,8 +348,8 @@ private List chooseNodes(List excludedNodes,
     Preconditions.checkArgument(chosenNodes != null);
     List excludedNodeList = excludedNodes != null ?
         excludedNodes : chosenNodes;
-    int favoredNodeNum = favoredNodes == null? 0 : favoredNodes.size();
-    while(true) {
+    int favoredNodeNum = favoredNodes == null ? 0 : favoredNodes.size();
+    while (true) {
       DatanodeDetails favoredNode = favoredNodeNum > favorIndex ?
           favoredNodes.get(favorIndex) : null;
       DatanodeDetails chosenNode;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
index 51948291a4b9..f9d2ade8fd96 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
@@ -28,7 +28,7 @@
 /**
 * This class is for maintaining StorageContainerManager statistics.
 */
-@Metrics(about="Storage Container Manager Metrics", context="dfs")
+@Metrics(about = "Storage Container Manager Metrics", context = "dfs")
 public class SCMMetrics {
   public static final String SOURCE_NAME =
       SCMMetrics.class.getSimpleName();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index bbf1c700d194..c254dd723e5c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -358,7 +358,7 @@ public NavigableSet getMatchingContainerIDs(
     final ContainerQueryKey queryKey =
         new ContainerQueryKey(state, owner, repConfig);
-    if(resultCache.containsKey(queryKey)){
+    if (resultCache.containsKey(queryKey)) {
       return resultCache.get(queryKey);
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
index 5023e93a9ea1..edea6816aea7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java
@@ -216,7 +216,7 @@ private static void getPrimarySCMSelfSignedCert(CertificateClient client,
       // Persist scm cert serial ID.
       scmStorageConfig.setScmCertSerialId(subSCMCertHolder.getSerialNumber()
           .toString());
-    } catch (InterruptedException | ExecutionException| IOException |
+    } catch (InterruptedException | ExecutionException | IOException |
         CertificateException e) {
       LOG.error("Error while fetching/storing SCM signed certificate.", e);
       Thread.currentThread().interrupt();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
index e949850f6ca6..4154b62125f0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java
@@ -133,7 +133,7 @@ private static void setRaftRpcProperties(final RaftProperties properties,
             ScmConfigKeys.OZONE_SCM_HA_RATIS_LEADER_ELECTION_TIMEOUT,
             ScmConfigKeys.
                 OZONE_SCM_HA_RATIS_LEADER_ELECTION_TIMEOUT_DEFAULT,
-            TimeUnit.MILLISECONDS)+200L,
+            TimeUnit.MILLISECONDS) + 200L,
         TimeUnit.MILLISECONDS));
     Rpc.setSlownessTimeout(properties, TimeDuration.valueOf(
         ozoneConf.getTimeDuration(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
index 8e109683ba3d..bb12df6ec0fb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java
@@ -68,7 +68,7 @@ public Object invoke(final Object proxy, final Method method,
           invokeLocal(method, args);
       LOG.debug("Call: {} took {} ms", method, Time.monotonicNow() - startTime);
       return result;
-    } catch(InvocationTargetException iEx) {
+    } catch (InvocationTargetException iEx) {
       throw iEx.getCause();
     }
   }
@@ -100,7 +100,7 @@ private Object invokeRatis(Method method, Object[] args)
     // via ratis. So, in this special scenario we use RaftClient.
     final SCMRatisResponse response;
     if (method.getName().equals("storeValidCertificate") &&
-        args[args.length -1].equals(HddsProtos.NodeType.SCM)) {
+        args[args.length - 1].equals(HddsProtos.NodeType.SCM)) {
       response =
           HASecurityUtils.submitScmCertsToRatis(
               ratisHandler.getDivision().getGroup(),
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
index 95d906e73824..9fb771b7a70f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java
@@ -47,7 +47,7 @@ public final class CodecFactory {
     codecs.put(X509Certificate.class, new X509CertificateCodec());
   }

-  private CodecFactory() {}
+  private CodecFactory() { }

   public static Codec getCodec(Class type)
       throws InvalidProtocolBufferException {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
index 799e1282027b..de7fcb0b746b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
@@ -238,11 +238,11 @@ public Table getCRLSequenceIdTable() {

   @Override
   public TableIterator getAllCerts(CertificateStore.CertType certType) {
-    if(certType == CertificateStore.CertType.VALID_CERTS) {
+    if (certType == CertificateStore.CertType.VALID_CERTS) {
       return validCertsTable.iterator();
     }

-    if(certType == CertificateStore.CertType.REVOKED_CERTS) {
+    if (certType == CertificateStore.CertType.REVOKED_CERTS) {
       return revokedCertsTable.iterator();
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
index 9bfa7d6c4c05..bf2559b8ab1c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
@@ -44,7 +44,7 @@ public byte[] toPersistedFormat(X509Certificate object) throws IOException {
   @Override
   public X509Certificate fromPersistedFormat(byte[] rawData)
       throws IOException {
-    try{
+    try {
       String s = new String(rawData, StandardCharsets.UTF_8);
       return CertificateCodec.getX509Certificate(s);
     } catch (CertificateException exp) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
index eb6dc0d424f8..aa930251c4f1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
@@ -91,7 +91,7 @@ List getCommand(final UUID datanodeUuid) {
     try {
       Commands cmds = commandMap.remove(datanodeUuid);
       List cmdList = null;
-      if(cmds != null) {
+      if (cmds != null) {
         cmdList = cmds.getCommands();
         commandsInQueue -= cmdList.size() > 0 ? cmdList.size() : 0;
         // A post condition really.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
index 27a84deaffe0..47d7c5346983 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
@@ -85,20 +85,20 @@ public int getPort() {
       return port;
     }

-    private void parseHostname() throws InvalidHostStringException{
+    private void parseHostname() throws InvalidHostStringException {
       try {
         // A URI *must* have a scheme, so just create a fake one
-        URI uri = new URI("empty://"+rawHostname.trim());
+        URI uri = new URI("empty://" + rawHostname.trim());
         this.hostname = uri.getHost();
         this.port = uri.getPort();

         if (this.hostname == null) {
-          throw new InvalidHostStringException("The string "+rawHostname+
+          throw new InvalidHostStringException("The string " + rawHostname +
              " does not contain a value hostname or hostname:port definition");
         }
       } catch (URISyntaxException e) {
         throw new InvalidHostStringException(
-            "Unable to parse the hoststring "+rawHostname, e);
+            "Unable to parse the hoststring " + rawHostname, e);
       }
     }
   }
@@ -138,7 +138,7 @@ private List mapHostnamesToDatanodes(List hosts)
         results.add(found.get(0));
       } else if (found.size() > 1) {
         DatanodeDetails match = null;
-        for(DatanodeDetails dn : found) {
+        for (DatanodeDetails dn : found) {
           if (validateDNPortMatch(host.getPort(), dn)) {
             match = dn;
             break;
@@ -231,7 +231,7 @@ public synchronized List decommissionNodes(
         // NodeNotFoundException here expect if the node is remove in the
        // very short window between validation and starting decom. Therefore
        // log a warning and ignore the exception
-        LOG.warn("The host {} was not found in SCM. Ignoring the request to "+
+        LOG.warn("The host {} was not found in SCM. Ignoring the request to " +
            "decommission it", dn.getHostName());
        errors.add(new DatanodeAdminError(dn.getHostName(),
            "The host was not found in SCM"));
@@ -274,12 +274,12 @@ public synchronized void startDecommission(DatanodeDetails dn)
           dn, NodeOperationalState.DECOMMISSIONING);
       monitor.startMonitoring(dn);
     } else if (nodeStatus.isDecommission()) {
-      LOG.info("Start Decommission called on node {} in state {}. Nothing to "+
+      LOG.info("Start Decommission called on node {} in state {}. Nothing to " +
          "do.", dn, opState);
     } else {
       LOG.error("Cannot decommission node {} in state {}", dn, opState);
-      throw new InvalidNodeStateException("Cannot decommission node "+
-          dn +" in state "+ opState);
+      throw new InvalidNodeStateException("Cannot decommission node " +
+          dn + " in state " + opState);
     }
   }
@@ -296,7 +296,7 @@ public synchronized List recommissionNodes(
         // NodeNotFoundException here expect if the node is remove in the
        // very short window between validation and starting decom. Therefore
        // log a warning and ignore the exception
-        LOG.warn("Host {} was not found in SCM. Ignoring the request to "+
+        LOG.warn("Host {} was not found in SCM. Ignoring the request to " +
            "recommission it.", dn.getHostName());
        errors.add(new DatanodeAdminError(dn.getHostName(),
            "The host was not found in SCM"));
@@ -306,7 +306,7 @@ public synchronized List recommissionNodes(
   }

   public synchronized void recommission(DatanodeDetails dn)
-      throws NodeNotFoundException{
+      throws NodeNotFoundException {
     NodeStatus nodeStatus = getNodeStatus(dn);
     NodeOperationalState opState = nodeStatus.getOperationalState();
     if (opState != NodeOperationalState.IN_SERVICE) {
@@ -315,7 +315,7 @@ public synchronized void recommission(DatanodeDetails dn)
       monitor.stopMonitoring(dn);
       LOG.info("Queued node {} for recommission", dn);
     } else {
-      LOG.info("Recommission called on node {} with state {}. "+
+      LOG.info("Recommission called on node {} with state {}. " +
          "Nothing to do.", dn, opState);
     }
   }
@@ -333,7 +333,7 @@ public synchronized List startMaintenanceNodes(
         // NodeNotFoundException here expect if the node is remove in the
        // very short window between validation and starting decom. Therefore
        // log a warning and ignore the exception
-        LOG.warn("The host {} was not found in SCM. Ignoring the request to "+
+        LOG.warn("The host {} was not found in SCM. Ignoring the request to " +
            "start maintenance on it", dn.getHostName());
       } catch (InvalidNodeStateException e) {
         errors.add(new DatanodeAdminError(dn.getHostName(), e.getMessage()));
@@ -360,12 +360,12 @@ public synchronized void startMaintenance(DatanodeDetails dn, int endInHours)
       monitor.startMonitoring(dn);
       LOG.info("Starting Maintenance for node {}", dn);
     } else if (nodeStatus.isMaintenance()) {
-      LOG.info("Starting Maintenance called on node {} with state {}. "+
+      LOG.info("Starting Maintenance called on node {} with state {}. " +
          "Nothing to do.", dn, opState);
     } else {
       LOG.error("Cannot start maintenance on node {} in state {}",
           dn, opState);
-      throw new InvalidNodeStateException("Cannot start maintenance on node "+
-          dn +" in state "+ opState);
+      throw new InvalidNodeStateException("Cannot start maintenance on node " +
+          dn + " in state " + opState);
     }
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 4e1a9649ea45..6127bb0b4d74 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -324,7 +324,7 @@ default Collection getPeerList(DatanodeDetails dn) {
     return null;
   }

-  default HDDSLayoutVersionManager getLayoutVersionManager(){
+  default HDDSLayoutVersionManager getLayoutVersionManager() {
     return null;
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index e752454e0093..85dd6a067379 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -728,7 +728,7 @@ public void checkNodesHealth() {
         (lastHbTime) -> lastHbTime < staleNodeDeadline;

     try {
-      for(DatanodeInfo node : nodeStateMap.getAllDatanodeInfos()) {
+      for (DatanodeInfo node : nodeStateMap.getAllDatanodeInfos()) {
         NodeStatus status = nodeStateMap.getNodeStatus(node.getUuid());
         switch (status.getHealth()) {
         case HEALTHY:
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
index a9164c72973a..03dd2e2b6dea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java
@@ -209,8 +209,8 @@ public int hashCode() {

   @Override
   public String toString() {
-    return "OperationalState: "+operationalState+" Health: "+health+
-        " OperationStateExpiry: "+opStateExpiryEpochSeconds;
+    return "OperationalState: " + operationalState + " Health: " + health +
+        " OperationStateExpiry: " + opStateExpiryEpochSeconds;
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 68c26972371d..8899b13b6f46 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -268,7 +268,7 @@ public NodeStatus getNodeStatus(DatanodeDetails datanodeDetails)
    */
   @Override
   public void setNodeOperationalState(DatanodeDetails datanodeDetails,
-      NodeOperationalState newState) throws NodeNotFoundException{
+      NodeOperationalState newState) throws NodeNotFoundException {
     setNodeOperationalState(datanodeDetails, newState, 0);
   }
@@ -283,7 +283,7 @@ public void setNodeOperationalState(DatanodeDetails datanodeDetails,
   @Override
   public void setNodeOperationalState(DatanodeDetails datanodeDetails,
       NodeOperationalState newState, long opStateExpiryEpocSec)
-      throws NodeNotFoundException{
+      throws NodeNotFoundException {
     nodeStateManager.setNodeOperationalState(
         datanodeDetails, newState, opStateExpiryEpocSec);
   }
@@ -612,7 +612,7 @@ public void processLayoutVersionReport(DatanodeDetails datanodeDetails,
         // send Finalize command multiple times.
         scmNodeEventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND,
             new CommandForDatanode<>(datanodeDetails.getUuid(), finalizeCmd));
-      } catch(NotLeaderException ex) {
+      } catch (NotLeaderException ex) {
         LOG.warn("Skip sending finalize upgrade command since current SCM is" +
             "not leader.", ex);
       }
@@ -764,7 +764,7 @@ public Map> getNodeCount() {
     for (DatanodeInfo dni : nodeStateManager.getAllNodes()) {
       NodeStatus status = dni.getNodeStatus();
       nodes.get(status.getOperationalState().name())
-          .compute(status.getHealth().name(), (k, v) -> v+1);
+          .compute(status.getHealth().name(), (k, v) -> v + 1);
     }
     return nodes;
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
index 6eb73595d2ef..b727580d1b73 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
@@ -129,12 +129,12 @@ public void getMetrics(MetricsCollector collector, boolean all) {
     * ...
     */
     MetricsRecordBuilder metrics = collector.addRecord(registry.info());
-    for(Map.Entry> e : nodeCount.entrySet()) {
-      for(Map.Entry h : e.getValue().entrySet()) {
+    for (Map.Entry> e : nodeCount.entrySet()) {
+      for (Map.Entry h : e.getValue().entrySet()) {
         metrics.addGauge(
             Interns.info(
-                StringUtils.camelize(e.getKey()+"_"+h.getKey()+"_nodes"),
-                "Number of "+e.getKey()+" "+h.getKey()+" datanodes"),
+                StringUtils.camelize(e.getKey() + "_" + h.getKey() + "_nodes"),
+                "Number of " + e.getKey() + " " + h.getKey() + " datanodes"),
             h.getValue());
       }
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index 1b0e5b56e776..ed45ed06ebe6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -136,7 +136,7 @@ private void registerMXBean() {

   //TODO: Unregister call should happen as a part of SCMNodeManager shutdown.
   private void unregisterMXBean() {
-    if(this.scmNodeStorageInfoBean != null) {
+    if (this.scmNodeStorageInfoBean != null) {
       MBeans.unregister(this.scmNodeStorageInfoBean);
       this.scmNodeStorageInfoBean = null;
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
index 57a377d998f4..b934d977fa65 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
@@ -99,7 +99,7 @@ public void removeDatanode(UUID datanodeID) {
   Set getObjects(UUID datanode) {
     Preconditions.checkNotNull(datanode);
     final Set s = dn2ObjectMap.get(datanode);
-    return s != null? Collections.unmodifiableSet(s): Collections.emptySet();
+    return s != null ?
+        Collections.unmodifiableSet(s) : Collections.emptySet();
   }

   public ReportResult.ReportResultBuilder newBuilder() {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
index 0a3e1377d6f1..3146e8fede4a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
@@ -330,7 +330,7 @@ public void addContainer(final UUID uuid,
   }

   public void setContainers(UUID uuid, Set containers)
-      throws NodeNotFoundException{
+      throws NodeNotFoundException {
     lock.writeLock().lock();
     try {
       checkIfNodeExist(uuid);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
index 9b9e206e05a3..954d212ac22e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
@@ -562,7 +562,7 @@ public void close() throws IOException {
       backgroundPipelineCreator.stop();
     }

-    if(pmInfoBean != null) {
+    if (pmInfoBean != null) {
       MBeans.unregister(this.pmInfoBean);
       pmInfoBean = null;
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index 85ea5a558d70..64815a92d3ee 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -90,12 +90,12 @@ public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode,
     for (PipelineReport report : pipelineReport.getPipelineReportList()) {
       try {
         processPipelineReport(report, dn, publisher);
-      } catch(NotLeaderException ex) {
+      } catch (NotLeaderException ex) {
         // Avoid NotLeaderException logging which happens when processing
         // pipeline report on followers.
       } catch (PipelineNotFoundException e) {
         LOGGER.error("Could not find pipeline {}", report.getPipelineID());
-      } catch(IOException e) {
+      } catch (IOException e) {
         LOGGER.error("Could not process pipeline report={} from dn={}.",
             report, dn, e);
       }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index 847b50e4c7e2..bbdabf0c476c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -91,7 +91,7 @@ static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID,
         ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
     final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
     final RaftPeer p = RatisHelper.toRaftPeer(dn);
-    try(RaftClient client = RatisHelper
+    try (RaftClient client = RatisHelper
         .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p,
             retryPolicy, grpcTlsConfig, ozoneConf)) {
       client.getGroupManagementApi(p.getId())
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java
index 356d047a5e5a..f130eedef50b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java
@@ -44,7 +44,7 @@ public WritableContainerFactory(StorageContainerManager scm) {
   public ContainerInfo getContainer(final long size,
       ReplicationConfig repConfig, String owner, ExcludeList excludeList)
       throws IOException {
-    switch(repConfig.getReplicationType()) {
+    switch (repConfig.getReplicationType()) {
     case STAND_ALONE:
       return standaloneProvider
           .getContainer(size, repConfig, owner, excludeList);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 037062341e34..5d6ee5b3117c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -758,7 +758,7 @@ public StartContainerBalancerResponseProto startContainerBalancer(
     Optional maxSizeEnteringTargetInGB = Optional.empty();
     Optional maxSizeLeavingSourceInGB = Optional.empty();

-    if(request.hasThreshold()) {
+    if (request.hasThreshold()) {
       threshold = Optional.of(request.getThreshold());
     }

@@ -778,15 +778,15 @@ public StartContainerBalancerResponseProto startContainerBalancer(
               100));
     }

-    if(request.hasMaxSizeToMovePerIterationInGB()) {
+    if (request.hasMaxSizeToMovePerIterationInGB()) {
       maxSizeToMovePerIterationInGB =
           Optional.of(request.getMaxSizeToMovePerIterationInGB());
     }
-    if(request.hasMaxSizeEnteringTargetInGB()) {
+    if (request.hasMaxSizeEnteringTargetInGB()) {
       maxSizeEnteringTargetInGB =
           Optional.of(request.getMaxSizeEnteringTargetInGB());
     }
-    if(request.hasMaxSizeLeavingSourceInGB()) {
+    if (request.hasMaxSizeLeavingSourceInGB()) {
       maxSizeLeavingSourceInGB =
           Optional.of(request.getMaxSizeLeavingSourceInGB());
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
index ef9d6f438a50..2a1c8956e94e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
@@ -42,7 +42,7 @@
 * Class defining Safe mode exit criteria for Containers.
 */
 public class ContainerSafeModeRule extends
-    SafeModeExitRule{
+    SafeModeExitRule {

   public static final Logger LOG =
       LoggerFactory.getLogger(ContainerSafeModeRule.class);
@@ -115,7 +115,7 @@ protected synchronized void process(
     reportsProto.getReport().getReportsList().forEach(c -> {
       if (containerMap.containsKey(c.getContainerID())) {
-        if(containerMap.remove(c.getContainerID()) != null) {
+        if (containerMap.remove(c.getContainerID()) != null) {
           containerWithMinReplicas.getAndAdd(1);
           getSafeModeMetrics()
               .incCurrentContainersWithOneReplicaReportedCount();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
index 0c4ce84a9ce9..b03fedb647e0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
@@ -33,7 +33,7 @@
 * registered with SCM.
 */
 public class DataNodeSafeModeRule extends
-    SafeModeExitRule{
+    SafeModeExitRule {

   // Min DataNodes required to exit safe mode.
   private int requiredDns;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index c7b831bfe1e0..fb4ba7db65ec 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -204,7 +204,7 @@ public List allocateBlock(
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(
             buildAuditMessageForSuccess(SCMAction.ALLOCATE_BLOCK, auditMap)
         );
@@ -274,7 +274,7 @@ public List deleteKeyBlocks(
   @Override
   public ScmInfo getScmInfo() throws IOException {
     boolean auditSuccess = true;
-    try{
+    try {
       ScmInfo.Builder builder =
           new ScmInfo.Builder()
               .setClusterId(scm.getScmStorageConfig().getClusterID())
@@ -287,7 +287,7 @@ public ScmInfo getScmInfo() throws IOException {
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null)
         );
@@ -305,7 +305,7 @@ public boolean addSCM(AddSCMRequest request) throws IOException {
     auditMap.put("cluster", String.valueOf(request.getClusterId()));
     auditMap.put("addr", String.valueOf(request.getRatisAddr()));
     boolean auditSuccess = true;
-    try{
+    try {
       return scm.getScmHAManager().addSCM(request);
     } catch (Exception ex) {
       auditSuccess = false;
@@ -314,7 +314,7 @@ public boolean addSCM(AddSCMRequest request) throws IOException {
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.ADD_SCM, auditMap)
         );
@@ -326,12 +326,12 @@ public boolean addSCM(AddSCMRequest request) throws IOException {
   public List sortDatanodes(List nodes,
       String clientMachine) throws IOException {
     boolean auditSuccess = true;
-    try{
+    try {
       NodeManager nodeManager = scm.getScmNodeManager();
       Node client = null;
       List possibleClients =
           nodeManager.getNodesByAddress(clientMachine);
-      if (possibleClients.size()>0){
+      if (possibleClients.size() > 0) {
         client = possibleClients.get(0);
       }
       List nodeList = new ArrayList();
@@ -353,7 +353,7 @@ public List sortDatanodes(List nodes,
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.SORT_DATANODE, null)
         );
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 59e4b2bb5118..9388a9813820 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -222,7 +222,7 @@ public ContainerInfo getContainer(long containerID) throws IOException {
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.GET_CONTAINER, auditMap)
         );
@@ -363,7 +363,7 @@ public List getExistContainerWithPipelinesInBatch(
   * replication factor.
   */
   private boolean hasRequiredReplicas(ContainerInfo contInfo) {
-    try{
+    try {
       return getScm().getContainerManager()
           .getContainerReplicas(contInfo.containerID())
           .size() >= contInfo.getReplicationConfig().getRequiredNodes();
@@ -458,7 +458,7 @@ public List listContainer(long startContainerID,
           buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap));
       }
@@ -483,7 +483,7 @@ public void deleteContainer(long containerID) throws IOException {
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(
             buildAuditMessageForSuccess(SCMAction.DELETE_CONTAINER, auditMap)
         );
@@ -643,7 +643,7 @@ public void closePipeline(HddsProtos.PipelineID pipelineID)
   @Override
   public ScmInfo getScmInfo() throws IOException {
     boolean auditSuccess = true;
-    try{
+    try {
       ScmInfo.Builder builder =
           new ScmInfo.Builder()
               .setClusterId(scm.getScmStorageConfig().getClusterID())
@@ -667,7 +667,7 @@ public ScmInfo getScmInfo() throws IOException {
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null)
         );
@@ -936,7 +936,8 @@ private HddsProtos.DatanodeUsageInfoProto getUsageInfoFromDatanodeDetails(
   */
   @Override
   public List getDatanodeUsageInfo(
-      boolean mostUsed, int count) throws IOException, IllegalArgumentException{
+      boolean mostUsed, int count)
+      throws IOException, IllegalArgumentException {

     // check admin authorisation
     try {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 248c90c36423..fcadbf7462f6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -204,7 +204,7 @@ public SCMVersionResponseProto getVersion(SCMVersionRequestProto
           buildAuditMessageForFailure(SCMAction.GET_VERSION, null, ex));
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.GET_VERSION, null));
       }
@@ -249,7 +249,7 @@ public SCMRegisteredResponseProto register(
           buildAuditMessageForFailure(SCMAction.REGISTER, auditMap, ex));
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(
             buildAuditMessageForSuccess(SCMAction.REGISTER, auditMap));
       }
@@ -284,7 +284,7 @@ public SCMHeartbeatResponseProto sendHeartbeat(
       );
       throw ex;
     } finally {
-      if(auditSuccess) {
+      if (auditSuccess) {
         AUDIT.logWriteSuccess(
             buildAuditMessageForSuccess(SCMAction.SEND_HEARTBEAT, auditMap)
         );
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 85efdfb9ac52..2fb788d5d7cf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -557,7 +557,7 @@ private void initializeSystemManagers(OzoneConfiguration conf,
           .build();
     }

-    if(configurator.getScmNodeManager() != null) {
+    if (configurator.getScmNodeManager() != null) {
       scmNodeManager = configurator.getScmNodeManager();
     } else {
       scmNodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue,
@@ -616,7 +616,7 @@ private void initializeSystemManagers(OzoneConfiguration conf,
           scmHAManager, getScmMetadataStore().getMoveTable());
     }

-    if(configurator.getScmSafeModeManager() != null) {
+    if (configurator.getScmSafeModeManager() != null) {
       scmSafeModeManager = configurator.getScmSafeModeManager();
     } else {
       scmSafeModeManager = new SCMSafeModeManager(conf,
@@ -642,7 +642,7 @@ private void initializeCAnSecurityProtocol(OzoneConfiguration conf,
     // TODO: Support Certificate Server loading via Class Name loader.
     // So it is easy to use different Certificate Servers if needed.
-    if(this.scmMetadataStore == null) {
+    if (this.scmMetadataStore == null) {
       LOG.error("Cannot initialize Certificate Server without a valid meta " +
           "data layer.");
       throw new SCMException("Cannot initialize CA without a valid metadata " +
@@ -796,7 +796,7 @@ private ContainerTokenSecretManager createContainerTokenSecretManager(
   private void initalizeMetadataStore(OzoneConfiguration conf,
       SCMConfigurator configurator) throws IOException {
-    if(configurator.getMetadataStore() != null) {
+    if (configurator.getMetadataStore() != null) {
       scmMetadataStore = configurator.getMetadataStore();
     } else {
       scmMetadataStore = new SCMMetadataStoreImpl(conf);
@@ -977,7 +977,7 @@ public static boolean scmBootstrap(OzoneConfiguration conf)
     // will be persisted into the version file once this node gets added
     // to existing SCM ring post node regular start up.
-    if(OzoneSecurityUtil.isSecurityEnabled(conf)) {
+    if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
       HASecurityUtils.initializeSecurity(scmStorageConfig, config,
           getScmAddress(scmhaNodeDetails, conf), false);
     }
@@ -1830,7 +1830,7 @@ public String getSCMNodeId() {
   }

   public StatusAndMessages finalizeUpgrade(String upgradeClientID)
-      throws IOException{
+      throws IOException {
     return upgradeFinalizer.finalize(upgradeClientID, this);
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
index 1d8859fac348..030601a5fd3a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
@@ -179,7 +179,7 @@ public void start(OzoneConfiguration conf) throws Exception {

   @Override
   public boolean init(OzoneConfiguration conf, String clusterId)
-      throws IOException{
+      throws IOException {
     return StorageContainerManager.scmInit(conf, clusterId);
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index 1fc4f7625ef7..feb58fc09831 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -613,7 +613,7 @@ public static StorageContainerManager getScm(OzoneConfiguration conf,
     conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0");
     conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
     SCMStorageConfig scmStore = new SCMStorageConfig(conf);
-    if(scmStore.getState() != Storage.StorageState.INITIALIZED) {
Storage.StorageState.INITIALIZED) { String clusterId = UUID.randomUUID().toString(); String scmId = UUID.randomUUID().toString(); scmStore.setClusterId(clusterId); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java index 20c046856736..0c9222d06195 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java @@ -43,7 +43,7 @@ public class TestHddsServerUtil { public Timeout timeout = Timeout.seconds(300); @Rule - public ExpectedException thrown= ExpectedException.none(); + public ExpectedException thrown = ExpectedException.none(); /** * Verify that the datanode endpoint is parsed correctly. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java index f7386500fd16..8394c5a6b7bd 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java @@ -56,7 +56,7 @@ public class TestHddsServerUtils { public Timeout timeout = Timeout.seconds(300);; @Rule - public ExpectedException thrown= ExpectedException.none(); + public ExpectedException thrown = ExpectedException.none(); /** * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index e078b1f30a31..e8bd07bdddbc 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -103,7 +103,7 @@ public class TestBlockManager { public ExpectedException thrown = ExpectedException.none(); @Rule - public TemporaryFolder folder= new TemporaryFolder(); + public TemporaryFolder folder = new TemporaryFolder(); private SCMMetadataStore scmMetadataStore; private ReplicationConfig replicationConfig; @@ -452,7 +452,7 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { // the pipeline per raft log disk config is set to 1 by default int numContainers = (int)Math.ceil((double) (numContainerPerOwnerInPipeline * - numContainerPerOwnerInPipeline)/numMetaDataVolumes); + numContainerPerOwnerInPipeline) / numMetaDataVolumes); Assert.assertTrue(numContainers == pipelineManager. 
getNumberOfContainers(pipeline.getId())); Assert.assertTrue( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index f6cab87daa1a..9f7d9c295f85 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -532,7 +532,7 @@ public void addContainer(DatanodeDetails dd, @Override public void addDatanodeCommand(UUID dnId, SCMCommand command) { - if(commandMap.containsKey(dnId)) { + if (commandMap.containsKey(dnId)) { List commandList = commandMap.get(dnId); Preconditions.checkNotNull(commandList); commandList.add(command); @@ -601,7 +601,7 @@ public int getCommandCount(DatanodeDetails dd) { } public void clearCommandQueue(UUID dnId) { - if(commandMap.containsKey(dnId)) { + if (commandMap.containsKey(dnId)) { commandMap.put(dnId, new LinkedList<>()); } } @@ -799,7 +799,7 @@ public List getNodesByAddress(String address) { if (uuids == null) { return results; } - for(String uuid : uuids) { + for (String uuid : uuids) { DatanodeDetails dn = getNodeByUuid(uuid); if (dn != null) { results.add(dn); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java index c015f1852608..3d8551d8fceb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java @@ -82,7 +82,7 @@ public void setNodeStatus(DatanodeDetails dd, NodeStatus status) { */ public void setPipelines(DatanodeDetails dd, int count) { Set pipelines = new HashSet<>(); - for (int i=0; i t1 = executor.submit(() -> fullReportHandler.onMessage(fcr, publisher)); Future t2 = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java index e6f32e081ac6..b23482b91573 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java @@ -186,7 +186,7 @@ public void testCalculationOfUtilization() { // modify this after balancer is fully completed try { Thread.sleep(100); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } expectedUnBalancedNodes = determineExpectedUnBalancedNodes(randomThreshold); @@ -219,7 +219,7 @@ public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced() { // modify this after balancer is fully completed try { Thread.sleep(100); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Assert.assertEquals(0, containerBalancer.getUnBalancedNodes().size()); @@ -244,7 +244,7 @@ public void containerBalancerShouldObeyMaxDatanodesToInvolveLimit() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } Assert.assertFalse( containerBalancer.getCountDatanodesInvolvedPerIteration() > @@ -266,7 +266,7 @@ public void 
containerBalancerShouldSelectOnlyClosedContainers() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); @@ -286,7 +286,7 @@ public void containerBalancerShouldSelectOnlyClosedContainers() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); // check whether all selected containers are closed @@ -310,7 +310,7 @@ public void containerBalancerShouldObeyMaxSizeToMoveLimit() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } // balancer should not have moved more size than the limit Assert.assertFalse(containerBalancer.getSizeMovedPerIteration() > @@ -330,7 +330,7 @@ public void targetDatanodeShouldNotAlreadyContainSelectedContainer() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Map sourceToTargetMap = @@ -357,7 +357,7 @@ public void containerMoveSelectionShouldFollowPlacementPolicy() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Map sourceToTargetMap = @@ -428,7 +428,7 @@ public void selectedContainerShouldNotAlreadyHaveBeenSelected() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Set containers = new HashSet<>(); @@ -455,7 +455,7 @@ public void balancerShouldNotSelectConfiguredExcludeContainers() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Set excludeContainers = @@ -490,7 +490,7 @@ public void balancerShouldObeyMaxSizeEnteringTargetLimit() { // modify this after balancer is fully completed try { Thread.sleep(500); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); // balancer should have identified unbalanced nodes @@ -513,7 +513,7 @@ public void testMetrics() { // modify this after balancer is fully completed try { Thread.sleep(500); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); ContainerBalancerMetrics metrics = containerBalancer.getMetrics(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index 37f4594a844d..a830a71b4ac3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -550,7 +550,7 @@ public void testOutOfServiceNodesNotSelected() { dn.setNodeStatus(new NodeStatus(DECOMMISSIONED, HEALTHY)); } - for (int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { // Set a random DN to in_service and ensure it is always picked int index = 
new Random().nextInt(dnInfos.size()); dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java index 9ec10fc9138a..5f82cce53295 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java @@ -64,7 +64,7 @@ public void testSCMHAConfig() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); String[] nodes = new String[] {"scm1", "scm2", "scm3"}; - conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId, + conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." + scmServiceId, "scm1,scm2,scm3"); conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1"); @@ -72,14 +72,14 @@ public void testSCMHAConfig() throws Exception { int i = 1; for (String nodeId : nodes) { conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_PORT_KEY, scmServiceId, nodeId), port); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY, scmServiceId, nodeId), "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_PORT_KEY, scmServiceId, nodeId), port); conf.set(ConfUtils.addKeySuffixes( @@ -87,26 +87,26 @@ public void testSCMHAConfig() throws Exception { "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_PORT_KEY, scmServiceId, nodeId), port); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_BIND_HOST_KEY, scmServiceId, nodeId), "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY, scmServiceId, nodeId), port); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_BIND_HOST_KEY, scmServiceId, nodeId), "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_BIND_HOST_KEY, scmServiceId, nodeId), "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DB_DIRS, - scmServiceId, nodeId), "/var/scm-metadata"+ i++); + scmServiceId, nodeId), "/var/scm-metadata" + i++); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_ADDRESS_KEY, scmServiceId, nodeId), "localhost"); @@ -121,7 +121,7 @@ public void testSCMHAConfig() throws Exception { port = 9880; // Validate configs. 
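// Context sketch (illustration only, not part of this change): the
// assertions that follow read back per-node values written via
// ConfUtils.addKeySuffixes. The helper below is an assumed
// re-implementation of that key-suffixing idea, not the real ConfUtils code.
public final class KeySuffixSketch {

  // ("ozone.scm.address", "serviceId", "scm1") -> "ozone.scm.address.serviceId.scm1"
  static String addKeySuffixes(String baseKey, String... suffixes) {
    StringBuilder key = new StringBuilder(baseKey);
    for (String suffix : suffixes) {
      key.append('.').append(suffix);
    }
    return key.toString();
  }

  public static void main(String[] args) {
    // Each SCM node in the HA service gets its own fully suffixed key.
    for (String nodeId : new String[] {"scm1", "scm2", "scm3"}) {
      System.out.println(
          addKeySuffixes("ozone.scm.address", "serviceId", nodeId));
    }
  }
}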
- Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals(port, @@ -132,7 +132,7 @@ public void testSCMHAConfig() throws Exception { scmServiceId, "scm1"))); - Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes( OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals(port, conf.getInt(ConfUtils.addKeySuffixes( @@ -142,7 +142,7 @@ public void testSCMHAConfig() throws Exception { OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY, scmServiceId, "scm1"))); - Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals(port, @@ -152,7 +152,7 @@ public void testSCMHAConfig() throws Exception { ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_BIND_HOST_KEY, scmServiceId, "scm1"))); - Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals(port, @@ -163,7 +163,7 @@ public void testSCMHAConfig() throws Exception { "scm1"))); - Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals("172.28.9.1", @@ -192,7 +192,7 @@ public void testHAWithSamePortConfig() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); String[] nodes = new String[] {"scm1", "scm2", "scm3"}; - conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId, + conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." 
+ scmServiceId, "scm1,scm2,scm3"); conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1"); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java index f5913aa28e87..c352ca463f2c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java @@ -49,7 +49,7 @@ public void testEncodeAndDecodeSuccess() throws Exception { } @Test(expected = InvalidProtocolBufferException.class) - public void testEncodeWithNonProto() throws Exception{ + public void testEncodeWithNonProto() throws Exception { PipelineID pipelineID = PipelineID.randomId(); // Non proto args Object[] args = new Object[] {pipelineID}; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java index 5543be5832b1..834b539ae650 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java @@ -57,7 +57,7 @@ public void testPersistingFFAsUUID() throws Exception { @Test public void testPersistingARandomUUID() throws Exception { - for (int i=0; i<100; i++) { + for (int i = 0; i < 100; i++) { UUID uuid = UUID.randomUUID(); long mask = 0x0000_0000_0000_00FFL; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index b307b576c030..63ecb29f0383 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -212,7 +212,7 @@ public void testContainerPlacementCapacity() throws IOException, assertEquals(remaining * nodeCount, (long) scmNodeManager.getStats().getRemaining().get()); - xceiverClientManager= new XceiverClientManager(conf); + xceiverClientManager = new XceiverClientManager(conf); ContainerInfo container = containerManager .allocateContainer( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 80b72409295a..8c69116fdf2f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -105,7 +105,7 @@ public void testNodeCanBeQueuedAndCancelled() { */ @Test public void testClosePipelinesEventFiredWhenAdminStarted() - throws NodeNotFoundException{ + throws NodeNotFoundException { DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails(); nodeManager.register(dn1, new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING, @@ -425,7 +425,7 @@ public void testCancelledNodesMovedToInService() */ private Set generateContainers(int count) { Set containers = new HashSet<>(); - for (int i=0; i dns = generateDatanodes(); // Try to decommission a host that does exist, but give incorrect port try { - 
decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()+":10")); + decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress() + ":10")); fail("InvalidHostStringException expected"); } catch (InvalidHostStringException e) { } @@ -131,7 +131,7 @@ public void testAnyInvalidHostThrowsException() // that does not exist try { decom.decommissionNodes(Arrays.asList( - dns.get(0).getIpAddress()+":10")); + dns.get(0).getIpAddress() + ":10")); fail("InvalidHostStringException expected"); } catch (InvalidHostStringException e) { } @@ -159,7 +159,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // and we hardcoded ports to 3456, 4567, 5678 DatanodeDetails multiDn = dns.get(10); String multiAddr = - multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue(); + multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue(); decom.decommissionNodes(Arrays.asList(multiAddr)); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(multiDn).getOperationalState()); @@ -202,7 +202,7 @@ public void testNodesCanBePutIntoMaintenanceAndRecommissioned() // and we hardcoded ports to 3456, 4567, 5678 DatanodeDetails multiDn = dns.get(10); String multiAddr = - multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue(); + multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue(); decom.startMaintenanceNodes(Arrays.asList(multiAddr), 100); assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, nodeManager.getNodeStatus(multiDn).getOperationalState()); @@ -296,7 +296,7 @@ private SCMNodeManager createNodeManager(OzoneConfiguration config) */ private List generateDatanodes() { List dns = new ArrayList<>(); - for (int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); dns.add(dn); nodeManager.register(dn, null, null); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java index da87d1861b00..31ecbf6fff86 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java @@ -304,7 +304,7 @@ public Event getLastEvent() { if (events.size() == 0) { return null; } else { - return events.get(events.size()-1); + return events.get(events.size() - 1); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index ab87a59e27e3..ea8a184acff3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -457,7 +457,7 @@ private void assertPipelines(HddsProtos.ReplicationFactor factor, // these pipelines use nodes outside of allowedDNs. if (success) { for (Pipeline pipeline: pipelines) { - for(DatanodeDetails pipelineDN: pipeline.getNodes()) { + for (DatanodeDetails pipelineDN: pipeline.getNodes()) { // Do not wait for this condition to be true. Disallowed DNs should // never be used once we have the expected number of pipelines. 
if (!allowedDnIds.contains(pipelineDN.getUuidString())) { @@ -1762,7 +1762,7 @@ public void testGetNodeInfo() final int nodeCount = 6; SCMNodeManager nodeManager = createNodeManager(conf); - for (int i=0; i(datanodes.size()), nodesRequired, 0, 10 * OzoneConsts.TB); Assert.fail("SCMException should have been thrown."); - } catch(SCMException ex) { + } catch (SCMException ex) { Assert.assertTrue(ex.getMessage().contains(expectedMessageSubstring)); } @@ -260,13 +260,13 @@ public void testChooseNodeNotEnoughSpace() throws IOException { new ArrayList<>(datanodes.size()), nodesRequired, 10 * OzoneConsts.TB, 0); Assert.fail("SCMException should have been thrown."); - } catch(SCMException ex) { + } catch (SCMException ex) { Assert.assertTrue(ex.getMessage().contains(expectedMessageSubstring)); } } @Test - public void testPickLowestLoadAnchor() throws IOException{ + public void testPickLowestLoadAnchor() throws IOException { List healthyNodes = nodeManager .getNodes(NodeStatus.inServiceHealthy()); @@ -343,7 +343,7 @@ public void testFallBackPickNodes() { } @Test - public void testRackAwarenessNotEnabledWithFallBack() throws SCMException{ + public void testRackAwarenessNotEnabledWithFallBack() throws SCMException { DatanodeDetails anchor = placementPolicy .chooseNode(nodesWithOutRackAwareness); DatanodeDetails randomNode = placementPolicy @@ -425,12 +425,12 @@ private List overWriteLocationInNodes( } @Test - public void testHeavyNodeShouldBeExcluded() throws SCMException{ + public void testHeavyNodeShouldBeExcluded() throws SCMException { List healthyNodes = nodeManager.getNodes(NodeStatus.inServiceHealthy()); int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); // only minority of healthy NODES are heavily engaged in pipelines. - int minorityHeavy = healthyNodes.size()/2 - 1; + int minorityHeavy = healthyNodes.size() / 2 - 1; List pickedNodes1 = placementPolicy.chooseDatanodes( new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), @@ -443,7 +443,7 @@ public void testHeavyNodeShouldBeExcluded() throws SCMException{ Assert.assertTrue(checkDuplicateNodesUUID(pickedNodes1)); // majority of healthy NODES are heavily engaged in pipelines. 
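// Context sketch (illustration only, not part of this change): the test
// derives a strict minority and a strict majority of nodes from integer
// division, so the placement policy is exercised on both sides of the
// threshold. The node count here is assumed.
public final class MajoritySketch {
  public static void main(String[] args) {
    int size = 9;                      // e.g. healthy node count (assumed)
    int minorityHeavy = size / 2 - 1;  // 3 of 9: strictly fewer than half
    int majorityHeavy = size / 2 + 2;  // 6 of 9: strictly more than half
    System.out.println(minorityHeavy + " of " + size + " -> minority heavy");
    System.out.println(majorityHeavy + " of " + size + " -> majority heavy");
  }
}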
- int majorityHeavy = healthyNodes.size()/2 + 2; + int majorityHeavy = healthyNodes.size() / 2 + 2; insertHeavyNodesIntoNodeManager(healthyNodes, majorityHeavy); boolean thrown = false; List pickedNodes2 = null; @@ -627,7 +627,7 @@ private Set mockPipelineIDs(int count) { } private void insertHeavyNodesIntoNodeManager( - List nodes, int heavyNodeCount) throws SCMException{ + List nodes, int heavyNodeCount) throws SCMException { if (nodes == null) { throw new SCMException("", SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java index d1f383c44620..3d1a707c400b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java @@ -289,7 +289,7 @@ public void testCreatePipelinesWhenNotEnoughSpace() throws Exception { provider.create(new RatisReplicationConfig(factor)); Assert.fail("Expected SCMException for large container size with " + "replication factor " + factor.toString()); - } catch(SCMException ex) { + } catch (SCMException ex) { Assert.assertTrue(ex.getMessage().contains(expectedErrorSubstring)); } } @@ -302,7 +302,7 @@ public void testCreatePipelinesWhenNotEnoughSpace() throws Exception { provider.create(new RatisReplicationConfig(factor)); Assert.fail("Expected SCMException for large metadata size with " + "replication factor " + factor.toString()); - } catch(SCMException ex) { + } catch (SCMException ex) { Assert.assertTrue(ex.getMessage().contains(expectedErrorSubstring)); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java index c9840e7b999c..83419e67b06e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java @@ -133,7 +133,7 @@ public void testOneReplicaPipelineRule() throws Exception { LoggerFactory.getLogger(SCMSafeModeManager.class)); List pipelines = pipelineManager.getPipelines(); - firePipelineEvent(pipelines.subList(0, pipelineFactorThreeCount -1)); + firePipelineEvent(pipelines.subList(0, pipelineFactorThreeCount - 1)); // As 90% of 7 with ceil is 7, if we send 6 pipeline reports, rule // validate should be still false. @@ -144,7 +144,7 @@ public void testOneReplicaPipelineRule() throws Exception { Assert.assertFalse(rule.validate()); //Fire last pipeline event from datanode. 
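// Context sketch (illustration only, not part of this change): the two
// firePipelineEvent calls depend on List.subList's half-open range
// [fromIndex, toIndex), so "all but the last" and "only the last" partition
// the list without overlap.
import java.util.Arrays;
import java.util.List;

public final class SubListSketch {
  public static void main(String[] args) {
    List<String> pipelines =
        Arrays.asList("p1", "p2", "p3", "p4", "p5", "p6", "p7");
    int count = pipelines.size();
    List<String> allButLast = pipelines.subList(0, count - 1);   // p1..p6
    List<String> lastOnly = pipelines.subList(count - 1, count); // p7
    System.out.println(allButLast + " then " + lastOnly);
  }
}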
- firePipelineEvent(pipelines.subList(pipelineFactorThreeCount -1, + firePipelineEvent(pipelines.subList(pipelineFactorThreeCount - 1, pipelineFactorThreeCount)); GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000); @@ -181,13 +181,13 @@ public void testOneReplicaPipelineRuleMixedPipelines() throws Exception { pipelineManager.getPipelines( new RatisReplicationConfig(ReplicationFactor.THREE)); - firePipelineEvent(pipelines.subList(0, pipelineCountThree -1)); + firePipelineEvent(pipelines.subList(0, pipelineCountThree - 1)); GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains( "reported count is 6"), 1000, 5000); //Fire last pipeline event from datanode. - firePipelineEvent(pipelines.subList(pipelineCountThree -1, + firePipelineEvent(pipelines.subList(pipelineCountThree - 1, pipelineCountThree)); GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index ef6345ef0ec5..cb200f2ea84c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -435,7 +435,7 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( }, 100, 1000 * 5); } - private void checkHealthy(int expectedCount) throws Exception{ + private void checkHealthy(int expectedCount) throws Exception { GenericTestUtils.waitFor(() -> scmSafeModeManager .getHealthyPipelineSafeModeRule() .getCurrentHealthyPipelineCount() == expectedCount, @@ -548,14 +548,14 @@ private void testSafeModeDataNodes(int numOfDns) throws Exception { assertTrue(scmSafeModeManager.getInSafeMode()); // Register all DataNodes except last one and assert SCM is in safe mode. 
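// Context sketch (illustration only, not part of this change): the loop
// below registers numOfDns - 1 datanodes and expects safe mode to hold until
// the final one reports. A minimal threshold rule of that shape, with the
// 100% cutoff assumed for the sketch:
public final class SafeModeThresholdSketch {

  static boolean inSafeMode(int reportedDns, int requiredDns, double cutoff) {
    if (requiredDns == 0) {
      return false; // nothing to wait for
    }
    return (double) reportedDns / requiredDns < cutoff;
  }

  public static void main(String[] args) {
    int required = 5;
    for (int reported = 0; reported <= required; reported++) {
      System.out.println(reported + "/" + required + " reported -> safe mode: "
          + inSafeMode(reported, required, 1.0));
    }
  }
}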
- for (int i = 0; i < numOfDns-1; i++) { + for (int i = 0; i < numOfDns - 1; i++) { queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, HddsTestUtils.createNodeRegistrationContainerReport(containers)); assertTrue(scmSafeModeManager.getInSafeMode()); assertTrue(scmSafeModeManager.getCurrentContainerThreshold() == 1); } - if(numOfDns == 0){ + if (numOfDns == 0) { GenericTestUtils.waitFor(() -> { return scmSafeModeManager.getInSafeMode(); }, 10, 1000 * 10); @@ -586,7 +586,7 @@ public void testSafeModePipelineExitRule() throws Exception { containers.addAll(HddsTestUtils.getContainerInfo(25 * 4)); String storageDir = GenericTestUtils.getTempPath( TestSCMSafeModeManager.class.getName() + UUID.randomUUID()); - try{ + try { MockNodeManager nodeManager = new MockNodeManager(true, 3); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); // enable pipeline check diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java index 5ddbe5532f68..7ad118ca9c4a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java @@ -193,7 +193,7 @@ public void testRevokeCertificates() throws Exception { // Generate 3 more certificates and revoke 2 of them List newSerialIDs = new ArrayList<>(); - for (int i = 0; i<3; i++) { + for (int i = 0; i < 3; i++) { X509Certificate cert = generateX509Cert(); scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert, SCM); newSerialIDs.add(cert.getSerialNumber()); @@ -250,7 +250,7 @@ public void testRevokeCertificatesForFutureTime() throws Exception { scmCertStore.storeValidCertificate(serialID, x509Certificate, SCM); Date now = new Date(); // Set revocation time in the future - Date revocationTime = new Date(now.getTime()+500); + Date revocationTime = new Date(now.getTime() + 500); X509CertificateHolder caCertificateHolder = new X509CertificateHolder(generateX509Cert().getEncoded()); @@ -282,7 +282,7 @@ private X509Certificate generateX509Cert() throws Exception { private long getTableSize(Iterator iterator) { long size = 0; - while(iterator.hasNext()) { + while (iterator.hasNext()) { size++; iterator.next(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java index 47495c91ee15..e7b2c5754396 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java @@ -57,7 +57,7 @@ public class TestSCMUpdateServiceGrpcServer { public Timeout timeout = Timeout.seconds(300); @Rule - public ExpectedException thrown= ExpectedException.none(); + public ExpectedException thrown = ExpectedException.none(); @Rule public final TemporaryFolder tempDir = new TemporaryFolder(); @@ -129,13 +129,13 @@ public void testClientUpdateWithRevoke() throws Exception { } server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()==4, 100, 2000); + GenericTestUtils.waitFor(() -> client.getUpdateCount() == 4, 100, 2000); Assert.assertEquals(4, client.getUpdateCount()); Assert.assertEquals(0, client.getErrorCount()); 
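// Context sketch (illustration only, not part of this change): these tests
// poll with GenericTestUtils.waitFor(condition, checkEveryMillis,
// waitForMillis). The helper below is an assumed re-implementation of that
// polling pattern, not the real GenericTestUtils code.
import java.util.function.BooleanSupplier;

public final class WaitForSketch {

  static void waitFor(BooleanSupplier check, long checkEveryMillis,
      long waitForMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + waitForMillis;
    while (!check.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("timed out waiting for condition");
      }
      Thread.sleep(checkEveryMillis); // re-check on a fixed cadence
    }
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    waitFor(() -> System.currentTimeMillis() - start > 300, 100, 2000);
    System.out.println("condition met");
  }
}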
revokeCertNow(certIds.get(5)); server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>4, 100, 2000); + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 4, 100, 2000); Assert.assertEquals(5, client.getUpdateCount()); Assert.assertEquals(0, client.getErrorCount()); } catch (Exception e) { @@ -178,7 +178,7 @@ public void testClientUpdateWithDelayedRevoke() throws Exception { revokeCertNow((certIds.get(0))); server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()==1, + GenericTestUtils.waitFor(() -> client.getUpdateCount() == 1, 100, 2000); Assert.assertEquals(1, client.getUpdateCount()); Assert.assertEquals(0, client.getErrorCount()); @@ -186,14 +186,14 @@ public void testClientUpdateWithDelayedRevoke() throws Exception { // revoke cert 5 with 10 seconds delay revokeCert(certIds.get(5), Instant.now().plus(Duration.ofSeconds(5))); server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>1, + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 1, 100, 2000); Assert.assertTrue(2 <= client.getUpdateCount()); Assert.assertEquals(0, client.getErrorCount()); Assert.assertTrue(1 >= client.getClientCRLStore() .getPendingCrlIds().size()); - GenericTestUtils.waitFor(() -> client.getPendingCrlRemoveCount()==1, + GenericTestUtils.waitFor(() -> client.getPendingCrlRemoveCount() == 1, 100, 20_000); Assert.assertTrue(client.getClientCRLStore() .getPendingCrlIds().isEmpty()); @@ -243,7 +243,7 @@ public void testClientUpdateWithRestart() throws Exception { revokeCertNow((certIds.get(i))); } server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()==4, + GenericTestUtils.waitFor(() -> client.getUpdateCount() == 4, 100, 2000); Assert.assertEquals(4, client.getUpdateCount()); @@ -257,7 +257,7 @@ public void testClientUpdateWithRestart() throws Exception { // client retry connect to the server. The client will handle that. 
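// Context sketch (illustration only, not part of this change): the
// stop()/start() sequence below expects the client to ride out a broken
// stream and reconnect. A minimal retry-with-backoff loop of that shape;
// every name here is hypothetical.
public final class RetrySketch {

  interface Call {
    void run() throws Exception;
  }

  static void runWithRetry(Call call, int maxAttempts, long backoffMillis)
      throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        call.run();
        return;
      } catch (Exception e) {
        if (attempt >= maxAttempts) {
          throw e; // out of attempts, surface the failure
        }
        Thread.sleep(backoffMillis * attempt); // linear backoff between tries
      }
    }
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    runWithRetry(() -> {
      if (System.currentTimeMillis() - start < 150) {
        throw new java.io.IOException("server not up yet");
      }
      System.out.println("reconnected");
    }, 5, 100);
  }
}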
server.stop(); server.start(); - GenericTestUtils.waitFor(() -> client.getErrorCount()==1, + GenericTestUtils.waitFor(() -> client.getErrorCount() == 1, 100, 2000); Assert.assertEquals(4, client.getUpdateCount()); Assert.assertEquals(1, client.getErrorCount()); @@ -266,7 +266,7 @@ public void testClientUpdateWithRestart() throws Exception { revokeCertNow(certIds.get(5)); server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>4, + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 4, 100, 5000); Assert.assertEquals(5, client.getUpdateCount()); Assert.assertEquals(1, client.getErrorCount()); @@ -282,16 +282,16 @@ public void testClientUpdateWithRestart() throws Exception { client.createChannel(); client.start(); Assert.assertEquals(5, clientCRLStore.getLatestCrlId()); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>5, + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 5, 100, 2000); revokeCertNow(certIds.get(6)); // mostly noop server.notifyCrlUpdate(); LOG.info("Test client restart end."); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>6, + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 6, 100, 2000); - Assert.assertTrue(client.getUpdateCount()>=6); + Assert.assertTrue(client.getUpdateCount() >= 6); Assert.assertEquals(2, client.getErrorCount()); Assert.assertEquals(6, clientCRLStore.getLatestCrlId()); } catch (Exception e) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java index 9cdad811905b..762f946d83fc 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java @@ -66,7 +66,7 @@ public void testStartupSlvLessThanMlv() throws Exception { try { new StorageContainerManager(conf); Assert.fail("Expected IOException due to incorrect MLV on SCM creation."); - } catch(IOException e) { + } catch (IOException e) { String expectedMessage = String.format("Metadata layout version (%s) > " + "software layout version (%s)", mlv, largestSlv); GenericTestUtils.assertExceptionContains(expectedMessage, e); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 33fb3557bbb1..4db75d2052e9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -183,7 +183,7 @@ public List getMostOrLeastUsedDatanodes(boolean mostUsed) { * @return DatanodeUsageInfo of the specified datanode */ @Override - public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn){ + public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn) { return null; } diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java index a1e47407e2f2..61dd5bf1ad3f 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java @@ -521,7 +521,7 @@ 
public static E intercept( throws Exception { return intercept(clazz, contained, "Expecting " + clazz.getName() - + (contained != null? (" with text " + contained) : "") + + (contained != null ? (" with text " + contained) : "") + " but got ", () -> { eval.call(); @@ -589,7 +589,7 @@ public static void assertOptionalEquals(String message, T expected, Optional actual) { Assert.assertNotNull(message, actual); - Assert.assertTrue(message +" -not present", actual.isPresent()); + Assert.assertTrue(message + " -not present", actual.isPresent()); Assert.assertEquals(message, expected, actual.get()); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java index e0cd436bdf0a..44e4d4c9c50b 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java @@ -36,7 +36,7 @@ public class ContainerBalancerStatusSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.getContainerBalancerStatus(); - if(execReturn){ + if (execReturn) { System.out.println("ContainerBalancer is Running."); } else { System.out.println("ContainerBalancer is Not Running."); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java index c6800befd8cd..9bc3649dd9f0 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java @@ -43,7 +43,7 @@ public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.getReplicationManagerStatus(); // Output data list - if(execReturn){ + if (execReturn) { LOG.info("ReplicationManager is Running."); } else { LOG.info("ReplicationManager is Not Running."); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java index ba359af1c59b..db2f02c5e125 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java @@ -51,7 +51,7 @@ public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.inSafeMode(); // Output data list - if(execReturn){ + if (execReturn) { LOG.info("SCM is in safe mode."); if (verbose) { for (Map.Entry> entry : diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java index 12490c5c2c51..bcf64deb85e2 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java @@ -42,7 +42,7 @@ public class SafeModeExitSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = 
scmClient.forceExitSafeMode(); - if(execReturn){ + if (execReturn) { LOG.info("SCM exit safe mode successfully."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java index 232cc8de6934..23ff9176df9f 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java @@ -52,7 +52,7 @@ public void execute(ScmClient scmClient) throws IOException { String.join("\n", hosts)); if (errors.size() > 0) { for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() +": " + System.err.println("Error: " + error.getHostname() + ": " + error.getError()); } // Throwing the exception will cause a non-zero exit status for the diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java index 6d59e3c71a72..a64c400f66f1 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java @@ -45,7 +45,7 @@ public class MaintenanceSubCommand extends ScmSubcommand { private List hosts = new ArrayList<>(); @CommandLine.Option(names = {"--end"}, - description = "Automatically end maintenance after the given hours. "+ + description = "Automatically end maintenance after the given hours. " + "By default, maintenance must be ended manually.") private int endInHours = 0; @@ -58,7 +58,7 @@ public void execute(ScmClient scmClient) throws IOException { String.join("\n", hosts)); if (errors.size() > 0) { for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() +": " + System.err.println("Error: " + error.getHostname() + ": " + error.getError()); } // Throwing the exception will cause a non-zero exit status for the diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java index 94b97dbe3a69..61f7826cf647 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java @@ -53,7 +53,7 @@ public void execute(ScmClient scmClient) throws IOException { String.join("\n", hosts)); if (errors.size() > 0) { for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() +": " + System.err.println("Error: " + error.getHostname() + ": " + error.getError()); } // Throwing the exception will cause a non-zero exit status for the diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java index 5c7107683ba5..be0e2c8fb69b 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java @@ -73,7 +73,7 @@ public void 
testCorrectValuesAppearInEmptyReport() throws IOException { for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) { Pattern p = Pattern.compile( - "^"+state.toString() + ": 0$", Pattern.MULTILINE); + "^" + state.toString() + ": 0$", Pattern.MULTILINE); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } @@ -81,7 +81,7 @@ public void testCorrectValuesAppearInEmptyReport() throws IOException { for (ReplicationManagerReport.HealthState state : ReplicationManagerReport.HealthState.values()) { Pattern p = Pattern.compile( - "^"+state.toString() + ": 0$", Pattern.MULTILINE); + "^" + state.toString() + ": 0$", Pattern.MULTILINE); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } @@ -98,7 +98,7 @@ public void testCorrectValuesAppearInReport() throws IOException { int counter = SEED; for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) { Pattern p = Pattern.compile( - "^"+state.toString() + ": " + counter + "$", Pattern.MULTILINE); + "^" + state.toString() + ": " + counter + "$", Pattern.MULTILINE); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); counter++; @@ -108,13 +108,13 @@ public void testCorrectValuesAppearInReport() throws IOException { for (ReplicationManagerReport.HealthState state : ReplicationManagerReport.HealthState.values()) { Pattern p = Pattern.compile( - "^"+state.toString() + ": " + counter + "$", Pattern.MULTILINE); + "^" + state.toString() + ": " + counter + "$", Pattern.MULTILINE); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); // Check the correct samples are returned p = Pattern.compile( - "^First 100 "+ state + " containers:\n" + "^First 100 " + state + " containers:\n" + containerList(0, counter) + "$", Pattern.MULTILINE); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); @@ -151,7 +151,7 @@ private String containerList(int start, int end) { if (i != start) { sb.append(", "); } - sb.append("#"+i); + sb.append("#" + i); } return sb.toString(); } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java index 70c74a9b98a8..69b0efbda1c1 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java @@ -90,7 +90,7 @@ public void testDataNodeOperationalStateAndHealthIncludedInOutput() assertTrue(m.find()); for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) { p = Pattern.compile( - "^Health State:\\s+"+state+"$", Pattern.MULTILINE); + "^Health State:\\s+" + state + "$", Pattern.MULTILINE); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } @@ -106,11 +106,11 @@ public void testDataNodeOperationalStateAndHealthIncludedInOutput() private List getNodeDetails() { List nodes = new ArrayList<>(); - for (int i=0; i<4; i++) { + for (int i = 0; i < 4; i++) { HddsProtos.DatanodeDetailsProto.Builder dnd = HddsProtos.DatanodeDetailsProto.newBuilder(); dnd.setHostName("host" + i); - dnd.setIpAddress("1.2.3." + i+1); + dnd.setIpAddress("1.2.3." 
+ i + 1); dnd.setNetworkLocation("/default"); dnd.setNetworkName("host" + i); dnd.addPorts(HddsProtos.Port.newBuilder() diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 1f819ac91f92..2412b889faba 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -207,7 +207,7 @@ public Iterator listVolumes(String volumePrefix, public Iterator listVolumesByUser(String user, String volumePrefix, String prevVolume) throws IOException { - if(Strings.isNullOrEmpty(user)) { + if (Strings.isNullOrEmpty(user)) { user = UserGroupInformation.getCurrentUser().getUserName(); } return new VolumeIterator(user, volumePrefix, prevVolume); @@ -269,7 +269,7 @@ public boolean hasNext() { @Override public OzoneVolume next() { - if(hasNext()) { + if (hasNext()) { currentValue = currentIterator.next(); return currentValue; } @@ -284,7 +284,7 @@ public OzoneVolume next() { private List getNextListOfVolumes(String prevVolume) { try { //if user is null, we do list of all volumes. - if(user != null) { + if (user != null) { return proxy.listVolumes(user, volPrefix, prevVolume, listCacheSize); } return proxy.listVolumes(volPrefix, prevVolume, listCacheSize); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 16d5a1b2f0c6..a292ae263f75 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -611,7 +611,7 @@ public long getUsedNamespace() { * @return {@code Iterator} */ public Iterator listKeys(String keyPrefix) - throws IOException{ + throws IOException { return listKeys(keyPrefix, null); } @@ -910,7 +910,7 @@ public OzoneMultipartUploadList listMultipartUploads(String prefix) * @param userName new owner * @throws IOException */ - public boolean setOwner(String userName) throws IOException{ + public boolean setOwner(String userName) throws IOException { boolean result = proxy.setBucketOwner(volumeName, name, userName); this.owner = userName; return result; @@ -939,7 +939,7 @@ void setKeyPrefix(String keyPrefixPath) { * The returned keys match key prefix. * @param keyPrefix */ - KeyIterator(String keyPrefix, String prevKey) throws IOException{ + KeyIterator(String keyPrefix, String prevKey) throws IOException { setKeyPrefix(keyPrefix); this.currentValue = null; this.currentIterator = getNextListOfKeys(prevKey).iterator(); @@ -947,7 +947,7 @@ void setKeyPrefix(String keyPrefixPath) { @Override public boolean hasNext() { - if(!currentIterator.hasNext() && currentValue != null) { + if (!currentIterator.hasNext() && currentValue != null) { try { currentIterator = getNextListOfKeys(currentValue.getName()).iterator(); @@ -960,7 +960,7 @@ public boolean hasNext() { @Override public OzoneKey next() { - if(hasNext()) { + if (hasNext()) { currentValue = currentIterator.next(); return currentValue; } @@ -1008,7 +1008,7 @@ List getNextListOfKeys(String prevKey) throws * * Note: Does not guarantee to return the list of keys in a sorted order. 
*/ - private class KeyIteratorWithFSO extends KeyIterator{ + private class KeyIteratorWithFSO extends KeyIterator { private Stack stack; private List pendingItemsToBeBatched; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index 9bf3973aeaec..2830bb13c040 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -51,7 +51,7 @@ public final class OzoneClientFactory { /** * Private constructor, class is not meant to be initialized. */ - private OzoneClientFactory(){} + private OzoneClientFactory() { } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java index 9326bed5e978..e37969d42a69 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java @@ -107,7 +107,7 @@ public String getVolumeName() { * * @return bucketName */ - public String getBucketName(){ + public String getBucketName() { return bucketName; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 3847b1214f64..389ccf80011a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -480,7 +480,7 @@ public boolean hasNext() { @Override public OzoneBucket next() { - if(hasNext()) { + if (hasNext()) { currentValue = currentIterator.next(); return currentValue; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java index 45e84735597b..5aff6856bd4f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java @@ -248,7 +248,7 @@ void incCurrentPosition(long len) { /** * Increases current position by one. Used in writes. */ - void incCurrentPosition(){ + void incCurrentPosition() { currentPosition++; } @@ -280,7 +280,7 @@ void updateBlockID(BlockID id) { this.blockID = id; } - OzoneClientConfig getConf(){ + OzoneClientConfig getConf() { return this.config; } @@ -305,7 +305,7 @@ public Pipeline getPipeline() { * OMKeyLocationInfo. 
* @return */ - Pipeline getPipelineForOMLocationReport(){ + Pipeline getPipelineForOMLocationReport() { return getPipeline(); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java index eecc73bc3d96..470f69597873 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java @@ -80,7 +80,7 @@ public static KeyProvider.KeyVersion decryptEncryptedDataEncryptionKey( */ public static Text getKeyProviderMapKey(URI namespaceUri) { return new Text(O3_KMS_PREFIX + namespaceUri.getScheme() - +"://" + namespaceUri.getAuthority()); + + "://" + namespaceUri.getAuthority()); } public static String bytes2String(byte[] bytes) { @@ -131,7 +131,7 @@ public static URI getKeyProviderUri(UserGroupInformation ugi, } public static KeyProvider getKeyProvider(final ConfigurationSource conf, - final URI serverProviderUri) throws IOException{ + final URI serverProviderUri) throws IOException { if (serverProviderUri == null) { throw new IOException("KMS serverProviderUri is not configured."); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index b9db113e935f..328044184b02 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -537,7 +537,7 @@ public void createBucket( List listOfAcls = getAclList(); //ACLs from BucketArgs - if(bucketArgs.getAcls() != null) { + if (bucketArgs.getAcls() != null) { listOfAcls.addAll(bucketArgs.getAcls()); } @@ -856,7 +856,7 @@ public OzoneOutputStream createKey( .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation); if (Boolean.parseBoolean(metadata.get(OzoneConsts.GDPR_FLAG))) { - try{ + try { GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom()); builder.addAllMetadata(gKey.getKeyDetails()); } catch (Exception e) { @@ -950,13 +950,13 @@ public OzoneInputStream getKey( List keyLocationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly(); - for(OmKeyLocationInfo keyLocationInfo : keyLocationInfos) { + for (OmKeyLocationInfo keyLocationInfo : keyLocationInfos) { Map blocks = new HashMap<>(); Pipeline pipelineBefore = keyLocationInfo.getPipeline(); List datanodes = pipelineBefore.getNodes(); - for(DatanodeDetails dn : datanodes) { + for (DatanodeDetails dn : datanodes) { List nodes = new ArrayList<>(); nodes.add(dn); Pipeline pipeline @@ -1015,7 +1015,7 @@ public void renameKey(String volumeName, String bucketName, String fromKeyName, String toKeyName) throws IOException { verifyVolumeName(volumeName); verifyBucketName(bucketName); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { HddsClientUtils.verifyKeyName(toKeyName); } HddsClientUtils.checkNotNull(fromKeyName, toKeyName); @@ -1157,13 +1157,13 @@ public OzoneOutputStream createMultipartKey(String volumeName, throws IOException { verifyVolumeName(volumeName); verifyBucketName(bucketName); - if(checkKeyNameEnabled) { + if (checkKeyNameEnabled) { HddsClientUtils.verifyKeyName(keyName); } HddsClientUtils.checkNotNull(keyName, uploadID); - Preconditions.checkArgument(partNumber > 0 && partNumber <=10000, "Part " + + 
Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000, "Part " + "number should be greater than zero and less than or equal to 10000"); - Preconditions.checkArgument(size >=0, "size should be greater than or " + + Preconditions.checkArgument(size >= 0, "size should be greater than or " + "equal to zero"); String requestId = UUID.randomUUID().toString(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() @@ -1508,7 +1508,7 @@ private OzoneInputStream createInputStream( final KeyProvider.KeyVersion decrypted = getDEK(feInfo); List cryptoInputStreams = new ArrayList<>(); - for(LengthInputStream lengthInputStream : lengthInputStreams) { + for (LengthInputStream lengthInputStream : lengthInputStreams) { final OzoneCryptoInputStream ozoneCryptoInputStream = new OzoneCryptoInputStream(lengthInputStream, OzoneKMSUtil.getCryptoCodec(conf, feInfo), @@ -1546,11 +1546,11 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey, decrypted.getMaterial(), feInfo.getIV()); return new OzoneOutputStream(cryptoOut); } else { - try{ + try { GDPRSymmetricKey gk; Map openKeyMetadata = openKey.getKeyInfo().getMetadata(); - if(Boolean.valueOf(openKeyMetadata.get(OzoneConsts.GDPR_FLAG))){ + if (Boolean.valueOf(openKeyMetadata.get(OzoneConsts.GDPR_FLAG))) { gk = new GDPRSymmetricKey( openKeyMetadata.get(OzoneConsts.GDPR_SECRET), openKeyMetadata.get(OzoneConsts.GDPR_ALGORITHM) @@ -1559,7 +1559,7 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey, return new OzoneOutputStream( new CipherOutputStream(keyOutputStream, gk.getCipher())); } - }catch (Exception ex){ + } catch (Exception ex) { throw new IOException(ex); } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java index 712120d223ba..6c047ab998a4 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java @@ -65,7 +65,7 @@ public class TestHddsClientUtils { public Timeout timeout = Timeout.seconds(300); @Rule - public ExpectedException thrown= ExpectedException.none(); + public ExpectedException thrown = ExpectedException.none(); /** * Verify client endpoint lookup failure if it is not configured. @@ -104,7 +104,7 @@ public void testGetScmClientAddressForHA() { conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); String[] nodes = new String[] {"scm1", "scm2", "scm3"}; - conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId, + conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." + scmServiceId, "scm1,scm2,scm3"); conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1"); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 96c56580cd29..7571f4ec4103 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -484,7 +484,7 @@ public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo, // If this key is in a GDPR enforced bucket, then before moving // KeyInfo to deletedTable, remove the GDPR related metadata and // FileEncryptionInfo from KeyInfo. 
- if(Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) { + if (Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) { keyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG); keyInfo.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM); keyInfo.getMetadata().remove(OzoneConsts.GDPR_SECRET); @@ -494,7 +494,7 @@ public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo, // Set the updateID keyInfo.setUpdateID(trxnLogIndex, isRatisEnabled); - if(repeatedOmKeyInfo == null) { + if (repeatedOmKeyInfo == null) { //The key doesn't exist in deletedTable, so create a new instance. repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo); } else { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 6a74342b8d28..7ca0634949c0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -105,7 +105,7 @@ public OzoneAcl(ACLIdentityType type, String name, BitSet acls, Objects.requireNonNull(type); Objects.requireNonNull(acls); - if(acls.cardinality() > ACLType.getNoOfAcls()) { + if (acls.cardinality() > ACLType.getNoOfAcls()) { throw new IllegalArgumentException("Acl bitset passed has unexpected " + "size. bitset size:" + acls.cardinality() + ", bitset:" + acls.toString()); @@ -159,7 +159,7 @@ public static OzoneAcl parseAcl(String acl) AclScope aclScope = AclScope.ACCESS; // Check if acl string contains scope info. - if(parts[2].matches(ACL_SCOPE_REGEX)) { + if (parts[2].matches(ACL_SCOPE_REGEX)) { int indexOfOpenBracket = parts[2].indexOf("["); bits = parts[2].substring(0, indexOfOpenBracket); aclScope = AclScope.valueOf(parts[2].substring(indexOfOpenBracket + 1, @@ -194,7 +194,7 @@ public static List parseAcls(String acls) } List ozAcls = new ArrayList<>(); - for(String acl:parts) { + for (String acl:parts) { ozAcls.add(parseAcl(acl)); } return ozAcls; @@ -289,7 +289,7 @@ public BitSet getAclBitSet() { } public List getAclList() { - if(aclBitSet != null) { + if (aclBitSet != null) { return aclBitSet.stream().mapToObj(a -> ACLType.values()[a]).collect(Collectors.toList()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index e3e7f411e283..82d26f95ac4a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -201,15 +201,15 @@ private OMConfigKeys() { public static final String DELEGATION_REMOVER_SCAN_INTERVAL_KEY = "ozone.manager.delegation.remover.scan.interval"; public static final long DELEGATION_REMOVER_SCAN_INTERVAL_DEFAULT = - 60*60*1000; + 60 * 60 * 1000; public static final String DELEGATION_TOKEN_RENEW_INTERVAL_KEY = "ozone.manager.delegation.token.renew-interval"; public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT = - 24*60*60*1000; // 1 day = 86400000 ms + 24 * 60 * 60 * 1000; // 1 day = 86400000 ms public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY = "ozone.manager.delegation.token.max-lifetime"; public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT = - 7*24*60*60*1000; // 7 days + 7 * 24 * 60 * 60 * 1000; // 7 days public static final String OZONE_DB_CHECKPOINT_TRANSFER_RATE_KEY = "ozone.manager.db.checkpoint.transfer.bandwidthPerSec"; diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index 1806a0354211..9291d33d9af2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -153,7 +153,7 @@ public Map toAuditMap() { this.metadata.get(OzoneConsts.GDPR_FLAG)); auditMap.put(OzoneConsts.IS_VERSION_ENABLED, String.valueOf(this.isVersionEnabled)); - if(this.storageType != null){ + if (this.storageType != null) { auditMap.put(OzoneConsts.STORAGE_TYPE, this.storageType.name()); } if (this.ownerName != null) { @@ -241,16 +241,16 @@ public BucketArgs getProtobuf() { BucketArgs.Builder builder = BucketArgs.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName); - if(isVersionEnabled != null) { + if (isVersionEnabled != null) { builder.setIsVersionEnabled(isVersionEnabled); } - if(storageType != null) { + if (storageType != null) { builder.setStorageType(storageType.toProto()); } - if(quotaInBytes > 0 || quotaInBytes == OzoneConsts.QUOTA_RESET) { + if (quotaInBytes > 0 || quotaInBytes == OzoneConsts.QUOTA_RESET) { builder.setQuotaInBytes(quotaInBytes); } - if(quotaInNamespace > 0 || quotaInNamespace == OzoneConsts.QUOTA_RESET) { + if (quotaInNamespace > 0 || quotaInNamespace == OzoneConsts.QUOTA_RESET) { builder.setQuotaInNamespace(quotaInNamespace); } if (ownerName != null) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index d6f22430f9a4..485cf32a7a65 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -150,7 +150,7 @@ public long getParentObjectID() { public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() { - return keyLocationVersions.size() == 0? null : + return keyLocationVersions.size() == 0 ? 
null : keyLocationVersions.get(keyLocationVersions.size() - 1); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java index 1504f4e35e90..9df7518db4a5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java @@ -225,7 +225,7 @@ public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) { getPipeline(keyLocation), keyLocation.getLength(), keyLocation.getOffset(), keyLocation.getPartNumber()); - if(keyLocation.hasToken()) { + if (keyLocation.hasToken()) { info.token = (Token) OzonePBHelper.tokenFromProto(keyLocation.getToken()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java index e312138b5f96..ec660684f54a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java @@ -165,7 +165,7 @@ void appendNewBlocks(List newLocationList) { } } - void removeBlocks(long versionToRemove){ + void removeBlocks(long versionToRemove) { locationVersionMap.remove(versionToRemove); } @@ -181,7 +181,7 @@ public String toString() { sb.append("version:").append(version).append(" "); sb.append("isMultipartKey:").append(isMultipartKey); for (List kliList : locationVersionMap.values()) { - for(OmKeyLocationInfo kli: kliList) { + for (OmKeyLocationInfo kli: kliList) { sb.append(kli.getLocalID()).append(" || "); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index 85165d6e5b5e..91cc19c3143f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -94,7 +94,7 @@ public void setQuotaInBytes(long quotaInBytes) { } public void setQuotaInNamespace(long quotaInNamespace) { - this.quotaInNamespace= quotaInNamespace; + this.quotaInNamespace = quotaInNamespace; } public void setCreationTime(long time) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java index 0ca1e36e2259..94dff5115e90 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java @@ -43,7 +43,7 @@ */ public final class OzoneAclUtil { - private OzoneAclUtil(){ + private OzoneAclUtil() { } /** @@ -60,7 +60,7 @@ public static List getAclList(String userName, // User ACL. listOfAcls.add(new OzoneAcl(USER, userName, userRights, ACCESS)); - if(userGroups != null) { + if (userGroups != null) { // Group ACLs of the User. 
Arrays.asList(userGroups).forEach((group) -> listOfAcls.add( new OzoneAcl(GROUP, group, groupRights, ACCESS))); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index 3f09d4ac3be2..1de593441e27 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -31,7 +31,7 @@ */ public final class OzoneFSUtils { - private OzoneFSUtils() {} + private OzoneFSUtils() { } /** * Returns string representation of path after removing the leading slash. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index cde8e3901aac..83a7184123ad 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -54,7 +54,7 @@ public List getOmKeyInfoList() { public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo repeatedKeyInfo) { List list = new ArrayList<>(); - for(KeyInfo k : repeatedKeyInfo.getKeyInfoList()) { + for (KeyInfo k : repeatedKeyInfo.getKeyInfoList()) { list.add(OmKeyInfo.getFromProtobuf(k)); } return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build(); @@ -67,7 +67,7 @@ public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo */ public RepeatedKeyInfo getProto(boolean compact, int clientVersion) { List list = new ArrayList<>(); - for(OmKeyInfo k : omKeyInfoList) { + for (OmKeyInfo k : omKeyInfoList) { list.add(k.getProtobuf(compact, clientVersion)); } @@ -82,7 +82,7 @@ public RepeatedKeyInfo getProto(boolean compact, int clientVersion) { public static class Builder { private List omKeyInfos; - public Builder(){} + public Builder() { } public Builder setOmKeyInfos(List infoList) { this.omKeyInfos = infoList; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java index c6eb5ddc0ae6..0a8b1d6f67c2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java @@ -62,7 +62,7 @@ public final class ServiceInfo { /** * Default constructor for JSON deserialization. */ - public ServiceInfo() {} + public ServiceInfo() { } /** * Constructs the ServiceInfo for the {@code nodeType}. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java index 6b12f13e634e..eebb4d87517c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java @@ -61,7 +61,7 @@ public long getUpdateID() { * @param obId - long */ public void setObjectID(long obId) { - if(this.objectID != 0) { + if (this.objectID != 0) { throw new UnsupportedOperationException("Attempt to modify object ID " + "which is not zero. 
Current Object ID is " + this.objectID); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index e2bf9f026da4..be758a06d229 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -603,7 +603,7 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()); - if(args.getAcls() != null) { + if (args.getAcls() != null) { keyArgs.addAllAcls(args.getAcls().stream().distinct().map(a -> OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); } @@ -1231,7 +1231,7 @@ public Token getDelegationToken(Text renewer) OMPBHelper.convertToDelegationToken(resp.getResponse().getToken()) : null; } catch (IOException e) { - if(e instanceof OMException) { + if (e instanceof OMException) { throw (OMException)e; } throw new OMException("Get delegation token failed.", e, @@ -1263,7 +1263,7 @@ public long renewDelegationToken(Token token) .getRenewDelegationTokenResponse(); return resp.getResponse().getNewExpiryTime(); } catch (IOException e) { - if(e instanceof OMException) { + if (e instanceof OMException) { throw (OMException)e; } throw new OMException("Renew delegation token failed.", e, @@ -1292,7 +1292,7 @@ public void cancelDelegationToken(Token token) try { handleError(submitRequest(omRequest)); } catch (IOException e) { - if(e instanceof OMException) { + if (e instanceof OMException) { throw (OMException)e; } throw new OMException("Cancel delegation token failed.", e, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java index 2ff2dc830a25..51c7d54d6c61 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java @@ -50,7 +50,7 @@ private OMPBHelper() { * @return tokenProto */ public static TokenProto convertToTokenProto(Token tok) { - if(tok == null){ + if (tok == null) { throw new IllegalArgumentException("Invalid argument: token is null"); } @@ -86,9 +86,9 @@ public static BucketEncryptionKeyInfo convert( } return new BucketEncryptionKeyInfo( - beInfo.hasCryptoProtocolVersion()? + beInfo.hasCryptoProtocolVersion() ? convert(beInfo.getCryptoProtocolVersion()) : null, - beInfo.hasSuite()? convert(beInfo.getSuite()) : null, + beInfo.hasSuite() ? 
convert(beInfo.getSuite()) : null, beInfo.getKeyName()); } @@ -106,7 +106,7 @@ public static BucketEncryptionInfoProto convert( if (beInfo.getSuite() != null) { bb.setSuite(convert(beInfo.getSuite())); } - if (beInfo.getVersion()!= null) { + if (beInfo.getVersion() != null) { bb.setCryptoProtocolVersion(convert(beInfo.getVersion())); } return bb.build(); @@ -142,7 +142,7 @@ public static FileEncryptionInfo convert(FileEncryptionInfoProto proto) { } public static CipherSuite convert(CipherSuiteProto proto) { - switch(proto) { + switch (proto) { case AES_CTR_NOPADDING: return CipherSuite.AES_CTR_NOPADDING; default: @@ -166,7 +166,7 @@ public static CipherSuiteProto convert(CipherSuite suite) { public static CryptoProtocolVersionProto convert( CryptoProtocolVersion version) { - switch(version) { + switch (version) { case UNKNOWN: return OzoneManagerProtocolProtos.CryptoProtocolVersionProto .UNKNOWN_PROTOCOL_VERSION; @@ -180,7 +180,7 @@ public static CryptoProtocolVersionProto convert( public static CryptoProtocolVersion convert( CryptoProtocolVersionProto proto) { - switch(proto) { + switch (proto) { case ENCRYPTION_ZONES: return CryptoProtocolVersion.ENCRYPTION_ZONES; default: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index 76fb76a8e41e..09c8743137d4 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -111,7 +111,7 @@ public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj .setStoreType(StoreType.valueOf(proto.getStoreType().name())); String[] tokens = StringUtils.split(proto.getPath(), OZONE_URI_DELIMITER, 3); - if(tokens == null) { + if (tokens == null) { throw new IllegalArgumentException("Unexpected path:" + proto.getPath()); } // Set volume name. 
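The hunks above all apply the same handful of whitespace rules, of the kind typically enforced by checkstyle's WhitespaceAfter and WhitespaceAround checks: a single space after control-flow keywords (if, for, while, switch, try, catch), spaces around binary operators and the ternary ? :, and a space before an opening brace. A minimal sketch of the conforming style — a hypothetical StyleExample class, not part of the Ozone codebase:

// Hypothetical example (not Ozone code): the spacing conventions the
// hunks above enforce, shown in their conforming form.
public final class StyleExample {

  private StyleExample() { }                       // empty body keeps one space

  static String describe(int code) {
    switch (code) {                                // space after 'switch'
    case 1:
      return code != 0 ? "one" : "other";          // spaces around '!=', '?' and ':'
    default:
      return "unknown";
    }
  }

  public static void main(String[] args) {
    for (int i = 1; i <= 3; i++) {                 // spaces around '=' and '<='
      System.out.println(describe(i - 1));         // spaces around '-'
    }
  }
}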
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java index 0b1b787342c4..85e452ed0fbd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java @@ -44,7 +44,7 @@ public final class OzoneVersionInfo { public static final RatisVersionInfo RATIS_VERSION_INFO = new RatisVersionInfo(); - private OzoneVersionInfo() {} + private OzoneVersionInfo() { } public static void main(String[] args) { System.out.println( diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java index d7794dbbf7c6..0f859732aac5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java @@ -129,7 +129,7 @@ private boolean removePrefixPathInternal(RadixNode current, return false; } - if (removePrefixPathInternal(node, path, level+1)) { + if (removePrefixPathInternal(node, path, level + 1)) { current.getChildren().remove(name); return current.hasChildren(); } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java index 12b0d408654c..052ff8ff62ec 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java @@ -180,7 +180,7 @@ public void testCanonicalTokenServiceName() throws IOException { String nodeId = NODE_ID_BASE_STR + i; ozoneConf.set( ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, OM_SERVICE_ID, - nodeId), nodeAddrs.get(i-1)); + nodeId), nodeAddrs.get(i - 1)); allNodeIds.add(nodeId); } ozoneConf.set(ConfUtils.addKeySuffixes(OZONE_OM_NODES_KEY, OM_SERVICE_ID), diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 16285c20170b..de12e795a3b4 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -70,7 +70,7 @@ private void testResourceReacquireLock(String[] resourceName, // Lock re-acquire not allowed by same thread. 
if (resource == OzoneManagerLock.Resource.USER_LOCK || resource == OzoneManagerLock.Resource.S3_SECRET_LOCK || - resource == OzoneManagerLock.Resource.PREFIX_LOCK){ + resource == OzoneManagerLock.Resource.PREFIX_LOCK) { lock.acquireWriteLock(resource, resourceName); try { lock.acquireWriteLock(resource, resourceName); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java index 39c622043ba2..1ddf353c1279 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java @@ -55,7 +55,7 @@ public void testKeyGenerationWithValidInput() throws Exception { @Test public void testKeyGenerationWithInvalidInput() throws Exception { GDPRSymmetricKey gkey = null; - try{ + try { gkey = new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5), OzoneConsts.GDPR_ALGORITHM_NAME); } catch (IllegalArgumentException ex) { diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java index ab24b1b59256..2db0dbbd8a9c 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java @@ -67,10 +67,10 @@ public void testGetBucketName() { objInfo = getBuilder(volume, bucket, key).build(); assertEquals(objInfo.getBucketName(), bucket); - objInfo =getBuilder(volume, null, null).build(); + objInfo = getBuilder(volume, null, null).build(); assertEquals(objInfo.getBucketName(), null); - objInfo =getBuilder(null, bucket, null).build(); + objInfo = getBuilder(null, bucket, null).build(); assertEquals(objInfo.getBucketName(), bucket); } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java index b97b8445dbcd..817885ea9300 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java @@ -80,13 +80,13 @@ public void testGetLongestPrefix() { public void testGetLongestPrefixPath() { List> lpp = ROOT.getLongestPrefixPath("/a/b/c/d/g/p"); - RadixNode lpn = lpp.get(lpp.size()-1); + RadixNode lpn = lpp.get(lpp.size() - 1); assertEquals("g", lpn.getName()); lpn.setValue(100); List> lpq = ROOT.getLongestPrefixPath("/a/b/c/d/g/q"); - RadixNode lqn = lpp.get(lpq.size()-1); + RadixNode lqn = lpp.get(lpq.size() - 1); System.out.print(RadixTree.radixPathToString(lpq)); assertEquals(lpn, lqn); assertEquals("g", lqn.getName()); diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh index e76a67a01e1e..bd5e7f0f1996 100755 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ b/hadoop-ozone/dev-support/checks/checkstyle.sh @@ -44,6 +44,9 @@ find "." 
-name checkstyle-errors.xml -print0 \ -e 's//g" \ | tee "$REPORT_FILE" ## generate counter diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 05094b00b427..b6ef8651b2e0 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -229,7 +229,7 @@ public Builder addFailures(Class clazz) { protected void initializeConfiguration() throws IOException { super.initializeConfiguration(); - OzoneClientConfig clientConfig =new OzoneClientConfig(); + OzoneClientConfig clientConfig = new OzoneClientConfig(); clientConfig.setStreamBufferFlushSize(8 * 1024 * 1024); clientConfig.setStreamBufferMaxSize(16 * 1024 * 1024); clientConfig.setStreamBufferSize(4 * 1024); @@ -331,7 +331,7 @@ public static int getNumberOfOmToFail() { public Set omToFail() { int numNodesToFail = getNumberOfOmToFail(); - if (failedOmSet.size() >= numOzoneManagers/2) { + if (failedOmSet.size() >= numOzoneManagers / 2) { return Collections.emptySet(); } @@ -359,7 +359,7 @@ public void restartOzoneManager(OzoneManager om, boolean waitForOM) // Should the selected node be stopped or started. public boolean shouldStopOm() { - if (failedOmSet.size() >= numOzoneManagers/2) { + if (failedOmSet.size() >= numOzoneManagers / 2) { return false; } return RandomUtils.nextBoolean(); @@ -407,7 +407,7 @@ public static int getNumberOfScmToFail() { public Set scmToFail() { int numNodesToFail = getNumberOfScmToFail(); - if (failedScmSet.size() >= numStorageContainerManagers/2) { + if (failedScmSet.size() >= numStorageContainerManagers / 2) { return Collections.emptySet(); } @@ -434,7 +434,7 @@ public void restartStorageContainerManager(StorageContainerManager scm, // Should the selected node be stopped or started. 
public boolean shouldStopScm() { - if (failedScmSet.size() >= numStorageContainerManagers/2) { + if (failedScmSet.size() >= numStorageContainerManagers / 2) { return false; } return RandomUtils.nextBoolean(); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java index 7e78e0f3c6fd..f9c7fd0ec4ce 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java @@ -58,7 +58,7 @@ public class MiniOzoneLoadGenerator { this.conf = conf; this.omServiceID = omServiceId; - for(Class clazz : loadGeneratorClazzes) { + for (Class clazz : loadGeneratorClazzes) { addLoads(clazz, buffer); } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java index c6ccb3a88134..4a380feb6ac2 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java @@ -195,7 +195,7 @@ void doPostOp() throws IOException { @Override public String toString() { return super.toString() + " " - + (readDir ? "readDirectory": "writeDirectory"); + + (readDir ? "readDirectory" : "writeDirectory"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index dc86044fc10b..752962f6b877 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -177,7 +177,7 @@ public void testObjectStoreCreateWithO3fs() throws Exception { keys.add("/dir1/dir2"); keys.add("/dir1/dir2/dir3"); keys.add("/dir1/dir2/dir3/dir4/"); - for (int i=1; i <= 3; i++) { + for (int i = 1; i <= 3; i++) { int length = 10; String fileName = parentDir.concat("/file" + i + "/"); keys.add(fileName); @@ -190,7 +190,7 @@ public void testObjectStoreCreateWithO3fs() throws Exception { } // check - for (int i=1; i <= 3; i++) { + for (int i = 1; i <= 3; i++) { String fileName = parentDir.concat("/file" + i + "/"); Path p = new Path(fileName); Assert.assertTrue(o3fs.getFileStatus(p).isFile()); @@ -209,12 +209,12 @@ public void testObjectStoreCreateWithO3fs() throws Exception { Assert.assertTrue(result); // No Key should exist. - for(String key : keys) { + for (String key : keys) { checkPath(new Path(key)); } - for (int i=1; i <= 3; i++) { + for (int i = 1; i <= 3; i++) { int length = 10; String fileName = parentDir.concat("/file" + i + "/"); OzoneOutputStream ozoneOutputStream = @@ -229,12 +229,12 @@ public void testObjectStoreCreateWithO3fs() throws Exception { o3fs.rename(new Path("/dir1"), new Path("/dest")); // No source Key should exist. - for(String key : keys) { + for (String key : keys) { checkPath(new Path(key)); } // check dest path. 
- for (int i=1; i <= 3; i++) { + for (int i = 1; i <= 3; i++) { String fileName = "/dest/".concat(parentDir.concat("/file" + i + "/")); Path p = new Path(fileName); Assert.assertTrue(o3fs.getFileStatus(p).isFile()); @@ -467,7 +467,7 @@ private void checkPath(Path path) { private void checkAncestors(Path p) throws Exception { p = p.getParent(); - while(p.getParent() != null) { + while (p.getParent() != null) { FileStatus fileStatus = o3fs.getFileStatus(p); Assert.assertTrue(fileStatus.isDirectory()); p = p.getParent(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index 0e9c360771c6..5c2e0cf3f1ce 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -523,7 +523,7 @@ public void testOzoneManagerLocatedFileStatusBlockOffsetsWithMultiBlockFile() assertEquals(0, blockLocations[0].getOffset()); assertEquals(blockSize, blockLocations[1].getOffset()); - assertEquals(2*blockSize, blockLocations[2].getOffset()); + assertEquals(2 * blockSize, blockLocations[2].getOffset()); assertEquals(blockSize, blockLocations[0].getLength()); assertEquals(blockSize, blockLocations[1].getLength()); assertEquals(837, blockLocations[2].getLength()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index 8bd4ea6b4a40..5393ffdb0add 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -207,7 +207,7 @@ public void cleanup() { for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex){ + } catch (IOException ex) { fail("Failed to cleanup files."); } } @@ -260,7 +260,7 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() fs.mkdirs(dir1); try (FSDataOutputStream outputStream1 = fs.create(dir1, false)) { fail("Should throw FileAlreadyExistsException"); - } catch (FileAlreadyExistsException fae){ + } catch (FileAlreadyExistsException fae) { // ignore as its expected } @@ -291,14 +291,14 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() fileStatus.isDirectory()); // invalid sub directory - try{ + try { fs.getFileStatus(new Path("/d1/d2/d3/d4/key3/invalid")); fail("Should throw FileNotFoundException"); } catch (FileNotFoundException fnfe) { // ignore as its expected } // invalid file name - try{ + try { fs.getFileStatus(new Path("/d1/d2/d3/d4/invalidkey")); fail("Should throw FileNotFoundException"); } catch (FileNotFoundException fnfe) { @@ -345,10 +345,10 @@ public void testCreateWithInvalidPaths() throws Exception { } private void checkInvalidPath(Path path) throws Exception { - try{ + try { LambdaTestUtils.intercept(InvalidPathException.class, "Invalid path Name", () -> fs.create(path, false)); - } catch (AssertionError e){ + } catch (AssertionError e) { fail("testCreateWithInvalidPaths failed for path" + path); } } @@ -417,7 +417,7 @@ public void testRecursiveDelete() throws Exception { Path grandparent = new Path("/gdir1"); for (int i = 1; i <= 10; i++) { - Path parent = new 
Path(grandparent, "pdir" +i); + Path parent = new Path(grandparent, "pdir" + i); Path child = new Path(parent, "child"); ContractTestUtils.touch(fs, child); } @@ -446,7 +446,7 @@ public void testRecursiveDelete() throws Exception { checkPath(grandparent); for (int i = 1; i <= 10; i++) { - Path parent = new Path(grandparent, "dir" +i); + Path parent = new Path(grandparent, "dir" + i); Path child = new Path(parent, "child"); checkPath(parent); checkPath(child); @@ -456,8 +456,8 @@ public void testRecursiveDelete() throws Exception { Path level0 = new Path("/level0"); for (int i = 1; i <= 3; i++) { - Path level1 = new Path(level0, "level" +i); - Path level2 = new Path(level1, "level" +i); + Path level1 = new Path(level0, "level" + i); + Path level2 = new Path(level1, "level" + i); Path level1File = new Path(level1, "file1"); Path level2File = new Path(level2, "file1"); ContractTestUtils.touch(fs, level1File); @@ -466,8 +466,8 @@ public void testRecursiveDelete() throws Exception { // Delete at sub directory level. for (int i = 1; i <= 3; i++) { - Path level1 = new Path(level0, "level" +i); - Path level2 = new Path(level1, "level" +i); + Path level1 = new Path(level0, "level" + i); + Path level2 = new Path(level1, "level" + i); fs.delete(level2, true); fs.delete(level1, true); } @@ -480,8 +480,8 @@ public void testRecursiveDelete() throws Exception { checkPath(level0); for (int i = 1; i <= 3; i++) { - Path level1 = new Path(level0, "level" +i); - Path level2 = new Path(level1, "level" +i); + Path level1 = new Path(level0, "level" + i); + Path level2 = new Path(level1, "level" + i); Path level1File = new Path(level1, "file1"); Path level2File = new Path(level2, "file1"); checkPath(level1); @@ -591,9 +591,9 @@ public void testListStatusWithIntermediateDir() throws Exception { // Wait until the filestatus is updated if (!enabledFileSystemPaths) { - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { - return fs.listStatus(parent).length!=0; + return fs.listStatus(parent).length != 0; } catch (IOException e) { LOG.error("listStatus() Failed", e); Assert.fail("listStatus() Failed"); @@ -644,7 +644,7 @@ public void testListStatusOnLargeDirectory() throws Exception { deleteRootDir(); // cleanup Set paths = new TreeSet<>(); int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2; - for(int i = 0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { Path p = new Path(root, String.valueOf(i)); fs.mkdirs(p); paths.add(p.getName()); @@ -677,7 +677,7 @@ public void testListStatusOnLargeDirectory() throws Exception { "Total directories listed do not match the existing directories", numDirs, fileStatuses.length); - for (int i=0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { assertTrue(paths.contains(fileStatuses[i].getPath().getName())); } } @@ -1278,7 +1278,7 @@ public void testTrash() throws Exception { Path trashPath = new Path(userTrashCurrent, testKeyName); // Wait until the TrashEmptier purges the key - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { return !o3fs.exists(trashPath); } catch (IOException e) { @@ -1292,9 +1292,9 @@ public void testTrash() throws Exception { Assert.assertEquals(1, fs.listStatus(userTrash).length); // wait for deletion of checkpoint dir - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { - return o3fs.listStatus(userTrash).length==0; + return o3fs.listStatus(userTrash).length == 0; } catch (IOException e) { LOG.error("Delete from Trash Failed", e); Assert.fail("Delete from Trash Failed"); 
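Likewise, the test hunks above normalize lambda arrows ("() ->" rather than "()->"), comparisons inside them ("length != 0"), and string concatenation ("\"pdir\" + i"). A small self-contained sketch of the same conventions, again with hypothetical names rather than Ozone test code:

// Hypothetical example (not Ozone code): lambda and operator spacing
// in the conforming form used by the test hunks above.
import java.util.function.Supplier;

public final class LambdaSpacingExample {

  private LambdaSpacingExample() { }

  static boolean poll(Supplier<Boolean> check) {
    return check.get();
  }

  public static void main(String[] args) {
    int length = args.length;
    boolean nonEmpty = poll(() -> length != 0);        // '() ->', not '()->'
    System.out.println("arg" + 1 + " present: " + nonEmpty); // spaces around '+'
  }
}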
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 974912a822b7..41539625ddbf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -188,7 +188,7 @@ private String getLeaderOMNodeAddr() { */ private String getHostFromAddress(String addr) { Optional hostOptional = getHostName(addr); - assert(hostOptional.isPresent()); + assert (hostOptional.isPresent()); return hostOptional.get(); } @@ -199,7 +199,7 @@ private String getHostFromAddress(String addr) { */ private int getPortFromAddress(String addr) { OptionalInt portOptional = getHostPort(addr); - assert(portOptional.isPresent()); + assert (portOptional.isPresent()); return portOptional.getAsInt(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java index 96b461f1f841..de731612023a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java @@ -191,7 +191,7 @@ public static void initClusterAndEnv() throws IOException, conf = new OzoneConfiguration(); conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); - conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL/2); + conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); if (isBucketFSOptimized) { bucketLayout = BucketLayout.FILE_SYSTEM_OPTIMIZED; @@ -546,7 +546,7 @@ public void testListStatusOnLargeDirectory() throws Exception { Path root = new Path("/" + volumeName + "/" + bucketName); Set paths = new TreeSet<>(); int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2; - for(int i = 0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { Path p = new Path(root, String.valueOf(i)); fs.mkdirs(p); paths.add(p.getName()); @@ -557,12 +557,12 @@ public void testListStatusOnLargeDirectory() throws Exception { "Total directories listed do not match the existing directories", numDirs, fileStatuses.length); - for (int i=0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { Assert.assertTrue(paths.contains(fileStatuses[i].getPath().getName())); } // Cleanup - for(int i = 0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { Path p = new Path(root, String.valueOf(i)); fs.delete(p, true); } @@ -1362,7 +1362,7 @@ public void testTrash() throws Exception { // Wait until the TrashEmptier purges the keys - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { return !ofs.exists(trashPath) && !ofs.exists(trashPath2); } catch (IOException e) { @@ -1372,7 +1372,7 @@ public void testTrash() throws Exception { } }, 1000, 180000); - if (isBucketFSOptimized){ + if (isBucketFSOptimized) { Assert.assertTrue(getOMMetrics() .getNumTrashAtomicDirRenames() > prevNumTrashAtomicDirRenames); } else { @@ -1385,10 +1385,10 @@ public void testTrash() throws Exception { } // wait for deletion of checkpoint dir - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { - return 
ofs.listStatus(userTrash).length==0 && - ofs.listStatus(userTrash2).length==0; + return ofs.listStatus(userTrash).length == 0 && + ofs.listStatus(userTrash2).length == 0; } catch (IOException e) { LOG.error("Delete from Trash Failed", e); Assert.fail("Delete from Trash Failed"); @@ -1397,7 +1397,7 @@ public void testTrash() throws Exception { }, 1000, 120000); // This condition should succeed once the checkpoint directory is deleted - if(isBucketFSOptimized){ + if (isBucketFSOptimized) { GenericTestUtils.waitFor( () -> getOMMetrics().getNumTrashAtomicDirDeletes() > prevNumTrashAtomicDirDeletes, 100, 180000); @@ -1444,7 +1444,7 @@ private void checkInvalidPath(Path path) throws Exception { @Test public void testRenameFile() throws Exception { final String dir = "/dir" + new Random().nextInt(1000); - Path dirPath = new Path(getBucketPath() +dir); + Path dirPath = new Path(getBucketPath() + dir); getFs().mkdirs(dirPath); Path file1Source = new Path(getBucketPath() + dir @@ -1466,7 +1466,7 @@ public void testRenameFile() throws Exception { @Test public void testRenameFileToDir() throws Exception { final String dir = "/dir" + new Random().nextInt(1000); - Path dirPath = new Path(getBucketPath() +dir); + Path dirPath = new Path(getBucketPath() + dir); getFs().mkdirs(dirPath); Path file1Destin = new Path(getBucketPath() + dir + "/file1"); @@ -1531,11 +1531,11 @@ public void testRenameDirToItsOwnSubDir() throws Exception { final Path sourceRoot = new Path(getBucketPath() + root); LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1); // rename should fail and return false - try{ + try { getFs().rename(sourceRoot, subDir1); fail("Should throw exception : Cannot rename a directory to" + " its own subdirectory"); - } catch (IllegalArgumentException e){ + } catch (IllegalArgumentException e) { //expected } } @@ -1560,7 +1560,7 @@ public void testRenameDestinationParentDoesntExist() throws Exception { try { getFs().rename(dir2SourcePath, destinPath); fail("Should fail as parent of dst does not exist!"); - } catch (FileNotFoundException fnfe){ + } catch (FileNotFoundException fnfe) { //expected } // (b) parent of dst is a file. /root_dir/file1/c @@ -1568,10 +1568,10 @@ public void testRenameDestinationParentDoesntExist() throws Exception { ContractTestUtils.touch(getFs(), filePath); Path newDestinPath = new Path(filePath, "c"); // rename shouldthrow exception - try{ + try { getFs().rename(dir2SourcePath, newDestinPath); fail("Should fail as parent of dst is a file!"); - } catch (IOException e){ + } catch (IOException e) { //expected } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java index 0fc23c360870..f7858d1e2a24 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java @@ -27,7 +27,7 @@ */ public final class ITestOzoneContractUtils { - private ITestOzoneContractUtils(){} + private ITestOzoneContractUtils() { } private static List fsoCombinations = Arrays.asList(new Object[] { // FSO configuration is a cluster level server side configuration. 
@@ -47,7 +47,7 @@ private ITestOzoneContractUtils(){} // and old buckets will be operated on }); - static List getFsoCombinations(){ + static List getFsoCombinations() { return fsoCombinations; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index f6c9a252bc74..784897aae75e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -66,7 +66,7 @@ public Path getTestPath() { return path; } - public static void initOzoneConfiguration(boolean fsoServer){ + public static void initOzoneConfiguration(boolean fsoServer) { fsOptimizedServer = fsoServer; } @@ -92,7 +92,7 @@ public static void createCluster() throws IOException { conf.addResource(CONTRACT_XML); - if (fsOptimizedServer){ + if (fsOptimizedServer) { // Default bucket layout is set to FSO in case of FSO server. conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index d741117ab6fc..c1bbcf44f384 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -59,7 +59,7 @@ public void init(int numDatanodes, int datanodePipelineLimit) cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes/3) + .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) .setHbInterval(2000) .setHbProcessorInterval(1000) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java index 4ebcab189da7..b4d7270e9a2a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java @@ -154,7 +154,7 @@ public void testMultiRaft() throws Exception { shutdown(); } private void assertNotSamePeers() { - nodeManager.getAllNodes().forEach((dn) ->{ + nodeManager.getAllNodes().forEach((dn) -> { Collection peers = nodeManager.getPeerList(dn); Assert.assertFalse(peers.contains(dn)); List trimList = nodeManager.getAllNodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index 58c0c62c2eb6..457e12afd591 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -61,7 +61,7 @@ public void init(int numDatanodes) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) 
.setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes/3) + .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) .setHbInterval(2000) .setHbProcessorInterval(1000) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java index 724d34c2d6a6..4c97b51c14f0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java @@ -86,10 +86,10 @@ public void testScmSafeMode() throws Exception { int datanodeCount = 6; setup(datanodeCount); - waitForRatis3NodePipelines(datanodeCount/3); + waitForRatis3NodePipelines(datanodeCount / 3); waitForRatis1NodePipelines(datanodeCount); - int totalPipelineCount = datanodeCount + (datanodeCount/3); + int totalPipelineCount = datanodeCount + (datanodeCount / 3); //Cluster is started successfully cluster.stop(); @@ -178,7 +178,7 @@ public void testScmSafeMode() throws Exception { }); waitForRatis1NodePipelines(datanodeCount); - waitForRatis3NodePipelines(datanodeCount/3); + waitForRatis3NodePipelines(datanodeCount / 3); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index e7f6f34a5bab..800992d52de0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -189,7 +189,7 @@ public void shutdown() throws IOException, InterruptedException { * Some tests repeatedly modify the cluster. Helper function to reload the * latest SCM state. */ - private void loadSCMState(){ + private void loadSCMState() { scm = cluster.getStorageContainerManager(); scmContainerManager = scm.getContainerManager(); scmPipelineManager = scm.getPipelineManager(); @@ -503,7 +503,7 @@ private Boolean injectSCMFailureDuringSCMUpgrade() IOException { // For some tests this could get called in a different thread context. // We need to guard concurrent updates to the cluster. - synchronized(cluster) { + synchronized (cluster) { cluster.restartStorageContainerManager(true); loadSCMState(); } @@ -1090,7 +1090,7 @@ public void testFinalizationWithFailureInjectionHelper( // Verify that new pipeline can be created with upgraded datanodes. try { testPostUpgradePipelineCreation(); - } catch(SCMException e) { + } catch (SCMException e) { // If pipeline creation fails, make sure that there is a valid reason // for this i.e. all datanodes are already part of some pipeline. 
for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 6e56a4d34b07..919b9b2a2098 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -677,7 +677,7 @@ protected void initializeConfiguration() throws IOException { // In this way safemode exit will happen only when atleast we have one // pipeline. conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, - numOfDatanodes >=3 ? 3 : 1); + numOfDatanodes >= 3 ? 3 : 1); configureTrace(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java index 890462098a67..ab2405ab8cd0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java @@ -189,7 +189,7 @@ private void ensureNotShutdown() throws IOException { private Thread reapClusters() { Thread t = new Thread(() -> { - while(!shutdown || !expiredClusters.isEmpty()) { + while (!shutdown || !expiredClusters.isEmpty()) { try { // Why not just call take and wait forever until interrupt is // thrown? Inside MiniCluster.shutdown, there are places where it @@ -251,7 +251,7 @@ private Thread createClusters() { } private void destroyRemainingClusters() { - while(!clusters.isEmpty()) { + while (!clusters.isEmpty()) { try { MiniOzoneCluster cluster = clusters.poll(); if (cluster != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 440f5ca7574e..556af06044d4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -490,7 +490,7 @@ protected OMHAService createOMService() throws IOException, try { initOMHAConfig(); - for (int i = 1; i<= numOfOMs; i++) { + for (int i = 1; i <= numOfOMs; i++) { // Set nodeId String nodeId = OM_NODE_ID_PREFIX + i; OzoneConfiguration config = new OzoneConfiguration(conf); @@ -564,7 +564,7 @@ protected SCMHAService createSCMService() try { initSCMHAConfig(); - for (int i = 1; i<= numOfSCMs; i++) { + for (int i = 1; i <= numOfSCMs; i++) { // Set nodeId String nodeId = SCM_NODE_ID_PREFIX + i; String metaDirPath = path + "/" + nodeId; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index e4cb1a153165..dba973978055 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -129,7 +129,7 @@ public static void closeAllContainers(EventPublisher eventPublisher, */ public static void performOperationOnKeyContainers( CheckedConsumer consumer, - List omKeyLocationInfoGroups) throws Exception{ + List 
omKeyLocationInfoGroups) throws Exception { for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyLocationInfoGroups) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java index 2b0dfc2033ac..8663c7250b38 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java @@ -64,7 +64,7 @@ public static void setup() throws Exception { @AfterClass public static void cleanup() throws Exception { - if(cluster != null) { + if (cluster != null) { cluster.shutdown(); } } @@ -100,7 +100,7 @@ public void testContainerBalancerCLIOperations() throws Exception { // modify this after balancer is fully completed try { Thread.sleep(100); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } running = containerBalancerClient.getContainerBalancerStatus(); assertFalse(running); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 55164688a16d..e15d7b1f1482 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -64,7 +64,7 @@ public static void setup() throws Exception { @AfterClass public static void cleanup() throws Exception { - if(cluster != null) { + if (cluster != null) { cluster.shutdown(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index e7436e06e852..2b095144d8be 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -119,7 +119,7 @@ public void testStartMultipleDatanodes() throws Exception { cluster.waitForClusterToBeReady(); List datanodes = cluster.getHddsDatanodes(); assertEquals(numberOfNodes, datanodes.size()); - for(HddsDatanodeService dn : datanodes) { + for (HddsDatanodeService dn : datanodes) { // Create a single member pipe line List dns = new ArrayList<>(); dns.add(dn.getDatanodeDetails()); @@ -132,7 +132,7 @@ public void testStartMultipleDatanodes() throws Exception { .build(); // Verify client is able to connect to the container - try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)){ + try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)) { client.connect(); assertTrue(client.isConnected(pipeline.getFirstNode())); } @@ -285,7 +285,7 @@ public void testContainerRandomPort() throws IOException { } private void createMalformedIDFile(File malformedFile) - throws IOException{ + throws IOException { malformedFile.delete(); DatanodeDetails id = randomDatanodeDetails(); ContainerUtils.writeDatanodeDetailsTo(id, malformedFile); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 494f74c04706..e74a98b1cc14 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -138,7 +138,7 @@ public final class TestSecureOzoneCluster { public Timeout timeout = Timeout.seconds(80); @Rule - public TemporaryFolder folder= new TemporaryFolder(); + public TemporaryFolder folder = new TemporaryFolder(); private MiniKdc miniKdc; private OzoneConfiguration conf; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java index b37f78571ae9..58021f30ebf8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java @@ -177,12 +177,12 @@ public InitResponse init() throws CertificateException { } @Override - public String getSignatureAlgorithm(){ + public String getSignatureAlgorithm() { return securityConfig.getSignatureAlgo(); } @Override - public String getSecurityProvider(){ + public String getSecurityProvider() { return securityConfig.getProvider(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java index e03c5a9f44c9..82f23a6b1144 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java @@ -314,7 +314,7 @@ public void testReleaseBuffersOnException() throws Exception { // is updated to the latest index in putBlock response. watcher.watchForCommit(replies.get(1).getLogIndex() + 100); Assert.fail("Expected exception not thrown"); - } catch(IOException ioe) { + } catch (IOException ioe) { // with retry count set to noRetry and a lower watch request // timeout, watch request will eventually // fail with TimeoutIOException from ratis client or the client diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java index ac62bc0a1a10..b16b82473da6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java @@ -59,7 +59,7 @@ /** * Tests Close Container Exception handling by Ozone Client. */ -public class TestDiscardPreallocatedBlocks{ +public class TestDiscardPreallocatedBlocks { /** * Set a timeout for each test. 
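Every hunk in this change applies the same few whitespace conventions (consistent with the Hadoop/Ozone checkstyle rules): a space after the if/for/while/try/catch keyword, spaces around binary and ternary operators, and a space before the opening brace. A condensed recap using a hypothetical method, shown in both forms; this is illustrative only and not itself part of the patch:

    // As flagged on the "-" lines:
    static int countEven(int n) {
      int c = 0;
      for(int i = 0; i<n; i++) {
        if(i % 2 == 0) { c++; }
      }
      return (n != 0)?c:0;
    }

    // As accepted on the "+" lines:
    static int countEven(int n) {
      int c = 0;
      for (int i = 0; i < n; i++) {
        if (i % 2 == 0) { c++; }
      }
      return (n != 0) ? c : 0;
    }

Both versions compile identically; the change is purely presentational, which is why every hunk in this patch is behavior-preserving.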
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index e1358eb83d1b..155f785c2aa0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -212,7 +212,7 @@ public void testWriteSmallFile() throws Exception { OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); String data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize/2); + .getFixedLengthString(keyString, chunkSize / 2); key.write(data.getBytes(UTF_8)); // get the name of a valid container Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 9a4d69151adb..4ebcc8745c0c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -178,7 +178,7 @@ public static void init() throws Exception { */ @AfterClass public static void shutdown() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } @@ -251,7 +251,7 @@ private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { byte[] fileContent; int len = 0; - try(OzoneInputStream is = bucket.readKey(keyName)) { + try (OzoneInputStream is = bucket.readKey(keyName)) { fileContent = new byte[value.getBytes(StandardCharsets.UTF_8).length]; len = is.read(fileContent); } @@ -267,7 +267,7 @@ private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { } private OzoneBucket createVolumeAndBucket(String volumeName, - String bucketName) throws Exception{ + String bucketName) throws Exception { store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs bucketArgs = BucketArgs.newBuilder() @@ -333,7 +333,7 @@ public void testKeyWithEncryptionAndGdpr() throws Exception { byte[] fileContent; int len = 0; - try(OzoneInputStream is = bucket.readKey(keyName)) { + try (OzoneInputStream is = bucket.readKey(keyName)) { fileContent = new byte[value.getBytes(StandardCharsets.UTF_8).length]; len = is.read(fileContent); } @@ -498,7 +498,7 @@ public void testMultipartUploadWithEncryption(OzoneBucket bucket, // Read different data lengths and starting from different offsets and // verify the data matches. 
Random random = new Random(); - int randomSize = random.nextInt(keySize/2); + int randomSize = random.nextInt(keySize / 2); int randomOffset = random.nextInt(keySize - randomSize); int[] readDataSizes = {keySize, keySize / 3 + 1, BLOCK_SIZE, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 636029c51cc1..9ea04d453fac 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -140,7 +140,7 @@ static void startCluster(OzoneConfiguration conf) throws Exception { * Close OzoneClient and shutdown MiniOzoneCluster. */ static void shutdownCluster() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } @@ -609,11 +609,11 @@ public void testListMultipartUploadParts() throws Exception { generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); - String partName2 =uploadPart(bucket, keyName, uploadID, 2, + String partName2 = uploadPart(bucket, keyName, uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(2, partName2); - String partName3 =uploadPart(bucket, keyName, uploadID, 3, + String partName3 = uploadPart(bucket, keyName, uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(3, partName3); @@ -711,11 +711,11 @@ public void testListMultipartUploadPartsWithContinuation() generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); - String partName2 =uploadPart(bucket, keyName, uploadID, 2, + String partName2 = uploadPart(bucket, keyName, uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(2, partName2); - String partName3 =uploadPart(bucket, keyName, uploadID, 3, + String partName3 = uploadPart(bucket, keyName, uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(3, partName3); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java index 3cda449c57df..ea992f519826 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java @@ -191,8 +191,8 @@ public void testGroupMismatchExceptionHandling() throws Exception { @Test public void testMaxRetriesByOzoneClient() throws Exception { String keyName = getKeyName(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, (MAX_RETRIES+1) * blockSize); + OzoneOutputStream key = createKey( + keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize); Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); List entries = keyOutputStream.getStreamEntries(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index e70087a088d4..128c407b2482 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -203,7 +203,7 @@ static void startCluster(OzoneConfiguration conf) throws Exception { * Close OzoneClient and shutdown MiniOzoneCluster. */ static void shutdownCluster() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } @@ -224,7 +224,7 @@ public static void setOzClient(OzoneClient ozClient) { TestOzoneRpcClientAbstract.ozClient = ozClient; } - public static void setOzoneManager(OzoneManager ozoneManager){ + public static void setOzoneManager(OzoneManager ozoneManager) { TestOzoneRpcClientAbstract.ozoneManager = ozoneManager; } @@ -1091,7 +1091,7 @@ public void testBucketUsedNamespace() throws IOException { private void writeKey(OzoneBucket bucket, String keyName, ReplicationFactor replication, String value, int valueLength) - throws IOException{ + throws IOException { OzoneOutputStream out = bucket.createKey(keyName, valueLength, RATIS, replication, new HashMap<>()); out.write(value.getBytes(UTF_8)); @@ -1100,7 +1100,7 @@ private void writeKey(OzoneBucket bucket, String keyName, private void writeFile(OzoneBucket bucket, String keyName, ReplicationFactor replication, String value, int valueLength) - throws IOException{ + throws IOException { OzoneOutputStream out = bucket.createFile(keyName, valueLength, RATIS, replication, true, true); out.write(value.getBytes(UTF_8)); @@ -1901,33 +1901,33 @@ public void testListVolume() throws IOException { String volBase = "vol-list-"; //Create 10 volume vol-list-a-0- to vol-list-a-9- String volBaseNameA = volBase + "a-"; - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { store.createVolume( volBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5)); } //Create 10 volume vol-list-b-0- to vol-list-b-9- String volBaseNameB = volBase + "b-"; - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { store.createVolume( volBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5)); } Iterator volIterator = store.listVolumes(volBase); int totalVolumeCount = 0; - while(volIterator.hasNext()) { + while (volIterator.hasNext()) { volIterator.next(); totalVolumeCount++; } Assert.assertEquals(20, totalVolumeCount); Iterator volAIterator = store.listVolumes( volBaseNameA); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volAIterator.next().getName() .startsWith(volBaseNameA + i + "-")); } Assert.assertFalse(volAIterator.hasNext()); Iterator volBIterator = store.listVolumes( volBaseNameB); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volBIterator.next().getName() .startsWith(volBaseNameB + i + "-")); } @@ -1950,7 +1950,7 @@ public void testListBucket() //Create 10 buckets in vol-a- and 10 in vol-b- String bucketBaseNameA = "bucket-a-"; - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { volA.createBucket( bucketBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5)); volB.createBucket( @@ -1958,7 +1958,7 @@ public void testListBucket() } //Create 10 buckets in vol-a- and 10 in vol-b- String bucketBaseNameB = "bucket-b-"; - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { volA.createBucket( bucketBaseNameB + i + "-" + 
RandomStringUtils.randomNumeric(5)); volB.createBucket( @@ -1967,7 +1967,7 @@ public void testListBucket() Iterator volABucketIter = volA.listBuckets("bucket-"); int volABucketCount = 0; - while(volABucketIter.hasNext()) { + while (volABucketIter.hasNext()) { volABucketIter.next(); volABucketCount++; } @@ -1975,7 +1975,7 @@ public void testListBucket() Iterator volBBucketIter = volA.listBuckets("bucket-"); int volBBucketCount = 0; - while(volBBucketIter.hasNext()) { + while (volBBucketIter.hasNext()) { volBBucketIter.next(); volBBucketCount++; } @@ -1984,7 +1984,7 @@ public void testListBucket() Iterator volABucketAIter = volA.listBuckets("bucket-a-"); int volABucketACount = 0; - while(volABucketAIter.hasNext()) { + while (volABucketAIter.hasNext()) { volABucketAIter.next(); volABucketACount++; } @@ -1992,21 +1992,21 @@ public void testListBucket() Iterator volBBucketBIter = volA.listBuckets("bucket-b-"); int volBBucketBCount = 0; - while(volBBucketBIter.hasNext()) { + while (volBBucketBIter.hasNext()) { volBBucketBIter.next(); volBBucketBCount++; } Assert.assertEquals(10, volBBucketBCount); Iterator volABucketBIter = volA.listBuckets( "bucket-b-"); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volABucketBIter.next().getName() .startsWith(bucketBaseNameB + i + "-")); } Assert.assertFalse(volABucketBIter.hasNext()); Iterator volBBucketAIter = volB.listBuckets( "bucket-a-"); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volBBucketAIter.next().getName() .startsWith(bucketBaseNameA + i + "-")); } @@ -2021,7 +2021,7 @@ public void testListBucketsOnEmptyVolume() store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); Iterator buckets = vol.listBuckets(""); - while(buckets.hasNext()) { + while (buckets.hasNext()) { fail(); } } @@ -2115,7 +2115,7 @@ public void testListKey() Iterator volABucketAIter = volAbucketA.listKeys("key-"); int volABucketAKeyCount = 0; - while(volABucketAIter.hasNext()) { + while (volABucketAIter.hasNext()) { volABucketAIter.next(); volABucketAKeyCount++; } @@ -2123,7 +2123,7 @@ public void testListKey() Iterator volABucketBIter = volAbucketB.listKeys("key-"); int volABucketBKeyCount = 0; - while(volABucketBIter.hasNext()) { + while (volABucketBIter.hasNext()) { volABucketBIter.next(); volABucketBKeyCount++; } @@ -2131,7 +2131,7 @@ public void testListKey() Iterator volBBucketAIter = volBbucketA.listKeys("key-"); int volBBucketAKeyCount = 0; - while(volBBucketAIter.hasNext()) { + while (volBBucketAIter.hasNext()) { volBBucketAIter.next(); volBBucketAKeyCount++; } @@ -2139,7 +2139,7 @@ public void testListKey() Iterator volBBucketBIter = volBbucketB.listKeys("key-"); int volBBucketBKeyCount = 0; - while(volBBucketBIter.hasNext()) { + while (volBBucketBIter.hasNext()) { volBBucketBIter.next(); volBBucketBKeyCount++; } @@ -2147,14 +2147,14 @@ public void testListKey() Iterator volABucketAKeyAIter = volAbucketA.listKeys("key-a-"); int volABucketAKeyACount = 0; - while(volABucketAKeyAIter.hasNext()) { + while (volABucketAKeyAIter.hasNext()) { volABucketAKeyAIter.next(); volABucketAKeyACount++; } Assert.assertEquals(10, volABucketAKeyACount); Iterator volABucketAKeyBIter = volAbucketA.listKeys("key-b-"); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volABucketAKeyBIter.next().getName() .startsWith("key-b-" + i + "-")); } @@ -2171,7 +2171,7 @@ public void testListKeyOnEmptyBucket() vol.createBucket(bucket); OzoneBucket buc = vol.getBucket(bucket); Iterator 
keys = buc.listKeys(""); - while(keys.hasNext()) { + while (keys.hasNext()) { fail(); } } @@ -2821,11 +2821,11 @@ public void testListMultipartUploadParts() throws Exception { generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); - String partName2 =uploadPart(bucket, keyName, uploadID, 2, + String partName2 = uploadPart(bucket, keyName, uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(2, partName2); - String partName3 =uploadPart(bucket, keyName, uploadID, 3, + String partName3 = uploadPart(bucket, keyName, uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(3, partName3); @@ -2872,11 +2872,11 @@ public void testListMultipartUploadPartsWithContinuation() generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); - String partName2 =uploadPart(bucket, keyName, uploadID, 2, + String partName2 = uploadPart(bucket, keyName, uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(2, partName2); - String partName3 =uploadPart(bucket, keyName, uploadID, 3, + String partName3 = uploadPart(bucket, keyName, uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(3, partName3); @@ -3261,7 +3261,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { List expectedAcls = getAclList(new OzoneConfiguration()); // Case:1 Add new acl permission to existing acl. - if(expectedAcls.size()>0) { + if (expectedAcls.size() > 0) { OzoneAcl oldAcl = expectedAcls.get(0); OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), ACLType.READ_ACL, oldAcl.getAclScope()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java index 8ad62f222b91..c5e54db832d7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java @@ -162,7 +162,7 @@ private static void emptyAuditLog() throws IOException { * Close OzoneClient and shutdown MiniOzoneCluster. */ private static void shutdownCluster() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } @@ -246,28 +246,28 @@ public void testXXXAclFailureAudits() throws Exception { // xxxAcl will fail as current ugi user doesn't have the required access // for volume - try{ + try { List acls = store.getAcl(volObj); } catch (Exception ex) { verifyLog(OMAction.GET_ACL.name(), volumeName, AuditEventStatus.FAILURE.name()); } - try{ + try { store.addAcl(volObj, USER_ACL); } catch (Exception ex) { verifyLog(OMAction.ADD_ACL.name(), volumeName, AuditEventStatus.FAILURE.name()); } - try{ + try { store.removeAcl(volObj, USER_ACL); } catch (Exception ex) { verifyLog(OMAction.REMOVE_ACL.name(), volumeName, AuditEventStatus.FAILURE.name()); } - try{ + try { store.setAcl(volObj, aclListToAdd); } catch (Exception ex) { verifyLog(OMAction.SET_ACL.name(), volumeName, "johndoe", "jane", @@ -282,16 +282,16 @@ private void verifyLog(String... expected) throws Exception { GenericTestUtils.waitFor(() -> (lines != null) ? 
true : false, 100, 60000); - try{ + try { // When log entry is expected, the log file will contain one line and // that must be equal to the expected string assertTrue(lines.size() != 0); - for(String exp: expected){ + for (String exp: expected) { assertTrue(lines.get(0).contains(exp)); } - } catch (AssertionError ex){ + } catch (AssertionError ex) { LOG.error("Error occurred in log verification", ex); - if(lines.size() != 0){ + if (lines.size() != 0) { LOG.error("Actual line ::: " + lines.get(0)); LOG.error("Expected tokens ::: " + Arrays.toString(expected)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java index 6cd85a1a5bad..5ac78b8f0f1f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java @@ -191,7 +191,7 @@ private void testReadKey(String volumeName, String bucketName, } private void testListStatus(String volumeName, String bucketName, - String keyName, boolean versioning) throws Exception{ + String keyName, boolean versioning) throws Exception { OzoneVolume volume = objectStore.getVolume(volumeName); OzoneBucket ozoneBucket = volume.getBucket(bucketName); List ozoneFileStatusList = ozoneBucket.listStatus(keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java index 751fd26ab81a..791a2267eac8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java @@ -139,7 +139,7 @@ public void init() throws Exception { */ @After public void shutdown() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index a52549956287..5abc09ec0f9d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -119,7 +119,7 @@ public static void init() throws Exception { .setCertificateClient(certificateClientTest) .build(); secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf), - 60 *60, certificateClientTest.getCertificate(). + 60 * 60, certificateClientTest.getCertificate(). 
getSerialNumber().toString()); secretManager.start(certificateClientTest); cluster.getOzoneManager().startSecretManager(); @@ -168,7 +168,7 @@ public void testPutKeySuccessWithBlockToken() throws Exception { OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); byte[] fileContent; - try(OzoneInputStream is = bucket.readKey(keyName)) { + try (OzoneInputStream is = bucket.readKey(keyName)) { fileContent = new byte[value.getBytes(UTF_8).length]; is.read(fileContent); } @@ -342,7 +342,7 @@ public void testZReadKeyWithUnhealthyContainerReplica() { */ @AfterClass public static void shutdown() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index d6821c4aa654..aa6ff93d514b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -364,7 +364,7 @@ public void testWatchForCommitForGroupMismatchException() throws Exception { .watchForCommit(reply.getLogIndex() + new Random().nextInt(100) + 10); Assert.fail("Expected exception not thrown"); - } catch(Exception e) { + } catch (Exception e) { Assert.assertTrue(HddsClientUtils .checkForException(e) instanceof GroupMismatchException); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java index e9b82e4c1d5f..7f0ab38f9f3e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java @@ -198,7 +198,7 @@ void validateData(byte[] inputData, int offset, byte[] readData) { byte[] expectedData = new byte[readDataLen]; System.arraycopy(inputData, (int) offset, expectedData, 0, readDataLen); - for (int i=0; i < readDataLen; i++) { + for (int i = 0; i < readDataLen; i++) { Assert.assertEquals("Read data at does not match the input data at " + "position " + (offset + i), expectedData[i], readData[i]); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java index f16c3ed9001d..4b83429e153c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java @@ -77,18 +77,18 @@ public TestKeyInputStream(ContainerLayoutVersion layout) { private void randomSeek(int dataLength, KeyInputStream keyInputStream, byte[] inputData) throws Exception { // Do random seek. - for (int i=0; i=100; i-=20) { + for (int i = dataLength - 100; i >= 100; i -= 20) { validate(keyInputStream, inputData, i, 20); } // Start from begin and seek such that we read partially chunks. 
- for (int i=0; i {})); + c -> { })); } HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null); @@ -182,7 +182,7 @@ public void testContainerMetrics() throws Exception { } // clean up volume dir File file = new File(path); - if(file.exists()) { + if (file.exists()) { FileUtil.fullyDelete(file); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 4f0c437d68e1..c7bf1f858d92 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -487,7 +487,7 @@ static void runAsyncTests( final List computeResults = new LinkedList<>(); int requestCount = 1000; // Create a bunch of Async calls from this test. - for(int x = 0; x {})); + c -> { })); } HddsDispatcher dispatcher = new HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index a5fcbb9ff678..6ff49c69f1dd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -169,7 +169,7 @@ public void testClientServer() throws Exception { .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, (dn, conf) -> new XceiverServerGrpc(dd, conf, - hddsDispatcher, caClient), (dn, p) -> {}, (p) -> {}); + hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { }); } private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, @@ -194,7 +194,7 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, Handler.getHandlerForContainerType(containerType, conf, dd.getUuid().toString(), containerSet, volumeSet, metrics, - c -> {})); + c -> { })); } HddsDispatcher hddsDispatcher = new HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, @@ -234,7 +234,7 @@ private static void runTestClientServerRatis(RpcType rpc, int numNodes) XceiverClientRatis::newXceiverClientRatis, TestSecureContainerServer::newXceiverServerRatis, (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p), - (p) -> {}); + (p) -> { }); } private static void runTestClientServer( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java index af39055ad264..2a5873071a5a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java @@ -224,7 +224,7 @@ public void testHddsVolumeFailureOnContainerFileCorrupt() throws Exception { try { c1.close(); Assert.fail(); - } catch(Exception e) { + } catch (Exception e) { Assert.assertTrue(e 
instanceof IOException); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java index a01d4928fca8..f67783bddb13 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java @@ -152,28 +152,28 @@ private Path depthBFS(FileSystem fs, FileStatus[] fileStatuses, int span, int actualDepth) throws IOException { int depth = 0; Path p = null; - if(span > 0){ + if (span > 0) { depth = 0; - } else if(span == 0){ + } else if (span == 0) { depth = 1; - } else{ + } else { LOG.info("Span value can never be negative"); } LinkedList queue = new LinkedList(); FileStatus f1 = fileStatuses[0]; queue.add(f1); - while(queue.size() != 0){ + while (queue.size() != 0) { FileStatus f = queue.poll(); FileStatus[] temp = fs.listStatus(f.getPath()); - if(temp.length > 0){ + if (temp.length > 0) { ++depth; - for(int i = 0; i < temp.length; i++){ + for (int i = 0; i < temp.length; i++) { queue.add(temp[i]); } } - if(span == 0){ + if (span == 0) { p = f.getPath(); - } else{ + } else { p = f.getPath().getParent(); } } @@ -188,17 +188,17 @@ private Path depthBFS(FileSystem fs, FileStatus[] fileStatuses, * and count the span directories. */ - private int spanCheck(FileSystem fs, int span, Path p) throws IOException{ + private int spanCheck(FileSystem fs, int span, Path p) throws IOException { int sp = 0; int depth = 0; - if(span >= 0){ + if (span >= 0) { depth = 0; - } else{ + } else { LOG.info("Span value can never be negative"); } FileStatus[] fileStatuses = fs.listStatus(p); - for (FileStatus fileStatus : fileStatuses){ - if(fileStatus.isDirectory()){ + for (FileStatus fileStatus : fileStatuses) { + if (fileStatus.isDirectory()) { ++sp; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index d691f2c324dc..68cc82428736 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -348,7 +348,7 @@ public void testCreateDirectory() throws IOException { OmKeyArgs keyArgs = createBuilder() .setKeyName(keyNameBuf.toString()) .build(); - for (int i =0; i< 5; i++) { + for (int i = 0; i < 5; i++) { keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5)); } String keyName = keyNameBuf.toString(); @@ -418,7 +418,7 @@ public void testOpenFile() throws IOException { // recursive flag is set to false StringBuffer keyNameBuf = new StringBuffer(); keyNameBuf.append(RandomStringUtils.randomAlphabetic(5)); - for (int i =0; i< 5; i++) { + for (int i = 0; i < 5; i++) { keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5)); } keyName = keyNameBuf.toString(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java index a59983fa3fa3..beaf10c71da2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java @@ -70,7 +70,7 @@ public void setup() throws Exception { @After public void shutdown() throws Exception { - if (dbStore!=null){ + if (dbStore != null) { dbStore.close(); } } @@ -78,7 +78,7 @@ public void shutdown() throws Exception { @Test public void testOMDB() throws Exception { File newFolder = folder.newFolder(); - if(!newFolder.exists()) { + if (!newFolder.exists()) { Assert.assertTrue(newFolder.mkdirs()); } // Dummy om.db with only keyTable @@ -88,11 +88,11 @@ public void testOMDB() throws Exception { .addTable("keyTable") .build(); // insert 5 keys - for (int i = 0; i<5; i++) { + for (int i = 0; i < 5; i++) { OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("sampleVol", - "sampleBuck", "key" + (i+1), HddsProtos.ReplicationType.STAND_ALONE, + "sampleBuck", "key" + (i + 1), HddsProtos.ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE); - String key = "key"+ (i); + String key = "key" + (i); Table keyTable = dbStore.getTable("keyTable"); byte[] arr = value.getProtobuf(CURRENT_VERSION).toByteArray(); keyTable.put(key.getBytes(UTF_8), arr); @@ -111,7 +111,7 @@ public void testOMDB() throws Exception { try { getKeyNames(dbScanner); Assert.fail("IllegalArgumentException is expected"); - }catch (IllegalArgumentException e){ + } catch (IllegalArgumentException e) { //ignore } @@ -177,7 +177,7 @@ private List getKeyNames(DBScanner scanner) scanner.setTableName("keyTable"); scanner.call(); Assert.assertFalse(scanner.getScannedObjects().isEmpty()); - for (Object o : scanner.getScannedObjects()){ + for (Object o : scanner.getScannedObjects()) { OmKeyInfo keyInfo = (OmKeyInfo)o; keyNames.add(keyInfo.getKeyName()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 941097324a61..9b9d6d8e0bae 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -521,7 +521,7 @@ private void doKeyOps(OmKeyArgs keyArgs) { } try { - long id = (keySession != null)?keySession.getId():0; + long id = (keySession != null) ? keySession.getId() : 0; writeClient.commitKey(keyArgs, id); } catch (IOException ignored) { } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java index adccc7aabded..f77386198e40 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java @@ -70,7 +70,7 @@ public void testStartupSlvLessThanMlv() throws Exception { try { clusterBuilder.build(); Assert.fail("Expected OMException due to incorrect MLV on OM creation."); - } catch(OMException e) { + } catch (OMException e) { String expectedMessage = String.format("Cannot initialize " + "VersionManager. 
Metadata layout version (%s) > software layout" + " version (%s)", mlv, largestSlv); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java index 36a17a83e708..389217a3e2f6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java @@ -240,7 +240,7 @@ public void testThreeNodeOMservice() throws Exception { OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; break; case omNode2Id : - expectedPeerAddress = "0.0.0.0:"+ + expectedPeerAddress = "0.0.0.0:" + OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; break; case omNode3Id : @@ -323,7 +323,7 @@ public void testOMHAWithUnresolvedAddresses() throws Exception { OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; break; case omNode2Id : - expectedPeerAddress = "0.0.0.0:"+ + expectedPeerAddress = "0.0.0.0:" + OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; break; case omNode3Id : diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java index 06a3a974c9d3..3af7f011b07d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java @@ -184,7 +184,7 @@ public void testSetKeyAcl() throws Exception { public void testAddPrefixAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; - String prefixName = RandomStringUtils.randomAlphabetic(5) +"/"; + String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, READ, DEFAULT); @@ -201,7 +201,7 @@ public void testAddPrefixAcl() throws Exception { public void testRemovePrefixAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; - String prefixName = RandomStringUtils.randomAlphabetic(5) +"/"; + String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, READ, ACCESS); OzoneAcl userAcl1 = new OzoneAcl(USER, "remote", @@ -237,7 +237,7 @@ public void testRemovePrefixAcl() throws Exception { public void testSetPrefixAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; - String prefixName = RandomStringUtils.randomAlphabetic(5) +"/"; + String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, READ, DEFAULT); @@ -304,7 +304,7 @@ private void testAddAcl(String remoteUserName, OzoneObj ozoneObj, } private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, - OzoneAcl userAcl) throws Exception{ + OzoneAcl userAcl) throws Exception { ObjectStore objectStore = getObjectStore(); // As by default create will add some default acls in RpcClient. 
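The ACL hunks above (TestOzoneRpcClientForAclAuditLog, TestOzoneManagerHAWithACL) all drive the same grant/revoke flow. A minimal self-contained sketch of that flow follows; OzoneAcl, USER, READ, and DEFAULT are the symbols used in the hunks, while the OzoneObjInfo construction and the "remoteUser" name are assumptions taken from the surrounding test setup:

    import org.apache.commons.lang3.RandomStringUtils;
    import org.apache.hadoop.ozone.OzoneAcl;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.security.acl.OzoneObj;
    import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
    import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
    import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
    import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;

    final class PrefixAclSketch {
      private PrefixAclSketch() { }

      // Grants a default READ ACL on a random prefix, then revokes it,
      // mirroring the add/remove pattern exercised by the tests above.
      static void grantAndRevoke(ObjectStore store, String volume,
          String bucket) throws Exception {
        String prefixName = RandomStringUtils.randomAlphabetic(5) + "/";
        OzoneAcl userAcl = new OzoneAcl(USER, "remoteUser", READ, DEFAULT);
        OzoneObj prefixObj = OzoneObjInfo.Builder.newBuilder()
            .setResType(OzoneObj.ResourceType.PREFIX)
            .setStoreType(OzoneObj.StoreType.OZONE)
            .setVolumeName(volume)
            .setBucketName(bucket)
            .setPrefixName(prefixName)
            .build();
        store.addAcl(prefixObj, userAcl);    // grant
        store.removeAcl(prefixObj, userAcl); // revoke
      }
    }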
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java index 7dbacc455f54..0c2f526870ec 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java @@ -537,7 +537,7 @@ private void validateListParts(OzoneBucket ozoneBucket, String keyName, Assert.assertTrue(partInfoList.size() == partsMap.size()); - for (int i=0; i< partsMap.size(); i++) { + for (int i = 0; i < partsMap.size(); i++) { Assert.assertEquals(partsMap.get(partInfoList.get(i).getPartNumber()), partInfoList.get(i).getPartName()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java index 9b0d8bcc9400..9de622ca0d2f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java @@ -88,7 +88,7 @@ public void testGetServiceList() throws Exception { ObjectMapper objectMapper = new ObjectMapper(); TypeReference> serviceInfoReference = - new TypeReference>() {}; + new TypeReference>() { }; List serviceInfos = objectMapper.readValue( serviceListJson, serviceInfoReference); Map serviceMap = new HashMap<>(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java index 08f465f63573..db5c32759541 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java @@ -69,7 +69,7 @@ public void testScmSnapshot() throws Exception { testSnapshot(ozoneCluster); } - public static void testSnapshot(MiniOzoneCluster cluster) throws Exception{ + public static void testSnapshot(MiniOzoneCluster cluster) throws Exception { GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer .captureLogs(LoggerFactory.getLogger( ReconStorageContainerManagerFacade.class)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index 2818ed0ab52e..9e21311936dc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -350,7 +350,7 @@ private LinkedTreeMap getContainerResponseMap(String containerResponse, * For test purpose each container will have only one key. 
*/ private void addKeys(int start, int end) throws Exception { - for(int i = start; i < end; i++) { + for (int i = start; i < end; i++) { Pipeline pipeline = HddsTestUtils.getRandomPipeline(); List omKeyLocationInfoList = new ArrayList<>(); BlockID blockID = new BlockID(i, 1); @@ -359,7 +359,7 @@ private void addKeys(int start, int end) throws Exception { omKeyLocationInfoList.add(omKeyLocationInfo1); OmKeyLocationInfoGroup omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, omKeyLocationInfoList); - writeDataToOm("key"+i, "bucket"+i, "vol"+i, + writeDataToOm("key" + i, "bucket" + i, "vol" + i, Collections.singletonList(omKeyLocationInfoGroup)); } } @@ -367,7 +367,7 @@ private void addKeys(int start, int end) throws Exception { private long getTableKeyCount(TableIterator> iterator) { long keyCount = 0; - while(iterator.hasNext()) { + while (iterator.hasNext()) { keyCount++; iterator.next(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java index c16583cce285..e2d59dfb8945 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java @@ -133,7 +133,7 @@ public void testNamespaceSummaryAPI() throws Exception { Assert.assertEquals(0, entity.getNumTotalDir()); for (int i = 0; i < 10; i++) { Assert.assertNotNull(impl.getOMMetadataManagerInstance() - .getVolumeTable().get("/vol"+ i)); + .getVolumeTable().get("/vol" + i)); } addKeys(10, 12, "dir"); impl.syncDataFromOM(); @@ -141,7 +141,7 @@ public void testNamespaceSummaryAPI() throws Exception { // test Recon is sync'ed with OM. for (int i = 10; i < 12; i++) { Assert.assertNotNull(impl.getOMMetadataManagerInstance() - .getVolumeTable().getSkipCache("/vol"+ i)); + .getVolumeTable().getSkipCache("/vol" + i)); } // test root response @@ -161,8 +161,8 @@ public void testNamespaceSummaryAPI() throws Exception { * For test purpose each container will have only one key. 
*/ private void addKeys(int start, int end, String dirPrefix) throws Exception { - for(int i = start; i < end; i++) { - writeKeys("vol"+i, "bucket"+i, dirPrefix + i + "/key"+i); + for (int i = start; i < end; i++) { + writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java index e480d513d05b..65860551b3f2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java @@ -64,7 +64,7 @@ public static void init() throws Exception { @AfterClass public static void shutdown() throws InterruptedException { - if(cluster != null) { + if (cluster != null) { cluster.shutdown(); } IOUtils.cleanupWithLogger(null, storageContainerLocationClient); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java index 94b998447c46..2e67a88347af 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java @@ -102,8 +102,8 @@ public void cleanup() { public void testReplicasAreReportedForClosedContainerAfterRestart() throws Exception { // Create some keys to write data into the open containers - for (int i=0; i<10; i++) { - TestDataUtil.createKey(bucket, "key"+i, ReplicationFactor.THREE, + for (int i = 0; i < 10; i++) { + TestDataUtil.createKey(bucket, "key" + i, ReplicationFactor.THREE, ReplicationType.RATIS, "this is the content"); } StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java index 8ba2d529fc52..9e00867a1f07 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java @@ -151,14 +151,14 @@ public void testSCMContainerStateCount() throws Exception { ContainerManager scmContainerManager = scm.getContainerManager(); List containerInfoList = new ArrayList<>(); - for (int i=0; i < 10; i++) { + for (int i = 0; i < 10; i++) { containerInfoList.add( scmContainerManager.allocateContainer(new StandaloneReplicationConfig( ReplicationFactor.ONE), UUID.randomUUID().toString())); } long containerID; - for (int i=0; i < 10; i++) { + for (int i = 0; i < 10; i++) { if (i % 2 == 0) { containerID = containerInfoList.get(i).getContainerID(); scmContainerManager.updateContainerState( @@ -184,7 +184,7 @@ public void testSCMContainerStateCount() throws Exception { containerStateCount = scm.getContainerStateCount(); containerStateCount.forEach((k, v) -> { - if(k.equals(HddsProtos.LifeCycleState.CLOSING.toString())) { + if (k.equals(HddsProtos.LifeCycleState.CLOSING.toString())) { assertEquals((int)v, 5); } else if (k.equals(HddsProtos.LifeCycleState.CLOSED.toString())) { assertEquals((int)v, 5); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java index a3bd2953f3b1..8b7e0b443eee 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java @@ -61,7 +61,7 @@ public void setup() { dns.add(MockDatanodeDetails.randomDatanodeDetails()); dnsInOrder = new ArrayList<>(); - for (int i=2; i>=0; i--) { + for (int i = 2; i >= 0; i--) { dnsInOrder.add(dns.get(i)); } @@ -82,14 +82,14 @@ public void testCorrectDnsReturnedFromPipeline() throws IOException { Assert.assertNotEquals(dns.get(0), dnsInOrder.get(0)); } - @Test(timeout=5000) + @Test(timeout = 5000) public void testRandomFirstNodeIsCommandTarget() throws IOException { final ArrayList allDNs = new ArrayList<>(dns); // Using a new Xceiver Client, call it repeatedly until all DNs in the // pipeline have been the target of the command, indicating it is shuffling // the DNs on each call with a new client. This test will timeout if this // is not happening. - while(allDNs.size() > 0) { + while (allDNs.size() > 0) { XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) { @Override public XceiverClientReply sendCommandAsync( @@ -112,7 +112,7 @@ public void testFirstNodeIsCorrectWithTopologyForCommandTarget() // With a new Client, make 100 calls and ensure the first sortedDN is used // each time. The logic should always use the sorted node, so we can check // only a single DN is ever seen after 100 calls. - for (int i=0; i<100; i++) { + for (int i = 0; i < 100; i++) { XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) { @Override public XceiverClientReply sendCommandAsync( @@ -131,7 +131,7 @@ public XceiverClientReply sendCommandAsync( public void testConnectionReusedAfterGetBlock() throws IOException { // With a new Client, make 100 calls. On each call, ensure that only one // DN is seen, indicating the same DN connection is reused. 
- for (int i=0; i<100; i++) { + for (int i = 0; i < 100; i++) { final Set seenDNs = new HashSet<>(); XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) { @Override diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java index 91bb899b5f81..65d754126a9f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java @@ -358,14 +358,14 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() Set replicas = getContainerReplicas(container); List forMaintenance = new ArrayList<>(); - replicas.forEach(r ->forMaintenance.add(r.getDatanodeDetails())); + replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails())); scmClient.startMaintenanceNodes(forMaintenance.stream() .map(d -> getDNHostAndPort(d)) .collect(Collectors.toList()), 0); // Ensure all 3 DNs go to maintenance - for(DatanodeDetails dn : forMaintenance) { + for (DatanodeDetails dn : forMaintenance) { waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); } @@ -379,7 +379,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() .collect(Collectors.toList())); // Ensure all 3 DNs go to maintenance - for(DatanodeDetails dn : forMaintenance) { + for (DatanodeDetails dn : forMaintenance) { waitForDnToReachOpState(dn, IN_SERVICE); } @@ -400,26 +400,26 @@ public void testEnteringMaintenanceNodeCompletesAfterSCMRestart() Set replicas = getContainerReplicas(container); List forMaintenance = new ArrayList<>(); - replicas.forEach(r ->forMaintenance.add(r.getDatanodeDetails())); + replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails())); scmClient.startMaintenanceNodes(forMaintenance.stream() .map(d -> getDNHostAndPort(d)) .collect(Collectors.toList()), 0); // Ensure all 3 DNs go to entering_maintenance - for(DatanodeDetails dn : forMaintenance) { + for (DatanodeDetails dn : forMaintenance) { waitForDnToReachPersistedOpState(dn, ENTERING_MAINTENANCE); } cluster.restartStorageContainerManager(true); setManagers(); List newDns = new ArrayList<>(); - for(DatanodeDetails dn : forMaintenance) { + for (DatanodeDetails dn : forMaintenance) { newDns.add(nm.getNodeByUuid(dn.getUuid().toString())); } // Ensure all 3 DNs go to maintenance - for(DatanodeDetails dn : newDns) { + for (DatanodeDetails dn : newDns) { waitForDnToReachOpState(dn, IN_MAINTENANCE); } @@ -550,7 +550,7 @@ private void setManagers() { */ private void generateData(int keyCount, String keyPrefix, ReplicationFactor repFactor, ReplicationType repType) throws IOException { - for (int i=0; i= 0); + assert (numOfArgs >= 0); String[] res = new String[1 + 1 + numOfOMs + numOfArgs]; final int indexOmServiceIds = 0; final int indexOmNodes = 1; @@ -280,7 +280,7 @@ private String[] getHASetConfStrings(int numOfArgs) { String[] omNodesArr = omNodesVal.split(","); // Sanity check - assert(omNodesArr.length == numOfOMs); + assert (omNodesArr.length == numOfOMs); for (int i = 0; i < numOfOMs; i++) { res[indexOmAddressStart + i] = getSetConfStringFromConf(ConfUtils.addKeySuffixes( @@ -624,7 +624,7 @@ public void testDeleteTrashNoSkipTrash() throws Exception { // create volume: vol1 with bucket: bucket1 final String testVolBucket = "/vol1/bucket1"; - final String testKey = 
testVolBucket+"/key1"; + final String testKey = testVolBucket + "/key1"; final String[] volBucketArgs = new String[] {"-mkdir", "-p", testVolBucket}; final String[] keyArgs = new String[] {"-touch", testKey}; @@ -652,7 +652,7 @@ public void testDeleteTrashNoSkipTrash() throws Exception { final String[] rmKeyArgs = new String[] {"-rm", "-R", testKey}; final String[] rmTrashArgs = new String[] {"-rm", "-R", - testVolBucket+"/.Trash"}; + testVolBucket + "/.Trash"}; final Path trashPathKey1 = Path.mergePaths(new Path( new OFSPath(testKey).getTrashRoot(), new Path("Current")), new Path(testKey)); @@ -666,11 +666,11 @@ public void testDeleteTrashNoSkipTrash() throws Exception { Assert.assertEquals(0, res); LOG.info("Executing testDeleteTrashNoSkipTrash: key1 deleted moved to" - +" Trash: "+trashPathKey1.toString()); + + " Trash: " + trashPathKey1.toString()); fs.getFileStatus(trashPathKey1); LOG.info("Executing testDeleteTrashNoSkipTrash: deleting trash FsShell " - +"with args{}: ", Arrays.asList(rmTrashArgs)); + + "with args{}: ", Arrays.asList(rmTrashArgs)); res = ToolRunner.run(shell, rmTrashArgs); Assert.assertEquals(0, res); diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java index fc16b3c88662..a28c6d13b571 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java @@ -38,7 +38,7 @@ public class TestOmPrefixInfo { private static OzoneManagerStorageProtos.OzoneAclInfo buildTestOzoneAclInfo( - String aclString){ + String aclString) { OzoneAcl oacl = OzoneAcl.parseAcl(aclString); ByteString rights = ByteString.copyFrom(oacl.getAclBitSet().toByteArray()); return OzoneManagerStorageProtos.OzoneAclInfo.newBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 058adf33d13f..b818a221fb60 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -621,7 +621,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) } return hasAccess; } catch (IOException ex) { - if(ex instanceof OMException) { + if (ex instanceof OMException) { throw (OMException) ex; } LOG.error("CheckAccess operation failed for bucket:{}/{}.", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index b8857fdb63ea..639893af849f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -749,7 +749,7 @@ public OmMultipartUploadListParts listParts(String volumeName, } } catch (OMException ex) { throw ex; - } catch (IOException ex){ + } catch (IOException ex) { LOG.error( "List Multipart Upload Parts Failed: volume: {}, bucket: {}, ,key: " + "{} ", @@ -1071,7 +1071,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) } return hasAccess; } catch (IOException ex) { - if(ex instanceof OMException) { + 
if (ex instanceof OMException) { throw (OMException) ex; } LOG.error("CheckAccess operation failed for key:{}/{}/{}", volume, @@ -1763,7 +1763,7 @@ public List<OzoneFileStatus> listStatusFSO(OmKeyArgs args, boolean recursive, if (fileStatusInfo != null) { prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID(); - if(fileStatusInfo.isDirectory()){ + if (fileStatusInfo.isDirectory()) { seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, fileStatusInfo.getKeyInfo().getFileName()); @@ -2005,7 +2005,7 @@ private int listStatusFindFilesInTableCache( String cacheKey = entry.getKey().getCacheKey(); OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue(); // cacheOmKeyInfo is null if an entry is deleted in cache - if(cacheOmKeyInfo == null){ + if (cacheOmKeyInfo == null) { deletedKeySet.add(cacheKey); continue; } @@ -2050,7 +2050,7 @@ private int listStatusFindDirsInTableCache( String cacheKey = entry.getKey().getCacheKey(); OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue(); // cacheOmKeyInfo is null if an entry is deleted in cache - if(cacheOmDirInfo == null){ + if (cacheOmDirInfo == null) { deletedKeySet.add(cacheKey); continue; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 69871fdf8f87..827727c2c2f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -31,7 +31,7 @@ * This class is for maintaining Ozone Manager statistics. */ @InterfaceAudience.Private -@Metrics(about="Ozone Manager Metrics", context="dfs") +@Metrics(about = "Ozone Manager Metrics", context = "dfs") public class OMMetrics { private static final String SOURCE_NAME = OMMetrics.class.getSimpleName(); @@ -246,17 +246,17 @@ public void setNumBuckets(long val) { public void setNumKeys(long val) { long oldVal = this.numKeys.value(); - this.numKeys.incr(val- oldVal); + this.numKeys.incr(val - oldVal); } public void setNumDirs(long val) { long oldVal = this.numDirs.value(); - this.numDirs.incr(val- oldVal); + this.numDirs.incr(val - oldVal); } public void setNumFiles(long val) { long oldVal = this.numDirs.value(); - this.numDirs.incr(val- oldVal); + this.numDirs.incr(val - oldVal); } public void decNumKeys(long val) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 81e1e09e6068..c2dcb20f9ec9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -834,13 +834,13 @@ public List<OmBucketInfo> listBuckets(final String volumeName, @Override public Iterator<Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>>> - getBucketIterator(){ + getBucketIterator() { return bucketTable.cacheIterator(); } @Override public TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> - getKeyIterator(){ + getKeyIterator() { return keyTable.iterator(); } @@ -933,7 +933,7 @@ public List<OmKeyInfo> listKeys(String volumeName, String bucketName, // entries.
CacheValue<OmKeyInfo> cacheValue = keyTable.getCacheValue(new CacheKey<>(kv.getKey())); - if(cacheValue == null || cacheValue.getCacheValue() != null) { + if (cacheValue == null || cacheValue.getCacheValue() != null) { cacheKeyMap.put(kv.getKey(), kv.getValue()); currentCount++; } @@ -1105,7 +1105,7 @@ public List<BlockGroup> getPendingDeletionKeys(final int keyCount) if (kv != null) { RepeatedOmKeyInfo infoList = kv.getValue(); // Get block keys as a list. - for(OmKeyInfo info : infoList.getOmKeyInfoList()){ + for (OmKeyInfo info : infoList.getOmKeyInfoList()) { OmKeyLocationInfoGroup latest = info.getLatestVersionLocations(); List<BlockID> item = latest.getLocationList().stream() .map(b -> new BlockID(b.getContainerID(), b.getLocalID())) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java index e7834db0df07..7c322581b2a8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java @@ -63,7 +63,7 @@ public static void checkAllAcls(OzoneManager ozoneManager, //OzoneNativeAuthorizer differs from Ranger Authorizer as Ranger requires // only READ access on parent level access. OzoneNativeAuthorizer has // different parent level access based on the child level access type - if(ozoneManager.isNativeAuthorizerEnabled()) { + if (ozoneManager.isNativeAuthorizerEnabled()) { if (aclType == IAccessAuthorizer.ACLType.CREATE || aclType == IAccessAuthorizer.ACLType.DELETE || aclType == IAccessAuthorizer.ACLType.WRITE_ACL) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 900babde032a..e2715afab58c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2379,11 +2379,11 @@ public boolean checkAcls(OzoneObj obj, RequestContext context, if (!accessAuthorizer.checkAccess(obj, context)) { if (throwIfPermissionDenied) { - String volumeName = obj.getVolumeName() != null? - "Volume:" + obj.getVolumeName() + " ": ""; - String bucketName = obj.getBucketName() != null? - "Bucket:" + obj.getBucketName() + " ": ""; - String keyName = obj.getKeyName() != null? + String volumeName = obj.getVolumeName() != null ? + "Volume:" + obj.getVolumeName() + " " : ""; + String bucketName = obj.getBucketName() != null ? + "Bucket:" + obj.getBucketName() + " " : ""; + String keyName = obj.getKeyName() != null ? "Key:" + obj.getKeyName() : ""; LOG.warn("User {} doesn't have {} permission to access {} {}{}{}", context.getClientUgi().getUserName(), context.getAclRights(), @@ -3711,7 +3711,7 @@ private void addS3GVolumeToDB() throws IOException { // Commit to DB.
- try(BatchOperation batchOperation = + try (BatchOperation batchOperation = metadataManager.getStore().initBatchOperation()) { metadataManager.getVolumeTable().putWithBatch(batchOperation, dbVolumeKey, omVolumeArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java index 130ce4dd0d13..2ffb3882f8ba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java @@ -182,7 +182,7 @@ public synchronized void restorePrepareFromFile(long currentIndex) File prepareMarkerFile = getPrepareMarkerFile(); if (prepareMarkerFile.exists()) { byte[] data = new byte[(int) prepareMarkerFile.length()]; - try(FileInputStream stream = new FileInputStream(prepareMarkerFile)) { + try (FileInputStream stream = new FileInputStream(prepareMarkerFile)) { stream.read(data); } catch (IOException e) { throwPrepareException(e, "Failed to read prepare marker " + @@ -255,7 +255,7 @@ private void writePrepareMarkerFile(long index) throws IOException { File parentDir = markerFile.getParentFile(); Files.createDirectories(parentDir.toPath()); - try(FileOutputStream stream = new FileOutputStream(markerFile)) { + try (FileOutputStream stream = new FileOutputStream(markerFile)) { stream.write(Long.toString(index).getBytes(StandardCharsets.UTF_8)); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java index ac2091fa26e7..8a22bff12792 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java @@ -178,7 +178,7 @@ private void commonInit() { * to execute its tasks. This allows the dependency to be injected for unit * testing. 
*/ - static class OMStarterHelper implements OMStarterInterface{ + static class OMStarterHelper implements OMStarterInterface { @Override public void start(OzoneConfiguration conf) throws IOException, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java index 57d17cd4339b..634968ac2982 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java @@ -65,7 +65,7 @@ public S3SecretValue getS3Secret(String kerberosID) throws IOException { try { S3SecretValue s3Secret = omMetadataManager.getS3SecretTable().get(kerberosID); - if(s3Secret == null) { + if (s3Secret == null) { byte[] secret = OmUtils.getSHADigest(); result = new S3SecretValue(kerberosID, DigestUtils.sha256Hex(secret)); omMetadataManager.getS3SecretTable().put(kerberosID, result); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java index 92b76913be38..68d38141fb3d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java @@ -135,7 +135,7 @@ public FSDataInputStream open(Path path, int i) { public FSDataOutputStream create(Path path, FsPermission fsPermission, boolean b, int i, short i1, - long l, Progressable progressable){ + long l, Progressable progressable) { throw new UnsupportedOperationException( "fs.create() not implemented in TrashOzoneFileSystem"); } @@ -173,12 +173,12 @@ private boolean renameFSO(OFSPath srcPath, OFSPath dstPath) { OzoneManagerProtocolProtos.OMRequest omRequest = getRenameKeyRequest(srcPath, dstPath); try { - if(omRequest != null) { + if (omRequest != null) { submitRequest(omRequest); return true; } return false; - } catch (Exception e){ + } catch (Exception e) { LOG.error("Couldn't send rename request", e); return false; } @@ -203,7 +203,7 @@ private boolean deleteFSO(OFSPath srcPath) { OzoneManagerProtocolProtos.OMRequest omRequest = getDeleteKeyRequest(srcPath); try { - if(omRequest != null) { + if (omRequest != null) { submitRequest(omRequest); return true; } @@ -299,7 +299,7 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) { CacheValue<OmBucketInfo>>> bucketIterator = ozoneManager.getMetadataManager().getBucketIterator(); List<FileStatus> ret = new ArrayList<>(); - while (bucketIterator.hasNext()){ + while (bucketIterator.hasNext()) { Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry = bucketIterator.next(); OmBucketInfo omBucketInfo = entry.getValue().getCacheValue(); @@ -316,7 +316,7 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) { } } } - } catch (Exception e){ + } catch (Exception e) { LOG.error("Couldn't perform fs operation " + "fs.listStatus()/fs.exists()", e); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java index d8bc27010e4f..e1138afc8e33 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java @@ -70,7 +70,7 @@ public class TrashPolicyOzone extends
TrashPolicyDefault { /** Format of checkpoint directories used prior to Hadoop 0.23. */ private static final DateFormat OLD_CHECKPOINT = new SimpleDateFormat("yyMMddHHmm"); - private static final int MSECS_PER_MINUTE = 60*1000; + private static final int MSECS_PER_MINUTE = 60 * 1000; private long emptierInterval; @@ -78,7 +78,7 @@ public class TrashPolicyOzone extends TrashPolicyDefault { private OzoneManager om; - public TrashPolicyOzone(){ + public TrashPolicyOzone() { } @Override @@ -110,7 +110,7 @@ public void initialize(Configuration conf, FileSystem fs) { } } - TrashPolicyOzone(FileSystem fs, Configuration conf, OzoneManager om){ + TrashPolicyOzone(FileSystem fs, Configuration conf, OzoneManager om) { initialize(conf, fs); this.om = om; } @@ -198,7 +198,7 @@ public void run() { // sleep for interval Thread.sleep(end - now); // if not leader, thread will always be sleeping - if (!om.isLeaderReady()){ + if (!om.isLeaderReady()) { continue; } } catch (InterruptedException e) { @@ -219,7 +219,7 @@ public void run() { continue; } TrashPolicyOzone trash = new TrashPolicyOzone(fs, conf, om); - Runnable task = ()->{ + Runnable task = () -> { try { om.getMetrics().incNumTrashRootsProcessed(); trash.deleteCheckpoint(trashRoot.getPath(), false); @@ -241,7 +241,7 @@ public void run() { } try { fs.close(); - } catch(IOException e) { + } catch (IOException e) { LOG.warn("Trash cannot close FileSystem: ", e); } finally { executor.shutdown(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index e3ab5bd91cff..eaa38ef7132f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -243,7 +243,7 @@ private void flushTransactions() { setReadyBuffer(); List flushedEpochs = null; - try(BatchOperation batchOperation = omMetadataManager.getStore() + try (BatchOperation batchOperation = omMetadataManager.getStore() .initBatchOperation()) { AtomicReference lastTraceId = new AtomicReference<>(); @@ -376,7 +376,7 @@ private void addCleanupEntry(DoubleBufferEntry entry, Map applyTransaction(TransactionContext trx) { CompletableFuture future = CompletableFuture.supplyAsync( () -> runCommand(request, trxLogIndex), executorService); future.thenApply(omResponse -> { - if(!omResponse.getSuccess()) { + if (!omResponse.getSuccess()) { // When INTERNAL_ERROR or METADATA_ERROR it is considered as // critical error and terminate the OM. 
Considering INTERNAL_ERROR // also for now because INTERNAL_ERROR is thrown for any error @@ -516,8 +516,8 @@ private OMResponse runCommand(OMRequest request, long trxLogIndex) { */ public void updateLastAppliedIndex(List flushedEpochs) { Preconditions.checkArgument(flushedEpochs.size() > 0); - computeAndUpdateLastAppliedIndex(flushedEpochs.get(flushedEpochs.size() -1), - -1L, flushedEpochs, true); + computeAndUpdateLastAppliedIndex( + flushedEpochs.get(flushedEpochs.size() - 1), -1L, flushedEpochs, true); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index a4ef4a13f1af..6d9ce346479a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -155,7 +155,7 @@ public OzoneManagerProtocolProtos.UserInfo getUserInfo() { public OzoneManagerProtocolProtos.UserInfo getUserIfNotExists( OzoneManager ozoneManager) { OzoneManagerProtocolProtos.UserInfo userInfo = getUserInfo(); - if (!userInfo.hasRemoteAddress() || !userInfo.hasUserName()){ + if (!userInfo.hasRemoteAddress() || !userInfo.hasUserName()) { OzoneManagerProtocolProtos.UserInfo.Builder newuserInfo = OzoneManagerProtocolProtos.UserInfo.newBuilder(); UserGroupInformation user; @@ -164,7 +164,7 @@ public OzoneManagerProtocolProtos.UserInfo getUserIfNotExists( user = UserGroupInformation.getCurrentUser(); remoteAddress = ozoneManager.getOmRpcServerAddr() .getAddress(); - } catch (Exception e){ + } catch (Exception e) { LOG.debug("Couldn't get om Rpc server address", e); return getUserInfo(); } @@ -517,7 +517,7 @@ private static String isValidKeyPath(String path) throws OMException { if (path.length() == 0) { throw new OMException("Invalid KeyPath, empty keyName" + path, INVALID_KEY_NAME); - } else if(path.startsWith("/")) { + } else if (path.startsWith("/")) { isValid = false; } else { // Check for ".." "." 
":" "/" diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index b3acaaaddb3f..b90e10472748 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -336,13 +336,13 @@ public boolean checkQuotaBytesValid(OMMetadataManager metadataManager, List bucketList = metadataManager.listBuckets( omVolumeArgs.getVolume(), null, null, Integer.MAX_VALUE); - for(OmBucketInfo bucketInfo : bucketList) { + for (OmBucketInfo bucketInfo : bucketList) { long nextQuotaInBytes = bucketInfo.getQuotaInBytes(); - if(nextQuotaInBytes > OzoneConsts.QUOTA_RESET) { + if (nextQuotaInBytes > OzoneConsts.QUOTA_RESET) { totalBucketQuota += nextQuotaInBytes; } } - if(volumeQuotaInBytes < totalBucketQuota + if (volumeQuotaInBytes < totalBucketQuota && volumeQuotaInBytes != OzoneConsts.QUOTA_RESET) { throw new IllegalArgumentException("Total buckets quota in this volume " + "should not be greater than volume quota : the total space quota is" + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 44c37562cf3d..17c4e3925d37 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -267,15 +267,15 @@ public boolean checkQuotaBytesValid(OMMetadataManager metadataManager, } List bucketList = metadataManager.listBuckets( omVolumeArgs.getVolume(), null, null, Integer.MAX_VALUE); - for(OmBucketInfo bucketInfo : bucketList) { + for (OmBucketInfo bucketInfo : bucketList) { long nextQuotaInBytes = bucketInfo.getQuotaInBytes(); - if(nextQuotaInBytes > OzoneConsts.QUOTA_RESET && + if (nextQuotaInBytes > OzoneConsts.QUOTA_RESET && !omBucketArgs.getBucketName().equals(bucketInfo.getBucketName())) { totalBucketQuota += nextQuotaInBytes; } } - if(volumeQuotaInBytes < totalBucketQuota && + if (volumeQuotaInBytes < totalBucketQuota && volumeQuotaInBytes != OzoneConsts.QUOTA_RESET) { throw new OMException("Total buckets quota in this volume " + "should not be greater than volume quota : the total space quota is" + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java index d4ba01750575..5d01c4ff6ac8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java @@ -120,7 +120,7 @@ OMClientResponse onSuccess(OMResponse.Builder omResponse, @Override void onComplete(boolean operationResult, IOException exception, OMMetrics omMetrics, AuditLogger auditLogger, - Map auditMap){ + Map auditMap) { auditLog(auditLogger, buildAuditMessage(OMAction.SET_ACL, auditMap, exception, getOmRequest().getUserInfo())); diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index e197cca8301c..48d4274284ea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -193,7 +193,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo dirKeyInfo = null; if (omDirectoryResult == FILE_EXISTS || omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) { - throw new OMException("Unable to create directory: " +keyName + throw new OMException("Unable to create directory: " + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, FILE_ALREADY_EXISTS); } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH || diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 4e19b9982664..24994d7a86f7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -97,7 +97,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 9f05fee7e4f3..f46b2dd29ffe 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -199,7 +199,7 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath( // Add all the sub-dirs to the missing list except the leaf element. // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt. // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list. - if(elements.hasNext()){ + if (elements.hasNext()) { // skips leaf node. missing.add(fileName); } @@ -273,7 +273,7 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath( * Includes the list of missing intermediate directories and * the directory search result code. 
*/ - public static class OMPathInfoWithFSO extends OMPathInfo{ + public static class OMPathInfoWithFSO extends OMPathInfo { private String leafNodeName; private long lastKnownParentId; private long leafNodeObjectId; @@ -759,7 +759,7 @@ public static String getAbsolutePath(String prefixName, String fileName) { * @param keyInfo omKeyInfo * @return omDirectoryInfo object */ - public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo){ + public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo) { OmDirectoryInfo.Builder builder = new OmDirectoryInfo.Builder(); builder.setParentObjectID(keyInfo.getParentObjectID()); builder.setAcls(keyInfo.getAcls()); @@ -838,7 +838,7 @@ public static long getToKeyNameParentId(String volumeName, "Failed to rename %s to %s, %s doesn't exist", fromKeyName, toKeyName, toKeyParentDir), OMException.ResultCodes.KEY_RENAME_ERROR); - } else if (toKeyParentDirStatus.isFile()){ + } else if (toKeyParentDirStatus.isFile()) { throw new OMException(String.format( "Failed to rename %s to %s, %s is a file", fromKeyName, toKeyName, toKeyParentDir), OMException.ResultCodes.KEY_RENAME_ERROR); @@ -975,7 +975,7 @@ public static long getParentID(long bucketId, Iterator pathComponents, long lastKnownParentId = bucketId; // If no sub-dirs then bucketID is the root/parent. - if(!pathComponents.hasNext()){ + if (!pathComponents.hasNext()) { return bucketId; } if (StringUtils.isBlank(errMsg)) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 9a2ac61216a2..9bdb51f7fd1d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -88,7 +88,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)); } @@ -265,7 +265,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); - if(bucketLockAcquired) { + if (bucketLockAcquired) { omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 50d9e4cdc383..ffa3ebf46313 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -98,7 +98,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { 
OmUtils.validateKeyName(keyArgs.getKeyName()); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index 99ca308ac4e8..0c96756db45c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -88,7 +88,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { OmUtils.validateKeyName(renameKeyRequest.getToKeyName()); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java index 2235bafb8da6..f9c67d6a94b1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java @@ -149,7 +149,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName, bucketName, toKeyName, 0); // Check if toKey exists. - if(toKeyFileStatus != null) { + if (toKeyFileStatus != null) { // Destination exists and following are different cases: OmKeyInfo toKeyValue = toKeyFileStatus.getKeyInfo(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index b4528b889c19..8fe9011ca0f1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -259,22 +259,22 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, OmBucketInfo bucketInfo, PrefixManager prefixManager) { List acls = new ArrayList<>(); - if(keyArgs.getAclsList() != null) { + if (keyArgs.getAclsList() != null) { acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); } // Inherit DEFAULT acls from prefix. - if(prefixManager != null) { + if (prefixManager != null) { List< OmPrefixInfo > prefixList = prefixManager.getLongestPrefixPath( OZONE_URI_DELIMITER + keyArgs.getVolumeName() + OZONE_URI_DELIMITER + keyArgs.getBucketName() + OZONE_URI_DELIMITER + keyArgs.getKeyName()); - if(prefixList.size() > 0) { + if (prefixList.size() > 0) { // Add all acls from direct parent to key. 
OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); - if(prefixInfo != null) { + if (prefixInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) { return acls; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java index 4a45ef97d821..0fe89250f1b9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java @@ -112,7 +112,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, if (getOmRequest().getAddAclRequest().hasObj() && operationResult) { modificationTime = getOmRequest().getAddAclRequest() .getModificationTime(); - } else if (getOmRequest().getSetAclRequest().hasObj() && operationResult){ + } else if (getOmRequest().getSetAclRequest().hasObj() + && operationResult) { modificationTime = getOmRequest().getSetAclRequest() .getModificationTime(); } else if (getOmRequest().getRemoveAclRequest().hasObj() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 21d10c4f1e68..b7cf65613609 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -168,7 +168,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, int keyFactor = omKeyInfo.getReplicationConfig().getRequiredNodes(); Iterator iter = multipartKeyInfo.getPartKeyInfoMap().entrySet().iterator(); - while(iter.hasNext()) { + while (iter.hasNext()) { Map.Entry entry = (Map.Entry)iter.next(); PartKeyInfo iterPartKeyInfo = (PartKeyInfo)entry.getValue(); quotaReleased += diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java index 26d962703a7b..d8848fc116dd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -197,13 +197,13 @@ public boolean checkQuotaBytesValid(OMMetadataManager metadataManager, List bucketList = metadataManager.listBuckets( volumeName, null, null, Integer.MAX_VALUE); - for(OmBucketInfo bucketInfo : bucketList) { + for (OmBucketInfo bucketInfo : bucketList) { long nextQuotaInBytes = bucketInfo.getQuotaInBytes(); - if(nextQuotaInBytes > OzoneConsts.QUOTA_RESET) { + if (nextQuotaInBytes > OzoneConsts.QUOTA_RESET) { totalBucketQuota += nextQuotaInBytes; } } - if(volumeQuotaInBytes < totalBucketQuota && + if (volumeQuotaInBytes < totalBucketQuota && volumeQuotaInBytes != OzoneConsts.QUOTA_RESET) { throw new OMException("Total buckets quota in this volume " + "should not be greater than volume quota : the total space quota is" + diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java index b91aef8d8417..ce1a4d03d9f7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java @@ -102,7 +102,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, if (getOmRequest().getAddAclRequest().hasObj()) { modificationTime = getOmRequest().getAddAclRequest() .getModificationTime(); - } else if (getOmRequest().getSetAclRequest().hasObj()){ + } else if (getOmRequest().getSetAclRequest().hasObj()) { modificationTime = getOmRequest().getSetAclRequest() .getModificationTime(); } else if (getOmRequest().getRemoveAclRequest().hasObj()) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java index 8df7792e3b93..e776df08594d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java @@ -110,7 +110,7 @@ OMResponse.Builder onInit() { @Override OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmVolumeArgs omVolumeArgs, boolean aclApplied){ + OmVolumeArgs omVolumeArgs, boolean aclApplied) { omResponse.setAddAclResponse(OzoneManagerProtocolProtos.AddAclResponse .newBuilder().setResponse(aclApplied).build()); return new OMVolumeAclOpResponse(omResponse.build(), omVolumeArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java index 4ab55f3788c1..ff2792d44fef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java @@ -110,7 +110,7 @@ OMResponse.Builder onInit() { @Override OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmVolumeArgs omVolumeArgs, boolean aclApplied){ + OmVolumeArgs omVolumeArgs, boolean aclApplied) { omResponse.setRemoveAclResponse(OzoneManagerProtocolProtos.RemoveAclResponse .newBuilder().setResponse(aclApplied).build()); return new OMVolumeAclOpResponse(omResponse.build(), omVolumeArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java index 710250920b59..95d98f4ddda6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java @@ -107,7 +107,7 @@ OMResponse.Builder onInit() { @Override OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmVolumeArgs omVolumeArgs, boolean aclApplied){ + 
OmVolumeArgs omVolumeArgs, boolean aclApplied) { omResponse.setSetAclResponse(OzoneManagerProtocolProtos.SetAclResponse .newBuilder().setResponse(aclApplied).build()); return new OMVolumeAclOpResponse(omResponse.build(), omVolumeArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index 5d34f6a32ca5..10dda299793a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -404,7 +404,7 @@ public void removeToken(OzoneTokenIdentifier ozoneTokenIdentifier) { @Override public byte[] retrievePassword(OzoneTokenIdentifier identifier) throws InvalidToken { - if(identifier.getTokenType().equals(S3AUTHINFO)) { + if (identifier.getTokenType().equals(S3AUTHINFO)) { return validateS3AuthInfo(identifier); } return validateToken(identifier).getPassword(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java index 756e821bf81c..fb348fd4f2b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java @@ -102,9 +102,9 @@ public int loadTokens(OzoneManagerSecretState state) throws IOException { int loadedToken = 0; try (TableIterator<OzoneTokenIdentifier, ? extends KeyValue<OzoneTokenIdentifier, Long>> iterator = - omMetadataManager.getDelegationTokenTable().iterator()){ + omMetadataManager.getDelegationTokenTable().iterator()) { iterator.seekToFirst(); - while(iterator.hasNext()) { + while (iterator.hasNext()) { KeyValue<OzoneTokenIdentifier, Long> kv = iterator.next(); state.tokenState.put(kv.getKey(), kv.getValue()); loadedToken++; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 6a2cf5890cec..cfeab4b31f3e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -208,7 +208,7 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { initMultipartUpload(writeClient, volume, bucket, "dir/ozonekey2"); - OmMultipartInfo omMultipartInfo3 =addinitMultipartUploadToCache(volume, + OmMultipartInfo omMultipartInfo3 = addinitMultipartUploadToCache(volume, bucket, "dir/ozonekey3"); OmMultipartInfo omMultipartInfo4 = initMultipartUpload(writeClient, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java index 73e9ea57ae23..277969c5e9da 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java @@ -59,7 +59,7 @@ public void testDBDefinition() throws Exception { missingOmDBTables.remove("default"); int countOmDBTables = missingOmDBTables.size(); // Remove the file if it is found in both the datastructures - for(DBColumnFamilyDefinition
definition : columnFamilyDefinitions) { + for (DBColumnFamilyDefinition definition : columnFamilyDefinitions) { if (!missingOmDBTables.remove(definition.getName())) { missingDBDefTables.add(definition.getName()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index 2ad44d163afa..7354a940baa5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -278,7 +278,7 @@ public void testListBuckets() throws Exception { // volumeB with prefixBucketNameWithHadoopOwner. startBucket = null; TreeSet<String> expectedBuckets = new TreeSet<>(); - for (int i=0; i<5; i++) { + for (int i = 0; i < 5; i++) { omBucketInfoList = omMetadataManager.listBuckets(volumeName2, startBucket, prefixBucketNameWithHadoopOwner, 10); @@ -341,7 +341,7 @@ public void testListKeys() throws Exception { TreeSet<String> keysASet = new TreeSet<>(); TreeSet<String> keysBSet = new TreeSet<>(); TreeSet<String> keysCSet = new TreeSet<>(); - for (int i=1; i<= 100; i++) { + for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { keysASet.add( prefixKeyA + i); @@ -357,7 +357,7 @@ public void testListKeys() throws Exception { TreeSet<String> keysAVolumeBSet = new TreeSet<>(); TreeSet<String> keysBVolumeBSet = new TreeSet<>(); - for (int i=1; i<= 100; i++) { + for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { keysAVolumeBSet.add( prefixKeyA + i); @@ -422,7 +422,7 @@ public void testListKeys() throws Exception { // volumeB/ozoneBucket with "key-a". startKey = null; TreeSet<String> expectedKeys = new TreeSet<>(); - for (int i=0; i<5; i++) { + for (int i = 0; i < 5; i++) { omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket, startKey, prefixKeyB, 10); @@ -472,7 +472,7 @@ public void testListKeysWithFewDeleteEntriesInCache() throws Exception { TreeSet<String> deleteKeySet = new TreeSet<>(); - for (int i=1; i<= 100; i++) { + for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { keysASet.add( prefixKeyA + i); @@ -510,7 +510,7 @@ public void testListKeysWithFewDeleteEntriesInCache() throws Exception { // Now get key count by 10.
String startKey = null; expectedKeys = new TreeSet<>(); - for (int i=0; i<5; i++) { + for (int i = 0; i < 5; i++) { omKeyInfoList = omMetadataManager.listKeys(volumeNameA, ozoneBucket, startKey, prefixKeyA, 10); @@ -618,7 +618,7 @@ public void testGetExpiredOpenKeys() throws Exception { private void addKeysToOM(String volumeName, String bucketName, String keyName, int i) throws Exception { - if (i%2== 0) { + if (i % 2 == 0) { OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, 1000L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java index 7d7c310f6858..01601668b61f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java @@ -109,7 +109,7 @@ private MockOzoneManagerProtocol(String nodeId, Exception ex) { public OMResponse submitRequest(RpcController controller, OzoneManagerProtocolProtos.OMRequest request) throws ServiceException { throw new ServiceException("ServiceException of type " + - exception.getClass() + " for "+ omNodeId, exception); + exception.getClass() + " for " + omNodeId, exception); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index 0d7f95b94f71..16dc32283582 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -109,7 +109,7 @@ public void testDoubleBufferWithDummyResponse() throws Exception { assertEquals(0, metrics.getTotalNumOfFlushedTransactions()); assertEquals(0, metrics.getMaxNumberOfTransactionsFlushedInOneIteration()); - for (int i=0; i < bucketCount; i++) { + for (int i = 0; i < bucketCount; i++) { doubleBuffer.add(createDummyBucketResponse(volumeName), trxId.incrementAndGet()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 050417aa2e8d..92d5c6292f44 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -284,7 +284,7 @@ public void testDoubleBufferWithMixOfTransactionsParallel() throws Exception { private void doMixTransactions(String volumeName, int bucketCount, Queue deleteBucketQueue, Queue bucketQueue) { - for (int i=0; i < bucketCount; i++) { + for (int i = 0; i < bucketCount; i++) { String bucketName = UUID.randomUUID().toString(); long transactionID = trxId.incrementAndGet(); OMBucketCreateResponse omBucketCreateResponse = createBucket(volumeName, @@ -434,7 +434,7 @@ private boolean assertRowCount(int expected, Table table) { private void doTransactions(int bucketCount) { String 
volumeName = UUID.randomUUID().toString(); createVolume(volumeName, trxId.incrementAndGet()); - for (int i=0; i< bucketCount; i++) { + for (int i = 0; i < bucketCount; i++) { createBucket(volumeName, UUID.randomUUID().toString(), trxId.incrementAndGet()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java index 2b2b75af240f..351f524c1da3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java @@ -185,7 +185,7 @@ public void testIsReadOnlyCapturesAllCmdTypeEnums() throws Exception { OmUtils.isReadOnly(request); assertFalse(cmdtype + " is not categorized in " + "OmUtils#isReadyOnly", - logCapturer.getOutput().contains("CmdType " + cmdtype +" is not " + + logCapturer.getOutput().contains("CmdType " + cmdtype + " is not " + "categorized as readOnly or not.")); logCapturer.clearOutput(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java index b73bbc5d6f8b..a0a7cd838073 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java @@ -303,7 +303,7 @@ public void testPreAppendTransaction() throws Exception { mockTransactionContext(createKeyRequest)); Assert.fail("Expected StateMachineException to be thrown when " + "submitting write request while prepared."); - } catch(StateMachineException smEx) { + } catch (StateMachineException smEx) { Assert.assertFalse(smEx.leaderShouldStepDown()); Throwable cause = smEx.getCause(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 46bdb5eaf785..9519eb3b327a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -396,7 +396,7 @@ private OMRequest createDirectoryRequest(String volumeName, String bucketName, private String genRandomKeyName() { StringBuilder keyNameBuilder = new StringBuilder(); keyNameBuilder.append(RandomStringUtils.randomAlphabetic(5)); - for (int i =0; i< 3; i++) { + for (int i = 0; i < 3; i++) { keyNameBuilder.append("/").append(RandomStringUtils.randomAlphabetic(5)); } return keyNameBuilder.toString(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index 153a4ea62351..5c93dae0caa4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -51,7 +51,7 @@ public class 
TestOMFileCreateRequest extends TestOMKeyRequest { @Test - public void testPreExecute() throws Exception{ + public void testPreExecute() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, false, false); @@ -91,7 +91,7 @@ public void testPreExecute() throws Exception{ } @Test - public void testPreExecuteWithBlankKey() throws Exception{ + public void testPreExecuteWithBlankKey() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, "", HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, false, false); @@ -404,7 +404,7 @@ protected OMRequest createFileRequest( * @return OMFileCreateRequest reference */ @NotNull - protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest){ + protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) { return new OMFileCreateRequest(omRequest); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index e35645eb4da3..d4122c0d49b4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -445,12 +445,12 @@ private OMRequest createCommitKeyRequest() { private List getKeyLocation(int count) { List keyLocations = new ArrayList<>(); - for (int i=0; i < count; i++) { + for (int i = 0; i < count; i++) { KeyLocation keyLocation = KeyLocation.newBuilder() .setBlockID(HddsProtos.BlockID.newBuilder() .setContainerBlockID(HddsProtos.ContainerBlockID.newBuilder() - .setContainerID(i+1000).setLocalID(i+100).build())) + .setContainerID(i + 1000).setLocalID(i + 100).build())) .setOffset(0).setLength(200).setCreateVersion(version).build(); keyLocations.add(keyLocation); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java index 0e27c1ce2b05..f64250a9b439 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java @@ -45,7 +45,7 @@ private long getBucketID() throws java.io.IOException { String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey); - if(omBucketInfo!= null){ + if (omBucketInfo != null) { return omBucketInfo.getObjectID(); } // bucket doesn't exists in DB diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 9ff7d7d4d822..5bc9d4548470 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -574,7 +574,7 @@ protected void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest, protected long 
checkIntermediatePaths(Path keyPath) throws Exception { // Check intermediate paths are created keyPath = keyPath.getParent(); - while(keyPath != null) { + while (keyPath != null) { Assert.assertNotNull(omMetadataManager.getKeyTable(getBucketLayout()).get( omMetadataManager .getOzoneDirKey(volumeName, bucketName, keyPath.toString()))); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java index 5d006ca315bf..24da4c3dd6eb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java @@ -125,10 +125,10 @@ private void verifyPath(OzonePrefixPath ozonePrefixPath, String pathName, pathName); Assert.assertTrue("Failed to list keyPaths", pathItr.hasNext()); Assert.assertEquals(expectedPath, pathItr.next().getTrimmedName()); - try{ + try { pathItr.next(); Assert.fail("Reached end of the list!"); - } catch (NoSuchElementException nse){ + } catch (NoSuchElementException nse) { // expected } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index d4fe1da7b051..4b2b46794f16 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -137,7 +137,7 @@ public void testValidateAndUpdateCache() throws Exception { .setStatus(Status.OK) .build(); - try(BatchOperation batchOperation = + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java index d3fcee7e7d48..958a6a0f988e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java @@ -337,7 +337,7 @@ private void assertNotInOpenKeyTable(OpenKeyBucket... openKeys) private List getFullOpenKeyNames(OpenKeyBucket... 
openKeyBuckets) { List fullKeyNames = new ArrayList<>(); - for(OpenKeyBucket keysPerBucket: openKeyBuckets) { + for (OpenKeyBucket keysPerBucket: openKeyBuckets) { String volume = keysPerBucket.getVolumeName(); String bucket = keysPerBucket.getBucketName(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index f91f27cb802b..2e12aa735a83 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -163,7 +163,7 @@ public void testInvalidPartOrderError() throws Exception { List partList = new ArrayList<>(); - String partName= getPartName(volumeName, bucketName, keyName, + String partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 23); partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java index c854773f7cd3..a0b94d162619 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java @@ -84,7 +84,7 @@ public void testValidateAndUpdateCacheWithZeroMaxUserVolumeCount() Assert.assertEquals(expectedObjId, respone.getOmVolumeArgs() .getObjectID()); Assert.assertEquals(txLogIndex, respone.getOmVolumeArgs().getUpdateID()); - } catch (IllegalArgumentException ex){ + } catch (IllegalArgumentException ex) { GenericTestUtils.assertExceptionContains("should be greater than zero", ex); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java index 89137a95c355..6539f481cb3b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java @@ -121,7 +121,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { String volumeName = UUID.randomUUID().toString(); long quotaInBytes = 100L; - long quotaInNamespace= 100L; + long quotaInNamespace = 100L; OMRequest originalRequest = OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java index 230360bad7e2..3c17c8da1618 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java @@ -217,7 +217,7 @@ private void assertCacheItemCounts( for (String tableName : 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
index 230360bad7e2..3c17c8da1618 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java
@@ -217,7 +217,7 @@ private void assertCacheItemCounts( for (String tableName : om.getMetadataManager().listTableNames()) { if (!cleanup.contains(tableName)) { assertEquals( - "Cache item count of table " +tableName, + "Cache item count of table " + tableName, cacheItemCount.get(tableName).intValue(), Iterators.size( om.getMetadataManager().getTable(tableName).cacheIterator()
@@ -289,7 +289,7 @@ private OMFileCreateRequest anOMFileCreateRequest() { return new OMFileCreateRequest(protoRequest); } - private OMKeyCreateRequest anOMKeyCreateRequest(){ + private OMKeyCreateRequest anOMKeyCreateRequest() { OMRequest protoRequest = mock(OMRequest.class); when(protoRequest.getCreateKeyRequest()).thenReturn(aKeyCreateRequest()); when(protoRequest.getCmdType()).thenReturn(Type.CreateKey);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
index 9699524aea22..acae362469de 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
@@ -258,7 +258,7 @@ private Iterable mockFeatures(String... names) { private Iterable mockFeatures( int startFromLV, String... names ) { - int i=startFromLV; + int i = startFromLV; List ret = new ArrayList<>(); for (String name : names) { ret.add(mockFeature(name, i));
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
index a23469a4dfc0..71621894c523 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java
@@ -203,7 +203,7 @@ private void writePrepareMarkerFile(byte[] bytes) throws IOException { if (!mkdirs) { throw new IOException("Unable to create marker file directory."); } - try(FileOutputStream stream = + try (FileOutputStream stream = new FileOutputStream(markerFile)) { stream.write(bytes); }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
index 670b7ee3afd1..e39fe39930c7 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java
@@ -91,7 +91,7 @@ public void setUp() throws Exception { s3SecretManager = new S3SecretManagerImpl(conf, metadataManager) { @Override public S3SecretValue getS3Secret(String kerberosID) { - if(s3Secrets.containsKey(kerberosID)) { + if (s3Secrets.containsKey(kerberosID)) { return new S3SecretValue(kerberosID, s3Secrets.get(kerberosID)); } return null;
@@ -99,7 +99,7 @@ public S3SecretValue getS3Secret(String kerberosID) { @Override public String getS3UserSecretString(String awsAccessKey) { - if(s3Secrets.containsKey(awsAccessKey)) { + if (s3Secrets.containsKey(awsAccessKey)) { return s3Secrets.get(awsAccessKey); } return null;
@@ -322,7 +322,7 @@ public void testVerifySignatureFailure() throws Exception { OzoneTokenIdentifier id = new OzoneTokenIdentifier(); // set invalid om cert serial id id.setOmCertSerialId("1927393"); - id.setMaxDate(Time.now() + 60*60*24); + id.setMaxDate(Time.now() + 60 * 60 * 24); id.setOwner(new Text("test")); Assert.assertFalse(secretManager.verifySignature(id, id.getBytes())); }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
index 48ed205a5872..bb8157208579 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
@@ -218,7 +218,7 @@ public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, } long duration = Time.monotonicNowNanos() - startTime; LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns", - duration/testTokenCount); + duration / testTokenCount); startTime = Time.monotonicNowNanos(); for (int i = 0; i < testTokenCount; i++) {
@@ -226,7 +226,7 @@ public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, } duration = Time.monotonicNowNanos() - startTime; LOG.info("Average token verify time with HmacSha256(RSA/1024 key) " - + "is {} ns", duration/testTokenCount); + + "is {} ns", duration / testTokenCount); } @Test
@@ -273,7 +273,7 @@ public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) { } long duration = Time.monotonicNowNanos() - startTime; LOG.info("Average token sign time with {}({} symmetric key) is {} ns", - hmacAlgorithm, keyLen, duration/testTokenCount); + hmacAlgorithm, keyLen, duration / testTokenCount); } /*
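An aside on the `60 * 60 * 24` hunk: the spacing fix makes the arithmetic readable, but the unit of the literal stays implicit (86400 reads like seconds, while Time.now() is in milliseconds). If the intent is a fixed one-day offset, a hedged alternative using only the JDK would make the unit explicit; `id` and `Time` are the names from the hunk, and this rewrite is not part of the patch:

    import java.util.concurrent.TimeUnit;

    // hypothetical, self-documenting form of the offset
    id.setMaxDate(Time.now() + TimeUnit.DAYS.toMillis(1));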
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
index a5568d01722c..868899d8300b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
@@ -349,7 +349,7 @@ private void resetAclsAndValidateAccess(OzoneObj obj, List acls; String user = testUgi.getUserName(); String group = (testUgi.getGroups().size() > 0) ? - testUgi.getGroups().get(0): ""; + testUgi.getGroups().get(0) : ""; RequestContext.Builder builder = new RequestContext.Builder() .setClientUgi(testUgi)
@@ -372,7 +372,7 @@ private void resetAclsAndValidateAccess(OzoneObj obj, // Reset acls to only one right. if (obj.getResourceType() == VOLUME) { setVolumeAcl(Collections.singletonList(newAcl)); - } else if (obj.getResourceType() == BUCKET){ + } else if (obj.getResourceType() == BUCKET) { setBucketAcl(Collections.singletonList(newAcl)); } else { aclImplementor.setAcl(obj, Collections.singletonList(newAcl));
@@ -450,7 +450,7 @@ private void resetAclsAndValidateAccess(OzoneObj obj, // only DB not cache. if (obj.getResourceType() == VOLUME) { addVolumeAcl(addAcl); - } else if (obj.getResourceType() == BUCKET){ + } else if (obj.getResourceType() == BUCKET) { addBucketAcl(addAcl); } else { aclImplementor.addAcl(obj, addAcl);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
index 145e5992f9d6..3f73debd6586 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
@@ -187,7 +187,7 @@ public void testVolumeOps() throws Exception { List aclsToTest = Arrays.stream(IAccessAuthorizer.ACLType.values()).filter( - (type)-> type != NONE && type != CREATE) + (type) -> type != NONE && type != CREATE) .collect(Collectors.toList()); for (IAccessAuthorizer.ACLType type: aclsToTest) { nonAdminOwnerContext = getUserRequestContext(getTestVolOwnerName(0),
@@ -296,6 +296,6 @@ private OzoneObj getTestKeyobj(int volIndex, int bucketIndex, List getAclsToTest() { return Arrays.stream(IAccessAuthorizer.ACLType.values()).filter( - (type)-> type != NONE).collect(Collectors.toList()); + (type) -> type != NONE).collect(Collectors.toList()); } }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 2c3a465ebd63..4c09a81d2744 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -342,7 +342,7 @@ public FileStatusAdapter getFileStatus(String key, URI uri, @Override - public Iterator listKeys(String pathKey) throws IOException{ + public Iterator listKeys(String pathKey) throws IOException { incrementCounter(Statistic.OBJECTS_LIST, 1); return new IteratorAdapter(bucket.listKeys(pathKey)); }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index c920747b70ee..c8549f0023b0 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -285,7 +285,7 @@ private class RenameIterator extends OzoneListingIterator { @Override boolean processKey(List keyList) throws IOException { // TODO RenameKey needs to be changed to batch operation - for(String key : keyList) { + for (String key : keyList) { String newKeyName = dstKey.concat(key.substring(srcKey.length())); adapter.renameKey(key, newKeyName); }
@@ -512,7 +512,7 @@ public boolean delete(Path f, boolean recursive) throws IOException { if (adapter.isFSOptimizedBucket()) { if (f.isRoot()) { - if (!recursive && listStatus(f).length!=0){ + if (!recursive && listStatus(f).length != 0) { throw new PathIsNotEmptyDirectoryException(f.toString()); } LOG.warn("Cannot delete root directory.");
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
index 5c319173d6c4..14983dc74910 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
@@ -31,7 +31,7 @@ * Shared Utilities for Ozone FS and related classes. */ public final class OzoneClientUtils { - private OzoneClientUtils(){ + private OzoneClientUtils() { // Not used. } public static BucketLayout resolveLinkBucketLayout(OzoneBucket bucket,
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
index d7888a5013dd..918640799c71 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
@@ -101,7 +101,7 @@ public int available() throws IOException { */ @Override public int read(ByteBuffer buf) throws IOException { - if (buf.isReadOnly()){ + if (buf.isReadOnly()) { throw new ReadOnlyBufferException(); }
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java
index 55069ce54a7d..a3675dcbe771 100644
--- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java
+++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java
@@ -61,7 +61,7 @@ public static void createNewDerbyDatabase(String jdbcUrl, String schemaName) System.setProperty("derby.stream.error.method", DERBY_DISABLE_LOG_METHOD); Class.forName(DERBY_DRIVER_CLASS); - try(Connection connection = DriverManager.getConnection(jdbcUrl + try (Connection connection = DriverManager.getConnection(jdbcUrl + ";user=" + schemaName + ";create=true")) { LOG.info("Created derby database at {}.", jdbcUrl);
@@ -72,7 +72,7 @@ * Used to suppress embedded derby database logging. * @return No-Op output stream. */ - public static OutputStream disableDerbyLogFile(){ + public static OutputStream disableDerbyLogFile() { return new OutputStream() { @Override public void write(int b) throws IOException {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java
index 327e9b192663..eec33466b6f6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java
@@ -93,7 +93,7 @@ public void getMetricsResponse( ) { final ByteBuffer buffer = ByteBuffer.allocateDirect(16 * 1024); - while(inputChannel.read(buffer) != -1) { + while (inputChannel.read(buffer) != -1) { buffer.flip(); outputChannel.write(buffer); buffer.compact();
@@ -101,7 +101,7 @@ public void getMetricsResponse( buffer.flip(); - while(buffer.hasRemaining()) { + while (buffer.hasRemaining()) { outputChannel.write(buffer); } } finally {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
index cbcd9cac8647..ac34c58f97b0 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
@@ -449,7 +449,7 @@ public Response getQuotaUsage(@QueryParam("path") String path) for (OmVolumeArgs volume: volumes) { final long quota = volume.getQuotaInBytes(); - assert(quota >= -1L); + assert (quota >= -1L); if (quota == -1L) { // If one volume has unlimited quota, the "root" quota is unlimited. quotaInBytes = -1L;
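The MetricsProxyEndpoint hunk above reformats the classic NIO channel-copy loop; the read/flip/write/compact cycle is what makes partial writes safe. A self-contained restatement of the idiom, using only generic java.nio types rather than Ozone API:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.ReadableByteChannel;
    import java.nio.channels.WritableByteChannel;

    static void copy(ReadableByteChannel in, WritableByteChannel out)
        throws IOException {
      ByteBuffer buffer = ByteBuffer.allocateDirect(16 * 1024);
      while (in.read(buffer) != -1) {
        buffer.flip();     // switch from filling to draining
        out.write(buffer); // may consume only part of the buffer
        buffer.compact();  // keep unwritten bytes, resume filling
      }
      buffer.flip();
      while (buffer.hasRemaining()) { // drain the tail
        out.write(buffer);
      }
    }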
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java
index c7e5cc71a119..d475be4921fd 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java
@@ -116,7 +116,7 @@ public Response getPipelines() { // ex. group id of 48981bf7-8bea-4fbd-9857-79df51ee872d // is group-79DF51EE872D String[] splits = pipelineId.toString().split("-"); - String groupId = "group-" + splits[splits.length-1].toUpperCase(); + String groupId = "group-" + splits[splits.length - 1].toUpperCase(); Optional leaderElectionCount = getMetricValue( "ratis_leader_election_electionCount", groupId); leaderElectionCount.ifPresent(pipelineBuilder::setLeaderElections);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
index b605c0fc9362..fdf493f168da 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -82,7 +82,7 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { res.setNumOfFiles(in.readInt()); res.setSizeOfFiles(in.readLong()); short len = in.readShort(); - assert(len == (short) ReconConstants.NUM_OF_BINS); + assert (len == (short) ReconConstants.NUM_OF_BINS); int[] fileSizeBucket = new int[len]; for (int i = 0; i < len; ++i) { fileSizeBucket[i] = in.readInt();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
index 67c635516f1c..d29bf5074a55 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
@@ -144,7 +144,7 @@ private long processExistingDBRecords(long currentTime) { containerHealthSchemaManager.getAllUnhealthyRecordsCursor()) { ContainerHealthStatus currentContainer = null; Set existingRecords = new HashSet<>(); - while(cursor.hasNext()) { + while (cursor.hasNext()) { recordCount++; UnhealthyContainersRecord rec = cursor.fetchNext(); try {
@@ -259,7 +259,7 @@ public static class ContainerHealthRecords { public static boolean retainOrUpdateRecord( ContainerHealthStatus container, UnhealthyContainersRecord rec) { boolean returnValue = false; - switch(UnHealthyContainerStates.valueOf(rec.getContainerState())) { + switch (UnHealthyContainerStates.valueOf(rec.getContainerState())) { case MISSING: returnValue = container.isMissing(); break;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
index 8e1516278eda..b8d7be04808a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
@@ -150,7 +150,7 @@ public void checkAndAddNewContainerBatch( existContainers = containers.get(true); } List noExistContainers = null; - if (containers.containsKey(false)){ + if (containers.containsKey(false)) { noExistContainers = containers.get(false).parallelStream(). map(ContainerReplicaProto::getContainerID) .collect(Collectors.toList());
@@ -178,7 +178,7 @@ public void checkAndAddNewContainerBatch( ContainerReplicaProto.State crpState = crp.getState(); try { checkContainerStateAndUpdate(cID, crpState); - } catch (Exception ioe){ + } catch (Exception ioe) { LOG.error("Exception while " + "checkContainerStateAndUpdate container", ioe); }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java
index f390ed72f9cf..02f27518ce7c 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java
@@ -43,7 +43,7 @@ static class ReconPipelineProvider extends PipelineProvider { @Override - public Pipeline create(ReplicationConfig config){ + public Pipeline create(ReplicationConfig config) { // We don't expect this to be called at all. But adding this as a red // flag for troubleshooting. throw new UnsupportedOperationException(
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index 081281d4d14a..53a45dcd8a46 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -264,7 +264,7 @@ public void start() { boolean isSCMSnapshotEnabled = ozoneConfiguration.getBoolean( ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED_DEFAULT); - if(isSCMSnapshotEnabled) { + if (isSCMSnapshotEnabled) { initializeSCMDB(); LOG.info("SCM DB initialized"); } else {
@@ -335,7 +335,7 @@ private void initializeSCMDB() { ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD_DEFAULT); - if(Math.abs(scmContainersCount - reconContainerCount) > threshold) { + if (Math.abs(scmContainersCount - reconContainerCount) > threshold) { LOG.info("Recon Container Count: {}, SCM Container Count: {}", reconContainerCount, scmContainersCount); updateReconSCMDBWithNewSnapshot();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java
index 9625db65d109..1ebeeddb2b57 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java
@@ -34,7 +34,7 @@ /** * Codec to encode ContainerKeyPrefix as byte array. */ -public class ContainerKeyPrefixCodec implements Codec{ +public class ContainerKeyPrefixCodec implements Codec { private static final String KEY_DELIMITER = "_";
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
index 288b26fba4c7..08612e3f8ae2 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
@@ -198,7 +198,7 @@ public void registerOMDBTasks() { OmSnapshotTaskName.OmDeltaRequest.name(), System.currentTimeMillis(), getCurrentOMDBSequenceNumber()); if (!reconTaskStatusDao.existsById( - OmSnapshotTaskName.OmDeltaRequest.name())){ + OmSnapshotTaskName.OmDeltaRequest.name())) { reconTaskStatusDao.insert(reconTaskStatusRecord); LOG.info("Registered {} task ", OmSnapshotTaskName.OmDeltaRequest.name());
@@ -208,7 +208,7 @@ public void registerOMDBTasks() { OmSnapshotTaskName.OmSnapshotRequest.name(), System.currentTimeMillis(), getCurrentOMDBSequenceNumber()); if (!reconTaskStatusDao.existsById( - OmSnapshotTaskName.OmSnapshotRequest.name())){ + OmSnapshotTaskName.OmSnapshotRequest.name())) { reconTaskStatusDao.insert(reconTaskStatusRecord); LOG.info("Registered {} task ", OmSnapshotTaskName.OmSnapshotRequest.name());
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
index 8c390d40899f..e9d8cbb7785a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java
@@ -128,7 +128,7 @@ public Pair process(OMUpdateEventBatch events) { String updatedKey = omdbUpdateEvent.getKey(); OmKeyInfo omKeyInfo = omdbUpdateEvent.getValue(); - try{ + try { switch (omdbUpdateEvent.getAction()) { case PUT: handlePutKeyEvent(omKeyInfo, fileSizeCountMap);
@@ -258,7 +258,7 @@ private static class FileSizeCountKey { @Override public boolean equals(Object obj) { - if(obj instanceof FileSizeCountKey) { + if (obj instanceof FileSizeCountKey) { FileSizeCountKey s = (FileSizeCountKey) obj; return volume.equals(s.volume) && bucket.equals(s.bucket) && fileSizeUpperBound.equals(s.fileSizeUpperBound);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java
index 79b28feeb01d..6e827c76599a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java
@@ -123,7 +123,7 @@ public Pair process(OMUpdateEventBatch events) { continue; } String rowKey = getRowKeyFromTable(omdbUpdateEvent.getTable()); - try{ + try { switch (omdbUpdateEvent.getAction()) { case PUT: objectCountMap.computeIfPresent(rowKey, (k, count) -> count + 1L);
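The FileSizeCountTask equals() hunk above is the standard instanceof-guarded equality pattern. A sketch of the full pair, using the field names from the hunk; the hashCode is not shown in the patch and appears here only to illustrate the equals/hashCode contract:

    @Override
    public boolean equals(Object obj) {
      if (obj instanceof FileSizeCountKey) {
        FileSizeCountKey s = (FileSizeCountKey) obj;
        return volume.equals(s.volume) && bucket.equals(s.bucket)
            && fileSizeUpperBound.equals(s.fileSizeUpperBound);
      }
      return false;
    }

    @Override
    public int hashCode() {
      // must agree with equals: derived from the same fields
      return java.util.Objects.hash(volume, bucket, fileSizeUpperBound);
    }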
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java
index 9485dbf8f7f1..d5da4a35a1cf 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java
@@ -68,7 +68,7 @@ public void testGetTaskTimes() { response.getEntity(); Assert.assertEquals(resultList.size(), responseList.size()); - for(ReconTaskStatus r : responseList) { + for (ReconTaskStatus r : responseList) { Assert.assertEquals(reconTaskStatusRecord.getTaskName(), r.getTaskName()); Assert.assertEquals(reconTaskStatusRecord.getLastUpdatedTimestamp(), r.getLastUpdatedTimestamp());
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
index ac5aeafbc6c2..db2448b3deef 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
@@ -370,7 +370,7 @@ public ContainerPlacementStatus validateContainerPlacement( } private boolean isDnPresent(List dns) { - for(DatanodeDetails dn : dns) { + for (DatanodeDetails dn : dns) { if (misRepWhenDnPresent != null && dn.getUuid().equals(misRepWhenDnPresent)) { return true;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
index ab21a3532686..59689237c6dd 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
@@ -94,7 +94,7 @@ public void testReconSchemaCreated() throws Exception { new ImmutablePair<>("count", Types.BIGINT)); List> actualPairsFileCount = new ArrayList<>(); - while(resultSetFileCount.next()) { + while (resultSetFileCount.next()) { actualPairsFileCount.add(new ImmutablePair<>(resultSetFileCount.getString( "COLUMN_NAME"), resultSetFileCount.getInt( "DATA_TYPE")));
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
index 161c03536d8d..c73990297ab5 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java
@@ -217,7 +217,7 @@ public void testGetAndApplyDeltaUpdatesFromOM() throws Exception { RocksDB rocksDB = ((RDBStore)sourceOMMetadataMgr.getStore()).getDb(); TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L); DBUpdates dbUpdatesWrapper = new DBUpdates(); - while(transactionLogIterator.isValid()) { + while (transactionLogIterator.isValid()) { TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch(); result.writeBatch().markWalTerminationPoint();
@@ -280,7 +280,7 @@ public void testGetAndApplyDeltaUpdatesFromOMWithLimit() throws Exception { TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L); DBUpdates[] dbUpdatesWrapper = new DBUpdates[4]; int index = 0; - while(transactionLogIterator.isValid()) { + while (transactionLogIterator.isValid()) { TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch(); result.writeBatch().markWalTerminationPoint();
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
index 06157d3a1b50..ff5a5bb84c1f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
@@ -85,7 +85,7 @@ public void setUp() throws Exception { } @Test - public void testReprocessOMDB() throws Exception{ + public void testReprocessOMDB() throws Exception { Map keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
index 2ff20cfb6123..3dd0d2f59a09 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java
@@ -107,7 +107,7 @@ public void testPut() throws Exception { rocksDB.getUpdatesSince(0); List writeBatches = new ArrayList<>(); - while(transactionLogIterator.isValid()) { + while (transactionLogIterator.isValid()) { TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch(); result.writeBatch().markWalTerminationPoint();
@@ -190,7 +190,7 @@ public void testDelete() throws Exception { rocksDB.getUpdatesSince(3); List writeBatches = new ArrayList<>(); - while(transactionLogIterator.isValid()) { + while (transactionLogIterator.isValid()) { TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch(); result.writeBatch().markWalTerminationPoint();
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
index 8f7f76cd9085..81c406e5af27 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
@@ -116,7 +116,7 @@ public void testProcess() { ArrayList events = new ArrayList<>(); // Create 5 put, 1 delete and 1 update event for each table for (String tableName: tableCountTask.getTaskTables()) { - for (int i=0; i<5; i++) { + for (int i = 0; i < 5; i++) { events.add(getOMUpdateEvent("item" + i, null, tableName, PUT)); } // for delete event, if value is set to null, the counter will not be
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
index 7259d8517967..a257155e764d 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
@@ -90,10 +90,10 @@ public void filter(ContainerRequestContext requestContext) throws if (host.length() > domain.length()) { String bucketName = host.substring(0, host.length() - domain.length()); - if(!bucketName.endsWith(".")) { + if (!bucketName.endsWith(".")) { //Checking this as the virtual host style pattern is http://bucket.host/ throw getException("Invalid S3 Gateway request {" + requestContext .getUriInfo().getRequestUri().toString() +"}:" +" Host: {" + host - .getUriInfo().getRequestUri().toString() + "}:" + " Host: {" + host + " is in invalid format"); } else { bucketName = bucketName.substring(0, bucketName.length() - 1);
@@ -134,7 +134,7 @@ public void setConfiguration(OzoneConfiguration config) { */ private String getDomainName(String host) { String match = null; - int length=0; + int length = 0; for (String domainVal : domains) { if (host.endsWith(domainVal)) { int len = domainVal.length();
@@ -148,7 +148,7 @@ private String getDomainName(String host) { } private String checkHostWithoutPort(String host) { - if (host.contains(":")){ + if (host.contains(":")) { return host.substring(0, host.lastIndexOf(":")); } else { return host;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 398a6bafe43a..204c1a564fe7 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -212,7 +212,7 @@ public Response get( if (count < maxKeys) { response.setTruncated(false); - } else if(ozoneKeyIterator.hasNext()) { + } else if (ozoneKeyIterator.hasNext()) { response.setTruncated(true); ContinueToken nextToken = new ContinueToken(lastKey, prevDir); response.setNextToken(nextToken.encodeToString());
@@ -498,7 +498,7 @@ public Response putAcl(String bucketName, HttpHeaders httpHeaders, } } // Add new permission on Volume - for(OzoneAcl acl : ozoneAclListOnVolume) { + for (OzoneAcl acl : ozoneAclListOnVolume) { volume.addAcl(acl); } } catch (OMException exception) {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 6a8807584623..8014cefe58a3 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -175,7 +175,7 @@ public Iterator listS3Buckets(String prefix, private Iterator iterateBuckets( Function> query) - throws IOException, OS3Exception{ + throws IOException, OS3Exception { try { return query.apply(getVolume()); } catch (OMException e) {
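Several test hunks above (`while (transactionLogIterator.isValid())`) tail RocksDB's write-ahead log. A minimal sketch of that iteration pattern against the org.rocksdb API the tests already use; the starting sequence number 0L is illustrative:

    import org.rocksdb.RocksDB;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.TransactionLogIterator;

    static void tailUpdates(RocksDB rocksDB) throws RocksDBException {
      // iterator over WAL entries newer than the given sequence number
      try (TransactionLogIterator it = rocksDB.getUpdatesSince(0L)) {
        while (it.isValid()) {
          TransactionLogIterator.BatchResult result = it.getBatch();
          // result.sequenceNumber() and result.writeBatch() carry the delta
          it.next();
        }
      }
    }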
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index c829a96d9b63..d6f46e087e1f 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -549,13 +549,13 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, throw S3ErrorTable.newError(NO_SUCH_UPLOAD, uploadID); } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) { throw S3ErrorTable.newError(ENTITY_TOO_SMALL, key); - } else if(ex.getResult() == ResultCodes.INVALID_REQUEST) { + } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) { OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key); os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: You must " + "specify at least one part"); throw os3Exception; - } else if(ex.getResult() == ResultCodes.NOT_A_FILE) { + } else if (ex.getResult() == ResultCodes.NOT_A_FILE) { OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key); os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: " +
@@ -880,7 +880,7 @@ private static OptionalLong parseAndValidateDate(String ozoneDateStr) { } long currentDate = System.currentTimeMillis(); - if (ozoneDateInMs <= currentDate){ + if (ozoneDateInMs <= currentDate) { return OptionalLong.of(ozoneDateInMs); } else { // dates in the future are invalid, so return empty()
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
index c59c4d19663e..792f2e2ef5e9 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
@@ -83,7 +83,7 @@ public String getValue() { public static ACLType getType(String typeStr) { - for(ACLType type: ACLType.values()) { + for (ACLType type: ACLType.values()) { if (type.getValue().equals(typeStr)) { return type; }
@@ -139,7 +139,7 @@ boolean isSupported() { } public static ACLIdentityType getTypeFromGranteeType(String typeStr) { - for(ACLIdentityType type: ACLIdentityType.values()) { + for (ACLIdentityType type: ACLIdentityType.values()) { if (type.getGranteeType().equals(typeStr)) { return type; }
@@ -148,7 +148,7 @@ public static ACLIdentityType getTypeFromGranteeType(String typeStr) { } public static ACLIdentityType getTypeFromHeaderType(String typeStr) { - for(ACLIdentityType type: ACLIdentityType.values()) { + for (ACLIdentityType type: ACLIdentityType.values()) { if (type.getHeaderType().equals(typeStr)) { return type; }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java
index 66f931fdef71..ee9e1a0ce33d 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java
@@ -169,10 +169,10 @@ public static class Grantee { @XmlElement(name = "ID") private String id; - @XmlAttribute(name="xsi:type") + @XmlAttribute(name = "xsi:type") private String xsiType = "CanonicalUser"; - @XmlAttribute(name="xmlns:xsi") + @XmlAttribute(name = "xmlns:xsi") private String xsiNs = "http://www.w3.org/2001/XMLSchema-instance"; public String getXsiNs() {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
index 14bf2a23cf4c..1783b5873111 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
@@ -71,7 +71,7 @@ public void parseCredential() throws OS3Exception { case 6: // Access id is kerberos principal. // Ex: testuser/om@EXAMPLE.COM/20190321/us-west-1/s3/aws4_request - accessKeyID = split[0] + "/" +split[1]; + accessKeyID = split[0] + "/" + split[1]; date = split[2].trim(); awsRegion = split[3].trim(); awsService = split[4].trim();
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
index 2a46c55f1403..19d9380de917 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
@@ -66,7 +66,7 @@ public ContainerRequest createContainerRequest(String host, String path, virtualHostStyleUri = new URI("http://" + s3HttpAddr); } else if (path != null && queryParams == null) { virtualHostStyleUri = new URI("http://" + s3HttpAddr + path); - } else if (path !=null && queryParams != null) { + } else if (path != null && queryParams != null) { virtualHostStyleUri = new URI("http://" + s3HttpAddr + path + queryParams); } else {
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java
index 6666da715f59..df4b5f43d91d 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java
@@ -37,7 +37,7 @@ public void testEncodeResult() throws Exception { getAdapter().marshal("a+b+c/")); } - private XmlAdapter getAdapter(){ + private XmlAdapter getAdapter() { return (new ObjectKeyNameAdapter()); } }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
index 19ab3bfb7e87..f5e4a061571f 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
@@ -57,7 +57,7 @@ public void testListBucket() throws Exception { assertEquals(0, response.getBucketsNum()); String bucketBaseName = "bucket-" + getClass().getName(); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { clientStub.getObjectStore().createS3Bucket(bucketBaseName + i); } response = (ListBucketResponse) rootEndpoint.get().getEntity();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java
index de0bd7eedd49..55be795e2711 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java
@@ -88,11 +88,11 @@ public Void call() throws Exception { try { UpgradeFinalizer.StatusAndMessages finalizationResponse = client.finalizeUpgrade(upgradeClientID); - if (isFinalized(finalizationResponse.status())){ + if (isFinalized(finalizationResponse.status())) { System.out.println("Upgrade has already been finalized."); emitExitMsg(); return null; - } else if (!isStarting(finalizationResponse.status())){ + } else if (!isStarting(finalizationResponse.status())) { System.err.println("Invalid response from Ozone Manager."); System.err.println( "Current finalization status is: " + finalizationResponse.status()
@@ -116,7 +116,7 @@ private void monitorAndWaitFinalization(OzoneManagerProtocol client, emitFinishedMsg("Ozone Manager"); } catch (CancellationException e) { emitCancellationMsg("Ozone Manager"); - } catch (InterruptedException e){ + } catch (InterruptedException e) { emitCancellationMsg("Ozone Manager"); Thread.currentThread().interrupt(); } catch (ExecutionException e) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java
index fd354f76aba3..8e46485218b5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java
@@ -72,11 +72,11 @@ public void execute(ScmClient scmClient) throws IOException { try { StatusAndMessages finalizationResponse = scmClient.finalizeScmUpgrade(upgradeClientID); - if (isFinalized(finalizationResponse.status())){ + if (isFinalized(finalizationResponse.status())) { System.out.println("Upgrade has already been finalized."); emitExitMsg(); return; - } else if (!isStarting(finalizationResponse.status())){ + } else if (!isStarting(finalizationResponse.status())) { System.err.println("Invalid response from Storage Container Manager."); System.err.println( "Current finalization status is: " + finalizationResponse.status()
@@ -101,7 +101,7 @@ private void monitorAndWaitFinalization(ScmClient client, emitFinishedMsg("Storage Container Manager"); } catch (CancellationException e) { emitCancellationMsg("Storage Container Manager"); - } catch (InterruptedException e){ + } catch (InterruptedException e) { emitCancellationMsg("Storage Container Manager"); Thread.currentThread().interrupt(); } catch (ExecutionException e) {
Progress can be" + "monitored in the Ozone Manager's log."); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java index ae64c94ed2d5..9af8a7447c5a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java @@ -54,7 +54,7 @@ public static void main(String[] argv) throws Exception { new AuditParser().run(argv); } - public String getDatabase(){ + public String getDatabase() { return database; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java index 8750e19bc11d..725b2b89fbc9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java @@ -70,7 +70,7 @@ private static Connection getConnection(String dbName) throws Exception { private static void loadProperties() { Properties props = new Properties(); - try{ + try { InputStream inputStream = DatabaseHelper.class.getClassLoader() .getResourceAsStream(ParserConsts.PROPS_FILE); if (inputStream != null) { @@ -85,7 +85,7 @@ private static void loadProperties() { throw new FileNotFoundException("property file '" + ParserConsts.PROPS_FILE + "' not found in the classpath"); } - } catch (Exception e){ + } catch (Exception e) { LOG.error(e.getMessage()); } @@ -145,14 +145,14 @@ private static ArrayList parseAuditLogs(String filePath) AuditEntry tempEntry = null; while (true) { - if (tempEntry == null){ + if (tempEntry == null) { tempEntry = new AuditEntry(); } if (currentLine == null) { break; } else { - if (!currentLine.matches(ParserConsts.DATE_REGEX)){ + if (!currentLine.matches(ParserConsts.DATE_REGEX)) { tempEntry.appendException(currentLine); } else { entry = StringUtils.stripAll(currentLine.split("\\|")); @@ -168,11 +168,11 @@ private static ArrayList parseAuditLogs(String filePath) .setParams(ops[1]) .setResult(entry[6].substring(entry[6].indexOf('=') + 1)) .build(); - if (entry.length == 8){ + if (entry.length == 8) { tempEntry.setException(entry[7]); } } - if (nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)){ + if (nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)) { listResult.add(tempEntry); tempEntry = null; } @@ -205,8 +205,8 @@ private static String executeStatement(String dbName, String sql) if (rs != null) { rsm = rs.getMetaData(); int cols = rsm.getColumnCount(); - while (rs.next()){ - for (int index = 1; index <= cols; index++){ + while (rs.next()) { + for (int index = 1; index <= cols; index++) { result.append(rs.getObject(index)); result.append("\t"); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java index 5d585596fbef..f3f8c459f45a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java @@ -53,7 +53,7 @@ public class TemplateCommandHandler implements Callable { @Override public Void 
call() throws Exception { try { - if(DatabaseHelper.validateTemplate(template)) { + if (DatabaseHelper.validateTemplate(template)) { System.out.println( DatabaseHelper.executeTemplate(auditParser.getDatabase(), template) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java index c6b0b337a760..035bf26ebd06 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java @@ -30,7 +30,7 @@ public class AuditEntry { private String result; private String exception; - public AuditEntry(){} + public AuditEntry() { } public String getUser() { return user; @@ -104,7 +104,7 @@ public void setException(String exception) { this.exception = exception.trim(); } - public void appendException(String text){ + public void appendException(String text) { this.exception += "\n" + text.trim(); } @@ -126,47 +126,47 @@ public Builder() { } - public Builder setTimestamp(String ts){ + public Builder setTimestamp(String ts) { this.timestamp = ts; return this; } - public Builder setLevel(String lvl){ + public Builder setLevel(String lvl) { this.level = lvl; return this; } - public Builder setLogger(String lgr){ + public Builder setLogger(String lgr) { this.logger = lgr; return this; } - public Builder setUser(String usr){ + public Builder setUser(String usr) { this.user = usr; return this; } - public Builder setIp(String ipAddress){ + public Builder setIp(String ipAddress) { this.ip = ipAddress; return this; } - public Builder setOp(String operation){ + public Builder setOp(String operation) { this.op = operation; return this; } - public Builder setParams(String prms){ + public Builder setParams(String prms) { this.params = prms; return this; } - public Builder setResult(String res){ + public Builder setResult(String res) { this.result = res; return this; } - public Builder setException(String exp){ + public Builder setException(String exp) { this.exception = exp; return this; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index 0deb7d5f7a91..24c8f243e88f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -77,7 +77,7 @@ private String getChunkLocationPath(String containerLocation) { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException{ + throws IOException, OzoneClientException { containerOperationClient = new ContainerOperationClient(createOzoneConfiguration()); xceiverClientManager = containerOperationClient @@ -105,7 +105,7 @@ protected void execute(OzoneClient client, OzoneAddress address) List locationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); // for zero-sized key - if(locationInfos.isEmpty()){ + if (locationInfos.isEmpty()) { System.out.println("No Key Locations Found"); return; } @@ -142,7 +142,7 @@ protected void execute(OzoneClient client, OzoneAddress address) for (Map.Entry entry: responses.entrySet()) { JsonObject jsonObj = new JsonObject(); - if(entry.getValue() == null){ + if (entry.getValue() == null) { LOG.error("Cant execute getBlock on this node"); 
continue; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java index 2ecfa138a570..275908e6414c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java @@ -177,7 +177,7 @@ private static ColumnFamilyHandle getColumnFamilyHandle( } private void constructColumnFamilyMap(DBDefinition dbDefinition) { - if (dbDefinition == null){ + if (dbDefinition == null) { System.out.println("Incorrect Db Path"); return; } @@ -217,7 +217,7 @@ private void printAppropriateTable( DBDefinitionFactory.setDnDBSchemaVersion(dnDBSchemaVersion); this.constructColumnFamilyMap(DBDefinitionFactory. getDefinition(Paths.get(dbPath))); - if (this.columnFamilyMap !=null) { + if (this.columnFamilyMap != null) { if (!this.columnFamilyMap.containsKey(tableName)) { System.out.print("Table with name:" + tableName + " does not exist"); } else { @@ -239,8 +239,8 @@ private void printAppropriateTable( } private String removeTrailingSlashIfNeeded(String dbPath) { - if(dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)){ - dbPath = dbPath.substring(0, dbPath.length()-1); + if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { + dbPath = dbPath.substring(0, dbPath.length() - 1); } return dbPath; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java index cabddf977206..0ebc83245629 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java @@ -170,7 +170,7 @@ public void parse(String vol, String buck, String db, dumpInfo(Types.BUCKET, effectivePath, objectBucketId, bucketKey); Iterator pathIterator = p.iterator(); - while(pathIterator.hasNext()) { + while (pathIterator.hasNext()) { Path elem = pathIterator.next(); String path = metadataManager.getOzonePathKey(lastObjectId, elem.toString()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java index 6a7f1787643a..c1ce49ddb329 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java @@ -94,7 +94,7 @@ protected void execute(OzoneClient client, OzoneAddress address) configuration.setBoolean("ozone.client.verify.checksum", !isChecksumVerifyEnabled); - if(isChecksumVerifyEnabled) { + if (isChecksumVerifyEnabled) { clientProtocol = client.getObjectStore().getClientProxy(); clientProtocolWithoutChecksum = new RpcClient(configuration, null); } else { @@ -187,14 +187,14 @@ private void downloadReplicasAndCreateManifest( Throwable cause = e.getCause(); replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage()); - if(cause instanceof OzoneChecksumException) { + if (cause instanceof OzoneChecksumException) { BlockID blockID = block.getKey().getBlockID(); String datanodeUUID = replica.getKey().getUuidString(); is = getInputStreamWithoutChecksum(replicasWithoutChecksum, datanodeUUID, blockID); Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING); - } else if(cause instanceof StatusRuntimeException) { + } else if (cause instanceof 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java
index 6a7f1787643a..c1ce49ddb329 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java
@@ -94,7 +94,7 @@ protected void execute(OzoneClient client, OzoneAddress address) configuration.setBoolean("ozone.client.verify.checksum", !isChecksumVerifyEnabled); - if(isChecksumVerifyEnabled) { + if (isChecksumVerifyEnabled) { clientProtocol = client.getObjectStore().getClientProxy(); clientProtocolWithoutChecksum = new RpcClient(configuration, null); } else {
@@ -187,14 +187,14 @@ private void downloadReplicasAndCreateManifest( Throwable cause = e.getCause(); replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage()); - if(cause instanceof OzoneChecksumException) { + if (cause instanceof OzoneChecksumException) { BlockID blockID = block.getKey().getBlockID(); String datanodeUUID = replica.getKey().getUuidString(); is = getInputStreamWithoutChecksum(replicasWithoutChecksum, datanodeUUID, blockID); Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING); - } else if(cause instanceof StatusRuntimeException) { + } else if (cause instanceof StatusRuntimeException) { break; } } finally {
@@ -213,10 +213,10 @@ private OzoneInputStream getInputStreamWithoutChecksum( OzoneInputStream is = new OzoneInputStream(); for (Map.Entry> block : replicasWithoutChecksum.entrySet()) { - if(block.getKey().getBlockID().equals(blockID)) { + if (block.getKey().getBlockID().equals(blockID)) { for (Map.Entry replica : block.getValue().entrySet()) { - if(replica.getKey().getUuidString().equals(datanodeUUID)) { + if (replica.getKey().getUuidString().equals(datanodeUUID)) { is = replica.getValue(); } }
@@ -234,8 +234,8 @@ private String createDirectory(String volumeName, String bucketName, "_" + fileSuffix; System.out.println("Creating directory : " + directoryName); File dir = new File(outputDir + "/" + directoryName); - if (!dir.exists()){ - if(dir.mkdir()) { + if (!dir.exists()) { + if (dir.mkdir()) { System.out.println("Successfully created!"); } else { throw new IOException(String.format(
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
index 0a639ec148ca..a7b330ce796c 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
@@ -309,7 +309,7 @@ public void printReport() { /** * Print out reports with the given message. */ - public void print(String msg){ + public void print(String msg) { Consumer print = freonCommand.isInteractive() ? System.out::println : LOG::info;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java
index e774fcdccd80..9e73bfb637e5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java
@@ -136,7 +136,7 @@ public Void call() throws Exception { } else { xceiverClients = new ArrayList<>(); pipelines = new HashSet<>(); - for(String pipelineId:pipelinesFromCmd){ + for (String pipelineId:pipelinesFromCmd) { List selectedPipelines = pipelinesFromSCM.stream() .filter((p -> p.getId().toString() .equals("PipelineID=" + pipelineId)
@@ -144,11 +144,11 @@ public Void call() throws Exception { .collect(Collectors.toList()); pipelines.addAll(selectedPipelines); } - for (Pipeline p:pipelines){ + for (Pipeline p:pipelines) { LOG.info("Writing to pipeline: " + p.getId()); xceiverClients.add(xceiverClientManager.acquireClient(p)); } - if (pipelines.isEmpty()){ + if (pipelines.isEmpty()) { throw new IllegalArgumentException( "Couldn't find the any/the selected pipeline"); }
@@ -166,8 +166,8 @@ public Void call() throws Exception { private boolean pipelineContainsDatanode(Pipeline p, List datanodeHosts) { - for (DatanodeDetails dn:p.getNodes()){ - if (datanodeHosts.contains(dn.getHostName())){ + for (DatanodeDetails dn:p.getNodes()) { + if (datanodeHosts.contains(dn.getHostName())) { return true; } }
@@ -219,7 +219,7 @@ private void writeChunk(long stepNo) .setData(dataToWrite); XceiverClientSpi clientSpi = xceiverClients.get( - (int) (stepNo%(xceiverClients.size()))); + (int) (stepNo % (xceiverClients.size()))); sendWriteChunkRequest(blockId, writeChunkRequest, clientSpi);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
index b0937d0ba61b..88ec44d914cc 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java
@@ -175,7 +175,7 @@ private void createSubDirRecursively(String parent, int depthIndex, } } - while(spanIndex < span) { + while (spanIndex < span) { String levelSubDir = makeDirWithGivenNumberOfFiles(parent); ++spanIndex;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
index c5d4d156c8bf..cfdc924486ab 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
@@ -163,7 +163,7 @@ public static void generateConfigurations(String path, generatedConfig.setProperties(requiredProperties); File output = new File(path, "ozone-site.xml"); - if(output.createNewFile()){ + if (output.createNewFile()) { JAXBContext context = JAXBContext.newInstance(OzoneConfiguration.XMLConfiguration.class); Marshaller m = context.createMarshaller();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java
index 740e667c4d66..af6d624ed74e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java
@@ -151,7 +151,7 @@ public OzoneClient createClient(MutableConfigurationSource conf) client = createRpcClientFromHostPort(ozoneURI.getHost(), ozoneURI.getPort(), conf); } - } else {// When host is not specified + } else { // When host is not specified Collection omServiceIds = conf.getTrimmedStringCollection( OZONE_OM_SERVICE_IDS_KEY);
@@ -270,7 +270,7 @@ private static URI stringToUri(String pathString) { // add leading slash to the path, if it does not exist int firstSlash = path.indexOf('/'); - if(firstSlash != 0) { + if (firstSlash != 0) { path = "/" + path; }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
index 5c0766218199..e1592e521a88 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
@@ -57,7 +57,7 @@ public class CreateBucketHandler extends BucketHandler { " user if not specified") private String ownerName; - enum AllowedBucketLayouts {FILE_SYSTEM_OPTIMIZED, OBJECT_STORE} + enum AllowedBucketLayouts { FILE_SYSTEM_OPTIMIZED, OBJECT_STORE } @Option(names = { "--layout", "-l" }, description = "Allowed Bucket Layouts: ${COMPLETION-CANDIDATES}",
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
index 93a421a2ed0b..fa83bbbc41a8 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
@@ -186,7 +186,7 @@ public void testQueryCommand() { @Test public void testLoadCommand() { String[] args1 = new String[]{dbName, "load", LOGS1}; - try{ + try { execute(args1, ""); fail("No exception thrown."); } catch (Exception e) {
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java
index 2a5223f4ff0b..75648b441970 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java
@@ -54,12 +54,12 @@ public static void init() throws UnsupportedEncodingException { } @After - public void setUp(){ + public void setUp() { bout.reset(); } @AfterClass - public static void tearDown(){ + public static void tearDown() { System.setOut(psBackup); }
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
index 2486e5786dcd..b378628da4ab 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
@@ -144,10 +144,10 @@ public List handleExecutionException(ExecutionException ex, throw ex; } }; - try{ + try { cmd.parseWithHandlers(new CommandLine.RunLast(), exceptionHandler, args); - }catch(Exception ex){ + } catch (Exception ex) { Assert.assertTrue("Expected " + msg + ", but got: " + ex.getMessage(), ex.getMessage().contains(msg));
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java
index ac1f7fd61e51..ef0cac7f8b34 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java
@@ -37,7 +37,7 @@ public OzoneTestDriver(ProgramDriver pgd) { try { pgd.addClass("freon", Freon.class, "Populates ozone with data."); - } catch(Throwable e) { + } catch (Throwable e) { e.printStackTrace(); }
@@ -46,7 +46,7 @@ public void run(String[] args) { int exitCode = -1; try { exitCode = pgd.run(args); - } catch(Throwable e) { + } catch (Throwable e) { e.printStackTrace(); }
@@ -55,7 +55,7 @@ } } - public static void main(String[] args){ + public static void main(String[] args) { new OzoneTestDriver().run(args); } }