diff --git a/dev-support/pmd/pmd-ruleset.xml b/dev-support/pmd/pmd-ruleset.xml
index 8d977ddb03e7..e4677d6b3402 100644
--- a/dev-support/pmd/pmd-ruleset.xml
+++ b/dev-support/pmd/pmd-ruleset.xml
@@ -30,6 +30,7 @@
+    <exclude-pattern>.*/generated-sources/.*</exclude-pattern>
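Beyond excluding generated sources, a ruleset change like this presumably pairs with enabling PMD's UseCollectionIsEmpty rule, which is what flags every size() == 0 / size() > 0 comparison rewritten below. A minimal sketch of such an entry (the exact rule reference used in the actual PR is an assumption):

    <!-- Hypothetical entry: enable the PMD best-practices rule behind this cleanup. -->
    <rule ref="category/java/bestpractices.xml/UseCollectionIsEmpty"/>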
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
index 4c357f339a9a..c416113db3ef 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java
@@ -366,7 +366,7 @@ protected synchronized int readWithStrategy(ByteReaderStrategy strategy)
int len = strategy.getTargetLength();
while (len > 0) {
// if we are at the last chunk and have read the entire chunk, return
- if (chunkStreams.size() == 0 ||
+ if (chunkStreams.isEmpty() ||
(chunkStreams.size() - 1 <= chunkIndex &&
chunkStreams.get(chunkIndex)
.getRemaining() == 0)) {
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java
index 0d772dfe77fa..b928f74f06de 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java
@@ -131,7 +131,7 @@ ContainerCommandResponseProto> executePutBlock(boolean close,
blockID = bd.getBlockID();
}
    List<ChunkInfo> chunks = bd.getChunks();
- if (chunks != null && chunks.size() > 0) {
+ if (chunks != null && !chunks.isEmpty()) {
if (chunks.get(0).hasStripeChecksum()) {
checksumBlockData = bd;
break;
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java
index d661719c17ce..e48b704aade2 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java
@@ -83,7 +83,7 @@ protected synchronized int readWithStrategy(ByteReaderStrategy strategy)
int totalReadLen = 0;
while (strategy.getTargetLength() > 0) {
- if (partStreams.size() == 0 ||
+ if (partStreams.isEmpty() ||
partStreams.size() - 1 <= partIndex &&
partStreams.get(partIndex).getRemaining() == 0) {
return totalReadLen == 0 ? EOF : totalReadLen;
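Note that, unlike the parenthesized equivalent in BlockInputStream above, this condition relies on && binding tighter than ||; the two spellings are the same predicate. A self-contained illustration (variable and class names invented for the demo):

    public final class PrecedenceNote {
      public static void main(String[] args) {
        boolean empty = false, lastPart = true, drained = true;
        // '&&' binds tighter than '||', so the unparenthesized form used in
        // MultipartInputStream equals the parenthesized form in BlockInputStream.
        System.out.println((empty || lastPart && drained)
            == (empty || (lastPart && drained))); // always true
      }
    }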
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java
index 6977c999fc38..25f2f46d9a53 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java
@@ -335,7 +335,7 @@ public synchronized int read(ByteBuffer byteBuffer) throws IOException {
protected boolean shouldRetryFailedRead(int failedIndex) {
    Deque<DatanodeDetails> spareLocations = spareDataLocations.get(failedIndex);
- if (spareLocations != null && spareLocations.size() > 0) {
+ if (spareLocations != null && !spareLocations.isEmpty()) {
failedLocations.add(dataLocations[failedIndex]);
DatanodeDetails spare = spareLocations.removeFirst();
dataLocations[failedIndex] = spare;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 90c365cbbb37..60b2598a50a7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -207,7 +207,7 @@ public static Optional<String> getHostName(String value) {
return Optional.empty();
}
String hostname = value.replaceAll("\\:[0-9]+$", "");
- if (hostname.length() == 0) {
+ if (hostname.isEmpty()) {
return Optional.empty();
} else {
return Optional.of(hostname);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java
index 0774581c3f11..f3626bfeaf5d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeInfo.java
@@ -79,7 +79,7 @@ public static List<SCMNodeInfo> buildNodeInfo(ConfigurationSource conf) {
if (scmServiceId != null) {
    ArrayList<String> scmNodeIds = new ArrayList<>(
HddsUtils.getSCMNodeIds(conf, scmServiceId));
- if (scmNodeIds.size() == 0) {
+ if (scmNodeIds.isEmpty()) {
throw new ConfigurationException(
String.format("Configuration does not have any value set for %s " +
"for the SCM serviceId %s. List of SCM Node ID's should " +
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
index c7fde25979a3..70f8c22ea99e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
@@ -574,7 +574,7 @@ private Map<Node, Integer> getAncestorCountMap(Collection<Node> nodes,
Preconditions.checkState(genToExclude >= 0);
Preconditions.checkState(genToReturn >= 0);
- if (nodes == null || nodes.size() == 0) {
+ if (nodes == null || nodes.isEmpty()) {
return Collections.emptyMap();
}
// with the recursive call, genToReturn can be smaller than genToExclude
@@ -619,7 +619,7 @@ private Node getLeafOnLeafParent(int leafIndex, List<String> excludedScopes,
if (excludedNodes != null && excludedNodes.contains(node)) {
continue;
}
- if (excludedScopes != null && excludedScopes.size() > 0) {
+ if (excludedScopes != null && !excludedScopes.isEmpty()) {
if (excludedScopes.stream().anyMatch(node::isDescendant)) {
continue;
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509KeyManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509KeyManager.java
index 09074af7984e..9b0b650b5457 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509KeyManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509KeyManager.java
@@ -221,7 +221,7 @@ private X509ExtendedKeyManager init(PrivateKey newPrivateKey, List<X509Certificate> newTrustChain) {
return currentPrivateKey != null && currentPrivateKey.equals(privateKey) &&
- currentTrustChain.size() > 0 &&
+ !currentTrustChain.isEmpty() &&
newTrustChain.size() == currentTrustChain.size() &&
newTrustChain.stream()
.allMatch(
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509TrustManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509TrustManager.java
index c45c74516a8e..f76cd6c55ece 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509TrustManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/ssl/ReloadingX509TrustManager.java
@@ -161,7 +161,7 @@ private X509TrustManager init(List<X509Certificate> newRootCaCerts)
}
    private boolean isAlreadyUsing(List<X509Certificate> newRootCaCerts) {
- return newRootCaCerts.size() > 0 &&
+ return !newRootCaCerts.isEmpty() &&
currentRootCACerts.size() == newRootCaCerts.size() &&
newRootCaCerts.stream()
.allMatch(
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java
index 2ed330039a4b..187d449b2285 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/SelfSignedCertificate.java
@@ -144,7 +144,7 @@ private X509Certificate generateCertificate(BigInteger caCertSerialId) throws Op
int keyUsageFlag = KeyUsage.keyCertSign | KeyUsage.cRLSign;
KeyUsage keyUsage = new KeyUsage(keyUsageFlag);
builder.addExtension(Extension.keyUsage, true, keyUsage);
- if (altNames != null && altNames.size() >= 1) {
+ if (altNames != null && !altNames.isEmpty()) {
builder.addExtension(new Extension(Extension.subjectAlternativeName,
false, new GeneralNames(altNames.toArray(
new GeneralName[altNames.size()])).getEncoded()));
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java
index 6562e52dd5ed..d056cc778fbe 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java
@@ -50,7 +50,7 @@ public JaegerSpanContext extract(StringBuilder s) {
throw new MalformedTracerStateStringException(value);
} else {
String traceId = parts[0];
- if (traceId.length() <= 32 && traceId.length() >= 1) {
+ if (traceId.length() <= 32 && !traceId.isEmpty()) {
return new JaegerSpanContext(high(traceId),
(new BigInteger(traceId, 16)).longValue(),
(new BigInteger(parts[1], 16)).longValue(),
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
index 752d410b14b8..e2a48009f698 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
@@ -98,7 +98,7 @@ public int getThreadCount() {
@VisibleForTesting
public void runPeriodicalTaskNow() throws Exception {
BackgroundTaskQueue tasks = getTasks();
- while (tasks.size() > 0) {
+ while (!tasks.isEmpty()) {
tasks.poll().call();
}
}
@@ -131,7 +131,7 @@ public synchronized void run() {
LOG.debug("Number of background tasks to execute : {}", tasks.size());
}
- while (tasks.size() > 0) {
+ while (!tasks.isEmpty()) {
BackgroundTask task = tasks.poll();
CompletableFuture.runAsync(() -> {
long startTime = System.nanoTime();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java
index 9ffb598ebcbd..56f5ef1f77ab 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java
@@ -116,12 +116,12 @@ public boolean verifyChecksumDataMatches(ChecksumData that, int startIndex)
throws OzoneChecksumException {
// pre checks
- if (this.checksums.size() == 0) {
+ if (this.checksums.isEmpty()) {
throw new OzoneChecksumException("Original checksumData has no " +
"checksums");
}
- if (that.checksums.size() == 0) {
+ if (that.checksums.isEmpty()) {
throw new OzoneChecksumException("Computed checksumData has no " +
"checksums");
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
index b2a3026059a3..f07f4ca408b8 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
@@ -487,7 +487,7 @@ void testChooseRandomExcludedNode(NodeSchema[] schemas,
excludedList, ancestorGen);
for (Node key : dataNodes) {
if (excludedList.contains(key) ||
- (ancestorList.size() > 0 &&
+ (!ancestorList.isEmpty() &&
ancestorList.stream()
.map(a -> (InnerNode) a)
.anyMatch(a -> a.isAncestor(key)))) {
@@ -558,7 +558,7 @@ void testChooseRandomExcludedNodeAndScope(NodeSchema[] schemas,
excludedList, ancestorGen);
for (Node key : dataNodes) {
if (excludedList.contains(key) || key.isDescendant(path) ||
- (ancestorList.size() > 0 &&
+ (!ancestorList.isEmpty() &&
ancestorList.stream()
.map(a -> (InnerNode) a)
.anyMatch(a -> a.isAncestor(key)))) {
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java
index a362da791503..85550e4a4463 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java
@@ -45,7 +45,7 @@ private static void checkState(boolean state, String errorString) {
}
public static StorageSize parse(String value) {
- checkState(value != null && value.length() > 0, "value cannot be blank");
+ checkState(value != null && !value.isEmpty(), "value cannot be blank");
String sanitizedValue = value.trim().toLowerCase(Locale.ENGLISH);
StorageUnit parsedUnit = null;
for (StorageUnit unit : StorageUnit.values()) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index cb926a7946cf..bc995854a8f1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -590,7 +590,7 @@ public boolean isThreadPoolAvailable(ExecutorService executor) {
}
ThreadPoolExecutor ex = (ThreadPoolExecutor) executor;
- if (ex.getQueue().size() == 0) {
+ if (ex.getQueue().isEmpty()) {
return true;
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
index 7d213af4b2d1..69a40e1f1ad2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
@@ -199,7 +199,7 @@ public CommandDispatcher build() {
"Missing scm connection manager.");
Preconditions.checkNotNull(this.container, "Missing ozone container.");
Preconditions.checkNotNull(this.context, "Missing state context.");
- Preconditions.checkArgument(this.handlerList.size() > 0,
+ Preconditions.checkArgument(!this.handlerList.isEmpty(),
"The number of command handlers must be greater than 0.");
return new CommandDispatcher(this.container, this.connectionManager,
this.context, handlerList.toArray(
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 630efdc093f0..2fb7b9c69b21 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -132,7 +132,7 @@ private void checkVolumeSet(MutableVolumeSet volumeSet,
volumeSet.failVolume(volume.getStorageDir().getPath());
}
}
- if (volumeSet.getVolumesList().size() == 0) {
+ if (volumeSet.getVolumesList().isEmpty()) {
// All volumes are in inconsistent state
throw new DiskOutOfSpaceException(
"All configured Volumes are in Inconsistent State");
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 6663786d7e07..29888b23b4fe 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -189,7 +189,7 @@ private void initializeVolumeSet() throws IOException {
// First checking if we have any volumes, if all volumes are failed the
// volumeMap size will be zero, and we throw Exception.
- if (volumeMap.size() == 0) {
+ if (volumeMap.isEmpty()) {
throw new DiskOutOfSpaceException("No storage locations configured");
}
}
@@ -219,7 +219,7 @@ public void checkAllVolumes(StorageVolumeChecker checker)
throw new IOException("Interrupted while running disk check", e);
}
- if (failedVolumes.size() > 0) {
+ if (!failedVolumes.isEmpty()) {
LOG.warn("checkAllVolumes got {} failed volumes - {}",
failedVolumes.size(), failedVolumes);
handleVolumeFailures(failedVolumes);
@@ -266,7 +266,7 @@ public void checkVolumeAsync(StorageVolume volume) {
volumeChecker.checkVolume(
volume, (healthyVolumes, failedVolumes) -> {
- if (failedVolumes.size() > 0) {
+ if (!failedVolumes.isEmpty()) {
LOG.warn("checkVolumeAsync callback got {} failed volumes: {}",
failedVolumes.size(), failedVolumes);
} else {
@@ -441,7 +441,7 @@ public boolean hasEnoughVolumes() {
boolean hasEnoughVolumes;
if (maxVolumeFailuresTolerated ==
StorageVolumeChecker.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
- hasEnoughVolumes = getVolumesList().size() >= 1;
+ hasEnoughVolumes = !getVolumesList().isEmpty();
} else {
hasEnoughVolumes = getFailedVolumesList().size() <= maxVolumeFailuresTolerated;
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
index a5a2cb052f0e..1fe1e32f2b66 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
@@ -45,7 +45,7 @@ public HddsVolume chooseVolume(List<HddsVolume> volumes,
long maxContainerSize) throws IOException {
// No volumes available to choose from
- if (volumes.size() < 1) {
+ if (volumes.isEmpty()) {
throw new DiskOutOfSpaceException("No more available volumes");
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
index 67a28fc28f25..057d96204a82 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
@@ -289,7 +289,7 @@ public void reconstructECBlockGroup(BlockLocationInfo blockLocationInfo,
emptyBlockStreams[i] = getECBlockOutputStream(blockLocationInfo, datanodeDetails, repConfig, replicaIndex);
}
- if (toReconstructIndexes.size() > 0) {
+ if (!toReconstructIndexes.isEmpty()) {
sis.setRecoveryIndexes(toReconstructIndexes.stream().map(i -> (i - 1))
.collect(Collectors.toSet()));
long length = safeBlockGroupLength;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index c5af0c7d9ed2..ab8826d0f525 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -229,7 +229,7 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy
LOG.error("Exception attempting to create container {} on volume {}" +
" remaining volumes to try {}", containerData.getContainerID(),
containerVolume.getHddsRootDir(), volumes.size(), ex);
- if (volumes.size() == 0) {
+ if (volumes.isEmpty()) {
throw new StorageContainerException(
"Container creation failed. " + ex.getMessage(), ex,
CONTAINER_INTERNAL_ERROR);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 7b392896b5f2..2c5e38bb447d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -354,7 +354,7 @@ private ScanResult scanBlock(BlockData block, DataTransferThrottler throttler,
// In EC, client may write empty putBlock in padding block nodes.
// So, we need to make sure, chunk length > 0, before declaring
// the missing chunk file.
- if (block.getChunks().size() > 0 && block
+ if (!block.getChunks().isEmpty() && block
.getChunks().get(0).getLen() > 0) {
return ScanResult.unhealthy(ScanResult.FailureType.MISSING_CHUNK_FILE,
chunkFile, new IOException("Missing chunk file " +
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index a01a93eead8a..a204fc9a5ee8 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -428,7 +428,7 @@ public synchronized List<X509Certificate> getTrustChain()
if (cert != null) {
chain.add(cert);
}
- Preconditions.checkState(chain.size() > 0, "Empty trust chain");
+ Preconditions.checkState(!chain.isEmpty(), "Empty trust chain");
}
return chain;
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
index 8b4fbaf713c9..21b3ad606176 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
@@ -115,7 +115,7 @@ public static ManagedDBOptions readFromFile(String dbFileName,
    List<ColumnFamilyDescriptor> cfDescs) throws IOException {
Preconditions.checkNotNull(dbFileName);
Preconditions.checkNotNull(cfDescs);
- Preconditions.checkArgument(cfDescs.size() > 0);
+ Preconditions.checkArgument(!cfDescs.isEmpty());
//TODO: Add Documentation on how to support RocksDB Mem Env.
Env env = Env.getDefault();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index f76f2558ac47..cdfae899e169 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -444,7 +444,7 @@ private ManagedDBOptions getDBOptionsFromFile(
columnFamilyDescriptors.add(tc.getDescriptor());
}
- if (columnFamilyDescriptors.size() > 0) {
+ if (!columnFamilyDescriptors.isEmpty()) {
try {
option = DBConfigFromFile.readFromFile(dbname,
columnFamilyDescriptors);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index f14a8f014c86..de2627fa7aa0 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -443,7 +443,7 @@ public DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount)
sequenceNumber, e);
dbUpdatesWrapper.setDBUpdateSuccess(false);
} finally {
- if (dbUpdatesWrapper.getData().size() > 0) {
+ if (!dbUpdatesWrapper.getData().isEmpty()) {
rdbMetrics.incWalUpdateDataSize(cumulativeDBUpdateLogBatchSize);
rdbMetrics.incWalUpdateSequenceCount(
dbUpdatesWrapper.getCurrentSequenceNumber() - sequenceNumber);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
index f5935b2cd69e..5c5247e011a5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
@@ -332,7 +332,7 @@ && get(startKey) == null) {
currentKey, null))) {
result.add(currentEntry);
} else {
- if (result.size() > 0 && sequential) {
+ if (!result.isEmpty() && sequential) {
// if the caller asks for a sequential range of results,
    // and we meet a mismatch, abort iteration from here.
// if result is empty, we continue to look for the first match.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
index dcb1eaa4cc2c..7c3defabeeaf 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
@@ -124,7 +124,7 @@ public synchronized StatusAndMessages reportStatus(
assertClientId(upgradeClientID);
    List<String> returningMsgs = new ArrayList<>(msgs.size() + 10);
Status status = versionManager.getUpgradeState();
- while (msgs.size() > 0) {
+ while (!msgs.isEmpty()) {
returningMsgs.add(msgs.poll());
}
return new StatusAndMessages(status, returningMsgs);
diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java
index 0fbf5757c332..b2f022613879 100644
--- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java
+++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestUtils.java
@@ -48,7 +48,7 @@ public static String getLexicographicallyHigherString(String val) {
public static List> getTestingBounds(
SortedMap keys) {
Set boundary = new HashSet<>();
- if (keys.size() > 0) {
+ if (!keys.isEmpty()) {
    List<String> sortedKeys = new ArrayList<>(keys.keySet());
boundary.add(getLexicographicallyLowerString(keys.firstKey()));
boundary.add(keys.firstKey());
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index 479b4066dc32..640a6503552b 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -925,7 +925,7 @@ private void traverseGraph(
    // first go through fwdGraph to find nodes that don't have successors.
// These nodes will be the top level nodes in reverse graph
    Set<CompactionNode> successors = fwdMutableGraph.successors(infileNode);
- if (successors.size() == 0) {
+ if (successors.isEmpty()) {
LOG.debug("No successors. Cumulative keys: {}, total keys: {}",
infileNode.getCumulativeKeysReverseTraversal(),
infileNode.getTotalNumberOfKeys());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
index 4522f602d06c..2c46c0e4e26e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
@@ -235,7 +235,7 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
healthyNodes.removeAll(usedNodes);
}
String msg;
- if (healthyNodes.size() == 0) {
+ if (healthyNodes.isEmpty()) {
msg = "No healthy node found to allocate container.";
LOG.error(msg);
throw new SCMException(msg, SCMException.ResultCodes
@@ -440,7 +440,7 @@ public ContainerPlacementStatus validateContainerPlacement(
// We have a network topology so calculate if it is satisfied or not.
int requiredRacks = getRequiredRackCount(replicas, 0);
if (topology == null || replicas == 1 || requiredRacks == 1) {
- if (dns.size() > 0) {
+ if (!dns.isEmpty()) {
// placement is always satisfied if there is at least one DN.
return validPlacement;
} else {
@@ -556,7 +556,7 @@ public Set<ContainerReplica> replicasToCopyToFixMisreplication(
.limit(numberOfReplicasToBeCopied)
.collect(Collectors.toList());
if (numberOfReplicasToBeCopied > replicasToBeCopied.size()) {
- Node rack = replicaList.size() > 0 ? this.getPlacementGroup(
+ Node rack = !replicaList.isEmpty() ? this.getPlacementGroup(
replicaList.get(0).getDatanodeDetails()) : null;
LOG.warn("Not enough copyable replicas available in rack {}. " +
"Required number of Replicas to be copied: {}." +
@@ -641,14 +641,14 @@ public Set<ContainerReplica> replicasToRemoveToFixOverreplication(
Node rack = pq.poll();
    Set<ContainerReplica> replicaSet =
placementGroupReplicaIdMap.get(rack).get(rid);
- if (replicaSet.size() > 0) {
+ if (!replicaSet.isEmpty()) {
ContainerReplica r = replicaSet.stream().findFirst().get();
replicasToRemove.add(r);
replicaSet.remove(r);
replicaIdMap.get(rid).remove(r);
placementGroupCntMap.compute(rack,
(group, cnt) -> (cnt == null ? 0 : cnt) - 1);
- if (replicaSet.size() == 0) {
+ if (replicaSet.isEmpty()) {
placementGroupReplicaIdMap.get(rack).remove(rid);
} else {
pq.add(rack);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
index 6c761f092ca2..68ef5457a14e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
@@ -146,7 +146,7 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
}
DatanodeDetails favoredNode;
int favorIndex = 0;
- if (mutableUsedNodes.size() == 0) {
+ if (mutableUsedNodes.isEmpty()) {
// choose all nodes for a new pipeline case
// choose first datanode from scope ROOT or from favoredNodes if not null
favoredNode = favoredNodeNum > favorIndex ?
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java
index 1ea7efe4eeb0..0741f5db4ae4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java
@@ -126,7 +126,7 @@ private Set<DatanodeDetails> chooseNodesFromRacks(List<Node> racks,
skippedRacks.clear();
}
- if (mutableFavoredNodes.size() > 0) {
+ if (!mutableFavoredNodes.isEmpty()) {
    List<DatanodeDetails> chosenFavoredNodesInForLoop = new ArrayList<>();
for (DatanodeDetails favoredNode : mutableFavoredNodes) {
Node curRack = getRackOfDatanodeDetails(favoredNode);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java
index cec96128f112..6ee168cd9e1f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaPendingOps.java
@@ -233,7 +233,7 @@ public void removeExpiredEntries() {
updateTimeoutMetrics(op);
}
}
- if (ops.size() == 0) {
+ if (ops.isEmpty()) {
pendingOps.remove(containerID);
}
} finally {
@@ -304,7 +304,7 @@ private boolean completeOp(ContainerReplicaOp.PendingOpType opType,
decrementCounter(op.getOpType(), replicaIndex);
}
}
- if (ops.size() == 0) {
+ if (ops.isEmpty()) {
pendingOps.remove(containerID);
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
index 52eca6ebb116..07e1dc0f44eb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
@@ -552,22 +552,22 @@ public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Container State: ").append(containerInfo.getState())
.append(", Replicas: (Count: ").append(replicas.size());
- if (healthyIndexes.size() > 0) {
+ if (!healthyIndexes.isEmpty()) {
sb.append(", Healthy: ").append(healthyIndexes.size());
}
- if (unhealthyReplicaDNs.size() > 0) {
+ if (!unhealthyReplicaDNs.isEmpty()) {
sb.append(", Unhealthy: ").append(unhealthyReplicaDNs.size());
}
- if (decommissionIndexes.size() > 0) {
+ if (!decommissionIndexes.isEmpty()) {
sb.append(", Decommission: ").append(decommissionIndexes.size());
}
- if (maintenanceIndexes.size() > 0) {
+ if (!maintenanceIndexes.isEmpty()) {
sb.append(", Maintenance: ").append(maintenanceIndexes.size());
}
- if (pendingAdd.size() > 0) {
+ if (!pendingAdd.isEmpty()) {
sb.append(", PendingAdd: ").append(pendingAdd.size());
}
- if (pendingDelete.size() > 0) {
+ if (!pendingDelete.isEmpty()) {
sb.append(", PendingDelete: ").append(pendingDelete.size());
}
sb.append(")")
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECOverReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECOverReplicationHandler.java
index 9e11d66d63f1..c35c10d0f7f3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECOverReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECOverReplicationHandler.java
@@ -114,7 +114,7 @@ public int processAndSendCommands(
    List<Integer> overReplicatedIndexes =
replicaCount.overReplicatedIndexes(true);
//sanity check
- if (overReplicatedIndexes.size() == 0) {
+ if (overReplicatedIndexes.isEmpty()) {
LOG.warn("The container {} with replicas {} was found over replicated "
+ "by EcContainerReplicaCount, but there are no over replicated "
+ "indexes returned", container.getContainerID(), replicas);
@@ -137,7 +137,7 @@ public int processAndSendCommands(
    Set<ContainerReplica> replicasToRemove =
selectReplicasToRemove(candidates, 1);
- if (replicasToRemove.size() == 0) {
+ if (replicasToRemove.isEmpty()) {
LOG.warn("The container {} is over replicated, but no replicas were "
+ "selected to remove by the placement policy. Replicas: {}",
container, replicas);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
index c7e7c580d5d0..1d6b16976581 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
@@ -430,7 +430,7 @@ private int processDecommissioningIndexes(
ContainerInfo container = replicaCount.getContainer();
    Set<Integer> decomIndexes = replicaCount.decommissioningOnlyIndexes(true);
int commandsSent = 0;
- if (decomIndexes.size() > 0) {
+ if (!decomIndexes.isEmpty()) {
LOG.debug("Processing decommissioning indexes {} for container {}.",
decomIndexes, container.containerID());
    final List<DatanodeDetails> selectedDatanodes = getTargetDatanodes(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisOverReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisOverReplicationHandler.java
index 5cc7b675fa37..3e11141f8e23 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisOverReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisOverReplicationHandler.java
@@ -120,7 +120,7 @@ public int processAndSendCommands(
// get replicas that can be deleted, in sorted order
    List<ContainerReplica> eligibleReplicas =
getEligibleReplicas(replicaCount, pendingOps);
- if (eligibleReplicas.size() == 0) {
+ if (eligibleReplicas.isEmpty()) {
LOG.info("Did not find any replicas that are eligible to be deleted for" +
" container {}.", containerInfo);
return 0;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
index 57a1d83f794d..f0be5b231d99 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java
@@ -273,14 +273,14 @@ public static List<ContainerReplica> selectUnhealthyReplicasForDelete(ContainerInfo
deleteCandidates.sort(
Comparator.comparingLong(ContainerReplica::getSequenceId));
if (containerInfo.getState() == HddsProtos.LifeCycleState.CLOSED) {
- return deleteCandidates.size() > 0 ? deleteCandidates : null;
+ return !deleteCandidates.isEmpty() ? deleteCandidates : null;
}
if (containerInfo.getState() == HddsProtos.LifeCycleState.QUASI_CLOSED) {
    List<ContainerReplica> nonUniqueOrigins =
findNonUniqueDeleteCandidates(replicas, deleteCandidates,
nodeStatusFn);
- return nonUniqueOrigins.size() > 0 ? nonUniqueOrigins : null;
+ return !nonUniqueOrigins.isEmpty() ? nonUniqueOrigins : null;
}
return null;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java
index 785f20433f78..31fcd6e9c8a6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/DeletingContainerHandler.java
@@ -74,7 +74,7 @@ public boolean handle(ContainerCheckRequest request) {
return true;
}
- if (request.getContainerReplicas().size() == 0) {
+ if (request.getContainerReplicas().isEmpty()) {
LOG.debug("Deleting Container {} has no replicas so marking for cleanup" +
" and returning true", containerInfo);
replicationManager.updateContainerState(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ECReplicationCheckHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ECReplicationCheckHandler.java
index 0bb4d414af8f..082879d3257c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ECReplicationCheckHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ECReplicationCheckHandler.java
@@ -132,7 +132,7 @@ public ContainerHealthResult checkHealth(ContainerCheckRequest request) {
    List<Integer> missingIndexes = replicaCount.unavailableIndexes(false);
int remainingRedundancy = repConfig.getParity();
boolean dueToOutOfService = true;
- if (missingIndexes.size() > 0) {
+ if (!missingIndexes.isEmpty()) {
// The container has reduced redundancy and will need reconstructed
// via an EC reconstruction command. Note that it may also have some
// replicas in decommission / maintenance states, but as the under
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/RatisReplicationCheckHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/RatisReplicationCheckHandler.java
index 9d99080695ed..3ed7348112a6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/RatisReplicationCheckHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/RatisReplicationCheckHandler.java
@@ -265,7 +265,7 @@ of unhealthy replicas (such as 3 CLOSED and 1 UNHEALTHY replicas of a
getPlacementStatus(replicas, requiredNodes, Collections.emptyList());
ContainerPlacementStatus placementStatusWithPending = placementStatus;
if (!placementStatus.isPolicySatisfied()) {
- if (replicaPendingOps.size() > 0) {
+ if (!replicaPendingOps.isEmpty()) {
placementStatusWithPending =
getPlacementStatus(replicas, requiredNodes, replicaPendingOps);
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index 045572e52288..a895aa4bed9b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -366,25 +366,25 @@ public NavigableSet<ContainerID> getMatchingContainerIDs(
// one is empty.
    final NavigableSet<ContainerID> stateSet =
lifeCycleStateMap.getCollection(state);
- if (stateSet.size() == 0) {
+ if (stateSet.isEmpty()) {
return EMPTY_SET;
}
    final NavigableSet<ContainerID> ownerSet =
ownerMap.getCollection(owner);
- if (ownerSet.size() == 0) {
+ if (ownerSet.isEmpty()) {
return EMPTY_SET;
}
    final NavigableSet<ContainerID> factorSet =
repConfigMap.getCollection(repConfig);
- if (factorSet.size() == 0) {
+ if (factorSet.isEmpty()) {
return EMPTY_SET;
}
    final NavigableSet<ContainerID> typeSet =
typeMap.getCollection(repConfig.getReplicationType());
- if (typeSet.size() == 0) {
+ if (typeSet.isEmpty()) {
return EMPTY_SET;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java
index 70365cf5c792..8e438a179090 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java
@@ -183,7 +183,7 @@ public static SCMHANodeDetails loadSCMHAConfig(OzoneConfiguration conf,
// TODO: need to fall back to ozone.scm.names in case scm node ids are
// not defined.
- if (scmNodeIds.size() == 0) {
+ if (scmNodeIds.isEmpty()) {
throw new IllegalArgumentException(
String.format("Configuration does not have any value set for %s " +
"for the service %s. List of SCM Node ID's should be " +
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
index 2eb90868b9d3..568328210c7d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
@@ -83,7 +83,7 @@ List<SCMCommand> getCommand(final UUID datanodeUuid) {
    List<SCMCommand> cmdList = null;
if (cmds != null) {
cmdList = cmds.getCommands();
- commandsInQueue -= cmdList.size() > 0 ? cmdList.size() : 0;
+ commandsInQueue -= !cmdList.isEmpty() ? cmdList.size() : 0;
// A post condition really.
Preconditions.checkState(commandsInQueue >= 0);
}
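One spot where the mechanical rewrite leaves redundancy: subtracting cmdList.size() is already a no-op when the list is empty, so the ternary guard kept in CommandQueue could arguably be dropped. A hypothetical simplification, not part of this diff:

    import java.util.Collections;
    import java.util.List;

    public final class GuardIsRedundant {
      public static void main(String[] args) {
        int commandsInQueue = 5;
        List<Object> cmdList = Collections.emptyList();
        // size() is 0 for an empty list, so subtracting unconditionally
        // matches the guarded form in CommandQueue.getCommand().
        commandsInQueue -= cmdList.size();
        System.out.println(commandsInQueue); // 5
      }
    }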
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index 5cc70f652285..5fb0c2188c9c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -233,7 +233,7 @@ public void run() {
trackedDecomMaintenance = getTrackedNodeCount();
}
processTransitioningNodes();
- if (trackedNodes.size() > 0 || pendingNodes.size() > 0) {
+ if (!trackedNodes.isEmpty() || !pendingNodes.isEmpty()) {
LOG.info("There are {} nodes tracked for decommission and " +
"maintenance. {} pending nodes.",
trackedNodes.size(), pendingNodes.size());
@@ -388,7 +388,7 @@ private boolean checkPipelinesClosedOnNode(TrackedNode dn)
    Set<PipelineID> pipelines = nodeManager.getPipelines(dn
.getDatanodeDetails());
NodeStatus status = nodeManager.getNodeStatus(dn.getDatanodeDetails());
- if (pipelines == null || pipelines.size() == 0
+ if (pipelines == null || pipelines.isEmpty()
|| status.operationalStateExpired()) {
return true;
} else {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 3efaf4bba4db..43d13e4ae6de 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -1259,7 +1259,7 @@ public Map getNodeStatistics() {
}
private void nodeUsageStatistics(Map nodeStatics) {
- if (nodeStateManager.getAllNodes().size() < 1) {
+ if (nodeStateManager.getAllNodes().isEmpty()) {
return;
}
float[] usages = new float[nodeStateManager.getAllNodes().size()];
@@ -1310,7 +1310,7 @@ private void nodeStateStatistics(Map nodeStatics) {
}
private void nodeSpaceStatistics(Map nodeStatics) {
- if (nodeStateManager.getAllNodes().size() < 1) {
+ if (nodeStateManager.getAllNodes().isEmpty()) {
return;
}
long capacityByte = 0;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index 3f556e85d44e..a0bae80157e3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -109,7 +109,7 @@ public List getDatanodeList(
public void insertNewDatanode(UUID datanodeID,
    Set<StorageLocationReport> report) throws SCMException {
Preconditions.checkNotNull(report);
- Preconditions.checkState(report.size() != 0);
+ Preconditions.checkState(!report.isEmpty());
Preconditions.checkNotNull(datanodeID);
synchronized (scmNodeStorageReportMap) {
if (isKnownDatanode(datanodeID)) {
@@ -132,7 +132,7 @@ public void updateDatanodeMap(UUID datanodeID,
    Set<StorageLocationReport> report) throws SCMException {
Preconditions.checkNotNull(datanodeID);
Preconditions.checkNotNull(report);
- Preconditions.checkState(report.size() != 0);
+ Preconditions.checkState(!report.isEmpty());
synchronized (scmNodeStorageReportMap) {
if (!scmNodeStorageReportMap.containsKey(datanodeID)) {
throw new SCMException("No such datanode", NO_SUCH_DATANODE);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index 251661eb47f7..3a6551b6b05a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -134,7 +134,7 @@ List<DatanodeDetails> filterViableNodes(
    List<DatanodeDetails> healthyNodes =
nodeManager.getNodes(NodeStatus.inServiceHealthy());
String msg;
- if (healthyNodes.size() == 0) {
+ if (healthyNodes.isEmpty()) {
msg = "No healthy node found to allocate container.";
LOG.error(msg);
throw new SCMException(msg, SCMException.ResultCodes
@@ -307,7 +307,7 @@ private List<DatanodeDetails> getResultSetWithTopology(
    List<DatanodeDetails> mutableExclude = new ArrayList<>();
boolean rackAwareness = getAnchorAndNextNode(healthyNodes,
usedNodes, results, mutableLstNodes, mutableExclude);
- if (mutableLstNodes.size() == 0) {
+ if (mutableLstNodes.isEmpty()) {
LOG.warn("Unable to find healthy node for anchor(first) node.");
throw new SCMException("Unable to find anchor node.",
SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
@@ -401,7 +401,7 @@ private boolean getAnchorAndNextNode(List<DatanodeDetails> healthyNodes,
DatanodeDetails anchor;
DatanodeDetails nextNode = null;
// First choose an anchor node.
- if (usedNodes.size() == 0) {
+ if (usedNodes.isEmpty()) {
// No usedNode, choose anchor based on healthyNodes
anchor = chooseFirstNode(healthyNodes);
if (anchor != null) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 0cc64b4d1376..c95f7208b80c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -164,8 +164,8 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig,
break;
case THREE:
    List<DatanodeDetails> excludeDueToEngagement = filterPipelineEngagement();
- if (excludeDueToEngagement.size() > 0) {
- if (excludedNodes.size() == 0) {
+ if (!excludeDueToEngagement.isEmpty()) {
+ if (excludedNodes.isEmpty()) {
excludedNodes = excludeDueToEngagement;
} else {
excludedNodes.addAll(excludeDueToEngagement);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java
index 37e299f18115..91b4f26a86e0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableECContainerProvider.java
@@ -122,7 +122,7 @@ public ContainerInfo getContainer(final long size,
PipelineRequestInformation.Builder.getBuilder()
.setSize(size)
.build();
- while (existingPipelines.size() > 0) {
+ while (!existingPipelines.isEmpty()) {
int pipelineIndex =
pipelineChoosePolicy.choosePipelineIndex(existingPipelines, pri);
if (pipelineIndex < 0) {
@@ -197,7 +197,7 @@ private ContainerInfo allocateContainer(ReplicationConfig repConfig,
throws IOException {
    List<DatanodeDetails> excludedNodes = Collections.emptyList();
- if (excludeList.getDatanodes().size() > 0) {
+ if (!excludeList.getDatanodes().isEmpty()) {
excludedNodes = new ArrayList<>(excludeList.getDatanodes());
}
@@ -248,7 +248,7 @@ private ContainerInfo getContainerFromPipeline(Pipeline pipeline)
    NavigableSet<ContainerID> containers =
pipelineManager.getContainersInPipeline(pipeline.getId());
// Assume 1 container per pipeline for EC
- if (containers.size() == 0) {
+ if (containers.isEmpty()) {
return null;
}
ContainerID containerID = containers.first();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java
index 93b4c3b2f6c5..2c15d1013e3d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java
@@ -165,7 +165,7 @@ private List<Pipeline> findPipelinesByState(
    List<Pipeline> pipelines = pipelineManager.getPipelines(repConfig,
pipelineState, excludeList.getDatanodes(),
excludeList.getPipelineIds());
- if (pipelines.size() == 0 && !excludeList.isEmpty()) {
+ if (pipelines.isEmpty() && !excludeList.isEmpty()) {
// if no pipelines can be found, try finding pipeline without
// exclusion
pipelines = pipelineManager.getPipelines(repConfig, pipelineState);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/HealthyPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/HealthyPipelineChoosePolicy.java
index 4756b06c4ee4..454c335d0e68 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/HealthyPipelineChoosePolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/HealthyPipelineChoosePolicy.java
@@ -35,7 +35,7 @@ public class HealthyPipelineChoosePolicy implements PipelineChoosePolicy {
  public Pipeline choosePipeline(List<Pipeline> pipelineList,
PipelineRequestInformation pri) {
Pipeline fallback = null;
- while (pipelineList.size() > 0) {
+ while (!pipelineList.isEmpty()) {
Pipeline pipeline = randomPolicy.choosePipeline(pipelineList, pri);
if (pipeline.isHealthy()) {
return pipeline;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index c7ca09a31370..afaab94f02a3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -1384,7 +1384,7 @@ private Set<DatanodeDetails> queryNodeState(
    Set<DatanodeDetails> returnSet = new TreeSet<>();
    List<DatanodeDetails> tmp = scm.getScmNodeManager()
.getNodes(opState, nodeState);
- if ((tmp != null) && (tmp.size() > 0)) {
+ if ((tmp != null) && (!tmp.isEmpty())) {
returnSet.addAll(tmp);
}
return returnSet;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index fac19fce99c0..ddc87da038e4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -140,7 +140,7 @@ public List<SCMCommand> dispatch(SCMHeartbeatRequestProto heartbeat) {
    final List<IncrementalContainerReportProto> icrs =
heartbeat.getIncrementalContainerReportList();
- if (icrs.size() > 0) {
+ if (!icrs.isEmpty()) {
LOG.debug("Dispatching ICRs.");
for (IncrementalContainerReportProto icr : icrs) {
eventPublisher.fireEvent(INCREMENTAL_CONTAINER_REPORT,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
index 684841b54072..fe8833f053c4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
@@ -256,7 +256,7 @@ public synchronized List<String> getAllRootCaCertificates()
throws IOException {
    List<String> pemEncodedList = new ArrayList<>();
    Set<X509Certificate> certList =
- scmCertificateClient.getAllRootCaCerts().size() == 0 ?
+ scmCertificateClient.getAllRootCaCerts().isEmpty() ?
scmCertificateClient.getAllCaCerts() :
scmCertificateClient.getAllRootCaCerts();
for (X509Certificate cert : certList) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
index 062377192165..ea1054784d09 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
@@ -146,7 +146,7 @@ public void setNodeOperationalState(DatanodeDetails dn,
@Override
public Set<PipelineID> getPipelines(DatanodeDetails datanodeDetails) {
Set<PipelineID> p = pipelineMap.get(datanodeDetails.getUuid());
- if (p == null || p.size() == 0) {
+ if (p == null || p.isEmpty()) {
return null;
} else {
return p;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
index aeea1b5fd82e..fe2e6e4da6e8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
@@ -1170,7 +1170,7 @@ public void testMaintenanceIndexCopiedWhenContainerUnRecoverable()
int replicateCommand = 0;
int reconstructCommand = 0;
boolean shouldReconstructCommandExist =
- missingIndexes.size() > 0 && missingIndexes.size() <= repConfig
+ !missingIndexes.isEmpty() && missingIndexes.size() <= repConfig
.getParity();
for (Map.Entry<DatanodeDetails, SCMCommand<?>> dnCommand : commandsSent) {
if (dnCommand.getValue() instanceof ReplicateContainerCommand) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
index d9593b1f33ee..39d2f85307e7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java
@@ -393,7 +393,7 @@ public List<Event> getEvents() {
}
public Event getLastEvent() {
- if (events.size() == 0) {
+ if (events.isEmpty()) {
return null;
} else {
return events.get(events.size() - 1);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index 4007b7cd3058..3e25706962db 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -636,7 +636,7 @@ public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception {
// Simulate safemode check exiting.
scmContext.updateSafeModeStatus(
new SCMSafeModeManager.SafeModeStatus(true, true));
- GenericTestUtils.waitFor(() -> pipelineManager.getPipelines().size() != 0,
+ GenericTestUtils.waitFor(() -> !pipelineManager.getPipelines().isEmpty(),
100, 10000);
pipelineManager.close();
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java
index d7f974f5b39c..b2f7ce1384b4 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java
@@ -94,7 +94,7 @@ public void execute(ScmClient scmClient) throws IOException {
for (HddsProtos.NodeState state : STATES) {
List<HddsProtos.Node> nodes = scmClient.queryNode(null, state,
HddsProtos.QueryScope.CLUSTER, "");
- if (nodes != null && nodes.size() > 0) {
+ if (nodes != null && !nodes.isEmpty()) {
if (nodeOperationalState != null) {
if (nodeOperationalState.equals("IN_SERVICE") ||
nodeOperationalState.equals("DECOMMISSIONING") ||
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
index aca99e84e7c4..14d6a0e84d1a 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
@@ -156,7 +156,7 @@ private void printDetails(ScmClient scmClient, long containerID,
printBreak();
}
if (json) {
- if (container.getPipeline().size() != 0) {
+ if (!container.getPipeline().isEmpty()) {
ContainerWithPipelineAndReplicas wrapper =
new ContainerWithPipelineAndReplicas(container.getContainerInfo(),
container.getPipeline(), replicas,
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java
index 4c360bf865d7..8bbb5ae5922b 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java
@@ -103,7 +103,7 @@ private void outputContainerSamples(ReplicationManagerReport report) {
for (ReplicationManagerReport.HealthState state
: ReplicationManagerReport.HealthState.values()) {
List<ContainerID> containers = report.getSample(state);
- if (containers.size() > 0) {
+ if (!containers.isEmpty()) {
output("First " + ReplicationManagerReport.SAMPLE_LIMIT + " " +
state + " containers:");
output(containers
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java
index 1788363f4166..d04df75dfdb0 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java
@@ -335,13 +335,13 @@ public boolean isPrefix() {
}
public void ensureBucketAddress() throws OzoneClientException {
- if (keyName.length() > 0) {
+ if (!keyName.isEmpty()) {
throw new OzoneClientException(
"Invalid bucket name. Delimiters (/) not allowed in bucket name");
- } else if (volumeName.length() == 0) {
+ } else if (volumeName.isEmpty()) {
throw new OzoneClientException(
"Volume name is required.");
- } else if (bucketName.length() == 0) {
+ } else if (bucketName.isEmpty()) {
throw new OzoneClientException(
"Bucket name is required.");
}
@@ -350,13 +350,13 @@ public void ensureBucketAddress() throws OzoneClientException {
// Ensure prefix address with a prefix flag
// Allow CLI to differentiate key and prefix address
public void ensurePrefixAddress() throws OzoneClientException {
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
throw new OzoneClientException(
"prefix name is missing.");
- } else if (volumeName.length() == 0) {
+ } else if (volumeName.isEmpty()) {
throw new OzoneClientException(
"Volume name is missing");
- } else if (bucketName.length() == 0) {
+ } else if (bucketName.isEmpty()) {
throw new OzoneClientException(
"Bucket name is missing");
}
@@ -364,13 +364,13 @@ public void ensurePrefixAddress() throws OzoneClientException {
}
public void ensureKeyAddress() throws OzoneClientException {
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
throw new OzoneClientException(
"Key name is missing.");
- } else if (volumeName.length() == 0) {
+ } else if (volumeName.isEmpty()) {
throw new OzoneClientException(
"Volume name is missing");
- } else if (bucketName.length() == 0) {
+ } else if (bucketName.isEmpty()) {
throw new OzoneClientException(
"Bucket name is missing");
}
@@ -387,7 +387,7 @@ public void ensureKeyAddress() throws OzoneClientException {
*/
public void ensureSnapshotAddress()
throws OzoneClientException {
- if (keyName.length() > 0) {
+ if (!keyName.isEmpty()) {
if (OmUtils.isBucketSnapshotIndicator(keyName)) {
snapshotNameWithIndicator = keyName;
} else {
@@ -396,31 +396,31 @@ public void ensureSnapshotAddress()
"a bucket name. Only a snapshot name with " +
"a snapshot indicator is accepted");
}
- } else if (volumeName.length() == 0) {
+ } else if (volumeName.isEmpty()) {
throw new OzoneClientException(
"Volume name is missing.");
- } else if (bucketName.length() == 0) {
+ } else if (bucketName.isEmpty()) {
throw new OzoneClientException(
"Bucket name is missing.");
}
}
public void ensureVolumeAddress() throws OzoneClientException {
- if (keyName.length() != 0) {
+ if (!keyName.isEmpty()) {
throw new OzoneClientException(
"Invalid volume name. Delimiters (/) not allowed in volume name");
- } else if (volumeName.length() == 0) {
+ } else if (volumeName.isEmpty()) {
throw new OzoneClientException(
"Volume name is required");
- } else if (bucketName.length() != 0) {
+ } else if (!bucketName.isEmpty()) {
throw new OzoneClientException(
"Invalid volume name. Delimiters (/) not allowed in volume name");
}
}
public void ensureRootAddress() throws OzoneClientException {
- if (keyName.length() != 0 || bucketName.length() != 0
- || volumeName.length() != 0) {
+ if (!keyName.isEmpty() || !bucketName.isEmpty()
+ || !volumeName.isEmpty()) {
throw new OzoneClientException(
"Invalid URI. Volume/bucket/key elements should not been used");
}
@@ -463,7 +463,7 @@ public void print(PrintWriter out) {
}
public void ensureVolumeOrBucketAddress() throws OzoneClientException {
- if (keyName.length() > 0) {
+ if (!keyName.isEmpty()) {
if (OmUtils.isBucketSnapshotIndicator(keyName)) {
// If snapshot, ensure snapshot URI
ensureSnapshotAddress();
@@ -471,7 +471,7 @@ public void ensureVolumeOrBucketAddress() throws OzoneClientException {
}
throw new OzoneClientException(
"Key address is not supported.");
- } else if (volumeName.length() == 0) {
+ } else if (volumeName.isEmpty()) {
// Volume must be present
// Bucket may or may not be present
// depending on whether the operation is on a volume or a bucket
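Every change in this file is a plain emptiness test on a string. String.isEmpty(), available since Java 6, is defined as length() == 0, so !isEmpty() covers both the length() > 0 and length() != 0 spellings above, and the null behavior is unchanged: both forms throw NullPointerException on a null receiver. A minimal JDK-only sketch of the equivalence:

public final class StringEmptinessDemo {
  public static void main(String[] args) {
    String keyName = "";
    // Equivalent checks; isEmpty() states the intent directly.
    System.out.println(keyName.length() == 0); // true
    System.out.println(keyName.isEmpty());     // true
    // Neither check is null-safe; both commented lines would throw NullPointerException:
    // String missing = null;
    // missing.length();
    // missing.isEmpty();
  }
}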
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/DeleteBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/DeleteBucketHandler.java
index f0c380f628d9..ae363a586a49 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/DeleteBucketHandler.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/DeleteBucketHandler.java
@@ -113,7 +113,7 @@ private void deleteOBSBucketRecursive(OzoneVolume vol, OzoneBucket bucket) {
}
}
// delete if any remaining keys left
- if (keys.size() > 0) {
+ if (!keys.isEmpty()) {
bucket.deleteKeys(keys);
}
}
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java
index 9d5060ad1260..26af4d041630 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java
@@ -50,7 +50,7 @@ protected void execute(OzoneClient client, OzoneAddress address)
client.getObjectStore().tenantGetUserInfo(userPrincipal);
final List accessIdInfoList =
tenantUserInfo.getAccessIdInfoList();
- if (accessIdInfoList.size() == 0) {
+ if (accessIdInfoList.isEmpty()) {
err().println("User '" + userPrincipal +
"' is not assigned to any tenant.");
return;
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
index f60d9317bd92..2c2f3d2f21c9 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java
@@ -144,7 +144,7 @@ private boolean cleanOBSBucket(OzoneBucket bucket) {
}
}
// delete if any remaining keys left
- if (keys.size() > 0) {
+ if (!keys.isEmpty()) {
bucket.deleteKeys(keys);
}
}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index ba62bad2ac30..057ae61c10aa 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -1407,7 +1407,7 @@ List getNextShallowListOfKeys(String prevKey)
proxy.listStatusLight(volumeName, name, delimiterKeyPrefix, false,
startKey, listCacheSize, false);
- if (addedKeyPrefix && statuses.size() > 0) {
+ if (addedKeyPrefix && !statuses.isEmpty()) {
// previous round already included the startKey, so remove it
statuses.remove(0);
} else {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java
index 28034518d66e..5da795b6d9cf 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java
@@ -384,7 +384,7 @@ DataChecksum.Type toHadoopChecksumType() {
FileChecksum makeCompositeCrcResult() throws IOException {
long blockSizeHint = 0;
- if (keyLocationInfos.size() > 0) {
+ if (!keyLocationInfos.isEmpty()) {
blockSizeHint = keyLocationInfos.get(0).getLength();
}
CrcComposer crcComposer =
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java
index 0c0763fa9297..66edadc3247d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java
@@ -70,7 +70,7 @@ public void compute(OzoneClientConfig.ChecksumCombineMode combineMode)
}
private void computeMd5Crc() {
- Preconditions.checkArgument(chunkInfoList.size() > 0);
+ Preconditions.checkArgument(!chunkInfoList.isEmpty());
final MessageDigest digester = MD5Hash.getDigester();
@@ -102,7 +102,7 @@ private void computeMd5Crc() {
private void computeCompositeCrc() throws IOException {
DataChecksum.Type dataChecksumType;
- Preconditions.checkArgument(chunkInfoList.size() > 0);
+ Preconditions.checkArgument(!chunkInfoList.isEmpty());
final ContainerProtos.ChunkInfo firstChunkInfo = chunkInfoList.get(0);
switch (firstChunkInfo.getChecksumData().getType()) {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java
index 6c2c5dbeba59..f413e27fc931 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java
@@ -94,7 +94,7 @@ private void computeCompositeCrc() throws IOException {
DataChecksum.Type dataChecksumType;
long bytesPerCrc;
long chunkSize;
- Preconditions.checkArgument(chunkInfoList.size() > 0);
+ Preconditions.checkArgument(!chunkInfoList.isEmpty());
final ContainerProtos.ChunkInfo firstChunkInfo = chunkInfoList.get(0);
switch (firstChunkInfo.getChecksumData().getType()) {
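The two checksum computers above validate their input with Guava's Preconditions.checkArgument, which throws IllegalArgumentException when the condition is false. A small usage sketch under the same assumption (Guava on the classpath); the message overload shown here is an optional extra for clarity, not something this patch adds:

import com.google.common.base.Preconditions;
import java.util.List;

public final class ChunkListCheck {
  // Rejects an empty chunk list up front instead of failing mid-computation.
  static void validate(List<byte[]> chunkChecksums) {
    Preconditions.checkArgument(!chunkChecksums.isEmpty(),
        "expected at least one chunk checksum");
  }
}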
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index 9b5e727292bc..76bd3d12c3f6 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -161,7 +161,7 @@ void hsyncKey(long offset) throws IOException {
if (keyArgs.getIsMultipartKey()) {
throw new IOException("Hsync is unsupported for multipart keys.");
} else {
- if (keyArgs.getLocationInfoList().size() == 0) {
+ if (keyArgs.getLocationInfoList().isEmpty()) {
omClient.hsyncKey(keyArgs, openID);
} else {
ContainerBlockID lastBLockId = keyArgs.getLocationInfoList().get(keyArgs.getLocationInfoList().size() - 1)
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
index 9d227a47992d..acaf3faa0537 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
@@ -345,7 +345,7 @@ void hsyncKey(long offset) throws IOException {
if (keyArgs.getIsMultipartKey()) {
throw new IOException("Hsync is unsupported for multipart keys.");
} else {
- if (keyArgs.getLocationInfoList().size() == 0) {
+ if (keyArgs.getLocationInfoList().isEmpty()) {
MetricUtil.captureLatencyNs(clientMetrics::addOMHsyncLatency,
() -> omClient.hsyncKey(keyArgs, openID));
} else {
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
index d8f03e3bda72..bf123c07ba8e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
@@ -237,7 +237,7 @@ public String getNonKeyPathNoPrefixDelim() {
}
public boolean isMount() {
- return mountName.length() > 0;
+ return !mountName.isEmpty();
}
private static boolean isInSameBucketAsInternal(
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index ce2547d11e27..100ff74ed5e6 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -200,7 +200,7 @@ public static InetSocketAddress getOmAddressForClients(
*/
public static boolean isServiceIdsDefined(ConfigurationSource conf) {
String val = conf.get(OZONE_OM_SERVICE_IDS_KEY);
- return val != null && val.length() > 0;
+ return val != null && !val.isEmpty();
}
/**
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
index 5243342b238d..dd2db5b9a85f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
@@ -134,7 +134,7 @@ private static String validateNameAndType(ACLIdentityType type, String name) {
if (type == ACLIdentityType.WORLD || type == ACLIdentityType.ANONYMOUS) {
if (!name.equals(ACLIdentityType.WORLD.name()) &&
!name.equals(ACLIdentityType.ANONYMOUS.name()) &&
- name.length() != 0) {
+ !name.isEmpty()) {
throw new IllegalArgumentException("Expected name " + type.name() + ", but was: " + name);
}
// For type WORLD and ANONYMOUS we allow only one acl to be set.
@@ -142,7 +142,7 @@ private static String validateNameAndType(ACLIdentityType type, String name) {
}
if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP))
- && (name.length() == 0)) {
+ && (name.isEmpty())) {
throw new IllegalArgumentException(type + " name is required");
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index c273d0fb2d4c..4dffb14b11d6 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -195,7 +195,7 @@ public long getGeneration() {
}
public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() {
- return keyLocationVersions.size() == 0 ? null :
+ return keyLocationVersions.isEmpty() ? null :
keyLocationVersions.get(keyLocationVersions.size() - 1);
}
@@ -340,7 +340,7 @@ public List updateLocationInfoList(
public synchronized void appendNewBlocks(
List<OmKeyLocationInfo> newLocationList, boolean updateTime)
throws IOException {
- if (keyLocationVersions.size() == 0) {
+ if (keyLocationVersions.isEmpty()) {
throw new IOException("Appending new block, but no version exist");
}
OmKeyLocationInfoGroup currentLatestVersion =
@@ -370,7 +370,7 @@ public synchronized long addNewVersion(
keyLocationVersions.clear();
}
- if (keyLocationVersions.size() == 0) {
+ if (keyLocationVersions.isEmpty()) {
// no version exist, these blocks are the very first version.
keyLocationVersions.add(new OmKeyLocationInfoGroup(0, newLocationList));
latestVersionNum = 0;
@@ -673,7 +673,7 @@ public KeyInfo getProtobuf(boolean ignorePipeline, int clientVersion) {
*/
private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName,
int clientVersion, boolean latestVersionBlocks) {
- long latestVersion = keyLocationVersions.size() == 0 ? -1 :
+ long latestVersion = keyLocationVersions.isEmpty() ? -1 :
keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
List keyLocations = new ArrayList<>();
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index b10781b52df3..c79973bc2df0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -719,11 +719,11 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException {
keyArgs.setDataSize(args.getDataSize());
}
- if (args.getMetadata() != null && args.getMetadata().size() > 0) {
+ if (args.getMetadata() != null && !args.getMetadata().isEmpty()) {
keyArgs.addAllMetadata(KeyValueUtil.toProtobuf(args.getMetadata()));
}
- if (args.getTags() != null && args.getTags().size() > 0) {
+ if (args.getTags() != null && !args.getTags().isEmpty()) {
keyArgs.addAllTags(KeyValueUtil.toProtobuf(args.getTags()));
}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java
index e97c7f1fb053..c06358cfbe83 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java
@@ -118,7 +118,7 @@ public void testWaitTimeWithSuggestedNewNode() {
Collection<String> allNodeIds = config.getTrimmedStringCollection(ConfUtils.
addKeySuffixes(OZONE_OM_NODES_KEY, OM_SERVICE_ID));
allNodeIds.remove(provider.getCurrentProxyOMNodeId());
- assertTrue(allNodeIds.size() > 0,
+ assertTrue(!allNodeIds.isEmpty(),
"This test needs at least 2 OMs");
provider.setNextOmProxy(allNodeIds.iterator().next());
assertEquals(0, provider.getWaitTime());
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
index 9541635baca7..df05932188a7 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
@@ -54,7 +54,7 @@ public class TestOzoneAclUtil {
@Test
public void testAddAcl() throws IOException {
List<OzoneAcl> currentAcls = getDefaultAcls();
- assertTrue(currentAcls.size() > 0);
+ assertTrue(!currentAcls.isEmpty());
// Add new permission to existing acl entry.
OzoneAcl oldAcl = currentAcls.get(0);
@@ -86,7 +86,7 @@ public void testRemoveAcl() {
removeAndVerifyAcl(currentAcls, USER1, false, 0);
currentAcls = getDefaultAcls();
- assertTrue(currentAcls.size() > 0);
+ assertTrue(!currentAcls.isEmpty());
// Add new permission to existing acl entry.
OzoneAcl oldAcl = currentAcls.get(0);
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
index 588bd281b066..b54adb50b762 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
@@ -38,7 +38,7 @@ public void testKeyGenerationWithDefaults() throws Exception {
.equalsIgnoreCase(OzoneConsts.GDPR_ALGORITHM_NAME));
gkey.acceptKeyDetails(
- (k, v) -> assertTrue(v.length() > 0));
+ (k, v) -> assertTrue(!v.isEmpty()));
}
@Test
@@ -51,7 +51,7 @@ public void testKeyGenerationWithValidInput() throws Exception {
.equalsIgnoreCase(OzoneConsts.GDPR_ALGORITHM_NAME));
gkey.acceptKeyDetails(
- (k, v) -> assertTrue(v.length() > 0));
+ (k, v) -> assertTrue(!v.isEmpty()));
}
@Test
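The test hunks in this patch translate size() > 0 mechanically, which yields assertTrue(!x.isEmpty()). JUnit 5 also provides assertFalse, which expresses the same condition with one negation fewer; the sketch below is an idiom note, not what the patch does:

import static org.junit.jupiter.api.Assertions.assertFalse;

import java.util.List;

class EmptinessAssertionSketch {
  // Equivalent to assertTrue(!values.isEmpty(), ...) but reads directly.
  void requireNonEmpty(List<String> values) {
    assertFalse(values.isEmpty(), "expected at least one value");
  }
}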
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
index cfbaebf6b51e..a8edd3908d60 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
@@ -166,13 +166,13 @@ protected void init() throws ServiceException {
String keytab = System
.getProperty("user.home") + "/" + defaultName + ".keytab";
keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
- if (keytab.length() == 0) {
+ if (keytab.isEmpty()) {
throw new ServiceException(FileSystemAccessException.ERROR.H01,
KERBEROS_KEYTAB);
}
String principal = defaultName + "/localhost@LOCALHOST";
principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
- if (principal.length() == 0) {
+ if (principal.isEmpty()) {
throw new ServiceException(FileSystemAccessException.ERROR.H01,
KERBEROS_PRINCIPAL);
}
@@ -347,7 +347,7 @@ protected void closeFileSystem(FileSystem fs) throws IOException {
protected void validateNamenode(String namenode)
throws FileSystemAccessException {
- if (nameNodeWhitelist.size() > 0 && !nameNodeWhitelist.contains("*")) {
+ if (!nameNodeWhitelist.isEmpty() && !nameNodeWhitelist.contains("*")) {
if (!nameNodeWhitelist.contains(
StringUtils.toLowerCase(namenode))) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H05,
@@ -373,8 +373,7 @@ public T execute(String user,
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
}
if (conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) == null ||
- conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)
- .length() == 0) {
+ conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY).isEmpty()) {
throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06,
CommonConfigurationKeysPublic
.FS_DEFAULT_NAME_KEY);
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/util/Check.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/util/Check.java
index 8515c1bf2d68..b9fbb6d23816 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/util/Check.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/util/Check.java
@@ -61,7 +61,7 @@ public static String notEmpty(String str, String name) {
if (str == null) {
throw new IllegalArgumentException(name + " cannot be null");
}
- if (str.length() == 0) {
+ if (str.isEmpty()) {
throw new IllegalArgumentException(name + " cannot be empty");
}
return str;
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/Param.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/Param.java
index 16b48b4758fb..b9147b03abbb 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/Param.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/Param.java
@@ -42,7 +42,7 @@ public String getName() {
public T parseParam(String str) {
try {
- value = (str != null && str.trim().length() > 0) ? parse(str) : value;
+ value = (str != null && !str.trim().isEmpty()) ? parse(str) : value;
} catch (Exception ex) {
throw new IllegalArgumentException(
MessageFormat.format("Parameter [{0}], invalid value [{1}], " +
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/Parameters.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/Parameters.java
index dd6d02dac05f..aabcbddd9137 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/Parameters.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/Parameters.java
@@ -50,7 +50,7 @@ public Parameters(Map<String, List<Param<?>>> params) {
@SuppressWarnings("unchecked")
public <V, T extends Param<V>> V get(String name, Class<T> klass) {
List<Param<?>> multiParams = (List<Param<?>>)params.get(name);
- if (multiParams != null && multiParams.size() > 0) {
+ if (multiParams != null && !multiParams.isEmpty()) {
return ((T) multiParams.get(0)).value(); // Return first value;
}
return null;
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/StringParam.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/StringParam.java
index 785cca94f7a9..9bc0ff9d56fd 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/StringParam.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/wsrs/StringParam.java
@@ -43,7 +43,7 @@ public String parseParam(String str) {
try {
if (str != null) {
str = str.trim();
- if (str.length() > 0) {
+ if (!str.isEmpty()) {
value = parse(str);
}
}
diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java
index 8d9797672842..d7c930d34db2 100644
--- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java
+++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java
@@ -98,7 +98,7 @@ public boolean equals(Object o) {
}
public String prefix() {
- return name + (id != null && id.length() > 0 ? "-" + id : "");
+ return name + (id != null && !id.isEmpty() ? "-" + id : "");
}
@Override
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
index 94c636812931..1af4bf24331a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
@@ -505,7 +505,7 @@ public void testHSyncOpenKeyCommitAfterExpiry() throws Exception {
// Verify hsync openKey gets committed eventually
// Key without hsync is deleted
GenericTestUtils.waitFor(() ->
- 0 == getOpenKeyInfo(BUCKET_LAYOUT).size(), 1000, 12000);
+ getOpenKeyInfo(BUCKET_LAYOUT).isEmpty(), 1000, 12000);
// Verify only one key is still present in fileTable
assertThat(1 == getKeyInfo(BUCKET_LAYOUT).size());
@@ -590,7 +590,7 @@ public void testHSyncOpenKeyDeletionWhileDeleteDirectory() throws Exception {
// Verify if DELETED_HSYNC_KEY metadata is added to openKey
GenericTestUtils.waitFor(() -> {
List<OmKeyInfo> omKeyInfo = getOpenKeyInfo(BUCKET_LAYOUT);
- return omKeyInfo.size() > 0 && omKeyInfo.get(0).getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY);
+ return !omKeyInfo.isEmpty() && omKeyInfo.get(0).getMetadata().containsKey(OzoneConsts.DELETED_HSYNC_KEY);
}, 1000, 12000);
// Resume openKeyCleanupService
@@ -598,7 +598,7 @@ public void testHSyncOpenKeyDeletionWhileDeleteDirectory() throws Exception {
// Verify entry from openKey gets deleted eventually
GenericTestUtils.waitFor(() ->
- 0 == getOpenKeyInfo(BUCKET_LAYOUT).size(), 1000, 12000);
+ getOpenKeyInfo(BUCKET_LAYOUT).isEmpty(), 1000, 12000);
} catch (OMException ex) {
assertEquals(OMException.ResultCodes.DIRECTORY_NOT_FOUND, ex.getResult());
} finally {
@@ -1378,7 +1378,7 @@ public void testNormalKeyOverwriteHSyncKey() throws Exception {
// Verify entry from openKey gets deleted eventually
GenericTestUtils.waitFor(() -> {
try {
- return getAllOpenKeys(openKeyTable).size() == 0 && getAllDeletedKeys(deletedTable).size() == 2;
+ return getAllOpenKeys(openKeyTable).isEmpty() && getAllDeletedKeys(deletedTable).size() == 2;
} catch (IOException e) {
throw new RuntimeException(e);
}
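The GenericTestUtils.waitFor conversions above put the emptiness check inside a polling lambda. For readers unfamiliar with that helper, the sketch below shows the general shape of such a poller; it is a hypothetical stand-in, since the real GenericTestUtils.waitFor has its own signature and failure behavior:

import java.util.function.BooleanSupplier;

public final class PollUntil {
  // Re-evaluates the condition every intervalMillis until it holds or timeoutMillis elapses.
  public static boolean waitFor(BooleanSupplier condition, long intervalMillis,
      long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(intervalMillis);
    }
    return condition.getAsBoolean();
  }
}

A call such as PollUntil.waitFor(() -> openKeys.isEmpty(), 1000, 12000) then mirrors the converted test code.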
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index bf5faa1401e9..70e7fb5e5bda 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -303,7 +303,7 @@ private void testBlockDeletionTransactions(MiniOzoneCluster cluster) throws Exce
try {
cluster.getStorageContainerManager().getScmHAManager()
.asSCMHADBTransactionBuffer().flush();
- return delLog.getFailedTransactions(-1, 0).size() == 0;
+ return delLog.getFailedTransactions(-1, 0).isEmpty();
} catch (IOException e) {
return false;
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
index 3a05a8a520e9..9fcd7b184d1d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHddsUpgradeUtils.java
@@ -123,7 +123,7 @@ public static void testPostUpgradeConditionsSCM(StorageContainerManager scm,
PipelineManager scmPipelineManager = scm.getPipelineManager();
try {
GenericTestUtils.waitFor(
- () -> scmPipelineManager.getPipelines(RATIS_THREE, OPEN).size() >= 1,
+ () -> !scmPipelineManager.getPipelines(RATIS_THREE, OPEN).isEmpty(),
500, 60000);
} catch (TimeoutException | InterruptedException e) {
fail("Timeout waiting for Upgrade to complete on SCM.");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
index 3e75542bf9f1..92bd630efb04 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/OzoneRpcClientTests.java
@@ -4133,7 +4133,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException {
List<OzoneAcl> expectedAcls = getAclList(new OzoneConfiguration());
// Case:1 Add new acl permission to existing acl.
- if (expectedAcls.size() > 0) {
+ if (!expectedAcls.isEmpty()) {
OzoneAcl oldAcl = expectedAcls.get(0);
OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(),
oldAcl.getAclScope(), ACLType.READ_ACL);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
index 5b2d11fbcca9..178e3db7f74e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
@@ -223,7 +223,7 @@ void testDeleteKeyWithInAdequateDN() throws Exception {
List<Pipeline> pipelineList =
cluster.getStorageContainerManager().getPipelineManager()
.getPipelines(RatisReplicationConfig.getInstance(THREE));
- Assumptions.assumeTrue(pipelineList.size() >= FACTOR_THREE_PIPELINE_COUNT);
+ Assumptions.assumeTrue(!pipelineList.isEmpty());
Pipeline pipeline = pipelineList.get(0);
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
if (RatisTestHelper.isRatisFollower(dn, pipeline)) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
index c2f0aab6219c..00ec423b558b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
@@ -285,7 +285,7 @@ private void verifyLog(String... expected) throws Exception {
}
} catch (AssertionError ex) {
LOG.error("Error occurred in log verification", ex);
- if (lines.size() != 0) {
+ if (!lines.isEmpty()) {
LOG.error("Actual line ::: " + lines.get(0));
LOG.error("Expected tokens ::: " + Arrays.toString(expected));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
index d1b842e680d4..c4af662f61b2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
@@ -369,7 +369,7 @@ private void waitForDNContainerState(ContainerInfo container,
.filter(r -> !ReplicationManager
.compareState(container.getState(), r.getState()))
.collect(Collectors.toList());
- return unhealthyReplicas.size() == 0;
+ return unhealthyReplicas.isEmpty();
} catch (ContainerNotFoundException e) {
return false;
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java
index 31057dc57124..173ab547b560 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java
@@ -209,8 +209,8 @@ private void testFinalizeBlockClearAfterCloseContainer(ContainerID containerId)
// Finalize Block should be cleared from container data.
GenericTestUtils.waitFor(() -> (
- (KeyValueContainerData) getContainerfromDN(cluster.getHddsDatanodes().get(0),
- containerId.getId()).getContainerData()).getFinalizedBlockSet().size() == 0,
+ (KeyValueContainerData) getContainerfromDN(cluster.getHddsDatanodes().get(0),
+ containerId.getId()).getContainerData()).getFinalizedBlockSet().isEmpty(),
100, 10 * 1000);
try {
// Restart DataNode
@@ -223,7 +223,7 @@ private void testFinalizeBlockClearAfterCloseContainer(ContainerID containerId)
assertTrue(((KeyValueContainerData)getContainerfromDN(
cluster.getHddsDatanodes().get(0),
containerId.getId()).getContainerData())
- .getFinalizedBlockSet().size() == 0);
+ .getFinalizedBlockSet().isEmpty());
}
private void testRejectPutAndWriteChunkAfterFinalizeBlock(ContainerID containerId, Pipeline pipeline,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java
index b36397896e1e..32ede9d5e1c6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java
@@ -120,7 +120,7 @@ private Path depthBFS(FileSystem fs, FileStatus[] fileStatuses,
LinkedList<FileStatus> queue = new LinkedList<>();
FileStatus f1 = fileStatuses[0];
queue.add(f1);
- while (queue.size() != 0) {
+ while (!queue.isEmpty()) {
FileStatus f = queue.poll();
FileStatus[] temp = fs.listStatus(f.getPath());
if (temp.length > 0) {
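while (!queue.isEmpty()) is the canonical termination condition for breadth-first traversals like the one above. A self-contained sketch of the same loop shape over a toy tree, using only JDK types (the Node class is illustrative, not the FileSystem API):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public final class BfsSketch {
  static final class Node {
    final String name;
    final List<Node> children;

    Node(String name, List<Node> children) {
      this.name = name;
      this.children = children;
    }
  }

  // Visits nodes level by level; the loop runs while the queue still has work.
  static void bfs(Node root) {
    Deque<Node> queue = new ArrayDeque<>();
    queue.add(root);
    while (!queue.isEmpty()) {
      Node n = queue.poll();
      System.out.println(n.name);
      queue.addAll(n.children);
    }
  }
}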
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
index fcd47b593a15..37d22b1720d9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
@@ -136,7 +136,7 @@ public void testKeysPurgingByKeyDeletingService() throws Exception {
() -> {
try {
return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE)
- .getKeyBlocksList().size() == 0;
+ .getKeyBlocksList().isEmpty();
} catch (IOException e) {
return false;
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
index 235dd9f78f3f..3c656091341c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
@@ -746,7 +746,7 @@ void testLinkBucketRemoveBucketAcl() throws Exception {
OzoneObj srcObj = buildBucketObj(srcBucket);
// As by default create will add some default acls in RpcClient.
List<OzoneAcl> acls = getObjectStore().getAcl(linkObj);
- assertTrue(acls.size() > 0);
+ assertTrue(!acls.isEmpty());
// Remove an existing acl.
boolean removeAcl = getObjectStore().removeAcl(linkObj, acls.get(0));
assertTrue(removeAcl);
@@ -759,7 +759,7 @@ void testLinkBucketRemoveBucketAcl() throws Exception {
OzoneObj srcObj2 = buildBucketObj(srcBucket2);
// As by default create will add some default acls in RpcClient.
List<OzoneAcl> acls2 = getObjectStore().getAcl(srcObj2);
- assertTrue(acls2.size() > 0);
+ assertTrue(!acls2.isEmpty());
// Remove an existing acl.
boolean removeAcl2 = getObjectStore().removeAcl(srcObj2, acls.get(0));
assertTrue(removeAcl2);
@@ -994,7 +994,7 @@ private void testSetAcl(String remoteUserName, OzoneObj ozoneObj,
OzoneObj.ResourceType.PREFIX.name())) {
List<OzoneAcl> acls = objectStore.getAcl(ozoneObj);
- assertTrue(acls.size() > 0);
+ assertTrue(!acls.isEmpty());
}
OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName,
@@ -1051,7 +1051,7 @@ private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj,
}
acls = objectStore.getAcl(ozoneObj);
- assertTrue(acls.size() > 0);
+ assertTrue(!acls.isEmpty());
// Remove an existing acl.
boolean removeAcl = objectStore.removeAcl(ozoneObj, acls.get(0));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
index 3bea05d46186..fb25b0f64199 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java
@@ -136,7 +136,7 @@ public void testMissingContainerDownNode() throws Exception {
// Make sure Recon's pipeline state is initialized.
LambdaTestUtils.await(60000, 5000,
- () -> (reconPipelineManager.getPipelines().size() >= 1));
+ () -> (!reconPipelineManager.getPipelines().isEmpty()));
ContainerManager scmContainerManager = scm.getContainerManager();
ReconContainerManager reconContainerManager =
@@ -216,7 +216,7 @@ public void testEmptyMissingContainerDownNode() throws Exception {
// Make sure Recon's pipeline state is initialized.
LambdaTestUtils.await(60000, 1000,
- () -> (reconPipelineManager.getPipelines().size() >= 1));
+ () -> (!reconPipelineManager.getPipelines().isEmpty()));
ContainerManager scmContainerManager = scm.getContainerManager();
ReconContainerManager reconContainerManager =
@@ -254,7 +254,7 @@ public void testEmptyMissingContainerDownNode() throws Exception {
.getUnhealthyContainerStateStatsMap();
// Return true if the size of the fetched containers is 0 and the log shows 1 for EMPTY_MISSING state
- return allEmptyMissingContainers.size() == 0 &&
+ return allEmptyMissingContainers.isEmpty() &&
unhealthyContainerStateStatsMap.get(
ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING)
.getOrDefault(CONTAINER_COUNT, 0L) == 1;
@@ -292,7 +292,7 @@ public void testEmptyMissingContainerDownNode() throws Exception {
.getUnhealthyContainerStateStatsMap();
// Return true if the size of the fetched containers is 0 and the log shows 0 for EMPTY_MISSING state
- return allEmptyMissingContainers.size() == 0 &&
+ return allEmptyMissingContainers.isEmpty() &&
unhealthyContainerStateStatsMap.get(
ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING)
.getOrDefault(CONTAINER_COUNT, 0L) == 0;
@@ -321,7 +321,7 @@ public void testEmptyMissingContainerDownNode() throws Exception {
.getUnhealthyContainerStateStatsMap();
// Return true if the size of the fetched containers is 0 and the log shows 1 for EMPTY_MISSING state
- return allEmptyMissingContainers.size() == 0 &&
+ return allEmptyMissingContainers.isEmpty() &&
unhealthyContainerStateStatsMap.get(
ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING)
.getOrDefault(CONTAINER_COUNT, 0L) == 1;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index fb9e1f6f9205..4357542ff7b8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1271,7 +1271,7 @@ private OzoneFileStatus getOzoneFileStatus(OmKeyArgs args,
bucketName);
try {
// Check if this is the root of the filesystem.
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
return new OzoneFileStatus();
}
@@ -1416,7 +1416,7 @@ private OzoneFileStatus getOzoneFileStatusFSO(OmKeyArgs args,
bucketName);
try {
// Check if this is the root of the filesystem.
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
OMFileRequest.validateBucket(metadataManager, volumeName, bucketName);
return new OzoneFileStatus();
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index d448417b6600..6693b9fe3482 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -1705,7 +1705,7 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount,
if (Objects.equals(Optional.ofNullable(newPreviousSnapshotInfo).map(SnapshotInfo::getSnapshotId),
Optional.ofNullable(previousSnapshotInfo).map(SnapshotInfo::getSnapshotId))) {
// If all the versions are not reclaimable, then do nothing.
- if (notReclaimableKeyInfoList.size() > 0 &&
+ if (!notReclaimableKeyInfoList.isEmpty() &&
notReclaimableKeyInfoList.size() !=
infoList.getOmKeyInfoList().size()) {
keysToModify.put(kv.getKey(), notReclaimableKeyInfo);
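The guard above fires only when some but not all key versions are unreclaimable: the retained list must be non-empty and strictly smaller than the original. A tiny sketch of that check in isolation (names are illustrative, not the OM API):

import java.util.List;

public final class PartialRetention {
  // True when filtering kept some, but not all, of the original items.
  static <T> boolean keptSomeButNotAll(List<T> retained, List<T> original) {
    return !retained.isEmpty() && retained.size() != original.size();
  }
}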
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
index 17f0faf4de6f..cbcb7e2dc065 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java
@@ -658,7 +658,7 @@ public boolean isNativeAuthorizerEnabled() {
}
private ResourceType getResourceType(OmKeyArgs args) {
- if (args.getKeyName() == null || args.getKeyName().length() == 0) {
+ if (args.getKeyName() == null || args.getKeyName().isEmpty()) {
return ResourceType.BUCKET;
}
return ResourceType.KEY;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
index 414ee4bc9961..7ae293971da8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
@@ -297,7 +297,7 @@ private void inheritParentAcl(OzoneObj ozoneObj, OmPrefixInfo prefixInfo)
List<OmPrefixInfo> prefixList = getLongestPrefixPathHelper(
prefixTree.getLongestPrefix(ozoneObj.getPath()));
- if (prefixList.size() > 0) {
+ if (!prefixList.isEmpty()) {
// Add all acls from direct parent to key.
OmPrefixInfo parentPrefixInfo = prefixList.get(prefixList.size() - 1);
if (parentPrefixInfo != null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java
index 11b8bc9cb6c9..65074bb83790 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java
@@ -79,7 +79,7 @@ private void addSnapshotGlobal(UUID snapshotID, UUID prevGlobalID)
"Global Snapshot chain corruption. Snapshot with snapshotId: %s is " +
"already present in the chain.", snapshotID));
}
- if (globalSnapshotChain.size() > 0 && prevGlobalID == null) {
+ if (!globalSnapshotChain.isEmpty() && prevGlobalID == null) {
throw new IOException(String.format("Snapshot chain " +
"corruption. Adding snapshot %s as head node while there are %d " +
"snapshots in the global snapshot chain.", snapshotID,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index 651d44ad5f87..723b977111f2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@ -399,7 +399,7 @@ boolean iterate() throws IOException {
}
}
}
- if (keyPathList.size() > 0) {
+ if (!keyPathList.isEmpty()) {
if (!processKeyPath(keyPathList)) {
return false;
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
index 4026851a35d7..7c10f3f84a7c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java
@@ -129,7 +129,7 @@ public static OMHANodeDetails loadOMHAConfig(OzoneConfiguration conf) {
Collection<String> omNodeIds = OmUtils.getActiveOMNodeIds(conf,
serviceId);
- if (omNodeIds.size() == 0) {
+ if (omNodeIds.isEmpty()) {
throwConfException("Configuration does not have any value set for %s " +
"for the service %s. List of OM Node ID's should be specified " +
"for an OM service", OZONE_OM_NODES_KEY, serviceId);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 9e35496ca05f..9753fa3e0a0e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -551,7 +551,7 @@ public static String isValidKeyPath(String path) throws OMException {
boolean isValid = true;
// If keyName is empty string throw error.
- if (path.length() == 0) {
+ if (path.isEmpty()) {
throw new OMException("Invalid KeyPath, empty keyName" + path,
INVALID_KEY_NAME);
} else if (path.startsWith("/")) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 34e2603e977a..b7adcb6a19d4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -144,7 +144,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
try {
// Check if this is the root of the filesystem.
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
throw new OMException("Directory create failed. Cannot create " +
"directory at root of the filesystem",
OMException.ResultCodes.CANNOT_CREATE_DIRECTORY_AT_ROOT);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
index 3436ae6c9f6b..3ad7cbd17fec 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
@@ -99,7 +99,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
try {
// Check if this is the root of the filesystem.
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
throw new OMException("Directory create failed. Cannot create " +
"directory at root of the filesystem",
OMException.ResultCodes.CANNOT_CREATE_DIRECTORY_AT_ROOT);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 72bb50a3a780..a76c2182e661 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -95,7 +95,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
validateKey(ozoneManager, validateArgs);
UserInfo userInfo = getUserInfo();
- if (keyArgs.getKeyName().length() == 0) {
+ if (keyArgs.getKeyName().isEmpty()) {
// Check if this is the root of the filesystem.
// Not throwing exception here, as need to throw exception after
// checking volume/bucket exists.
@@ -207,7 +207,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
// Check if this is the root of the filesystem.
throw new OMException("Can not write to directory: " + keyName,
OMException.ResultCodes.NOT_A_FILE);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
index 8f6867b9c1d0..6b9a9d76aae4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
@@ -106,7 +106,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
Exception exception = null;
Result result = null;
try {
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
// Check if this is the root of the filesystem.
throw new OMException("Can not write to directory: " + keyName,
OMException.ResultCodes.NOT_A_FILE);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 1690c560d34f..f8058bd7a897 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -203,7 +203,7 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath(
String fileName = elements.next().toString();
fullKeyPath.append(OzoneConsts.OM_KEY_PREFIX);
fullKeyPath.append(fileName);
- if (missing.size() > 0) {
+ if (!missing.isEmpty()) {
// Add all the sub-dirs to the missing list except the leaf element.
// For example, /vol1/buck1/a/b/c/d/e/f/file1.txt.
// Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
index e1abfc1481c3..a818a3ac89f0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
@@ -246,13 +246,13 @@ private RecoverLeaseResponse doWork(OzoneManager ozoneManager,
OmKeyLocationInfoGroup openKeyLatestVersionLocations = openKeyInfo.getLatestVersionLocations();
List<OmKeyLocationInfo> openKeyLocationInfoList = openKeyLatestVersionLocations.getLocationList();
- if (keyLocationInfoList.size() > 0) {
+ if (!keyLocationInfoList.isEmpty()) {
updateBlockInfo(ozoneManager, keyLocationInfoList.get(keyLocationInfoList.size() - 1));
}
if (openKeyLocationInfoList.size() > 1) {
updateBlockInfo(ozoneManager, openKeyLocationInfoList.get(openKeyLocationInfoList.size() - 1));
updateBlockInfo(ozoneManager, openKeyLocationInfoList.get(openKeyLocationInfoList.size() - 2));
- } else if (openKeyLocationInfoList.size() > 0) {
+ } else if (!openKeyLocationInfoList.isEmpty()) {
updateBlockInfo(ozoneManager, openKeyLocationInfoList.get(0));
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
index 740911aaedaf..d59c5f37b8ad 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
@@ -144,7 +144,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
String toKey = null, fromKey = null;
Result result = null;
try {
- if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
+ if (toKeyName.isEmpty() || fromKeyName.isEmpty()) {
throw new OMException("Key name is empty",
OMException.ResultCodes.INVALID_KEY_NAME);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
index cabc27430266..cbab201b55e6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
@@ -98,7 +98,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
OmKeyInfo fromKeyValue;
Result result;
try {
- if (fromKeyName.length() == 0) {
+ if (fromKeyName.isEmpty()) {
throw new OMException("Source key name is empty",
OMException.ResultCodes.INVALID_KEY_NAME);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 60e7fd224e28..17e0f6a4c042 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -421,7 +421,7 @@ protected List<OzoneAcl> getAclsForKey(KeyArgs keyArgs,
keyArgs.getBucketName() + OZONE_URI_DELIMITER +
keyArgs.getKeyName());
- if (prefixList.size() > 0) {
+ if (!prefixList.isEmpty()) {
// Add all acls from direct parent to key.
OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1);
if (prefixInfo != null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
index e4d80b7195b5..1020ad8bbbcc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
@@ -132,7 +132,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
toKeyName = renameKey.getToKeyName();
RenameKeysMap.Builder unRenameKey = RenameKeysMap.newBuilder();
- if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
+ if (toKeyName.isEmpty() || fromKeyName.isEmpty()) {
renameStatus = false;
unRenamedKeys.add(
unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
index 3b902f3855f9..d638cebd658c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
@@ -115,7 +115,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
// the existing acl list, if list size becomes zero, delete the
// prefix from prefix table.
if (getOmRequest().hasRemoveAclRequest() &&
- omPrefixInfo.getAcls().size() == 0) {
+ omPrefixInfo.getAcls().isEmpty()) {
omMetadataManager.getPrefixTable().addCacheEntry(
new CacheKey<>(prefixPath),
CacheValue.get(trxnLogIndex));
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 9dbdd3c4dcc0..bafac0d968a5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -268,7 +268,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
}
- if (partsList.size() > 0) {
+ if (!partsList.isEmpty()) {
final OmMultipartKeyInfo.PartKeyInfoMap partKeyInfoMap
= multipartKeyInfo.getPartKeyInfoMap();
if (partKeyInfoMap.size() == 0) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java
index 6b93db4accfc..fdd71775380b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java
@@ -142,7 +142,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
LOG.debug("Tenant '{}' has volume '{}'", tenantId, volumeName);
// decVolumeRefCount is true if volumeName is not empty string
- decVolumeRefCount = volumeName.length() > 0;
+ decVolumeRefCount = !volumeName.isEmpty();
// Acquire the volume lock
mergeOmLockDetails(omMetadataManager.getLock().acquireWriteLock(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java
index 491339823b93..bb5c8d8cb377 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java
@@ -193,7 +193,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
Preconditions.checkNotNull(principalInfo);
principalInfo.removeAccessId(accessId);
CacheValue<OmDBUserPrincipalInfo> cacheValue =
- principalInfo.getAccessIds().size() > 0
+ !principalInfo.getAccessIds().isEmpty()
? CacheValue.get(transactionLogIndex, principalInfo)
// Invalidate (remove) the entry if accessIds set is empty
: CacheValue.get(transactionLogIndex);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java
index 1982e8ae665c..d11cb3d4d7fe 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java
@@ -55,7 +55,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
BatchOperation batchOperation) throws IOException {
if (getOMResponse().hasRemoveAclResponse() &&
- prefixInfo.getAcls().size() == 0) {
+ prefixInfo.getAcls().isEmpty()) {
// if acl list size is zero, delete the entry.
omMetadataManager.getPrefixTable().deleteWithBatch(batchOperation,
prefixInfo.getName());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantDeleteResponse.java
index b45c320e44d2..7a9f8077d2d9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantDeleteResponse.java
@@ -70,7 +70,7 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
omMetadataManager.getTenantStateTable().deleteWithBatch(
batchOperation, tenantId);
- if (volumeName.length() > 0) {
+ if (!volumeName.isEmpty()) {
Preconditions.checkNotNull(omVolumeArgs);
Preconditions.checkState(omVolumeArgs.getVolume().equals(volumeName));
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeUserAccessIdResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeUserAccessIdResponse.java
index 64f5d7038c2a..0eb4d889a314 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeUserAccessIdResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/tenant/OMTenantRevokeUserAccessIdResponse.java
@@ -86,7 +86,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
omMetadataManager.getTenantAccessIdTable().deleteWithBatch(
batchOperation, accessId);
- if (omDBUserPrincipalInfo.getAccessIds().size() > 0) {
+ if (!omDBUserPrincipalInfo.getAccessIds().isEmpty()) {
omMetadataManager.getPrincipalToAccessIdsTable().putWithBatch(
batchOperation, principal, omDBUserPrincipalInfo);
} else {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
index ad37a4eb062a..3a56328c06b7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java
@@ -61,7 +61,7 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
String dbUserKey = omMetadataManager.getUserKey(owner);
PersistedUserVolumeInfo volumeList = updatedVolumeList;
- if (updatedVolumeList.getVolumeNamesList().size() == 0) {
+ if (updatedVolumeList.getVolumeNamesList().isEmpty()) {
omMetadataManager.getUserTable().deleteWithBatch(batchOperation,
dbUserKey);
} else {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
index 8d774b5e5e3a..8f22c07cc5d9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java
@@ -84,7 +84,7 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
String oldOwnerKey = omMetadataManager.getUserKey(oldOwner);
String newOwnerKey =
omMetadataManager.getUserKey(newOwnerVolumeArgs.getOwnerName());
- if (oldOwnerVolumeList.getVolumeNamesList().size() == 0) {
+ if (oldOwnerVolumeList.getVolumeNamesList().isEmpty()) {
omMetadataManager.getUserTable().deleteWithBatch(batchOperation,
oldOwnerKey);
} else {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
index 66a1da44acc7..7c938c90565c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
@@ -203,7 +203,7 @@ private int submitPurgeKeysRequest(List<DeleteBlockGroupResult> results,
keysToUpdateList.add(keyToUpdate.build());
}
- if (keysToUpdateList.size() > 0) {
+ if (!keysToUpdateList.isEmpty()) {
purgeKeysRequest.addAllKeysToUpdate(keysToUpdateList);
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
index 1683599d2a9c..98d5a2f93c3c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
@@ -412,7 +412,7 @@ private void processSnapshotDeepClean(int delCount)
}
}
- if (newRepeatedOmKeyInfo.getOmKeyInfoList().size() > 0 &&
+ if (!newRepeatedOmKeyInfo.getOmKeyInfoList().isEmpty() &&
newRepeatedOmKeyInfo.getOmKeyInfoList().size() !=
repeatedOmKeyInfo.getOmKeyInfoList().size()) {
keysToModify.put(deletedKey, newRepeatedOmKeyInfo);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/FSODirectoryPathResolver.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/FSODirectoryPathResolver.java
index dec712ce669a..70aaa0c40342 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/FSODirectoryPathResolver.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/FSODirectoryPathResolver.java
@@ -83,13 +83,13 @@ public Map<Long, Path> getAbsolutePathForObjectIDs(
objectIdPathVals.add(root);
addToPathMap(root, objIds, objectIdPathMap);
- while (!objectIdPathVals.isEmpty() && objIds.size() > 0) {
+ while (!objectIdPathVals.isEmpty() && !objIds.isEmpty()) {
Pair<Long, Path> parent = objectIdPathVals.poll();
try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
subDirIter = dirInfoTable.iterator(
prefix + parent.getKey() + OM_KEY_PREFIX)) {
- while (objIds.size() > 0 && subDirIter.hasNext()) {
+ while (!objIds.isEmpty() && subDirIter.hasNext()) {
OmDirectoryInfo childDir = subDirIter.next().getValue();
Pair<Long, Path> pathVal = Pair.of(childDir.getObjectID(),
parent.getValue().resolve(childDir.getName()));
@@ -99,7 +99,7 @@ public Map<Long, Path> getAbsolutePathForObjectIDs(
}
}
// Invalid directory objectId which does not exist in the given bucket.
- if (objIds.size() > 0 && !skipUnresolvedObjs) {
+ if (!objIds.isEmpty() && !skipUnresolvedObjs) {
throw new IllegalArgumentException(
"Dir object Ids required but not found in bucket: " + objIds);
}
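For context on the two guards changed above: getAbsolutePathForObjectIDs walks the directory table breadth-first and stops as soon as every requested objectID has been resolved, which is why both loop conditions now read !objIds.isEmpty(). Below is a minimal sketch of that early-exit shape under assumed types; the queue, set, and childrenOf helper are hypothetical stand-ins, not the resolver's real API.

    import java.util.*;

    final class EarlyExitBfs {
      // Hypothetical child lookup standing in for the dirInfoTable iterator.
      static List<Long> childrenOf(long dir, Map<Long, List<Long>> tree) {
        return tree.getOrDefault(dir, Collections.emptyList());
      }

      // Returns the ids that could not be resolved; a non-empty result is the
      // condition the resolver turns into an IllegalArgumentException.
      static Set<Long> resolve(long root, Set<Long> wanted,
          Map<Long, List<Long>> tree) {
        Set<Long> unresolved = new HashSet<>(wanted);
        unresolved.remove(root);
        Deque<Long> queue = new ArrayDeque<>(List.of(root));
        // Stop when either the tree or the request set is exhausted, mirroring
        // while (!objectIdPathVals.isEmpty() && !objIds.isEmpty()) above.
        while (!queue.isEmpty() && !unresolved.isEmpty()) {
          long dir = queue.poll();
          for (long child : childrenOf(dir, tree)) {
            unresolved.remove(child);
            queue.add(child);
          }
        }
        return unresolved;
      }
    }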
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
index 94bf815e593b..9badbcf59f8f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java
@@ -243,7 +243,7 @@ public void testCheckAccessForBucket(
createAll(keyName, prefixName, userRight, groupRight, expectedResult);
OzoneAcl userAcl = new OzoneAcl(USER, testUgi.getUserName(),
ACCESS, parentDirUserAcl);
- OzoneAcl groupAcl = new OzoneAcl(GROUP, testUgi.getGroups().size() > 0 ?
+ OzoneAcl groupAcl = new OzoneAcl(GROUP, !testUgi.getGroups().isEmpty() ?
testUgi.getGroups().get(0) : "", ACCESS, parentDirGroupAcl);
// Set access for volume.
// We should directly add to table because old API's update to DB.
@@ -265,7 +265,7 @@ public void testCheckAccessForKey(
createAll(keyName, prefixName, userRight, groupRight, expectedResult);
OzoneAcl userAcl = new OzoneAcl(USER, testUgi.getUserName(),
ACCESS, parentDirUserAcl);
- OzoneAcl groupAcl = new OzoneAcl(GROUP, testUgi.getGroups().size() > 0 ?
+ OzoneAcl groupAcl = new OzoneAcl(GROUP, !testUgi.getGroups().isEmpty() ?
testUgi.getGroups().get(0) : "", ACCESS, parentDirGroupAcl);
// Set access for volume & bucket. We should directly add to table
// because old API's update to DB.
@@ -295,7 +295,7 @@ public void testCheckAccessForPrefix(
OzoneAcl userAcl = new OzoneAcl(USER, testUgi.getUserName(),
ACCESS, parentDirUserAcl);
- OzoneAcl groupAcl = new OzoneAcl(GROUP, testUgi.getGroups().size() > 0 ?
+ OzoneAcl groupAcl = new OzoneAcl(GROUP, !testUgi.getGroups().isEmpty() ?
testUgi.getGroups().get(0) : "", ACCESS, parentDirGroupAcl);
// Set access for volume & bucket. We should directly add to table
// because old API's update to DB.
@@ -333,7 +333,7 @@ private void resetAclsAndValidateAccess(
throws IOException {
List<OzoneAcl> acls;
String user = testUgi.getUserName();
- String group = (testUgi.getGroups().size() > 0) ?
+ String group = (!testUgi.getGroups().isEmpty()) ?
testUgi.getGroups().get(0) : "";
RequestContext.Builder builder = new RequestContext.Builder()
@@ -486,7 +486,7 @@ private String getAclName(ACLIdentityType identityType) {
case USER:
return testUgi.getUserName();
case GROUP:
- if (testUgi.getGroups().size() > 0) {
+ if (!testUgi.getGroups().isEmpty()) {
return testUgi.getGroups().get(0);
}
default:
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index b9a91b3659c7..51cdf6cd2963 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -1239,7 +1239,7 @@ boolean iterate() throws IOException {
}
}
}
- if (keyList.size() > 0) {
+ if (!keyList.isEmpty()) {
if (!processKey(keyList)) {
return false;
}
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index cc9340264ebe..0079e5b39dbc 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -536,7 +536,7 @@ public boolean createDirectory(String pathStr) throws IOException {
// given in pathStr, so getBucket above should handle the creation
// of volume and bucket. We won't feed empty keyStr to
// bucket.createDirectory as that would be a NPE.
- if (keyStr != null && keyStr.length() > 0) {
+ if (keyStr != null && !keyStr.isEmpty()) {
bucket.createDirectory(keyStr);
}
} catch (OMException e) {
@@ -563,7 +563,7 @@ public boolean deleteObject(String path, boolean recursive)
incrementCounter(Statistic.OBJECTS_DELETED, 1);
OFSPath ofsPath = new OFSPath(path, config);
String keyName = ofsPath.getKeyName();
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
return false;
}
try {
@@ -619,7 +619,7 @@ private boolean areInSameBucket(List<String> keyNameList) {
*/
@Override
public boolean deleteObjects(List<String> keyNameList) {
- if (keyNameList.size() == 0) {
+ if (keyNameList.isEmpty()) {
return true;
}
// Sanity check. Support only deleting a list of keys in the same bucket
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index 17cf1e140f88..c135e78a2325 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -1491,7 +1491,7 @@ boolean iterate() throws IOException {
}
}
}
- if (keyPathList.size() > 0) {
+ if (!keyPathList.isEmpty()) {
if (!processKeyPath(keyPathList)) {
return false;
}
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
index 54f287dd8e0e..2239874c048a 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java
@@ -227,7 +227,7 @@ public static FileChecksum getFileChecksumWithCombineMode(OzoneVolume volume,
ClientProtocol rpcClient) throws IOException {
Preconditions.checkArgument(length >= 0);
- if (keyName.length() == 0) {
+ if (keyName.isEmpty()) {
return null;
}
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volume.getName())
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
index a1f80f516230..0da84ab066b2 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
@@ -71,7 +71,7 @@ public NSSummaryEndpoint(ReconNamespaceSummaryManager namespaceSummaryManager,
public Response getBasicInfo(
@QueryParam("path") String path) throws IOException {
- if (path == null || path.length() == 0) {
+ if (path == null || path.isEmpty()) {
return Response.status(Response.Status.BAD_REQUEST).build();
}
@@ -112,7 +112,7 @@ public Response getDiskUsage(@QueryParam("path") String path,
@DefaultValue("false") @QueryParam("replica") boolean withReplica,
@DefaultValue("true") @QueryParam("sortSubPaths") boolean sortSubpaths)
throws IOException {
- if (path == null || path.length() == 0) {
+ if (path == null || path.isEmpty()) {
return Response.status(Response.Status.BAD_REQUEST).build();
}
@@ -143,7 +143,7 @@ public Response getDiskUsage(@QueryParam("path") String path,
public Response getQuotaUsage(@QueryParam("path") String path)
throws IOException {
- if (path == null || path.length() == 0) {
+ if (path == null || path.isEmpty()) {
return Response.status(Response.Status.BAD_REQUEST).build();
}
@@ -173,7 +173,7 @@ public Response getQuotaUsage(@QueryParam("path") String path)
public Response getFileSizeDistribution(@QueryParam("path") String path)
throws IOException {
- if (path == null || path.length() == 0) {
+ if (path == null || path.isEmpty()) {
return Response.status(Response.Status.BAD_REQUEST).build();
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index 73608fcf2dd6..c07d841417a5 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -993,7 +993,7 @@ public Response listKeys(@QueryParam("replicationType") String replicationType,
// This API supports startPrefix from bucket level.
- if (startPrefix == null || startPrefix.length() == 0) {
+ if (startPrefix == null || startPrefix.isEmpty()) {
return Response.status(Response.Status.BAD_REQUEST).build();
}
String[] names = startPrefix.split(OM_KEY_PREFIX);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
index 8aa0307b9def..f1b46e4db20e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
@@ -424,8 +424,7 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) {
containerInfo.containerID());
}
if (containerInfo.getState() == HddsProtos.LifeCycleState.DELETING &&
- containerManager.getContainerReplicas(containerInfo.containerID())
- .size() == 0
+ containerManager.getContainerReplicas(containerInfo.containerID()).isEmpty()
) {
containerManager.updateContainerState(containerInfo.containerID(),
HddsProtos.LifeCycleEvent.CLEANUP);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
index 0553f87bca88..21ecf56e0163 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
@@ -95,10 +95,10 @@ public synchronized void start() {
private void tryReconExitSafeMode()
throws InterruptedException {
// Recon starting first time
- if (null == allNodes || allNodes.size() == 0) {
+ if (null == allNodes || allNodes.isEmpty()) {
return;
}
- if (null == containers || containers.size() == 0) {
+ if (null == containers || containers.isEmpty()) {
return;
}
final Set currentContainersInAllDatanodes =
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
index e1cb7bd4061b..30776ab51027 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
@@ -87,7 +87,7 @@ private void addBucketData(
List<EntityReadAccessHeatMapResponse> bucketList =
children.stream().filter(entity -> entity.getLabel().
equalsIgnoreCase(split[1])).collect(Collectors.toList());
- if (bucketList.size() > 0) {
+ if (!bucketList.isEmpty()) {
bucketEntity = bucketList.get(0);
}
if (children.contains(bucketEntity)) {
@@ -184,7 +184,7 @@ private void setEntityLevelAccessCount(
entity.setAccessCount(entity.getAccessCount() + child.getAccessCount());
});
// This is being taken as whole number
- if (entity.getAccessCount() > 0 && children.size() > 0) {
+ if (entity.getAccessCount() > 0 && !children.isEmpty()) {
entity.setAccessCount(entity.getAccessCount() / children.size());
}
}
@@ -232,10 +232,10 @@ private void updateRootEntitySize(
EntityReadAccessHeatMapResponse entity) {
List<EntityReadAccessHeatMapResponse> children =
entity.getChildren();
- if (children.size() == 0) {
+ if (children.isEmpty()) {
entity.setMaxAccessCount(entity.getMinAccessCount());
}
- if (children.size() > 0) {
+ if (!children.isEmpty()) {
entity.setMinAccessCount(Long.MAX_VALUE);
}
return children;
@@ -274,7 +274,7 @@ private void updateEntityAccessRatio(EntityReadAccessHeatMapResponse entity) {
List<EntityReadAccessHeatMapResponse> children =
entity.getChildren();
children.stream().forEach(path -> {
- if (path.getChildren().size() != 0) {
+ if (!path.getChildren().isEmpty()) {
updateEntityAccessRatio(path);
} else {
path.setColor(1.000);
@@ -418,7 +418,7 @@ public EntityReadAccessHeatMapResponse generateHeatMap(
List<EntityReadAccessHeatMapResponse> volumeList =
children.stream().filter(entity -> entity.getLabel().
equalsIgnoreCase(split[0])).collect(Collectors.toList());
- if (volumeList.size() > 0) {
+ if (!volumeList.isEmpty()) {
volumeEntity = volumeList.get(0);
}
if (null != volumeEntity) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index 165306081e3e..b6c13505cdf5 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -552,7 +552,7 @@ public boolean syncWithSCMContainerInfo()
getListOfContainers(startContainerId,
Long.valueOf(containerCountPerCall).intValue(),
HddsProtos.LifeCycleState.CLOSED);
- if (null != listOfContainers && listOfContainers.size() > 0) {
+ if (null != listOfContainers && !listOfContainers.isEmpty()) {
LOG.info("Got list of containers from SCM : " +
listOfContainers.size());
listOfContainers.forEach(containerInfo -> {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java
index 8b030a26e8d4..73a8e907eace 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java
@@ -118,7 +118,7 @@ private int readHeader() throws IOException {
curr = next;
}
String signatureLine = buf.toString().trim();
- if (signatureLine.length() == 0) {
+ if (signatureLine.isEmpty()) {
return -1;
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 466293eeacad..a5ebb333cf2f 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -505,7 +505,7 @@ public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName,
Map auditMap = getAuditParameters();
auditMap.put("failedDeletes", deleteKeys.toString());
- if (result.getErrors().size() != 0) {
+ if (!result.getErrors().isEmpty()) {
AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction,
auditMap, new Exception("MultiDelete Exception")));
} else {
@@ -633,7 +633,7 @@ public Response putAcl(String bucketName, HttpHeaders httpHeaders,
List<OzoneAcl> aclsToRemoveOnVolume = new ArrayList<>();
List<OzoneAcl> currentAclsOnVolume = volume.getAcls();
// Remove input user/group's permission from Volume first
- if (currentAclsOnVolume.size() > 0) {
+ if (!currentAclsOnVolume.isEmpty()) {
for (OzoneAcl acl : acls) {
if (acl.getAclScope() == ACCESS) {
aclsToRemoveOnVolume.addAll(OzoneAclUtil.filterAclList(
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java
index f02839f303d1..7d8f2fe04e98 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java
@@ -134,7 +134,7 @@ public static LowerCaseKeyStringMap fromHeaderMap(
new LowerCaseKeyStringMap();
for (Entry<String, List<String>> headerEntry : rawHeaders.entrySet()) {
- if (0 < headerEntry.getValue().size()) {
+ if (!headerEntry.getValue().isEmpty()) {
String headerKey = headerEntry.getKey();
if (headers.containsKey(headerKey)) {
//multiple headers from the same type are combined
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4HeaderParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4HeaderParser.java
index cee828fa8466..b327280d070c 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4HeaderParser.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4HeaderParser.java
@@ -104,7 +104,7 @@ private String parseSignedHeaders(String signedHeadersStr)
signedHeadersStr.substring(SIGNEDHEADERS.length());
Collection<String> signedHeaders =
StringUtils.getStringCollection(parsedSignedHeaders, ";");
- if (signedHeaders.size() == 0) {
+ if (signedHeaders.isEmpty()) {
throw new MalformedResourceException("No signed headers found.",
authHeader);
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4QueryParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4QueryParser.java
index 5c716d88e0c5..a70311271817 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4QueryParser.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4QueryParser.java
@@ -126,8 +126,8 @@ protected void validateDateAndExpires()
final String expiresString = queryParameters.get("X-Amz-Expires");
if (dateString == null ||
expiresString == null ||
- dateString.length() == 0 ||
- expiresString.length() == 0) {
+ dateString.isEmpty() ||
+ expiresString.isEmpty()) {
throw new MalformedResourceException(
"dateString or expiresString are missing or empty.");
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java
index 8d2eafa648d4..50af6f044ca8 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/StringToSignProducer.java
@@ -109,7 +109,7 @@ public static String createSignatureBase(
// If the absolute path is empty, use a forward slash (/)
String uri = signatureInfo.getUnfilteredURI();
- uri = (uri.trim().length() > 0) ? uri : "/";
+ uri = (!uri.trim().isEmpty()) ? uri : "/";
// Encode URI and preserve forward slashes
strToSign.append(signatureInfo.getAlgorithm() + NEWLINE);
if (signatureInfo.getDateTime() == null) {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
index c4671e982138..571939d9ece0 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
@@ -42,7 +42,7 @@ public ContinueToken(String lastKey, String lastDir) {
Preconditions.checkNotNull(lastKey,
"The last key can't be null in the continue token.");
this.lastKey = lastKey;
- if (lastDir != null && lastDir.length() > 0) {
+ if (lastDir != null && !lastDir.isEmpty()) {
this.lastDir = lastDir;
}
}
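The guard kept here pairs a null check with the new isEmpty() call. commons-lang3, which this patch already touches elsewhere (RandomStringUtils in BaseFreonGenerator), offers StringUtils.isNotEmpty as a one-call equivalent; the hunk above stays with the minimal mechanical rewrite instead. A small sketch of the equivalence, with an illustrative method name:

    import org.apache.commons.lang3.StringUtils;

    final class NullOrEmptyGuard {
      static boolean hasLastDir(String lastDir) {
        boolean manual = lastDir != null && !lastDir.isEmpty();
        boolean viaUtil = StringUtils.isNotEmpty(lastDir);  // same truth table
        assert manual == viaUtil;
        return manual;
      }
    }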
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java
index 897192bb4658..e2b889ce935d 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java
@@ -83,7 +83,7 @@ public class DiskUsageSubCommand implements Callable<Void> {
@Override
public Void call() throws Exception {
- if (path == null || path.length() == 0) {
+ if (path == null || path.isEmpty()) {
printEmptyPathRequest();
return null;
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java
index 9e32ee3d8a82..c06dcefd7173 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java
@@ -56,7 +56,7 @@ public class FileSizeDistSubCommand implements Callable<Void> {
@Override
public Void call() throws Exception {
- if (path == null || path.length() == 0) {
+ if (path == null || path.isEmpty()) {
printEmptyPathRequest();
return null;
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java
index 5d62f458a0c9..748acd0a036f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java
@@ -57,7 +57,7 @@ public class QuotaUsageSubCommand implements Callable<Void> {
@Override
public Void call() throws Exception {
- if (path == null || path.length() == 0) {
+ if (path == null || path.isEmpty()) {
printEmptyPathRequest();
return null;
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java
index a4905d255ce0..491337d7f862 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java
@@ -56,7 +56,7 @@ public class SummarySubCommand implements Callable<Void> {
@Override
public Void call() throws Exception {
- if (path == null || path.length() == 0) {
+ if (path == null || path.isEmpty()) {
printEmptyPathRequest();
return null;
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
index a96e3ab95ab9..c7b2ed28073a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
@@ -285,7 +285,7 @@ public void init() {
failureCounter = new AtomicLong(0);
attemptCounter = new AtomicLong(0);
- if (prefix.length() == 0) {
+ if (prefix.isEmpty()) {
prefix = !allowEmptyPrefix() ? RandomStringUtils.randomAlphanumeric(10).toLowerCase() : "";
} else {
//replace environment variables to support multi-node execution
@@ -456,7 +456,7 @@ public static Pipeline findPipelineForTest(String pipelineId,
pipelines = pipelines
.peek(p -> log.debug("Found pipeline {}", p.getId().getId()));
}
- if (pipelineId != null && pipelineId.length() > 0) {
+ if (pipelineId != null && !pipelineId.isEmpty()) {
pipeline = pipelines
.filter(p -> p.getId().toString().equals(pipelineId))
.findFirst()
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
index b0dcdf6f3b13..b578a3430cfc 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
@@ -174,7 +174,7 @@ private void printProgressBar(PrintStream stream, long value) {
StringBuilder sb = new StringBuilder();
String realTimeMessage = supplier.get();
int shrinkTimes = 1;
- if (realTimeMessage.length() != 0) {
+ if (!realTimeMessage.isEmpty()) {
shrinkTimes = 3;
}
sb.append(" ").append(String.format("%.2f", percent)).append("% |");
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3EntityGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3EntityGenerator.java
index c2eac6d147cd..6e2e728a81e9 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3EntityGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3EntityGenerator.java
@@ -42,7 +42,7 @@ protected void s3ClientInit() {
AmazonS3ClientBuilder.standard()
.withCredentials(new EnvironmentVariableCredentialsProvider());
- if (endpoint.length() > 0) {
+ if (!endpoint.isEmpty()) {
amazonS3ClientBuilder
.withPathStyleAccessEnabled(true)
.withEndpointConfiguration(