diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index c45a257b05a7..5dc44f4d4ec5 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -194,7 +194,7 @@ public class OzoneClientConfig {
 
   @Config(key = "ozone.client.ec.reconstruct.stripe.read.pool.limit",
       defaultValue = "30",
-      description = "Thread pool max size for parallelly read" +
+      description = "Thread pool max size for parallel read" +
           " available ec chunks to reconstruct the whole stripe.",
       tags = ConfigTag.CLIENT)
   // For the largest recommended EC policy rs-10-4-1024k,
@@ -205,7 +205,7 @@ public class OzoneClientConfig {
 
   @Config(key = "ozone.client.ec.reconstruct.stripe.write.pool.limit",
       defaultValue = "30",
-      description = "Thread pool max size for parallelly write" +
+      description = "Thread pool max size for parallel write" +
           " available ec chunks to reconstruct the whole stripe.",
       tags = ConfigTag.CLIENT)
   private int ecReconstructStripeWritePoolLimit = 10 * 3;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 3b6211579850..ad6f62f86fbc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -504,7 +504,7 @@ public HeartbeatEndpointTask build() {
 
       if (conf == null) {
         LOG.error("No config specified.");
-        throw new IllegalArgumentException("A valid configration is needed to" +
+        throw new IllegalArgumentException("A valid configuration is needed to" +
            " construct HeartbeatEndpointTask task");
       }
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java
index 4f528a83c201..0b05eec14a9e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointServlet.java
@@ -301,7 +301,7 @@ private static String[] parseFormDataParameters(HttpServletRequest request) {
         sstParam.add(Streams.asString(item.openStream()));
       }
     } catch (Exception e) {
-      LOG.warn("Exception occured during form data parsing {}", e.getMessage());
+      LOG.warn("Exception occurred during form data parsing {}", e.getMessage());
     }
 
     return sstParam.isEmpty() ? null : sstParam.toArray(new String[0]);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java
index bf11e7681ea1..7ffa37956854 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java
@@ -130,7 +130,7 @@ public synchronized void getMetrics(MetricsCollector collector, boolean all) {
   // exposing internal representation. FindBugs error raised.
   private MutableQuantiles[] grpcQueueTimeMillisQuantiles;
 
-  @Metric("Processsing time")
+  @Metric("Processing time")
   private MutableRate grpcProcessingTime;
 
   // There should be no getter method to avoid
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
index 627210ca9de0..4b280ff3cf1e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/upgrade/BasicUpgradeFinalizer.java
@@ -239,7 +239,7 @@ private synchronized StatusAndMessages initFinalize(
   private void assertClientId(String id) throws UpgradeException {
     if (this.clientID == null || !this.clientID.equals(id)) {
       throw new UpgradeException("Unknown client tries to get finalization " +
-          "status.\n The requestor is not the initiating client of the " +
+          "status.\n The requester is not the initiating client of the " +
           "finalization, if you want to take over, and get unsent status " +
           "messages, check -takeover option.", INVALID_REQUEST);
     }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHtmlQuoting.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHtmlQuoting.java
index 9994f6a9a54d..16047492a60c 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHtmlQuoting.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHtmlQuoting.java
@@ -86,7 +86,7 @@ public void testRequestQuoting() throws Exception {
     doReturn(null).when(mockReq).getParameter("x");
     assertNull(quoter.getParameter("x"),
-        "Test that missing parameters dont cause NPE");
+        "Test that missing parameters don't cause NPE");
     doReturn(new String[] {"a
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
@@ ... @@ private Node chooseNode(String scope, List<Node> excludedNodes,
     if (maxRetry == 0) {
       // avoid the infinite loop
       String errMsg = "No satisfied datanode to meet the space constrains. "
-          + "metadatadata size required: " + metadataSizeRequired +
+          + "metadata size required: " + metadataSizeRequired +
           " data size required: " + dataSizeRequired;
       LOG.info(errMsg);
       throw new SCMException(errMsg, null);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java
index 603add4df128..5b6f141adb3d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackScatter.java
@@ -473,7 +473,7 @@ private Node chooseNode(String scope, List<Node> excludedNodes,
     if (maxRetry == 0) {
       // avoid the infinite loop
       LOG.info("No satisfied datanode to meet the constraints. "
" - + "Metadatadata size required: {} Data size required: {}, scope " + + "Metadata size required: {} Data size required: {}, scope " + "{}, excluded nodes {}", metadataSizeRequired, dataSizeRequired, scope, excludedNodes); return null; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java index 503126198a0d..1502a9c4cf09 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerUtil.java @@ -99,7 +99,7 @@ public static List getTargetDatanodes(PlacementPolicy policy, } } throw new SCMException(String.format("Placement Policy: %s did not return" - + " any nodes. Number of required Nodes %d, Datasize Required: %d", + + " any nodes. Number of required Nodes %d, Data size Required: %d", policy.getClass(), requiredNodes, dataSizeRequired), SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java index 045666eee583..bb04e7760430 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -110,7 +110,7 @@ private void parseHostname() throws InvalidHostStringException { } } catch (URISyntaxException e) { throw new InvalidHostStringException( - "Unable to parse the hoststring " + rawHostname, e); + "Unable to parse the host string " + rawHostname, e); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneCryptoInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneCryptoInputStream.java index c794161fecaf..521c1d9816e6 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneCryptoInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneCryptoInputStream.java @@ -112,7 +112,7 @@ keyName, partIndex, getLength(), numBytesToRead, LOG.debug("OzoneCryptoInputStream for key: {} part: {} read {} bytes " + "instead of {} bytes to account for Crypto buffer boundary. 
" + "Client buffer will be copied with read data from position {}" + - "upto position {}, discarding the extra bytes read to " + + "up to position {}, discarding the extra bytes read to " + "maintain Crypto buffer boundary limits", keyName, partIndex, actualNumBytesRead, numBytesRead, readPositionAdjustedBy, actualNumBytesRead - readPositionAdjustedBy); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java index c29118ef4166..9121489cc358 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java @@ -76,12 +76,12 @@ public static void verifyMaxKeyLength(String length) maxKey = Integer.parseInt(length); } catch (NumberFormatException nfe) { throw new IllegalArgumentException( - "Invalid max key length, the vaule should be digital."); + "Invalid max key length, the value should be digital."); } if (maxKey <= 0) { throw new IllegalArgumentException( - "Invalid max key length, the vaule should be a positive number."); + "Invalid max key length, the value should be a positive number."); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index 5fcbd08f284e..8b77338037d7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -115,7 +115,7 @@ public void teardown() throws Exception { LOG.info("Statistics {}", ioStatisticsSourceToString(uploader)); } catch (Exception e) { - LOG.warn("Exeception in teardown", e); + LOG.warn("Exception in teardown", e); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index c41bf75fdc03..cdeea68e640c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -435,7 +435,7 @@ public void testTrackDeepDirectoryStructureToRemote() throws Exception { .withDirectWrite(shouldUseDirectWrite()) .withOverwrite(false))); - lsR("tracked udpate", remoteFS, destDir); + lsR("tracked update", remoteFS, destDir); // new file went over Path outputFileNew1 = new Path(outputSubDir2, "newfile1"); ContractTestUtils.assertIsFile(remoteFS, outputFileNew1); diff --git a/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 6d93f663f8fe..e22152033313 100644 --- a/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -589,7 +589,7 @@ protected SCMHAService createSCMService() scm.getClientRpcAddress()); } else { inactiveSCMs.add(scm); - LOG.info("Intialized SCM at {}. 
This SCM is currently " + LOG.info("Initialized SCM at {}. This SCM is currently " + "inactive (not running).", scm.getClientRpcAddress()); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index ec332f3a83ae..5454a4438b56 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -1590,7 +1590,7 @@ private void initializeRatisDirs(OzoneConfiguration conf) throws IOException { throw new IOException( "Path of " + OMConfigKeys.OZONE_OM_RATIS_STORAGE_DIR + " and " + ScmConfigKeys.OZONE_SCM_HA_RATIS_STORAGE_DIR - + " should not be co located. Please change atleast one path."); + + " should not be co located. Please change at least one path."); } // Create Ratis snapshot dir diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java index e1bf2cbd179b..34813bb9a1df 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java @@ -80,7 +80,7 @@ public void initialize(Configuration conf, FileSystem fs) { * MSECS_PER_MINUTE); if (deletionInterval < 0) { LOG.warn("Invalid value {} for deletion interval," - + " deletion interaval can not be negative." + + " deletion interval can not be negative." + "Changing to default value 0", deletionInterval); this.deletionInterval = 0; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index d41cefa0fec6..71e810d78eb8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -451,7 +451,7 @@ public synchronized void unpause(long newLastAppliedSnaphsotIndex, this.setLastAppliedTermIndex(TermIndex.valueOf( newLastAppliedSnapShotTermIndex, newLastAppliedSnaphsotIndex)); LOG.info("{}: OzoneManagerStateMachine un-pause completed. 
" + - "newLastAppliedSnaphsotIndex: {}, newLastAppliedSnapShotTermIndex: {}", + "newLastAppliedSnapshotIndex: {}, newLastAppliedSnapShotTermIndex: {}", getId(), newLastAppliedSnaphsotIndex, newLastAppliedSnapShotTermIndex); }); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java index 2d530737560a..a2bc5df9982e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java @@ -127,7 +127,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut //Check if bucket exist if (omBucketInfo == null) { LOG.debug("Bucket: {} not found ", bucketName); - throw new OMException("Bucket doesnt exist", + throw new OMException("Bucket doesn't exist", OMException.ResultCodes.BUCKET_NOT_FOUND); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java index cc988991d2aa..9ca84de17621 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java @@ -244,7 +244,7 @@ private boolean displayTable(ManagedRocksIterator iterator, boolean flg = parentFile.mkdirs(); if (!flg) { throw new IOException("An exception occurred while creating " + - "the directory. Directorys: " + parentFile.getAbsolutePath()); + "the directory. Directory: " + parentFile.getAbsolutePath()); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java index 1e0a69efc490..362d907eb2ba 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java @@ -119,7 +119,7 @@ public class FollowerAppendLogEntryGenerator extends BaseAppendLogGenerator @Option(names = {"-i", "--next-index"}, description = "The next index in the term 2 to continue a test. (If " - + "zero, a new ratis ring will be intialized with configureGroup " + + "zero, a new ratis ring will be initialized with configureGroup " + "call and vote)", defaultValue = "0") private long nextIndex; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java index 780a1fe0948c..96ce91479cc4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java @@ -109,7 +109,7 @@ public class LeaderAppendLogEntryGenerator extends BaseAppendLogGenerator @Option(names = {"-i", "--next-index"}, description = "The next index in the term 2 to continue a test. 
(If " - + "zero, a new ratis ring will be intialized with configureGroup " + + "zero, a new ratis ring will be initialized with configureGroup " + "call and vote)", defaultValue = "0") private long nextIndex; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java index efce02d54f5c..548777744c98 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java @@ -40,7 +40,7 @@ * Container generator for SCM metadata. */ @Command(name = "cgscm", - description = "Offline container metadata generator for Storage Conainer " + description = "Offline container metadata generator for Storage Container " + "Manager", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true,