From 262035b9e8469cd603c72bc1174961a645de5ff9 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Fri, 27 May 2022 20:06:53 -0700 Subject: [PATCH 1/7] HDFS-16599. Fix typo in RouterRpcClient(Configuration). --- .../hadoop/hdfs/server/federation/router/RouterRpcClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 34a2c47c3ef29..38b829da38952 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -139,7 +139,7 @@ public class RouterRpcClient { /** * Create a router RPC client to manage remote procedure calls to NNs. * - * @param conf Hdfs Configuation. + * @param conf Hdfs Configuration. * @param router A router using this RPC client. * @param resolver A NN resolver to determine the currently active NN in HA. * @param monitor Optional performance monitor. From 170d9377f3d47f026c54ba66d1c8836fddb953d0 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Fri, 27 May 2022 21:46:21 -0700 Subject: [PATCH 2/7] HDFS-16599. Fix typo in RouterRpcClient. 
--- .../federation/router/RouterRpcClient.java | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 38b829da38952..9b43fff0607fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -444,7 +444,7 @@ private RetryDecision shouldRetry(final IOException ioe, final int retryCount, * @param ugi User group information. * @param namenodes A prioritized list of namenodes within the same * nameservice. - * @param method Remote ClientProtcol method to invoke. + * @param method Remote ClientProtocol method to invoke. * @param params Variable list of parameters matching the method. * @return The result of invoking the method. * @throws ConnectException If it cannot connect to any Namenode. @@ -624,7 +624,7 @@ private void addClientIpToCallerContext() { * @param nsId Identifier for the namespace * @param retryCount Current retry times * @param method Method to invoke - * @param obj Target object for the method + * @param obj Target object for the method * @param params Variable parameters * @return Response from the remote server * @throws IOException @@ -789,7 +789,7 @@ private static IOException getCleanException(IOException ioe) { * @param block Block used to determine appropriate nameservice. * @param method The remote method and parameters to invoke. * @return The result of invoking the method. - * @throws IOException If the invoke generated an error. + * @throws IOException If the invocation generated an error. 
*/ public Object invokeSingle(final ExtendedBlock block, RemoteMethod method) throws IOException { @@ -807,7 +807,7 @@ public Object invokeSingle(final ExtendedBlock block, RemoteMethod method) * @param bpId Block pool identifier. * @param method The remote method and parameters to invoke. * @return The result of invoking the method. - * @throws IOException If the invoke generated an error. + * @throws IOException If the invocation generated an error. */ public Object invokeSingleBlockPool(final String bpId, RemoteMethod method) throws IOException { @@ -824,7 +824,7 @@ public Object invokeSingleBlockPool(final String bpId, RemoteMethod method) * @param nsId Target namespace for the method. * @param method The remote method and parameters to invoke. * @return The result of invoking the method. - * @throws IOException If the invoke generated an error. + * @throws IOException If the invocation generated an error. */ public Object invokeSingle(final String nsId, RemoteMethod method) throws IOException { @@ -855,7 +855,7 @@ public Object invokeSingle(final String nsId, RemoteMethod method) * @param method The remote method and parameters to invoke. * @param clazz Class for the return type. * @return The result of invoking the method. - * @throws IOException If the invoke generated an error. + * @throws IOException If the invocation generated an error. */ public T invokeSingle(final String nsId, RemoteMethod method, Class clazz) throws IOException { @@ -875,7 +875,7 @@ public T invokeSingle(final String nsId, RemoteMethod method, * @param method The remote method and parameters to invoke. * @param clazz Class for the return type. * @return The result of invoking the method. - * @throws IOException If the invoke generated an error. + * @throws IOException If the invocation generated an error. 
*/ public T invokeSingle(final ExtendedBlock extendedBlock, RemoteMethod method, Class clazz) throws IOException { @@ -894,7 +894,7 @@ public T invokeSingle(final ExtendedBlock extendedBlock, * @param location RemoteLocation to invoke. * @param remoteMethod The remote method and parameters to invoke. * @return The result of invoking the method if successful. - * @throws IOException If the invoke generated an error. + * @throws IOException If the invocation generated an error. */ public T invokeSingle(final RemoteLocationContext location, RemoteMethod remoteMethod, Class clazz) throws IOException { @@ -1027,7 +1027,7 @@ public RemoteResult invokeSequential( } catch (Exception e) { // Unusual error, ClientProtocol calls always use IOException (or // RemoteException). Re-wrap in IOException for compatibility with - // ClientProtcol. + // ClientProtocol. LOG.error("Unexpected exception {} proxying {} to {}", e.getClass(), m.getName(), ns, e); IOException ioe = new IOException( @@ -1449,7 +1449,7 @@ public Map invokeConcurrent( results.add(new RemoteResult<>(location, ioe)); } catch (ExecutionException ex) { Throwable cause = ex.getCause(); - LOG.debug("Canot execute {} in {}: {}", + LOG.debug("Cannot execute {} in {}: {}", m.getName(), location, cause.getMessage()); // Convert into IOException if needed From a368485b688293527caf309e6fbd56447535fbba Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Fri, 27 May 2022 23:17:32 -0700 Subject: [PATCH 3/7] HDFS-16599. Fix typo in RBF-Module. 
--- .../hadoop/hdfs/rbfbalance/RouterFedBalance.java | 4 ++-- .../federation/metrics/FederationMBean.java | 4 ++-- .../metrics/FederationRPCPerformanceMonitor.java | 4 ++-- .../server/federation/metrics/RBFMetrics.java | 2 +- .../resolver/NamenodePriorityComparator.java | 2 +- .../federation/router/ConnectionContext.java | 4 ++-- .../federation/router/ConnectionManager.java | 16 ++++++++-------- .../server/federation/router/FederationUtil.java | 2 +- .../router/MountTableRefresherThread.java | 4 ++-- .../server/federation/router/RBFConfigKeys.java | 2 +- .../server/federation/router/RemoteMethod.java | 2 +- .../federation/router/RouterAdminServer.java | 10 +++++----- .../federation/router/RouterClientProtocol.java | 10 +++++----- .../router/RouterQuotaUpdateService.java | 2 +- .../federation/router/RouterRpcMonitor.java | 2 +- .../federation/router/RouterSafemodeService.java | 2 +- .../federation/router/RouterWebHdfsMethods.java | 2 +- .../ZKDelegationTokenSecretManagerImpl.java | 4 ++-- .../federation/store/CachedRecordStore.java | 6 +++--- .../federation/store/StateStoreService.java | 2 +- .../store/driver/StateStoreDriver.java | 6 +++--- .../store/impl/MountTableStoreImpl.java | 6 +++--- .../protocol/GetRouterRegistrationsRequest.java | 2 +- .../impl/pb/FederationProtocolPBTranslator.java | 2 +- .../hdfs/tools/federation/RouterAdmin.java | 6 +++--- .../fs/contract/router/SecurityConfUtil.java | 2 +- .../server/federation/MiniRouterDFSCluster.java | 6 +++--- .../hdfs/server/federation/MockResolver.java | 2 +- .../resolver/TestNamenodeResolver.java | 2 +- .../federation/router/TestConnectionManager.java | 6 +++--- .../router/TestDisableNameservices.java | 2 +- .../server/federation/router/TestRouter.java | 2 +- .../federation/router/TestRouterAdmin.java | 2 +- .../federation/router/TestRouterAdminCLI.java | 4 ++-- .../federation/router/TestRouterAllResolver.java | 2 +- .../router/TestRouterFaultTolerant.java | 12 ++++++------ 
.../federation/router/TestRouterMountTable.java | 2 +- .../router/TestRouterMountTableCacheRefresh.java | 8 ++++---- .../router/TestRouterQuotaManager.java | 14 +++++++------- ...RPCMultipleDestinationMountTableResolver.java | 2 +- .../router/TestRouterRpcMultiDestination.java | 4 ++-- .../server/federation/router/TestSafeMode.java | 2 +- .../TestZKDelegationTokenSecretManagerImpl.java | 4 ++-- .../store/TestStateStoreMembershipState.java | 4 ++-- .../store/driver/TestStateStoreDriverBase.java | 4 ++-- 45 files changed, 97 insertions(+), 97 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/rbfbalance/RouterFedBalance.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/rbfbalance/RouterFedBalance.java index 4161ab503fe77..0cb4b54bfc431 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/rbfbalance/RouterFedBalance.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/rbfbalance/RouterFedBalance.java @@ -63,7 +63,7 @@ * dst sub-namespace with distcp. * * 1. Move data from the source path to the destination path with distcp. - * 2. Update the the mount entry. + * 2. Update the mount entry. * 3. Delete the source path to trash. */ public class RouterFedBalance extends Configured implements Tool { @@ -77,7 +77,7 @@ public class RouterFedBalance extends Configured implements Tool { private static final String TRASH_PROCEDURE = "trash-procedure"; /** - * This class helps building the balance job. + * This class helps to build the balance job. */ private class Builder { /* Force close all open files while there is no diff. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index e8b00d0b5dcfb..1a8a3b31f7fad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -50,7 +50,7 @@ public interface FederationMBean { /** * Get the latest state of all routers. - * @return JSON with all of the known routers or null if failure. + * @return JSON with all the known routers or null if failure. */ String getRouters(); @@ -345,7 +345,7 @@ public interface FederationMBean { long getHighestPriorityLowRedundancyECBlocks(); /** - * Returns the number of paths to be processed by storage policy satisfier. + * Returns the number of paths to be processed by storage policy satisfier. * * @return The number of paths to be processed by sps. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java index 5c6dac465fb7c..93a3addcfb823 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java @@ -52,7 +52,7 @@ public class FederationRPCPerformanceMonitor implements RouterRpcMonitor { /** Time for an operation to be received in the Router. 
*/ private static final ThreadLocal START_TIME = new ThreadLocal<>(); - /** Time for an operation to be send to the Namenode. */ + /** Time for an operation to be sent to the Namenode. */ private static final ThreadLocal PROXY_TIME = new ThreadLocal<>(); /** Configuration for the performance monitor. */ @@ -246,7 +246,7 @@ public void routerFailureLocked() { /** - * Get time between we receiving the operation and sending it to the Namenode. + * Get time between receiving the operation and sending it to the Namenode. * @return Processing time in milliseconds. */ private long getProcessingTime() { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java index d5eabd1a3da82..be88069b49166 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java @@ -290,7 +290,7 @@ public String getMountTable() { // Dump mount table entries information into JSON for (MountTable entry : orderedMounts) { - // Sumarize destinations + // Summarize destinations Set nameservices = new LinkedHashSet<>(); Set paths = new LinkedHashSet<>(); for (RemoteLocation location : entry.getDestinations()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java index e9724a3dee776..0fdf2155faa89 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java @@ -60,7 +60,7 @@ public int compare(FederationNamenodeContext o1, */ private int compareModDates(FederationNamenodeContext o1, FederationNamenodeContext o2) { - // Reverse sort, lowest position is highest priority. + // Reverse sort, the lowest position is the highest priority. return (int) (o2.getDateModified() - o1.getDateModified()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java index 9a5434b91ce2f..bd2d8c9d69719 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java @@ -124,7 +124,7 @@ public synchronized void release() { */ public synchronized void close(boolean force) { if (!force && this.numThreads > 0) { - // this is an erroneous case but we have to close the connection + // this is an erroneous case, but we have to close the connection // anyway since there will be connection leak if we don't do so // the connection has been moved out of the pool LOG.error("Active connection with {} handlers will be closed", @@ -132,7 +132,7 @@ public synchronized void close(boolean force) { } this.closed = true; Object proxy = this.client.getProxy(); - // Nobody should be using this anymore so it should close right away + // Nobody should be using this anymore, so it should close right away RPC.stopProxy(proxy); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index aad272f5831fc..4af0cafc08a24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -135,12 +135,12 @@ public void start() { this.creator.start(); // Schedule a task to remove stale connection pools and sockets - long recyleTimeMs = Math.min( + long recycleTimeMs = Math.min( poolCleanupPeriodMs, connectionCleanupPeriodMs); LOG.info("Cleaning every {} seconds", - TimeUnit.MILLISECONDS.toSeconds(recyleTimeMs)); + TimeUnit.MILLISECONDS.toSeconds(recycleTimeMs)); this.cleaner.scheduleAtFixedRate( - new CleanupTask(), 0, recyleTimeMs, TimeUnit.MILLISECONDS); + new CleanupTask(), 0, recycleTimeMs, TimeUnit.MILLISECONDS); // Mark the manager as running this.running = true; @@ -355,7 +355,7 @@ Map getPools() { /** * Clean the unused connections for this pool. * - * @param pool Connection pool to cleanup. + * @param pool Connection pool to clean up. */ @VisibleForTesting void cleanup(ConnectionPool pool) { @@ -364,9 +364,9 @@ void cleanup(ConnectionPool pool) { long timeSinceLastActive = Time.now() - pool.getLastActiveTime(); int total = pool.getNumConnections(); // Active is a transient status in many cases for a connection since - // the handler thread uses the connection very quickly. Thus the number + // the handler thread uses the connection very quickly. Thus, the number // of connections with handlers using at the call time is constantly low. - // Recently active is more lasting status and it shows how many + // Recently active is more lasting status, and it shows how many // connections have been used with a recent time period. (i.e. 
30 seconds) int active = pool.getNumActiveConnectionsRecently(); float poolMinActiveRatio = pool.getMinActiveRatio(); @@ -376,9 +376,9 @@ void cleanup(ConnectionPool pool) { // The number should at least be 1 int targetConnectionsCount = Math.max(1, (int)(poolMinActiveRatio * total) - active); - List conns = + List connections = pool.removeConnections(targetConnectionsCount); - for (ConnectionContext conn : conns) { + for (ConnectionContext conn : connections) { conn.close(); } LOG.debug("Removed connection {} used {} seconds ago. " + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java index 7ff853946d700..e593e888c9ac4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java @@ -231,7 +231,7 @@ public static ActiveNamenodeResolver newActiveNamenodeResolver( } /** - * Add the the number of children for an existing HdfsFileStatus object. + * Add the number of children for an existing HdfsFileStatus object. * @param dirStatus HdfsfileStatus object. * @param children number of children to be added. * @return HdfsFileStatus with the number of children specified. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java index a077c4b3f45a4..40ff843fa1dfe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/MountTableRefresherThread.java @@ -50,14 +50,14 @@ public MountTableRefresherThread(MountTableManager manager, /** * Refresh mount table cache of local and remote routers. Local and remote - * routers will be refreshed differently. Lets understand what are the + * routers will be refreshed differently. Let's understand what are the * local and remote routers and refresh will be done differently on these * routers. Suppose there are three routers R1, R2 and R3. User want to add * new mount table entry. He will connect to only one router, not all the * routers. Suppose He connects to R1 and calls add mount table entry through * API or CLI. Now in this context R1 is local router, R2 and R3 are remote * routers. Because add mount table entry is invoked on R1, R1 will update the - * cache locally it need not to make RPC call. But R1 will make RPC calls to + * cache locally it need not make RPC call. But R1 will make RPC calls to * update cache on R2 and R3. 
*/ @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java index 741e470c6fc3f..c0a9e3f294cd8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java @@ -255,7 +255,7 @@ public class RBFConfigKeys extends CommonConfigurationKeysPublic { TimeUnit.MINUTES.toMillis(1); /** * Remote router mount table cache is updated through RouterClient(RPC call). - * To improve performance, RouterClient connections are cached but it should + * To improve performance, RouterClient connections are cached, but it should * not be kept in cache forever. This property defines the max time a * connection can be cached. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java index 6f1121ef9fd66..e5df4893a9155 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java @@ -36,7 +36,7 @@ public class RemoteMethod { private static final Logger LOG = LoggerFactory.getLogger(RemoteMethod.class); - /** List of parameters: static and dynamic values, matchings types. */ + /** List of parameters: static and dynamic values, matching types. */ private final Object[] params; /** List of method parameters types, matches parameters. 
*/ private final Class[] types; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index 127470a1264ed..42b6f670e462c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -104,7 +104,7 @@ import org.apache.hadoop.thirdparty.protobuf.BlockingService; /** - * This class is responsible for handling all of the Admin calls to the HDFS + * This class is responsible for handling all the Admin calls to the HDFS * router. It is created, started, and stopped by {@link Router}. */ public class RouterAdminServer extends AbstractService @@ -439,7 +439,7 @@ private boolean isQuotaUpdated(UpdateMountTableEntryRequest request, } return false; } else { - // If old entry is not available, sync quota always, since we can't + // If old entry is not available, sync quota always, since we can't // conclude no change in quota. return true; } @@ -489,7 +489,7 @@ public RemoveMountTableEntryResponse removeMountTableEntry( synchronizeQuota(request.getSrcPath(), HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET, null); } catch (Exception e) { - // Ignore exception, if any while reseting quota. Specifically to handle + // Ignore exception, if any while resetting quota. Specifically to handle // if the actual destination doesn't exist. LOG.warn("Unable to clear quota at the destinations for {}: {}", request.getSrcPath(), e.getMessage()); @@ -752,9 +752,9 @@ public static RouterPermissionChecker getPermissionChecker() } /** - * Get super user name. + * Get super user name. * - * @return String super user name. + * @return String super user name. 
*/ public static String getSuperUser() { return routerOwner; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 469b16178a2c6..641ce91df82b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -305,7 +305,7 @@ public HdfsFileStatus create(String src, FsPermission masked, * Check if an exception is caused by an unavailable subcluster or not. It * also checks the causes. * @param ioe IOException to check. - * @return If caused by an unavailable subcluster. False if the should not be + * @return If caused by an unavailable subcluster. False if they should not be * retried (e.g., NSQuotaExceededException). */ protected static boolean isUnavailableSubclusterException( @@ -325,14 +325,14 @@ protected static boolean isUnavailableSubclusterException( /** * Check if a remote method can be retried in other subclusters when it * failed in the original destination. This method returns the list of - * locations to retry in. This is used by fault tolerant mount points. + * locations to retry in. This is used by fault-tolerant mount points. * @param method Method that failed and might be retried. * @param src Path where the method was invoked. * @param ioe Exception that was triggered. * @param excludeLoc Location that failed and should be excluded. * @param locations All the locations to retry. * @return The locations where we should retry (excluding the failed ones). 
- * @throws IOException If this path is not fault tolerant or the exception + * @throws IOException If this path is not fault-tolerant or the exception * should not be retried (e.g., NSQuotaExceededException). */ private List checkFaultTolerantRetry( @@ -1830,8 +1830,8 @@ public HAServiceProtocol.HAServiceState getHAServiceState() { } /** - * Determines combinations of eligible src/dst locations for a rename. A - * rename cannot change the namespace. Renames are only allowed if there is an + * Determines combinations of eligible src/dst locations for a rename. A + * rename cannot change the namespace. Renames are only allowed if there is an * eligible dst location in the same namespace as the source. * * @param srcLocations List of all potential source destinations where the diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java index a4a7d9e9ddc2d..e9b780d5bca29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java @@ -200,7 +200,7 @@ private List getMountTableEntries() throws IOException { * During this time, the quota usage cache will also be updated by * quota manager: * 1. Stale paths (entries) will be removed. - * 2. Existing entries will be override and updated. + * 2. Existing entries will be overridden and updated. * @return List of mount tables which quota was set. 
* @throws IOException */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java index 388fc5a0da496..039b40ae2e585 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java @@ -54,7 +54,7 @@ void init( /** * Start proxying an operation to the Namenode. - * @return Id of the thread doing the proxying. + * @return id of the thread doing the proxying. */ long proxyOp(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java index 23ce4fc18b5a9..7cca341aa6175 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java @@ -30,7 +30,7 @@ /** * Service to periodically check if the {@link * org.apache.hadoop.hdfs.server.federation.store.StateStoreService - * StateStoreService} cached information in the {@link Router} is up to date. + * StateStoreService} cached information in the {@link Router} is up-to-date. * This is for performance and removes the {@link * org.apache.hadoop.hdfs.server.federation.store.StateStoreService * StateStoreService} from the critical path in common operations. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java index accec4627eda8..a66953b1bd752 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java @@ -410,7 +410,7 @@ protected Response get( * @param path Path to check. * @param op Operation to perform. * @param openOffset Offset for opening a file. - * @param excludeDatanodes Blocks to excluded. + * @param excludeDatanodes Blocks to exclude. * @param parameters Other parameters. * @return Redirection URI. * @throws URISyntaxException If it cannot parse the URI. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java index a83be71e59d83..86e2dc6e8ce53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java @@ -95,7 +95,7 @@ public void startThreads() throws IOException { if (!isTokenWatcherEnabled()) { LOG.info("Watcher for tokens is disabled in this secret manager"); try { - // By default set this variable + // By default, set this variable checkAgainstZkBeforeDeletion.set(true); // Ensure the token root path exists if (zkClient.checkExists().forPath(ZK_DTSM_TOKENS_ROOT) == null) 
{ @@ -159,7 +159,7 @@ public DelegationTokenIdentifier createIdentifier() { private void rebuildTokenCache(boolean initial) throws IOException { localTokenCache.clear(); // Use bare zookeeper client to get all children since curator will - // wrap the same API with a sorting process. This is time consuming given + // wrap the same API with a sorting process. This is time-consuming given // millions of tokens List zkTokens; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java index 7b28c03a529ad..193ff78951baa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java @@ -85,7 +85,7 @@ protected CachedRecordStore(Class clazz, StateStoreDriver driver) { * * @param clazz Class of the record to store. * @param driver State Store driver. - * @param over If the entries should be override if they expire + * @param over If the entries should be overridden if they expire */ protected CachedRecordStore( Class clazz, StateStoreDriver driver, boolean over) { @@ -153,7 +153,7 @@ public boolean loadCache(boolean force) throws IOException { } /** - * Check if it's time to update the cache. Update it it was never updated. + * Check if it's time to update the cache. Update it if it was never updated. * * @return If it's time to update this cache. */ @@ -206,7 +206,7 @@ public void overrideExpiredRecords(QueryResult query) throws IOException { * Updates the state store with any record overrides we detected, such as an * expired state. * - * @param record Record record to be updated. + * @param record Record to be updated. * @throws IOException If the values cannot be updated.
*/ public void overrideExpiredRecord(R record) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java index 507c83786a8f4..201c7a325f105 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java @@ -331,7 +331,7 @@ public StateStoreDriver getDriver() { } /** - * Fetch a unique identifier for this state store instance. Typically it is + * Fetch a unique identifier for this state store instance. Typically, it is * the address of the router. * * @return Unique identifier for this store. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java index d595a97d01717..5065d14d9787e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java @@ -32,8 +32,8 @@ /** * Driver class for an implementation of a {@link StateStoreService} - * provider. Driver implementations will extend this class and implement some of - * the default methods. + * provider. Driver implementations will extend this class and implement some + * default methods. */ public abstract class StateStoreDriver implements StateStoreRecordOperations { @@ -97,7 +97,7 @@ protected Configuration getConf() { } /** - * Gets a unique identifier for the running task/process. 
Typically the + * Gets a unique identifier for the running task/process. Typically, the * router address. * * @return Unique identifier for the running task. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java index 680752b8efe32..978877050ef99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java @@ -84,7 +84,7 @@ private void checkMountTableEntryPermission(String src, FsAction action) } /** - * Check parent path permission recursively. It needs WRITE permission + * Check parent path permission recursively. It needs the WRITE permission * of the nearest parent entry and other EXECUTE permission. * @param src mount entry being checked * @throws AccessControlException if mount table cannot be accessed @@ -99,8 +99,8 @@ private void checkMountTablePermission(final String src) throws IOException { } /** - * When add mount table entry, it needs WRITE permission of the nearest parent - * entry if exist, and EXECUTE permission of other ancestor entries. + * When adding a mount table entry, it needs WRITE permission of the nearest parent + * entry if it exists, and EXECUTE permission of other ancestor entries.
* @param request add mount table entry request * @return add mount table entry response * @throws IOException if mount table cannot be accessed diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetRouterRegistrationsRequest.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetRouterRegistrationsRequest.java index b70cccf593ae9..10b172eb50a21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetRouterRegistrationsRequest.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/GetRouterRegistrationsRequest.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer; /** - * API request for retrieving a all non-expired router registrations present in + * API request for retrieving all non-expired router registrations present in * the state store. */ public abstract class GetRouterRegistrationsRequest { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java index 8422a8c4b6d15..98988f8bb2d86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java @@ -92,7 +92,7 @@ public B getBuilder() { /** * Get the serialized proto object. If the translator was created from a byte - * stream, returns the intitial byte stream.
Otherwise creates a new byte + * stream, returns the initial byte stream. Otherwise, creates a new byte * stream from the cached builder. * * @return Protobuf message object diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java index b8e7c796a147d..d7fcf862fb6e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java @@ -193,7 +193,7 @@ private String getUsage(String cmd) { /** * Usage: validates the maximum number of arguments for a command. - * @param arg List of of command line parameters. + * @param arg List of command line parameters. */ private void validateMax(String[] arg) { if (arg[0].equals("-ls")) { @@ -407,7 +407,7 @@ public int run(String[] argv) throws Exception { System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage()); printUsage(cmd); } catch (RemoteException e) { - // This is a error returned by the server. + // This is an error returned by the server. // Print out the first line of the error message, ignore the stack trace. exitCode = -1; debugException = e; @@ -807,7 +807,7 @@ public void listMounts(String[] argv, int i) throws IOException { } else if (argv[i].equals("-d")) { // Check if -d parameter is specified. detail = true; if (argv.length == 2) { - path = "/"; // If no path is provide with -ls -d. + path = "/"; // If no path is provided with -ls -d. 
} else { path = argv[++i]; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java index 47ab0d2707850..2fd18890618ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java @@ -99,7 +99,7 @@ public static Configuration initSecurity() throws Exception { assertTrue("Expected configuration to enable security", UserGroupInformation.isSecurityEnabled()); - // Setup the keytab + // Set up the keytab File keytabFile = new File(baseDir, "test.keytab"); String keytab = keytabFile.getAbsolutePath(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index ac6ecd4398cba..95c7c3f507619 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -644,7 +644,7 @@ public void setNumDatanodesPerNameservice(int num) { /** * Set custom storage type configuration for each datanode. * If storageTypes is uninitialized or passed null then - * StorageType.DEFAULT is used. + * {@link StorageType#DEFAULT} is used.
*/ public void setStorageTypes(StorageType[][] storageTypes) { this.storageTypes = storageTypes; @@ -789,7 +789,7 @@ public void startCluster(Configuration overrideConf) { Configuration nnConf = generateNamenodeConfiguration(ns0); if (overrideConf != null) { nnConf.addResource(overrideConf); - // Router also uses this configurations as initial values. + // Router also uses these configurations as initial values. routerConf = new Configuration(overrideConf); } @@ -924,7 +924,7 @@ public void waitRouterRegistrationQuorum(RouterContext router, /** * Wait for name spaces to be active. - * @throws Exception If we cannot check the status or we timeout. + * @throws Exception If we cannot check the status or we time out. */ public void waitActiveNamespaces() throws Exception { for (RouterContext r : this.routers) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java index 43efd85228d72..1519bad74b5c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java @@ -137,7 +137,7 @@ public void updateActiveNamenode( break; } } - // This operation modifies the list so we need to be careful + // This operation modifies the list, so we need to be careful synchronized(namenodes) { Collections.sort(namenodes, new NamenodePriorityComparator()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java index df80037c69917..ed10a3a87317d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java @@ -297,7 +297,7 @@ public void testCacheUpdateOnNamenodeStateUpdate() throws IOException { String rpcAddr = namenode.getRpcAddress(); InetSocketAddress inetAddr = getInetSocketAddress(rpcAddr); - // If the namenode state changes and it serves request, + // If the namenode state changes, and it serves request, // RouterRpcClient calls updateActiveNamenode to update the state to active, // Check whether correct updated state is returned post update. namenodeResolver.updateActiveNamenode(NAMESERVICES[0], inetAddr); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java index e397692e9a86d..fc4e6418054be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java @@ -268,10 +268,10 @@ private void checkPoolConnections(UserGroupInformation ugi, @Test public void testConfigureConnectionActiveRatio() throws IOException { - // test 1 conn below the threshold and these conns are closed + // test 1 conn below the threshold and these connections are closed testConnectionCleanup(0.8f, 10, 7, 9); - // test 2 conn below the threshold and these conns are closed + // test 2 conn below the threshold and these connections are closed testConnectionCleanup(0.8f, 10, 6, 8); } @@ -304,7 +304,7 @@ private void testConnectionCleanup(float ratio, int totalConns, addConnectionsToPool(pool, totalConns - 1, activeConns - 1); // There are
activeConn connections. - // We can cleanup the pool + // We can clean up the pool tmpConnManager.cleanup(pool); assertEquals(leftConns, pool.getNumConnections()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java index ae04150d70fa9..15978341da380 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java @@ -102,7 +102,7 @@ public static void setUp() throws Exception { private static void setupNamespace() throws IOException { - // Setup a mount table to map to the two namespaces + // Set up a mount table to map to the two namespaces MountTableManager mountTable = routerAdminClient.getMountTableManager(); Map destinations = new TreeMap<>(); destinations.put("ns0", "/dirns0"); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java index 0464877d3cd80..9b8fb67e68122 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java @@ -45,7 +45,7 @@ import org.junit.Test; /** - * The the safe mode for the {@link Router} controlled by + * The safe mode for the {@link Router} controlled by * {@link SafeModeTimer}. 
*/ public class TestRouter { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java index c2eaddc17a2a0..1038e9aaae880 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java @@ -466,7 +466,7 @@ public void testNameserviceManager() throws IOException { disabled = getDisabledNameservices(nsManager); assertTrue(disabled.isEmpty()); - // Non existing name services should fail + // Non-existing name services should fail disableReq = DisableNameserviceRequest.newInstance("nsunknown"); disableResp = nsManager.disableNameservice(disableReq); assertFalse(disableResp.getStatus()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java index b9dff65b28a09..677f3b5e947e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java @@ -742,7 +742,7 @@ public void testMountTablePermissions() throws Exception { * @param mount * target mount table * @param canRead - * whether can list mount tables under specified mount + * whether you can list mount tables under specified mount * @param addCommandCode * expected return code of add command executed for specified mount * @param rmCommandCode @@ -1467,7 +1467,7 @@ public void testUpdateErrorCase() throws Exception { 
err.toString().contains("update: /noMount doesn't exist.")); err.reset(); - // Check update if non true/false value is passed for readonly. + // Check update if a non-true/false value is passed for readonly. argv = new String[] {"-update", src, "-readonly", "check"}; assertEquals(-1, ToolRunner.run(admin, argv)); assertTrue(err.toString(), err.toString().contains("update: " diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java index 715b627f69461..566d4eb9126f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java @@ -102,7 +102,7 @@ public void setup() throws Exception { cluster.registerNamenodes(); cluster.waitNamenodeRegistration(); - // Setup the test mount point + // Set up the test mount point createMountTableEntry(TEST_DIR_HASH_ALL, DestinationOrder.HASH_ALL); createMountTableEntry(TEST_DIR_RANDOM, DestinationOrder.RANDOM); createMountTableEntry(TEST_DIR_SPACE, DestinationOrder.SPACE); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java index bf571e2ff790c..b793ebbe47ee6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java @@ -178,7 +178,7 @@ public void cleanup() throws Exception { } /** - * Update a mount table
entry to be fault tolerant. + * Update a mount table entry to be fault-tolerant. * @param mountPoint Mount point to update. * @throws IOException If it cannot update the mount point. */ @@ -278,8 +278,8 @@ private void testWriteWithFailedSubcluster(final DestinationOrder order) /** * Check directory creation on a mount point. - * If it is fault tolerant, it should be able to write everything. - * If it is not fault tolerant, it should fail to write some. + * If it is fault-tolerant, it should be able to write everything. + * If it is not fault-tolerant, it should fail to write some. */ private void checkDirectoriesFaultTolerant( Path mountPoint, DestinationOrder order, @@ -322,8 +322,8 @@ private void checkDirectoriesFaultTolerant( /** * Check file creation on a mount point. - * If it is fault tolerant, it should be able to write everything. - * If it is not fault tolerant, it should fail to write some of the files. + * If it is fault-tolerant, it should be able to write everything. + * If it is not fault-tolerant, it should fail to write some files. */ private void checkFilesFaultTolerant( Path mountPoint, DestinationOrder order, @@ -582,7 +582,7 @@ public String toString() { } /** - * Asserts that the results are the expected amount and it has both success + * Asserts that the results are the expected amount, and it has both success * and failure. * @param msg Message to show when the assertion fails. * @param expected Expected number of results. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java index a346c1a241a80..1f51fc27d2dd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java @@ -594,7 +594,7 @@ public void testMountPointChildren() throws IOException { } /** - * Validate the number of children for the mount point pointing to multiple + * Validate the number of children for a mount point pointing to multiple * destinations.It must be equal to the sum of number of children of the * destinations pointed by the mount point. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java index c90e614a5cd40..6ed00c118f6cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java @@ -234,7 +234,7 @@ public void testCachedRouterClientBehaviourAfterRouterStoped() assertEquals(srcPath, mountTableResult.getSourcePath()); } - // Lets stop one router + // Let's stop one router for (RouterContext rc : routers) { InetSocketAddress adminServerAddress = rc.getRouter().getAdminServerAddress(); @@ -308,9 +308,9 @@ public void run() { TimeUnit.SECONDS); mountTableRefresherService.init(config); // One router is not responding for 1 minute, still refresh
should - // finished in 5 second as cache update timeout is set 5 second. + // be finished in 5 second as cache update timeout is set 5 second. mountTableRefresherService.refresh(); - // Test case timeout is assert for this test case. + // Test case timeout is asserted for this test case. } /** @@ -349,7 +349,7 @@ protected RouterClient createRouterClient( mountTableRefresherService.refresh(); assertNotEquals("No RouterClient is created.", 0, createCounter.get()); /* - * Wait for clients to expire. Lets wait triple the cache eviction period. + * Wait for clients to expire. Let's wait triple the cache eviction period. * After cache eviction period all created client must be removed and * closed. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java index 4a1dd2e091bc7..d24ef8ebc3c4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java @@ -75,11 +75,11 @@ public void testGetChildrenPaths() { public void testGetQuotaUsage() { RouterQuotaUsage quotaGet; - // test case1: get quota with an non-exist path + // test case1: get quota with a non-exist path quotaGet = manager.getQuotaUsage("/non-exist-path"); assertNull(quotaGet); - // test case2: get quota from an no-quota set path + // test case2: get quota from a no-quota set path RouterQuotaUsage.Builder quota = new RouterQuotaUsage.Builder() .quota(HdfsConstants.QUOTA_RESET) .spaceQuota(HdfsConstants.QUOTA_RESET); @@ -88,7 +88,7 @@ public void testGetQuotaUsage() { // it should return null assertNull(quotaGet); - // test case3: get quota from an quota-set path + // test case3: get quota from a 
quota-set path quota.quota(1); quota.spaceQuota(HdfsConstants.QUOTA_RESET); manager.put("/hasQuotaSet", quota.build()); @@ -96,24 +96,24 @@ public void testGetQuotaUsage() { assertEquals(1, quotaGet.getQuota()); assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota()); - // test case4: get quota with an non-exist child path + // test case4: get quota with a non-exist child path quotaGet = manager.getQuotaUsage("/hasQuotaSet/file"); // it will return the nearest ancestor which quota was set assertEquals(1, quotaGet.getQuota()); assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota()); - // test case5: get quota with an child path which its parent + // test case5: get quota with a child path which its parent // wasn't quota set quota.quota(HdfsConstants.QUOTA_RESET); quota.spaceQuota(HdfsConstants.QUOTA_RESET); manager.put("/hasQuotaSet/noQuotaSet", quota.build()); - // here should returns the quota of path /hasQuotaSet + // here should return the quota of path /hasQuotaSet // (the nearest ancestor which quota was set) quotaGet = manager.getQuotaUsage("/hasQuotaSet/noQuotaSet/file"); assertEquals(1, quotaGet.getQuota()); assertEquals(HdfsConstants.QUOTA_RESET, quotaGet.getSpaceQuota()); - // test case6: get quota with an child path which its parent was quota set + // test case6: get quota with a child path which its parent was quota set quota.quota(2); quota.spaceQuota(HdfsConstants.QUOTA_RESET); manager.put("/hasQuotaSet/hasQuotaSet", quota.build()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java index 238d1b0301180..b05337443f659 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java @@ -511,7 +511,7 @@ public void testIsMultiDestDir() throws Exception { */ @Test public void testSnapshotPathResolution() throws Exception { - // Create a mount entry with non isPathAll order, so as to call + // Create a mount entry with non isPathAll order, to call // invokeSequential. Map destMap = new HashMap<>(); destMap.put("ns0", "/tmp_ns0"); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 370a1250a7c11..7d7ee2f63ef27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -75,7 +75,7 @@ import org.junit.Test; /** - * The the RPC interface of the {@link getRouter()} implemented by + * The RPC interface of the {@link getRouter()} implemented by * {@link RouterRpcServer}. */ public class TestRouterRpcMultiDestination extends TestRouterRpc { @@ -130,7 +130,7 @@ public void testSetup() throws Exception { this.setNamenode(cluster.getNamenode(ns, null)); // Create a test file on a single NN that is accessed via a getRouter() path - // with 2 destinations. All tests should failover to the alternate + // with 2 destinations. All tests should fail over to the alternate // destination if the wrong NN is attempted first. 
Random r = new Random(); String randomString = "testfile-" + r.nextInt(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java index d040b7aac1a4f..cb4c3a2109391 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java @@ -51,7 +51,7 @@ public void setup() throws Exception { cluster.registerNamenodes(); cluster.waitNamenodeRegistration(); - // Setup the mount table + // Set up the mount table cluster.installMockLocations(); // Making one Namenodes active per nameservice diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java index 3c7f8e88a91d1..e4c293091b045 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/security/token/TestZKDelegationTokenSecretManagerImpl.java @@ -110,7 +110,7 @@ public void testMultiNodeTokenRemovalShortSyncWithoutWatch() conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false); // make sync quick conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 3); - // set the renew window and removal interval to be a + // set the renewal window and removal interval to be a // short time to trigger the background cleanup conf.setInt(RENEW_INTERVAL, 10); conf.setInt(REMOVAL_SCAN_INTERVAL, 10); @@ -170,7 +170,7 @@ public 
void testMultiNodeTokenRemovalLongSyncWithoutWatch() conf.setBoolean(ZK_DTSM_TOKEN_WATCHER_ENABLED, false); // make sync quick conf.setInt(ZK_DTSM_ROUTER_TOKEN_SYNC_INTERVAL, 20); - // set the renew window and removal interval to be a + // set the renewal window and removal interval to be a // short time to trigger the background cleanup conf.setInt(RENEW_INTERVAL, 10); conf.setInt(REMOVAL_SCAN_INTERVAL, 10); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java index 63bc6235a6116..b41e23edb8ac8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java @@ -186,7 +186,7 @@ public void testRegistrationMajorityQuorum() // 1) ns0:nn0 - Standby (newest) // 2) ns0:nn0 - Active (oldest) // 3) ns0:nn0 - Active (2nd oldest) - // 4) ns0:nn0 - Active (3nd oldest element, newest active element) + // 4) ns0:nn0 - Active (3nd the oldest element, the newest active element) // Verify the selected entry is the newest majority opinion (4) String ns = "ns0"; String nn = "nn0"; @@ -208,7 +208,7 @@ public void testRegistrationMajorityQuorum() ns, nn, ROUTERS[3], FederationNamenodeServiceState.ACTIVE); assertTrue(namenodeHeartbeat(report)); - // standby - newest overall + // standby - the newest overall report = createRegistration( ns, nn, ROUTERS[0], FederationNamenodeServiceState.STANDBY); assertTrue(namenodeHeartbeat(report)); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java index fe1b9a5bfa04c..b8bb7c4d2d115 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java @@ -556,7 +556,7 @@ private static Map> getFields(BaseRecord record) { } /** - * Get the type of a field. + * Get the type of field. * * @param fieldName * @return Field type @@ -601,7 +601,7 @@ private static Method locateGetter(BaseRecord record, String fieldName) { } /** - * Expands a data object from the store into an record object. Default store + * Expands a data object from the store into a record object. Default store * data type is a String. Override if additional serialization is required. * * @param data Object containing the serialized data. Only string is From 4c1dc81cf9545a9954ffe9afd19a2392185d3ec4 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Fri, 27 May 2022 23:21:32 -0700 Subject: [PATCH 4/7] HDFS-16599. Fix typo in RBF-Moudle. 
--- .../server/federation/router/RouterRpcClient.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 9b43fff0607fb..ff90854ebb7ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -624,7 +624,7 @@ private void addClientIpToCallerContext() { * @param nsId Identifier for the namespace * @param retryCount Current retry times * @param method Method to invoke - * @param obj Target Object for the method + * @param obj Target object for the method * @param params Variable parameters * @return Response from the remote server * @throws IOException @@ -789,7 +789,7 @@ private static IOException getCleanException(IOException ioe) { * @param block Block used to determine appropriate nameservice. * @param method The remote method and parameters to invoke. * @return The result of invoking the method. - * @throws IOException If to invoke generated an error. + * @throws IOException If the invoke generated an error. */ public Object invokeSingle(final ExtendedBlock block, RemoteMethod method) throws IOException { @@ -807,7 +807,7 @@ public Object invokeSingle(final ExtendedBlock block, RemoteMethod method) * @param bpId Block pool identifier. * @param method The remote method and parameters to invoke. * @return The result of invoking the method. - * @throws IOException If to invoke generated an error. + * @throws IOException If the invoke generated an error. 
*/ public Object invokeSingleBlockPool(final String bpId, RemoteMethod method) throws IOException { @@ -824,7 +824,7 @@ public Object invokeSingleBlockPool(final String bpId, RemoteMethod method) * @param nsId Target namespace for the method. * @param method The remote method and parameters to invoke. * @return The result of invoking the method. - * @throws IOException If to invoke generated an error. + * @throws IOException If the invoke generated an error. */ public Object invokeSingle(final String nsId, RemoteMethod method) throws IOException { @@ -855,7 +855,7 @@ public Object invokeSingle(final String nsId, RemoteMethod method) * @param method The remote method and parameters to invoke. * @param clazz Class for the return type. * @return The result of invoking the method. - * @throws IOException If to invoke generated an error. + * @throws IOException If the invoke generated an error. */ public T invokeSingle(final String nsId, RemoteMethod method, Class clazz) throws IOException { @@ -875,7 +875,7 @@ public T invokeSingle(final String nsId, RemoteMethod method, * @param method The remote method and parameters to invoke. * @param clazz Class for the return type. * @return The result of invoking the method. - * @throws IOException If to invoke generated an error. + * @throws IOException If the invoke generated an error. */ public T invokeSingle(final ExtendedBlock extendedBlock, RemoteMethod method, Class clazz) throws IOException { @@ -894,7 +894,7 @@ public T invokeSingle(final ExtendedBlock extendedBlock, * @param location RemoteLocation to invoke. * @param remoteMethod The remote method and parameters to invoke. * @return The result of invoking the method if successful. - * @throws IOException If to invoke generated an error. + * @throws IOException If the invoke generated an error. 
*/ public T invokeSingle(final RemoteLocationContext location, RemoteMethod remoteMethod, Class clazz) throws IOException { From 65668a30df79066a204355bd1f2f1fbcecbf55d4 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Fri, 27 May 2022 23:24:37 -0700 Subject: [PATCH 5/7] HDFS-16599. Fix typo in RBF-Module. --- .../hdfs/server/federation/router/RouterAdminServer.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index 42b6f670e462c..ab106bdf685f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -752,9 +752,9 @@ public static RouterPermissionChecker getPermissionChecker() } /** - * Get superUser name. + * Get super user name. * - * @return String superUser name. + * @return String super user name. */ public static String getSuperUser() { return routerOwner; From 297a61865a1d134416321f4dfb71b903e27d82b4 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Sat, 28 May 2022 01:22:55 -0700 Subject: [PATCH 6/7] HDFS-16599. Fix typo in RBF-Module. 
--- .../server/federation/metrics/FederationMBean.java | 2 +- .../metrics/FederationRPCPerformanceMonitor.java | 2 +- .../resolver/NamenodePriorityComparator.java | 2 +- .../server/federation/router/ConnectionManager.java | 2 +- .../server/federation/router/RouterAdminServer.java | 4 ++-- .../server/federation/router/RouterClientProtocol.java | 8 ++++---- .../federation/router/RouterSafemodeService.java | 2 +- .../token/ZKDelegationTokenSecretManagerImpl.java | 4 ++-- .../server/federation/store/CachedRecordStore.java | 2 +- .../federation/store/driver/StateStoreDriver.java | 4 ++-- .../federation/store/impl/MountTableStoreImpl.java | 6 +++--- .../hadoop/fs/contract/router/SecurityConfUtil.java | 2 +- .../hdfs/server/federation/MiniRouterDFSCluster.java | 2 +- .../federation/router/TestConnectionManager.java | 6 +++--- .../federation/router/TestDisableNameservices.java | 2 +- .../hdfs/server/federation/router/TestRouterAdmin.java | 2 +- .../federation/router/TestRouterFaultTolerant.java | 10 +++++----- .../server/federation/router/TestRouterMountTable.java | 2 +- .../router/TestRouterMountTableCacheRefresh.java | 4 ++-- .../router/TestRouterRpcMultiDestination.java | 2 +- .../hdfs/server/federation/router/TestSafeMode.java | 2 +- .../store/TestStateStoreMembershipState.java | 4 ++-- 22 files changed, 38 insertions(+), 38 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java index 1a8a3b31f7fad..ed3069af83633 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java @@ -345,7 +345,7 @@ public interface FederationMBean { long 
getHighestPriorityLowRedundancyECBlocks(); /** - * Returns the number of paths to be processed by storage policy satisfies. + * Returns the number of paths to be processed by storage policy satisfier. * * @return The number of paths to be processed by sps. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java index 93a3addcfb823..159d08e26a161 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java @@ -246,7 +246,7 @@ public void routerFailureLocked() { /** - * Get time between we're receiving the operation and sending it to the Namenode. + * Get time between we receiving the operation and sending it to the Namenode. * @return Processing time in milliseconds. */ private long getProcessingTime() { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java index 0fdf2155faa89..e9724a3dee776 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java @@ -60,7 +60,7 @@ public int compare(FederationNamenodeContext o1, */ private int compareModDates(FederationNamenodeContext o1, FederationNamenodeContext o2) { - // Reverse sort, the lowest position is the highest priority. 
+ // Reverse sort, lowest position is highest priority. return (int) (o2.getDateModified() - o1.getDateModified()); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 4af0cafc08a24..5fe797bf5ce2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -355,7 +355,7 @@ Map getPools() { /** * Clean the unused connections for this pool. * - * @param pool Connection pool to clean up. + * @param pool Connection pool to cleanup. */ @VisibleForTesting void cleanup(ConnectionPool pool) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index ab106bdf685f3..db1922ad80811 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -439,7 +439,7 @@ private boolean isQuotaUpdated(UpdateMountTableEntryRequest request, } return false; } else { - // If old entry is not available, sync quota always, since we can + // If old entry is not available, sync quota always, since we can't // conclude no change in quota. 
return true; } @@ -489,7 +489,7 @@ public RemoveMountTableEntryResponse removeMountTableEntry( synchronizeQuota(request.getSrcPath(), HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET, null); } catch (Exception e) { - // Ignore exception, if any while resting quota. Specifically to handle + // Ignore exception, if any while resetting quota. Specifically to handle // if the actual destination doesn't exist. LOG.warn("Unable to clear quota at the destinations for {}: {}", request.getSrcPath(), e.getMessage()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 641ce91df82b0..c1dafec92203b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -325,14 +325,14 @@ protected static boolean isUnavailableSubclusterException( /** * Check if a remote method can be retried in other subclusters when it * failed in the original destination. This method returns the list of - * locations to retry in. This is used by fault-tolerant mount points. + * locations to retry in. This is used by fault tolerant mount points. * @param method Method that failed and might be retried. * @param src Path where the method was invoked. * @param ioe Exception that was triggered. * @param excludeLoc Location that failed and should be excluded. * @param locations All the locations to retry. * @return The locations where we should retry (excluding the failed ones). - * @throws IOException If this path is not fault-tolerant or the exception + * @throws IOException If this path is not fault tolerant or the exception * should not be retried (e.g., NSQuotaExceededException). 
*/ private List checkFaultTolerantRetry( @@ -1830,8 +1830,8 @@ public HAServiceProtocol.HAServiceState getHAServiceState() { } /** - * Determines combinations of eligible src/dst locations for a renamed. A - * renamed cannot change the namespace. Renames are only allowed if there is an + * Determines combinations of eligible src/dst locations for a rename. A + * rename cannot change the namespace. Renames are only allowed if there is an * eligible dst location in the same namespace as the source. * * @param srcLocations List of all potential source destinations where the diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java index 7cca341aa6175..23ce4fc18b5a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java @@ -30,7 +30,7 @@ /** * Service to periodically check if the {@link * org.apache.hadoop.hdfs.server.federation.store.StateStoreService - * StateStoreService} cached information in the {@link Router} is up-to-date. + * StateStoreService} cached information in the {@link Router} is up to date. * This is for performance and removes the {@link * org.apache.hadoop.hdfs.server.federation.store.StateStoreService * StateStoreService} from the critical path in common operations. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java index 86e2dc6e8ce53..a83be71e59d83 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java @@ -95,7 +95,7 @@ public void startThreads() throws IOException { if (!isTokenWatcherEnabled()) { LOG.info("Watcher for tokens is disabled in this secret manager"); try { - // By default, set this variable + // By default set this variable checkAgainstZkBeforeDeletion.set(true); // Ensure the token root path exists if (zkClient.checkExists().forPath(ZK_DTSM_TOKENS_ROOT) == null) { @@ -159,7 +159,7 @@ public DelegationTokenIdentifier createIdentifier() { private void rebuildTokenCache(boolean initial) throws IOException { localTokenCache.clear(); // Use bare zookeeper client to get all children since curator will - // wrap the same API with a sorting process. This is time-consuming given + // wrap the same API with a sorting process. 
This is time consuming given // millions of tokens List zkTokens; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java index 193ff78951baa..2b693aa936f00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java @@ -153,7 +153,7 @@ public boolean loadCache(boolean force) throws IOException { } /** - * Check if it's time to update the cache. Update it is never updated. + * Check if it's time to update the cache. Update it was never updated. * * @return If it's time to update this cache. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java index 5065d14d9787e..a4e9c1ce82bbb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java @@ -32,8 +32,8 @@ /** * Driver class for an implementation of a {@link StateStoreService} - * provider. Driver implementations will extend this class and implement some - * default methods. + * provider. Driver implementations will extend this class and implement some of + * the default methods. 
*/ public abstract class StateStoreDriver implements StateStoreRecordOperations { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java index 978877050ef99..2e15b8fae35a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java @@ -84,7 +84,7 @@ private void checkMountTableEntryPermission(String src, FsAction action) } /** - * Check parent path permission recursively. It needs to write permission + * Check parent path permission recursively. It needs WRITE permission * of the nearest parent entry and other EXECUTE permission. * @param src mount entry being checked * @throws AccessControlException if mount table cannot be accessed @@ -99,8 +99,8 @@ private void checkMountTablePermission(final String src) throws IOException { } /** - * When add mount table entry, it needs to write permission of the nearest parent - * entry if existed, and EXECUTE permission of other ancestor entries. + * When add mount table entry, it needs WRITE permission of the nearest parent + * entry if existed, and EXECUTE permission of other ancestor entries. 
* @param request add mount table entry request * @return add mount table entry response * @throws IOException if mount table cannot be accessed diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java index 2fd18890618ba..47ab0d2707850 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java @@ -99,7 +99,7 @@ public static Configuration initSecurity() throws Exception { assertTrue("Expected configuration to enable security", UserGroupInformation.isSecurityEnabled()); - // Set up the keytab + // Setup the keytab File keytabFile = new File(baseDir, "test.keytab"); String keytab = keytabFile.getAbsolutePath(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index 95c7c3f507619..cb7882ee87419 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -924,7 +924,7 @@ public void waitRouterRegistrationQuorum(RouterContext router, /** * Wait for name spaces to be active. - * @throws Exception If we cannot check the status or we time out. + * @throws Exception If we cannot check the status or we timeout. 
*/ public void waitActiveNamespaces() throws Exception { for (RouterContext r : this.routers) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java index fc4e6418054be..e397692e9a86d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java @@ -268,10 +268,10 @@ private void checkPoolConnections(UserGroupInformation ugi, @Test public void testConfigureConnectionActiveRatio() throws IOException { - // test 1 conn below the threshold and This conns are closed + // test 1 conn below the threshold and these conns are closed testConnectionCleanup(0.8f, 10, 7, 9); - // test 2 conn below the threshold and This conns are closed + // test 2 conn below the threshold and these conns are closed testConnectionCleanup(0.8f, 10, 6, 8); } @@ -304,7 +304,7 @@ private void testConnectionCleanup(float ratio, int totalConns, addConnectionsToPool(pool, totalConns - 1, activeConns - 1); // There are activeConn connections. 
- // We can clean up the pool + // We can cleanup the pool tmpConnManager.cleanup(pool); assertEquals(leftConns, pool.getNumConnections()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java index 15978341da380..ae04150d70fa9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java @@ -102,7 +102,7 @@ public static void setUp() throws Exception { private static void setupNamespace() throws IOException { - // Set up a mount table to map to the two namespaces + // Setup a mount table to map to the two namespaces MountTableManager mountTable = routerAdminClient.getMountTableManager(); Map destinations = new TreeMap<>(); destinations.put("ns0", "/dirns0"); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java index 1038e9aaae880..c2eaddc17a2a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java @@ -466,7 +466,7 @@ public void testNameserviceManager() throws IOException { disabled = getDisabledNameservices(nsManager); assertTrue(disabled.isEmpty()); - // Non-existing name services should fail + // Non existing name services should fail disableReq = DisableNameserviceRequest.newInstance("nsunknown"); disableResp = 
nsManager.disableNameservice(disableReq); assertFalse(disableResp.getStatus()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java index b793ebbe47ee6..ef5322ba218ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java @@ -178,7 +178,7 @@ public void cleanup() throws Exception { } /** - * Update a mount table entry to be fault-tolerant. + * Update a mount table entry to be fault tolerant. * @param mountPoint Mount point to update. * @throws IOException If it cannot update the mount point. */ @@ -278,8 +278,8 @@ private void testWriteWithFailedSubcluster(final DestinationOrder order) /** * Check directory creation on a mount point. - * If it is fault-tolerant, it should be able to write everything. - * If it is not fault-tolerant, it should fail to write some. + * If it is fault tolerant, it should be able to write everything. + * If it is not fault tolerant, it should fail to write some. */ private void checkDirectoriesFaultTolerant( Path mountPoint, DestinationOrder order, @@ -322,8 +322,8 @@ private void checkDirectoriesFaultTolerant( /** * Check file creation on a mount point. - * If it is fault-tolerant, it should be able to write everything. - * If it is not fault-tolerant, it should fail to write some files. + * If it is fault tolerant, it should be able to write everything. + * If it is not fault tolerant, it should fail to write some of the files. 
*/ private void checkFilesFaultTolerant( Path mountPoint, DestinationOrder order, diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java index 1f51fc27d2dd2..a346c1a241a80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java @@ -594,7 +594,7 @@ public void testMountPointChildren() throws IOException { } /** - * Validate the number of children for the mount point to multiple + * Validate the number of children for the mount point pointing to multiple * destinations.It must be equal to the sum of number of children of the * destinations pointed by the mount point. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java index 6ed00c118f6cc..82bc7d905e606 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java @@ -234,7 +234,7 @@ public void testCachedRouterClientBehaviourAfterRouterStoped() assertEquals(srcPath, mountTableResult.getSourcePath()); } - // Let's stop one router + // Lets stop one router for (RouterContext rc : routers) { InetSocketAddress adminServerAddress = rc.getRouter().getAdminServerAddress(); @@ -308,7 +308,7 @@ public void run() { TimeUnit.SECONDS); 
mountTableRefresherService.init(config); // One router is not responding for 1 minute, still refresh should - // be finished in 5 second as cache update timeout is set 5 second. + // finish in 5 second as cache update timeout is set 5 second. mountTableRefresherService.refresh(); // Test case timeout is asserted for this test case. } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 7d7ee2f63ef27..6ade57326e316 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -130,7 +130,7 @@ public void testSetup() throws Exception { this.setNamenode(cluster.getNamenode(ns, null)); // Create a test file on a single NN that is accessed via a getRouter() path - // with 2 destinations. All tests should fail over to the alternate + // with 2 destinations. All tests should failover to the alternate // destination if the wrong NN is attempted first. 
Random r = new Random(); String randomString = "testfile-" + r.nextInt(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java index cb4c3a2109391..d040b7aac1a4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java @@ -51,7 +51,7 @@ public void setup() throws Exception { cluster.registerNamenodes(); cluster.waitNamenodeRegistration(); - // Set up the mount table + // Set up the mount table cluster.installMockLocations(); // Making one Namenodes active per nameservice diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java index b41e23edb8ac8..45a240b866b85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java @@ -186,7 +186,7 @@ public void testRegistrationMajorityQuorum() // 1) ns0:nn0 - Standby (newest) // 2) ns0:nn0 - Active (oldest) // 3) ns0:nn0 - Active (2nd oldest) - // 4) ns0:nn0 - Active (3nd the oldest element, the newest active element) + // 4) ns0:nn0 - Active (3rd oldest element, newest active element) // Verify the selected entry is the newest majority opinion (4) String ns = "ns0"; String nn = "nn0"; @@ -208,7 +208,7 @@ public void testRegistrationMajorityQuorum() ns, nn, ROUTERS[3], FederationNamenodeServiceState.ACTIVE);
assertTrue(namenodeHeartbeat(report)); - // standby - the newest overall + // standby - newest overall report = createRegistration( ns, nn, ROUTERS[0], FederationNamenodeServiceState.STANDBY); assertTrue(namenodeHeartbeat(report)); From 788606367c93407c7509a79fa9c4c3a732301d52 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Sat, 28 May 2022 01:26:11 -0700 Subject: [PATCH 7/7] HDFS-16599. Fix typo in RBF-Module. --- .../hdfs/server/federation/store/impl/MountTableStoreImpl.java | 2 +- .../hadoop/hdfs/server/federation/MiniRouterDFSCluster.java | 2 +- .../hdfs/server/federation/router/TestRouterAllResolver.java | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java index 2e15b8fae35a1..680752b8efe32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java @@ -100,7 +100,7 @@ private void checkMountTablePermission(final String src) throws IOException { /** * When add mount table entry, it needs WRITE permission of the nearest parent - * entry if existe, and EXECUTE permission of other ancestor entries. + * entry if exists, and EXECUTE permission of other ancestor entries.
* @param request add mount table entry request * @return add mount table entry response * @throws IOException if mount table cannot be accessed diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java index cb7882ee87419..87b99e5d9523c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java @@ -644,7 +644,7 @@ public void setNumDatanodesPerNameservice(int num) { /** * Set custom storage type configuration for each datanode. * If storageTypes is uninitialized or passed null then - * StorageType. DEFAULT is used. + * StorageType.DEFAULT is used. */ public void setStorageTypes(StorageType[][] storageTypes) { this.storageTypes = storageTypes; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java index 566d4eb9126f4..715b627f69461 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java @@ -102,7 +102,7 @@ public void setup() throws Exception { cluster.registerNamenodes(); cluster.waitNamenodeRegistration(); - // Set up the test mount point + // Set up the test mount point createMountTableEntry(TEST_DIR_HASH_ALL, DestinationOrder.HASH_ALL); createMountTableEntry(TEST_DIR_RANDOM, DestinationOrder.RANDOM); createMountTableEntry(TEST_DIR_SPACE, DestinationOrder.SPACE);