diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index d5423d4ec0bb..48c77f2c8634 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -364,7 +364,6 @@ public void writeOnRetry(long len) throws IOException {
* it is a no op.
* @param bufferFull flag indicating whether bufferFull condition is hit or
* its called as part flush/close
- * @return minimum commit index replicated to all nodes
* @throws IOException IOException in case watch gets timed out
*/
public void watchForCommit(boolean bufferFull) throws IOException {
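Nearly every hunk in this patch applies the same doclint recipe: drop javadoc tags that contradict the signature (a `@return` on a void method, an undeclared `@throws`), and escape characters that break javadoc's HTML parsing with `{@link}`, `{@code}` or `{@literal}`. A minimal sketch of the resulting style — illustrative javadoc only, not the real Ozone method body:

```java
/**
 * Waits for the written data to be replicated. Void methods carry no
 * {@code @return} tag, and characters the HTML parser would reject are
 * escaped, e.g. {@literal <=} and {@literal &}, or fenced with {@code}.
 *
 * @param bufferFull whether the wait was triggered by a full buffer
 *                   rather than a flush/close
 * @throws IOException if the commit watch times out
 */
public void watchForCommit(boolean bufferFull) throws IOException {
  // body elided
}
```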
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java
index d347dee85121..8287a5a78bb2 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java
@@ -43,7 +43,6 @@ public interface BlockInputStreamFactory {
* @param blockInfo The blockInfo representing the block.
* @param pipeline The pipeline to be used for reading the block
* @param token The block Access Token
- * @param verifyChecksum Whether to verify checksums or not.
* @param xceiverFactory Factory to create the xceiver in the client
* @param refreshFunction Function to refresh the block location if needed
* @return BlockExtendedInputStream of the correct type.
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
index 8a87234a7707..d9cadc948a61 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java
@@ -71,7 +71,6 @@ public BlockInputStreamFactoryImpl(ByteBufferPool byteBufferPool,
* @param blockInfo The blockInfo representing the block.
* @param pipeline The pipeline to be used for reading the block
* @param token The block Access Token
- * @param verifyChecksum Whether to verify checksums or not.
* @param xceiverFactory Factory to create the xceiver in the client
* @param refreshFunction Function to refresh the pipeline if needed
* @return BlockExtendedInputStream of the correct type.
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java
index 66e7a31337a6..aca3cfed465f 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java
@@ -45,7 +45,6 @@ public interface ECBlockInputStreamFactory {
* know are bad and should not be used.
* @param repConfig The replication Config
* @param blockInfo The blockInfo representing the block.
- * @param verifyChecksum Whether to verify checksums or not.
* @param xceiverFactory Factory to create the xceiver in the client
* @param refreshFunction Function to refresh the block location if needed
* @return BlockExtendedInputStream of the correct type.
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java
index 01d0b0a7b7e8..41c46aad379c 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java
@@ -68,7 +68,6 @@ private ECBlockInputStreamFactoryImpl(BlockInputStreamFactory streamFactory,
* know are bad and should not be used.
* @param repConfig The replication Config
* @param blockInfo The blockInfo representing the block.
- * @param verifyChecksum Whether to verify checksums or not.
* @param xceiverFactory Factory to create the xceiver in the client
* @param refreshFunction Function to refresh the pipeline if needed
* @return BlockExtendedInputStream of the correct type.
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
index 31f94e0acad6..229cc3f3e36e 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java
@@ -85,7 +85,7 @@
* Parity elements long. Missing or not needed elements should be set to null
* in the array. The elements should be assigned to the array in EC index order.
*
- * Assuming we have n missing data locations, where n <= parity locations, the
+ * Assuming we have n missing data locations, where n {@literal <=} parity locations, the
* ByteBuffers passed in from the client are either assigned to the decoder
* input array, or they are assigned to the decoder output array, where
* reconstructed data is written. The required number of parity buffers will be
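The `{@literal <=}` fix above sits in a javadoc block that defines a precise buffer contract. A self-contained sketch of that layout for RS(3,2) — a hypothetical helper, not the real ECBlockReconstructedStripeInputStream API:

```java
import java.nio.ByteBuffer;

final class StripeLayoutSketch {
  /**
   * Builds the data-plus-parity array described above, in EC index order,
   * assuming the single missing data location is EC index 2 (n = 1 missing
   * data location, and n <= 2 parity locations holds).
   */
  static ByteBuffer[] stripeBuffers(int cellSize) {
    ByteBuffer[] bufs = new ByteBuffer[5];
    bufs[0] = ByteBuffer.allocate(cellSize); // data 1: decoder input, read from a healthy node
    bufs[1] = ByteBuffer.allocate(cellSize); // data 2: missing; decoder writes reconstructed data here
    bufs[2] = ByteBuffer.allocate(cellSize); // data 3: decoder input, read from a healthy node
    bufs[3] = ByteBuffer.allocate(cellSize); // parity 1: one parity input per missing data buffer
    bufs[4] = null;                          // parity 2: not needed, so left null
    return bufs;
  }
}
```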
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java
index 4251344139ab..6e9ee9467907 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceStability.java
@@ -27,9 +27,9 @@
* class or method not changing over time. Currently the stability can be
* {@link Stable}, {@link Evolving} or {@link Unstable}.
*
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -34,9 +34,11 @@
- *
+ * {@code
* |----used----| (avail) |++mvfs++|++++reserved+++++++|
* |<- capacity ->|
* | fsAvail |-------other-----------|
* |<- fsCapacity ->|
+ * }
*
* What we could directly get from local fs:
* fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
@@ -78,10 +80,12 @@
* then we should use DedicatedDiskSpaceUsage for
* `hdds.datanode.du.factory.classname`,
* Then it is much simpler, since we don't care about other usage:
- *
+ *
+ * {@code
* |----used----| (avail)/fsAvail |
* |<- capacity/fsCapacity ->|
- *
+ * }
+ *
* We have avail == fsAvail.
*/
public final class VolumeInfo {
@@ -154,9 +158,12 @@ public long getCapacity() {
/**
* Calculate available space use method A.
+ *
+ * {@code
* |----used----| (avail) |++++++++reserved++++++++|
* |<- capacity ->|
- *
+ * }
+ *
* A) avail = capacity - used
*/
public long getAvailable() {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 7e138b057168..a91f0c1f72a5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -77,11 +77,15 @@ public long getUsedSpace() {
}
/**
+ *
+ * {@code
* Calculate available space use method B.
* |----used----| (avail) |++++++++reserved++++++++|
* | fsAvail |-------other-------|
* ->|~~~~|<-
* remainingReserved
+ * }
+ *
* B) avail = fsAvail - Max(reserved - other, 0);
*/
public SpaceUsageSource getCurrentUsage() {
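The diagrams wrapped in `{@code}` above are not decoration; they encode the two available-space formulas. A compact restatement with illustrative names (not the actual VolumeInfo/VolumeUsage fields):

```java
final class AvailableSpaceSketch {
  /** Method A (VolumeInfo): avail = capacity - used. */
  static long availA(long capacity, long used) {
    return capacity - used;
  }

  /** Method B (VolumeUsage): avail = fsAvail - max(reserved - other, 0). */
  static long availB(long fsAvail, long reserved, long other) {
    return fsAvail - Math.max(reserved - other, 0);
  }
}
```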
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index b7d5b5fa59eb..691ccaa630d1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -439,13 +439,13 @@ public static boolean isSameSchemaVersion(String schema, String other) {
/**
* Moves container directory to a new location
- * under "* Servlet that runs async-profiler as web-endpoint. - **/ public class ProfileServlet extends HttpServlet { private static final long serialVersionUID = 1L; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index c45e772c2417..41fea63d2056 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -129,7 +129,7 @@ private HddsServerUtil() { * @param conf configuration * @param protocol Protocol interface * @param service service that implements the protocol - * @param server RPC server to which the protocol & implementation is added to + * @param server RPC server to which the protocol and implementation is added to */ public static void addPBProtocol(Configuration conf, Class> protocol, BlockingService service, RPC.Server server) throws IOException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java index f153823db7c5..5cfdcdb8a037 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java @@ -308,7 +308,6 @@ List extends KeyValue+ * * Source: https://github.com/apache/hive/blob/master/common/src/java/org * /apache/hive/http/ProfileServlet.java - *
+ * * Following options from async-profiler can be specified as query parameter. * // -e event profiling event: cpu|alloc|lock|cache-misses etc. * // -d duration run profiling for
seconds @@ -79,7 +80,7 @@ * curl "http://localhost:10002/prof" * - To collect 1 minute CPU profile of current process and output in tree * format (html) - * curl "http://localhost:10002/prof?output=tree&duration=60" + * curl{@literal "http://localhost:10002/prof?output=tree&duration=60"} * - To collect 30 second heap allocation profile of current process (returns * FlameGraph svg) * curl "http://localhost:10002/prof?event=alloc" @@ -111,6 +112,7 @@ * The default output format of the newest async profiler is HTML. * If the user is using an older version such as 1.5, HTML is not supported. * Please specify the corresponding output format. + *
+ * {@code
* (1) (3)(4)
* --------------------------->
* (2) scm2(Follower)
@@ -130,8 +132,8 @@ public class RootCARotationManager extends StatefulService {
* --------------------------->
* (2) scm3(Follower)
* <---------------------------
- *
- *
+ * }
+ *
* (1) Rotation Prepare
* (2) Rotation Prepare Ack
* (3) Rotation Commit
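The numbered arrows in the now-`{@code}`-fenced diagram describe the rotation handshake between the SCM leader and its followers. Sketched as an enum for readability — illustrative only, and limited to the steps visible in this excerpt (the diagram also references a step (4) that falls outside the quoted hunk):

```java
/** Message order in the root CA rotation flow diagrammed above. */
enum RotationStep {
  ROTATION_PREPARE,     // (1) leader -> followers
  ROTATION_PREPARE_ACK, // (2) follower -> leader
  ROTATION_COMMIT       // (3) leader -> followers; a step (4) follows in the full javadoc
}
```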
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index cca2df003742..6f5429a853bd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -289,12 +289,12 @@ public interface ContainerReport {
public enum ContainerReportType {
/**
* Incremental container report type
- * {@liks IncrementalContainerReportFromDatanode}.
+ * {@link IncrementalContainerReportFromDatanode}.
*/
ICR,
/**
* Full container report type
- * {@liks ContainerReportFromDatanode}.
+ * {@link ContainerReportFromDatanode}.
*/
FCR
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
index 2b6fa032b538..5aaf4b7b4852 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java
@@ -70,9 +70,10 @@ public String getKerberosKeytab() {
* This static class is required to support other classes
* that reference the key names and also require attributes.
* Example: SCMSecurityProtocol where the KerberosInfo references
- * the old configuration with the annotation shown below:-
- * @KerberosInfo(serverPrincipal =
- * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
+ * the old configuration with the annotation shown below:
+ *
+ * {@code @KerberosInfo(serverPrincipal =
+ * ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)}
* Note: Currently, this is only intended to be a special use case in
* Snapshot. If this is used elsewhere, consider moving this to
- * @link OMMetadataManager}.
+ * {@link OMMetadataManager}.
*
* @param volumeName volume name
* @param bucketName bucket name
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
index 4dc70bfa569d..f873b43ae983 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java
@@ -193,7 +193,6 @@ public int read(long position, ByteBuffer buf) throws IOException {
/**
* @param buf the ByteBuffer to receive the results of the read operation.
* @param position offset
- * @return void
* @throws IOException if there is some error performing the read
* @throws EOFException if end of file reached before reading fully
*/
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index 5c9f6a5f4e12..1a2a705fc0f5 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -352,7 +352,8 @@ private static void triggerRebuild(ReconNamespaceSummaryManager reconNamespaceSu
* @param url url to call
* @param isSpnego is SPNEGO enabled
* @return HttpURLConnection instance of the HTTP call.
- * @throws IOException, AuthenticationException While reading the response.
+ * @throws IOException While reading the response,
+ * @throws AuthenticationException
*/
public HttpURLConnection makeHttpCall(URLConnectionFactory connectionFactory,
String url, boolean isSpnego)
@@ -569,7 +570,6 @@ public static boolean isInitializationComplete(ReconOMMetadataManager omMetadata
* @param dateFormat
* @param timeZone
* @return the epoch milliseconds representation of the date.
- * @throws ParseException
*/
public static long convertToEpochMillis(String dateString, String dateFormat, TimeZone timeZone) {
String localDateFormat = dateFormat;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index cbdc198f8aaf..c4d2d35bef94 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -157,7 +157,7 @@ public ContainerEndpoint(OzoneStorageContainerManager reconSCM,
}
/**
- * Return @{@link org.apache.hadoop.hdds.scm.container}
+ * Return {@linkplain org.apache.hadoop.hdds.scm.container}
* for the containers starting from the given "prev-key" query param for the
* given "limit". The given "prev-key" is skipped from the results returned.
*
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index 3f95c04fc916..4620b69fbe33 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -734,7 +734,7 @@ public Response getDeletedDirectorySummary() {
* /volume1/fso-bucket/dir1/dir2/dir3/file1
* Input Request for OBS bucket:
*
- * `api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS`
+ * {@literal `api/v1/keys/listKeys?startPrefix=/volume1/obs-bucket&limit=2&replicationType=RATIS`}
* Output Response:
*
* {
@@ -832,7 +832,7 @@ public Response getDeletedDirectorySummary() {
* }
* Input Request for FSO bucket:
*
- * `api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS`
+ * {@literal `api/v1/keys/listKeys?startPrefix=/volume1/fso-bucket&limit=2&replicationType=RATIS`}
* Output Response:
*
* {
@@ -930,7 +930,6 @@ public Response getDeletedDirectorySummary() {
* }
*
* ********************************************************
- * @throws IOException
*/
@GET
@Path("/listKeys")
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
index 59957e116244..bfb2b05aad35 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerMetadataManager.java
@@ -177,7 +177,7 @@ Map