@@ -364,7 +364,6 @@ public void writeOnRetry(long len) throws IOException {
* it is a no op.
* @param bufferFull flag indicating whether bufferFull condition is hit or
* its called as part flush/close
* @return minimum commit index replicated to all nodes
* @throws IOException IOException in case watch gets timed out
*/
public void watchForCommit(boolean bufferFull) throws IOException {
@@ -43,7 +43,6 @@ public interface BlockInputStreamFactory {
* @param blockInfo The blockInfo representing the block.
* @param pipeline The pipeline to be used for reading the block
* @param token The block Access Token
* @param verifyChecksum Whether to verify checksums or not.
* @param xceiverFactory Factory to create the xceiver in the client
* @param refreshFunction Function to refresh the block location if needed
* @return BlockExtendedInputStream of the correct type.
@@ -71,7 +71,6 @@ public BlockInputStreamFactoryImpl(ByteBufferPool byteBufferPool,
* @param blockInfo The blockInfo representing the block.
* @param pipeline The pipeline to be used for reading the block
* @param token The block Access Token
* @param verifyChecksum Whether to verify checksums or not.
* @param xceiverFactory Factory to create the xceiver in the client
* @param refreshFunction Function to refresh the pipeline if needed
* @return BlockExtendedInputStream of the correct type.
@@ -45,7 +45,6 @@ public interface ECBlockInputStreamFactory {
* know are bad and should not be used.
* @param repConfig The replication Config
* @param blockInfo The blockInfo representing the block.
* @param verifyChecksum Whether to verify checksums or not.
* @param xceiverFactory Factory to create the xceiver in the client
* @param refreshFunction Function to refresh the block location if needed
* @return BlockExtendedInputStream of the correct type.
@@ -68,7 +68,6 @@ private ECBlockInputStreamFactoryImpl(BlockInputStreamFactory streamFactory,
* know are bad and should not be used.
* @param repConfig The replication Config
* @param blockInfo The blockInfo representing the block.
* @param verifyChecksum Whether to verify checksums or not.
* @param xceiverFactory Factory to create the xceiver in the client
* @param refreshFunction Function to refresh the pipeline if needed
* @return BlockExtendedInputStream of the correct type.
@@ -85,7 +85,7 @@
* Parity elements long. Missing or not needed elements should be set to null
* in the array. The elements should be assigned to the array in EC index order.
*
* Assuming we have n missing data locations, where n <= parity locations, the
* Assuming we have n missing data locations, where n {@literal <=} parity locations, the
* ByteBuffers passed in from the client are either assigned to the decoder
* input array, or they are assigned to the decoder output array, where
* reconstructed data is written. The required number of parity buffers will be
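The hunk above only escapes the "<=" for Javadoc, but the buffer-assignment rule it describes can be made concrete. The sketch below is hypothetical (class and method names are invented, and it ignores chunk sizes and EC index-ordering details); it only illustrates how client buffers could be split between decoder inputs and outputs when at most `parity` data locations are missing.

import java.nio.ByteBuffer;
import java.util.Set;

/**
 * Illustrative sketch only; not the actual Ozone reconstruction code.
 * Buffers for missing data indexes become decoder outputs (reconstructed data is
 * written there); readable data and parity buffers become decoder inputs, with
 * nulls left in the input array at the missing positions.
 */
public final class EcBufferAssignmentSketch {

  public static ByteBuffer[][] assign(ByteBuffer[] clientDataBuffers,
      ByteBuffer[] parityBuffers, Set<Integer> missingDataIndexes) {
    int data = clientDataBuffers.length;
    int parity = parityBuffers.length;
    if (missingDataIndexes.size() > parity) {
      throw new IllegalArgumentException("more missing buffers than parity count");
    }
    ByteBuffer[] inputs = new ByteBuffer[data + parity];
    ByteBuffer[] outputs = new ByteBuffer[missingDataIndexes.size()];
    int out = 0;
    for (int i = 0; i < data; i++) {
      if (missingDataIndexes.contains(i)) {
        outputs[out++] = clientDataBuffers[i]; // decoder writes reconstructed data here
      } else {
        inputs[i] = clientDataBuffers[i];      // readable data feeds the decoder
      }
    }
    for (int p = 0; p < parity; p++) {
      inputs[data + p] = parityBuffers[p];     // parity always feeds the decoder
    }
    return new ByteBuffer[][] {inputs, outputs};
  }

  private EcBufferAssignmentSketch() {
  }
}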
@@ -27,9 +27,9 @@
* class or method not changing over time. Currently the stability can be
* {@link Stable}, {@link Evolving} or {@link Unstable}. <br>
*
* <ul><li>All classes that are annotated with {@link Public} or
* {@link LimitedPrivate} must have InterfaceStability annotation. </li>
* <li>Classes that are {@link Private} are to be considered unstable unless
* <ul><li>All classes that are annotated with {@link InterfaceAudience.Public} or
* {@link InterfaceAudience.LimitedPrivate} must have InterfaceStability annotation. </li>
* <li>Classes that are {@link InterfaceAudience.Private} are to be considered unstable unless
* a different InterfaceStability annotation states otherwise.</li>
* <li>Incompatible changes must not be made to classes marked as stable.</li>
* </ul>
@@ -20,11 +20,13 @@

/**
* This class contains constants for Recon related configuration keys used in
* SCM & Datanode.
* SCM and Datanode.
*/
public final class ReconConfigKeys {

/**
* This class contains constants for Recon related configuration keys used in
* SCM and Datanode.
* Never constructed.
*/
private ReconConfigKeys() {
@@ -71,7 +73,7 @@ private ReconConfigKeys() {
* Recon administrator users delimited by a comma.
* This is the list of users who can access admin only information from recon.
* Users defined in
* {@link org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS}
* {@link org.apache.hadoop.ozone.OzoneConfigKeys#OZONE_ADMINISTRATORS}
* will always be able to access all recon information regardless of this
* setting.
*/
@@ -206,8 +206,7 @@ public int getScmDefaultLayoutVersionOnInit() {
* required for SCMSecurityProtocol where the KerberosInfo references
* the old configuration with
* the annotation shown below:-
* @KerberosInfo(serverPrincipal = ScmConfigKeys
* .HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
* {@code @KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)}
*/
public static class ConfigStrings {
public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY =
@@ -414,7 +414,7 @@ StartContainerBalancerResponseProto startContainerBalancer(
* considered to be failed if it has been sent more than MAX_RETRY limit
* and its count is reset to -1.
*
* @param count Maximum num of returned transactions, if < 0. return all.
* @param count Maximum num of returned transactions, if {@literal < 0}. return all.
* @param startTxId The least transaction id to start with.
* @return a list of failed deleted block transactions.
* @throws IOException
@@ -337,7 +337,7 @@ Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
* considered to be failed if it has been sent more than MAX_RETRY limit
* and its count is reset to -1.
*
* @param count Maximum num of returned transactions, if < 0. return all.
* @param count Maximum num of returned transactions, if {@literal < 0}. return all.
* @param startTxId The least transaction id to start with.
* @return a list of failed deleted block transactions.
* @throws IOException
@@ -363,7 +363,7 @@ private OzoneConsts() {
* contains illegal characters when creating/renaming key.
*
* Avoid the following characters in a key name:
* "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]", Quotation
* {@literal "\", "{", "}", "<", ">", "^", "%", "~", "#", "|", "`", "[", "]"}, Quotation
* marks and Non-printable ASCII characters (128–255 decimal characters).
* https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
*/
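The list of discouraged characters above lends itself to a small validity check. The helper below is a hypothetical sketch (Ozone's real verification lives elsewhere and may differ); it is shown only to make the character set concrete.

import java.util.regex.Pattern;

/** Hypothetical helper; illustrates the discouraged character set documented above. */
public final class KeyNameCheckSketch {

  // The special characters called out above, plus quotation marks.
  private static final Pattern DISCOURAGED =
      Pattern.compile("[\\\\{}<>^%~#|`\\[\\]\"']");

  public static boolean containsDiscouragedChar(String keyName) {
    if (DISCOURAGED.matcher(keyName).find()) {
      return true;
    }
    for (char c : keyName.toCharArray()) {
      // Non-printable ASCII and the 128-255 range mentioned in the guidance.
      if (c < 0x20 || (c >= 128 && c <= 255)) {
        return true;
      }
    }
    return false;
  }

  private KeyNameCheckSketch() {
  }
}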
@@ -45,7 +45,7 @@ static ChunkBuffer allocate(int capacity) {
*
* @param increment
* the increment size so that this buffer is allocated incrementally.
* When increment <= 0, entire buffer is allocated in the beginning.
* When increment {@literal <= 0}, entire buffer is allocated in the beginning.
*/
static ChunkBuffer allocate(int capacity, int increment) {
if (increment > 0 && increment < capacity) {
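To make the increment semantics concrete, here is a minimal, self-contained sketch of the allocation decision (a simplified stand-in, not the real ChunkBuffer implementations): when 0 < increment < capacity the buffer grows one increment at a time, otherwise the whole capacity is allocated at once.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

/** Simplified model of the allocation choice documented above; not the real ChunkBuffer. */
public final class IncrementalAllocationSketch {

  private final int capacity;
  private final int increment;
  private final List<ByteBuffer> buffers = new ArrayList<>();
  private int allocated;

  public IncrementalAllocationSketch(int capacity, int increment) {
    this.capacity = capacity;
    // When increment <= 0 (or >= capacity), the whole buffer is allocated up front.
    this.increment = (increment > 0 && increment < capacity) ? increment : capacity;
  }

  /** Allocates the next slice on demand, never exceeding the declared capacity. */
  public ByteBuffer allocateNext() {
    if (allocated >= capacity) {
      throw new IllegalStateException("capacity exhausted");
    }
    int size = Math.min(increment, capacity - allocated);
    ByteBuffer buf = ByteBuffer.allocate(size);
    buffers.add(buf);
    allocated += size;
    return buf;
  }
}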
@@ -37,18 +37,16 @@

/**
* Generic factory which stores different instances of Type 'T' sharded by
* a key & version. A single key can be associated with different versions
* a key and version. A single key can be associated with different versions
* of 'T'.
*
* Why does this class exist?
* A typical use case during upgrade is to have multiple versions of a class
* / method / object and chose them based on current layout
* version at runtime. Before finalizing, an older version is typically
* needed, and after finalize, a newer version is needed. This class serves
* this purpose in a generic way.
*
* For example, we can create a Factory to create multiple versions of
* OMRequests sharded by Request Type & Layout Version Supported.
* OMRequests sharded by Request Type and Layout Version Supported.
*/
public class LayoutVersionInstanceFactory<T> {

@@ -141,7 +139,7 @@ private boolean isValid(LayoutVersionManager lvm, int version) {
* From the list of versioned instances for a given "key", this
* returns the "floor" value corresponding to the given version.
* For example, if we have key = "CreateKey", entry -> [(1, CreateKeyV1),
* (3, CreateKeyV2), and if the passed in key = CreateKey & version = 2, we
* (3, CreateKeyV2), and if the passed in key = CreateKey and version = 2, we
* return CreateKeyV1.
* Since this is a priority queue based implementation, we use a O(1) peek()
* lookup to get the current valid version.
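The CreateKey example above can be reproduced with a tiny stand-alone sketch. It uses a TreeMap floor lookup instead of the priority-queue implementation mentioned in the Javadoc; class and method names are invented for illustration.

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

/** Toy illustration of the "floor" semantics described above; not the Ozone implementation. */
public final class VersionedFactorySketch<T> {

  // key -> (layout version -> instance), sorted by version for floor lookups.
  private final Map<String, TreeMap<Integer, T>> instances = new HashMap<>();

  public void register(String key, int version, T instance) {
    instances.computeIfAbsent(key, k -> new TreeMap<>()).put(version, instance);
  }

  /** Returns the instance registered with the highest version <= the requested version. */
  public T get(String key, int version) {
    TreeMap<Integer, T> versions = instances.get(key);
    if (versions == null) {
      return null;
    }
    Map.Entry<Integer, T> floor = versions.floorEntry(version);
    return floor == null ? null : floor.getValue();
  }

  public static void main(String[] args) {
    VersionedFactorySketch<String> factory = new VersionedFactorySketch<>();
    factory.register("CreateKey", 1, "CreateKeyV1");
    factory.register("CreateKey", 3, "CreateKeyV2");
    // Requesting version 2 returns the floor entry: CreateKeyV1.
    System.out.println(factory.get("CreateKey", 2));
  }
}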
@@ -189,7 +189,6 @@ public int containerCount() {
* Send FCR which will not contain removed containers.
*
* @param context StateContext
* @return
*/
public void handleVolumeFailures(StateContext context) {
AtomicBoolean failedVolume = new AtomicBoolean(false);
@@ -489,7 +489,7 @@ public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) {
/**
* Sets the LayoutVersionManager.
*
* @param versionMgr - config
* @param lvm config
* @return Builder
*/
public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) {
@@ -244,7 +244,7 @@ public Builder setConfig(ConfigurationSource config) {
/**
* Sets the LayoutVersionManager.
*
* @param versionMgr - config
* @param lvm config
* @return Builder.
*/
public Builder setLayoutVersionManager(HDDSLayoutVersionManager lvm) {
@@ -199,7 +199,7 @@ public void shutdown() {

/**
* Delete all files under
* <volume>/hdds/<cluster-id>/tmp/deleted-containers.
* volume/hdds/cluster-id/tmp/deleted-containers.
* This is the directory where containers are moved when they are deleted
* from the system, but before being removed from the filesystem. This
* makes the deletion atomic.
@@ -46,15 +46,17 @@
* - fsCapacity: reported total capacity from local fs.
* - minVolumeFreeSpace (mvfs) : determines the free space for closing
containers.This is like adding a few reserved bytes to reserved space.
Dn's will send close container action to SCM at this limit & it is
Dn's will send close container action to SCM at this limit, and it is
configurable.

*
*
* <pre>
* {@code
* |----used----| (avail) |++mvfs++|++++reserved+++++++|
* |<- capacity ->|
* | fsAvail |-------other-----------|
* |<- fsCapacity ->|
* }</pre>
*
* What we could directly get from local fs:
* fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
@@ -78,10 +80,12 @@
* then we should use DedicatedDiskSpaceUsage for
* `hdds.datanode.du.factory.classname`,
* Then it is much simpler, since we don't care about other usage:
*
* <pre>
* {@code
* |----used----| (avail)/fsAvail |
* |<- capacity/fsCapacity ->|
*
* }
* </pre>
* We have avail == fsAvail.
*/
public final class VolumeInfo {
@@ -154,9 +158,12 @@ public long getCapacity() {

/**
* Calculate available space use method A.
* <pre>
* {@code
* |----used----| (avail) |++++++++reserved++++++++|
* |<- capacity ->|
*
* }
*</pre>
* A) avail = capacity - used
*/
public long getAvailable() {
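A quick worked example of method A, using made-up numbers:

// Method A from the diagram above: avail = capacity - used (illustrative values only).
long capacity = 100L * 1024 * 1024 * 1024;  // 100 GiB usable by Ozone (fsCapacity minus reserved)
long used     =  40L * 1024 * 1024 * 1024;  // 40 GiB already written
long avail    = capacity - used;            // 60 GiB reported as available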
@@ -77,11 +77,15 @@ public long getUsedSpace() {
}

/**
* <pre>
* {@code
* Calculate available space use method B.
* |----used----| (avail) |++++++++reserved++++++++|
* | fsAvail |-------other-------|
* ->|~~~~|<-
* remainingReserved
* }
* </pre>
* B) avail = fsAvail - Max(reserved - other, 0);
*/
public SpaceUsageSource getCurrentUsage() {
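Method B, with equally made-up numbers, shows why only the still-unconsumed part of the reservation is subtracted from what the filesystem reports as free:

// Method B from the diagram above: avail = fsAvail - Max(reserved - other, 0) (illustrative values).
long fsAvail  = 70L * 1024 * 1024 * 1024;   // free space reported by the local filesystem
long reserved = 20L * 1024 * 1024 * 1024;   // space reserved for non-Ozone use
long other    =  5L * 1024 * 1024 * 1024;   // non-Ozone data already occupying the volume
// Only the part of "reserved" not yet consumed by "other" is held back from fsAvail.
long avail = fsAvail - Math.max(reserved - other, 0);  // 70 - 15 = 55 GiB available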
@@ -439,13 +439,13 @@ public static boolean isSameSchemaVersion(String schema, String other) {

/**
* Moves container directory to a new location
* under "<volume>/hdds/<cluster-id>/tmp/deleted-containers"
* under "volume/hdds/cluster-id/tmp/deleted-containers"
* and updates metadata and chunks path.
* Containers will be moved under it before getting deleted
* to avoid, in case of failure, having artifact leftovers
* on the default container path on the disk.
*
* Delete operation for Schema < V3
* Delete operation for Schema &lt; V3
* 1. Container is marked DELETED
* 2. Container is removed from memory container set
* 3. Container DB handler from cache is removed and closed
@@ -460,7 +460,6 @@ public static boolean isSameSchemaVersion(String schema, String other) {
* 5. Container is deleted from tmp directory.
*
* @param keyValueContainerData
* @return true if renaming was successful
*/
public static void moveToDeletedContainerDir(
KeyValueContainerData keyValueContainerData,
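The move-then-delete pattern described above can be sketched generically with java.nio.file. The class, method names, and paths below are hypothetical, not the actual KeyValueContainerUtil code; the sketch only illustrates why a single rename makes the delete appear atomic.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Comparator;
import java.util.stream.Stream;

/** Generic sketch of "rename into a tmp dir, then delete"; not the actual Ozone code. */
public final class AtomicContainerDeleteSketch {

  /** Step 1: one atomic rename moves the whole container directory out of its normal path. */
  public static Path moveToDeletedDir(Path containerDir, Path deletedContainersDir)
      throws IOException {
    Files.createDirectories(deletedContainersDir);
    Path target = deletedContainersDir.resolve(containerDir.getFileName());
    // Works as a single rename because source and target live on the same volume.
    return Files.move(containerDir, target, StandardCopyOption.ATOMIC_MOVE);
  }

  /** Step 2: delete the moved directory; a crash here leaves leftovers only under tmp. */
  public static void deleteMoved(Path movedDir) throws IOException {
    try (Stream<Path> paths = Files.walk(movedDir)) {
      paths.sorted(Comparator.reverseOrder()).forEach(p -> {
        try {
          Files.delete(p);                    // children first, then parents
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      });
    }
  }

  private AtomicContainerDeleteSketch() {
  }
}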
@@ -163,7 +163,6 @@ public void closeContainer(final long containerId) throws IOException {
* Returns the Container given a container id.
*
* @param containerId ID of the container
* @return Container
*/
public void addFinalizedBlock(final long containerId,
final long localId) {
@@ -122,7 +122,6 @@ private SCMDatanodeResponse submitRequest(Type type,
/**
* Returns SCM version.
*
* @param unused - set to null and unused.
* @return Version info.
*/
@Override
@@ -307,7 +307,7 @@ public boolean addSCM(AddSCMRequest request) throws IOException {
}
/**
* Sort the datanodes based on distance from client.
* @return List<DatanodeDetails></>
* @return list of datanodes;
* @throws IOException
*/
@Override
@@ -104,7 +104,7 @@ public DefaultApprover(PKIProfile pkiProfile, SecurityConfig config) {
* @param certSerialId - the new certificate id.
* @return Signed Certificate.
* @throws IOException - On Error
* @throws OperatorCreationException - on Error.
* @throws CertificateException - on Error.
*/
@SuppressWarnings("ParameterNumber")
@Override
@@ -195,8 +195,6 @@ public CertPath getCaCertPath()
*
* @param certSerialId - Certificate for this CA.
* @return X509Certificate
* @throws CertificateException - usually thrown if this CA is not
* initialized.
* @throws IOException - on Error.
*/
@Override
@@ -41,11 +41,12 @@
import org.slf4j.LoggerFactory;

/**
* <pre>
* Servlet that runs async-profiler as web-endpoint.
* <p>
*
* Source: https://github.com/apache/hive/blob/master/common/src/java/org
* /apache/hive/http/ProfileServlet.java
* <p>
*
* Following options from async-profiler can be specified as query parameter.
* // -e event profiling event: cpu|alloc|lock|cache-misses etc.
* // -d duration run profiling for <duration> seconds
@@ -79,7 +80,7 @@
* curl "http://localhost:10002/prof"
* - To collect 1 minute CPU profile of current process and output in tree
* format (html)
* curl "http://localhost:10002/prof?output=tree&duration=60"
* curl{@literal "http://localhost:10002/prof?output=tree&duration=60"}
* - To collect 30 second heap allocation profile of current process (returns
* FlameGraph svg)
* curl "http://localhost:10002/prof?event=alloc"
@@ -111,6 +112,7 @@
* The default output format of the newest async profiler is HTML.
* If the user is using an older version such as 1.5, HTML is not supported.
* Please specify the corresponding output format.
* </pre>
*/
public class ProfileServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
@@ -129,7 +129,7 @@ private HddsServerUtil() {
* @param conf configuration
* @param protocol Protocol interface
* @param service service that implements the protocol
* @param server RPC server to which the protocol & implementation is added to
* @param server RPC server to which the protocol and implementation is added to
*/
public static void addPBProtocol(Configuration conf, Class<?> protocol,
BlockingService service, RPC.Server server) throws IOException {
@@ -308,7 +308,6 @@ List<? extends KeyValue<KEY, VALUE>> getSequentialRangeKVs(KEY startKey,
* as part of a batch operation.
* @param batch
* @param prefix
* @return
*/
void deleteBatchWithPrefix(BatchOperation batch, KEY prefix)
throws IOException;
@@ -48,7 +48,7 @@
* This interface must be implemented by entities requiring audit logging.
* For example - OMVolumeArgs, OMBucketArgs.
* The implementing class must override toAuditMap() to return an
* instance of Map<Key, Value> where both Key and Value are String.
* instance of {@code Map<Key, Value>} where both Key and Value are String.
*
* Key: must contain printable US ASCII characters
* May not contain a space, =, ], or "
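As a rough illustration of that contract, a hypothetical implementer might look like the sketch below. The class, its fields, and the map keys are invented; only the Map<String, String> shape and the key restrictions come from the Javadoc above.

import java.util.LinkedHashMap;
import java.util.Map;

/** Illustrative implementer of the audit contract described above; fields are invented. */
public class SampleVolumeArgs /* implements Auditable */ {

  private final String volumeName;
  private final String owner;
  private final long quotaInBytes;

  public SampleVolumeArgs(String volumeName, String owner, long quotaInBytes) {
    this.volumeName = volumeName;
    this.owner = owner;
    this.quotaInBytes = quotaInBytes;
  }

  /** Keys stay printable US-ASCII without spaces, '=', ']' or quotes, per the contract. */
  public Map<String, String> toAuditMap() {
    Map<String, String> auditMap = new LinkedHashMap<>();
    auditMap.put("volume", volumeName);
    auditMap.put("owner", owner);
    auditMap.put("quotaInBytes", String.valueOf(quotaInBytes));
    return auditMap;
  }
}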