diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
index 4ac46e8cc5dc..00b6631379bc 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java
@@ -368,7 +368,7 @@ private static class BuilderPayloadSetter {
* Create a ByteString from byte array without copying (wrap), and then set it as the payload
* for the builder.
* @param builder builder for HDFS DataTransferEncryptorMessage.
- * @param payload byte array of payload. n
+ * @param payload byte array of payload.
*/
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
byte[] payload) throws IOException {
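
A minimal sketch of the zero-copy wrap the javadoc above describes, using the stock protobuf-java API for illustration (the real helper targets Hadoop's DataTransferEncryptorMessageProto builder, which may be shaded):

    import com.google.protobuf.ByteString;
    import com.google.protobuf.UnsafeByteOperations;

    final class PayloadSketch {
      // Wrap a byte[] as a ByteString without copying it. Later mutation of the
      // array stays visible through the ByteString, so callers must not reuse it.
      static ByteString wrap(byte[] payload) {
        return UnsafeByteOperations.unsafeWrap(payload);
      }
    }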
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
index a543b577b7ae..ed1755ad5021 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -165,7 +165,7 @@ public static boolean isBackupEnabled(Configuration conf) {
}
/**
- * Get configuration n
+ * Get configuration
*/
Configuration getConf() {
return conf;
@@ -192,7 +192,8 @@ public void close() {
* @param tableList table list
* @param targetRootDir root dir
* @param workers number of parallel workers
- * @param bandwidth bandwidth per worker in MB per sec n * @throws BackupException exception
+ * @param bandwidth bandwidth per worker in MB per sec
+ * @throws BackupException exception
*/
  public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
String targetRootDir, int workers, long bandwidth) throws BackupException {
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index 68b08544196e..6c021bf622a5 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -184,9 +184,7 @@ public static ServerName[] getFavoredNodesList(byte[] favoredNodes) throws IOExc
return servers;
}
- /**
- * n * @return PB'ed bytes of {@link FavoredNodes} generated by the server list.
- */
+ /** Returns PB'ed bytes of {@link FavoredNodes} generated by the server list. */
  public static byte[] getFavoredNodes(List<ServerName> serverAddrList) {
FavoredNodes.Builder f = FavoredNodes.newBuilder();
for (ServerName s : serverAddrList) {
@@ -319,8 +317,8 @@ public ServerName[] getSecondaryAndTertiary(RegionInfo regionInfo, ServerName pr
/**
   * For regions that share the primary, avoid placing the secondary and tertiary on the same RS. Used
- * for generating new assignments for the primary/secondary/tertiary RegionServers n * @return the
- * map of regions to the servers the region-files should be hosted on
+ * for generating new assignments for the primary/secondary/tertiary RegionServers
+ * @return the map of regions to the servers the region-files should be hosted on
*/
  public Map<RegionInfo, ServerName[]>
    placeSecondaryAndTertiaryWithRestrictions(Map<RegionInfo, ServerName> primaryRSMap) {
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
index 4c6f2b3cc27d..3b3aedad6c66 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
@@ -80,7 +80,7 @@ public List<ServerName> getFavoredNodes(RegionInfo region) {
/**
* Return the position of the server in the favoredNodes list. Assumes the favoredNodes list is of
- * size 3. n
+ * size 3.
*/
  public static Position getFavoredServerPosition(List<ServerName> favoredNodes,
ServerName server) {
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
index 8858e13da705..d86201a34fb0 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
@@ -294,7 +294,7 @@ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snaps
}
/**
- * Use this to project the dispersion scores nnn
+ * Use this to project the dispersion scores
*/
public void fillUpDispersion(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot,
FavoredNodesPlan newPlan) {
@@ -566,7 +566,8 @@ int getTotalFavoredAssignments() {
/**
* Return the number of regions based on the position (primary/secondary/ tertiary) assigned to
- * their favored nodes n * @return the number of regions
+ * their favored nodes
+ * @return the number of regions
*/
int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) {
return favoredNodes[position.ordinal()];
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
index 8c675c4522e6..67438677dadd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
@@ -53,7 +53,8 @@ public byte[] toByteArray() {
/**
* Parse the serialized representation of the {@link ClusterId}
* @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
- * @return An instance of {@link ClusterId} made from bytes n * @see #toByteArray()
+ * @return An instance of {@link ClusterId} made from bytes
+ * @see #toByteArray()
*/
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
@@ -79,9 +80,7 @@ public ClusterIdProtos.ClusterId convert() {
return builder.setClusterId(this.id).build();
}
- /**
- * n * @return A {@link ClusterId} made from the passed in cid
- */
+  /** Returns a {@link ClusterId} made from the passed in cid */
public static ClusterId convert(final ClusterIdProtos.ClusterId cid) {
return new ClusterId(cid.getClusterId());
}
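
A round-trip sketch for the toByteArray/parseFrom pair documented above:

    import org.apache.hadoop.hbase.ClusterId;
    import org.apache.hadoop.hbase.exceptions.DeserializationException;

    final class ClusterIdSketch {
      static void roundTrip() throws DeserializationException {
        ClusterId id = new ClusterId();           // backed by a random UUID
        byte[] pb = id.toByteArray();             // pb-magic-prefixed bytes
        ClusterId copy = ClusterId.parseFrom(pb); // throws on a bad prefix
        assert copy.toString().equals(id.toString());
      }
    }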
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index ebf6d919374d..4d554fa19bf2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -80,9 +80,7 @@ public int hashCode() {
return this.serverName.hashCode();
}
- /**
- * n
- */
+ /** Returns regionInfo */
public RegionInfo getRegion() {
return regionInfo;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
index a15833ac17a1..bc156353a1b7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java
@@ -34,8 +34,5 @@ public NotAllMetaRegionsOnlineException() {
}
-  /**
-   * n
-   */
public NotAllMetaRegionsOnlineException(String message) {
super(message);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index e0c408781f8d..2684886ba3d5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -76,7 +76,7 @@ default String getVersion() {
  Map<String, List<ReplicationLoadSource>> getReplicationLoadSourceMap();
/**
- * Call directly from client such as hbase shell n
+   * Called directly from clients such as the hbase shell.
*/
@Nullable
ReplicationLoadSink getReplicationLoadSink();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 597dfcd266a1..f5da0aa0bde7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -373,9 +373,10 @@ default void enableTable(TableName tableName) throws IOException {
* Disable table and wait on completion. May timeout eventually. Use
* {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
* {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
- * enabled state for it to be disabled. n * @throws IOException There could be couple types of
- * IOException TableNotFoundException means the table doesn't exist. TableNotEnabledException
- * means the table isn't in enabled state.
+ * enabled state for it to be disabled.
+   * @throws IOException There could be a couple types of IOException: TableNotFoundException means
+   *                     the table doesn't exist; TableNotEnabledException means the table isn't in
+   *                     enabled state.
*/
default void disableTable(TableName tableName) throws IOException {
get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
@@ -598,7 +599,7 @@ Future<Void> modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[]
* then it returns. It does not wait on the completion of Compaction (it can take a while).
* @param tableName table to compact
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
- * @throws IOException if a remote or network exception occurs n
+ * @throws IOException if a remote or network exception occurs
*/
void compact(TableName tableName, CompactType compactType)
throws IOException, InterruptedException;
@@ -610,7 +611,7 @@ void compact(TableName tableName, CompactType compactType)
* @param tableName table to compact
* @param columnFamily column family within a table
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
- * @throws IOException if not a mob column family or if a remote or network exception occurs n
+ * @throws IOException if not a mob column family or if a remote or network exception occurs
*/
void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
throws IOException, InterruptedException;
@@ -659,7 +660,7 @@ void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
* while).
* @param tableName table to compact
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
- * @throws IOException if a remote or network exception occurs n
+ * @throws IOException if a remote or network exception occurs
*/
void majorCompact(TableName tableName, CompactType compactType)
throws IOException, InterruptedException;
@@ -671,7 +672,7 @@ void majorCompact(TableName tableName, CompactType compactType)
* @param tableName table to compact
* @param columnFamily column family within a table
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
- * @throws IOException if not a mob column family or if a remote or network exception occurs n
+ * @throws IOException if not a mob column family or if a remote or network exception occurs
*/
void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
throws IOException, InterruptedException;
@@ -1880,7 +1881,7 @@ default int getMasterInfoPort() throws IOException {
/**
* Return the set of supported security capabilities.
- * @throws IOException if a remote or network exception occurs n
+ * @throws IOException if a remote or network exception occurs
*/
  List<SecurityCapability> getSecurityCapabilities() throws IOException;
@@ -2215,7 +2216,7 @@ void recommissionRegionServer(ServerName server, List encodedRegionNames
* Clear compacting queues on a regionserver.
* @param serverName the region server name
* @param queues the set of queue name
- * @throws IOException if a remote or network exception occurs n
+ * @throws IOException if a remote or network exception occurs
*/
  void clearCompactionQueues(ServerName serverName, Set<String> queues)
throws IOException, InterruptedException;
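
A usage sketch for the compaction entry points documented above, assuming an open Admin handle and placeholder table/family names:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CompactionSketch {
      static void compact(Admin admin) throws Exception {
        TableName table = TableName.valueOf("my_table");
        // Queue a compaction of the mob files for one column family; the call
        // returns once the request is queued, not when compaction finishes.
        admin.compact(table, Bytes.toBytes("cf"), CompactType.MOB);
        // Queue a major compaction of the whole table.
        admin.majorCompact(table, CompactType.NORMAL);
      }
    }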
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 6304065ae37b..81cf86ed2070 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -63,7 +63,7 @@ public class Append extends Mutation {
*
* This range is used as [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
- * @param maxStamp maximum timestamp value, exclusive n
+ * @param maxStamp maximum timestamp value, exclusive
*/
public Append setTimeRange(long minStamp, long maxStamp) {
tr = TimeRange.between(minStamp, maxStamp);
@@ -71,7 +71,7 @@ public Append setTimeRange(long minStamp, long maxStamp) {
}
/**
- * Gets the TimeRange used for this append. n
+ * Gets the TimeRange used for this append.
*/
public TimeRange getTimeRange() {
return this.tr;
@@ -83,7 +83,7 @@ protected long extraHeapSize() {
}
/**
- * n * True (default) if the append operation should return the results. A client that is not
+ * True (default) if the append operation should return the results. A client that is not
* interested in the result can save network bandwidth setting this to false.
*/
@Override
@@ -122,7 +122,7 @@ public Append(Append appendToCopy) {
   * Create an Append operation for the specified row.
*
* At least one column must be appended to.
- * @param rowArray Makes a copy out of this buffer. nn
+ * @param rowArray Makes a copy out of this buffer.
*/
public Append(final byte[] rowArray, final int rowOffset, final int rowLength) {
checkRow(rowArray, rowOffset, rowLength);
@@ -144,7 +144,7 @@ public Append(byte[] row, long ts, NavigableMap<byte[], List<Cell>> familyMap) {
* Add the specified column and value to this Append operation.
* @param family family name
* @param qualifier column qualifier
- * @param value value to append to specified column n
+ * @param value value to append to specified column
*/
public Append addColumn(byte[] family, byte[] qualifier, byte[] value) {
KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, KeyValue.Type.Put, value);
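
A usage sketch for the Append API above, assuming an open Table handle and placeholder names:

    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class AppendSketch {
      static void appendSuffix(Table table) throws java.io.IOException {
        Append append = new Append(Bytes.toBytes("row1"));
        append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
        append.setReturnResults(false); // skip shipping the new value back
        table.append(append);
      }
    }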
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 19a0490e361c..6070c553f5e1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -211,7 +211,7 @@ CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[
  CompletableFuture<Void> enableTable(TableName tableName);
/**
- * Disable a table. The table has to be in enabled state for it to be disabled. n
+ * Disable a table. The table has to be in enabled state for it to be disabled.
*/
  CompletableFuture<Void> disableTable(TableName tableName);
@@ -1156,7 +1156,7 @@ default CompletableFuture<Integer> getMasterInfoPort() {
  CompletableFuture<Void> stopMaster();
/**
- * Stop the designated regionserver. n
+ * Stop the designated regionserver.
*/
  CompletableFuture<Void> stopRegionServer(ServerName serverName);
@@ -1365,8 +1365,8 @@ default CompletableFuture<Boolean> normalize() {
  CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp);
/**
- * Turn the cleaner chore on/off. n * @return Previous cleaner state wrapped by a
- * {@link CompletableFuture}
+ * Turn the cleaner chore on/off.
+ * @return Previous cleaner state wrapped by a {@link CompletableFuture}
*/
  CompletableFuture<Boolean> cleanerChoreSwitch(boolean on);
@@ -1385,8 +1385,8 @@ default CompletableFuture<Boolean> normalize() {
  CompletableFuture<Boolean> runCleanerChore();
/**
- * Turn the catalog janitor on/off. n * @return the previous state wrapped by a
- * {@link CompletableFuture}
+ * Turn the catalog janitor on/off.
+ * @return the previous state wrapped by a {@link CompletableFuture}
*/
  CompletableFuture<Boolean> catalogJanitorSwitch(boolean on);
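
A sketch of the chore switches above; both return the previous state wrapped in a CompletableFuture:

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.client.AsyncAdmin;

    final class ChoreSwitchSketch {
      static CompletableFuture<Boolean> pauseCleanup(AsyncAdmin admin) {
        admin.catalogJanitorSwitch(false); // previous janitor state, ignored here
        return admin.cleanerChoreSwitch(false);
      }
    }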
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 6198086d503f..1f29b556b127 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -185,7 +185,7 @@ private void spawnRenewalChore(final UserGroupInformation user) {
}
/**
- * If choreService has not been created yet, create the ChoreService. n
+ * If choreService has not been created yet, create the ChoreService.
*/
synchronized ChoreService getChoreService() {
if (isClosed()) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
index 6a092a221fdd..369b2be8ecda 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java
@@ -133,7 +133,7 @@ public interface ColumnFamilyDescriptor {
int getMinVersions();
/**
- * Get the mob compact partition policy for this family n
+ * Get the mob compact partition policy for this family
*/
MobCompactPartitionPolicy getMobCompactPartitionPolicy();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index 3c11bef53c78..42f25fdc56f4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -861,8 +861,8 @@ public ModifyableColumnFamilyDescriptor setIndexBlockEncoding(IndexBlockEncoding
/**
* Set whether the tags should be compressed along with DataBlockEncoding. When no
- * DataBlockEncoding is been used, this is having no effect. n * @return this (for chained
- * invocation)
+   * DataBlockEncoding is in use, this has no effect.
+ * @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) {
return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags));
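
A sketch of setCompressTags above; tag compression only takes effect when a DataBlockEncoding is also set:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CompressTagsSketch {
      static ColumnFamilyDescriptor build() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF) // required for effect
          .setCompressTags(true)
          .build();
      }
    }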
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 8ec670d445f9..f97db8a116d6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -97,7 +97,7 @@ public Delete(byte[] row, long timestamp) {
*
* This timestamp is ONLY used for a delete row operation. If specifying families or columns, you
* must specify each timestamp individually.
- * @param row We make a local copy of this passed in row. nn
+ * @param row We make a local copy of this passed in row.
*/
public Delete(final byte[] row, final int rowOffset, final int rowLength) {
this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@@ -141,7 +141,7 @@ public Delete(byte[] row, long ts, NavigableMap<byte[], List<Cell>> familyMap) {
/**
* Add an existing delete marker to this Delete object.
* @param cell An existing cell of type "delete".
- * @return this for invocation chaining n
+ * @return this for invocation chaining
*/
@Override
public Delete add(Cell cell) throws IOException {
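
A usage sketch for Delete, assuming an open Table handle:

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class DeleteSketch {
      static void deleteLatest(Table table) throws java.io.IOException {
        Delete delete = new Delete(Bytes.toBytes("row1"));
        // Deletes the most recent version of this cell; the row-level timestamp
        // in the constructors above only applies to whole-row deletes.
        delete.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        table.delete(delete);
      }
    }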
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index f4e06101255d..617f67b9a87a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -86,7 +86,7 @@ public Get(byte[] row) {
}
/**
- * Copy-constructor n
+ * Copy-constructor
*/
public Get(Get get) {
this(get.getRow());
@@ -125,7 +125,7 @@ public Get(Get get) {
}
/**
- * Create a Get operation for the specified row. nnn
+ * Create a Get operation for the specified row.
*/
public Get(byte[] row, int rowOffset, int rowLength) {
Mutation.checkRow(row, rowOffset, rowLength);
@@ -133,7 +133,7 @@ public Get(byte[] row, int rowOffset, int rowLength) {
}
/**
- * Create a Get operation for the specified row. n
+ * Create a Get operation for the specified row.
*/
public Get(ByteBuffer row) {
Mutation.checkRow(row);
@@ -294,7 +294,7 @@ public boolean getCacheBlocks() {
}
/**
- * Method for retrieving the get's row n
+ * Method for retrieving the get's row
*/
@Override
public byte[] getRow() {
@@ -326,7 +326,7 @@ public int getRowOffsetPerColumnFamily() {
}
/**
- * Method for retrieving the get's TimeRange n
+ * Method for retrieving the get's TimeRange
*/
public TimeRange getTimeRange() {
return this.tr;
@@ -357,7 +357,7 @@ public boolean hasFamilies() {
}
/**
- * Method for retrieving the get's familyMap n
+ * Method for retrieving the get's familyMap
*/
  public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
return this.familyMap;
@@ -365,7 +365,7 @@ public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
/**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
- * and aggregation by debugging, logging, and administration tools. n
+ * and aggregation by debugging, logging, and administration tools.
*/
@Override
  public Map<String, Object> getFingerprint() {
@@ -382,7 +382,7 @@ public Map<String, Object> getFingerprint() {
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
- * @param maxCols a limit on the number of columns output prior to truncation n
+ * @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
  public Map<String, Object> toMap(int maxCols) {
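
A usage sketch tying the Get accessors above together, assuming an open Table handle:

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class GetSketch {
      static byte[] read(Table table) throws java.io.IOException {
        Get get = new Get(Bytes.toBytes("row1"));
        get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        get.setTimeRange(0L, Long.MAX_VALUE); // [minStamp, maxStamp)
        Result result = table.get(get);
        return result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      }
    }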
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
index e4b177e3bca8..aad853f8c06c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
@@ -94,7 +94,8 @@ public Increment(byte[] row, long ts, NavigableMap<byte[], List<Cell>> familyMap
/**
* Add the specified KeyValue to this operation.
- * @param cell individual Cell n * @throws java.io.IOException e
+ * @param cell individual Cell
+ * @throws java.io.IOException e
*/
@Override
public Increment add(Cell cell) throws IOException {
@@ -123,7 +124,7 @@ public Increment addColumn(byte[] family, byte[] qualifier, long amount) {
}
/**
- * Gets the TimeRange used for this increment. n
+ * Gets the TimeRange used for this increment.
*/
public TimeRange getTimeRange() {
return this.tr;
@@ -141,7 +142,7 @@ public TimeRange getTimeRange() {
* This range is used as [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
- * @throws IOException if invalid time range n
+ * @throws IOException if invalid time range
*/
public Increment setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = TimeRange.between(minStamp, maxStamp);
@@ -211,8 +212,5 @@ public Map<byte[], NavigableMap<byte[], Long>> getFamilyMapOfLongs() {
}
-  /**
-   * n
-   */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
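
A counter-bump sketch for the Increment API above, assuming an open Table handle:

    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class IncrementSketch {
      static long bump(Table table) throws java.io.IOException {
        Increment increment = new Increment(Bytes.toBytes("row1"));
        increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
        Result result = table.increment(increment);
        return Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("hits")));
      }
    }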
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index fbb76ea4f65d..a9382f3a9bed 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -179,7 +179,7 @@ public byte[] getEndKey() {
}
/**
- * Get current table name of the region n
+ * Get current table name of the region
*/
@Override
public TableName getTable() {
@@ -231,7 +231,7 @@ public boolean isSplit() {
/**
* Change the split status flag.
- * @param split set split status n
+ * @param split set split status
*/
public MutableRegionInfo setSplit(boolean split) {
this.split = split;
@@ -252,7 +252,7 @@ public boolean isOffline() {
/**
* The parent of a region split is offline while split daughters hold references to the parent.
* Offlined regions are closed.
- * @param offLine Set online/offline status. n
+ * @param offLine Set online/offline status.
*/
public MutableRegionInfo setOffline(boolean offLine) {
this.offLine = offLine;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index df9e92f74dcd..0be0325d499f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -205,7 +205,7 @@ public Map<String, Object> getFingerprint() {
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
- * @param maxCols a limit on the number of columns output prior to truncation n
+ * @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
  public Map<String, Object> toMap(int maxCols) {
@@ -268,7 +268,7 @@ private static Map<String, Object> cellToStringMap(Cell c) {
}
/**
- * Set the durability for this mutation n
+ * Set the durability for this mutation
*/
public Mutation setDurability(Durability d) {
this.durability = d;
@@ -281,7 +281,7 @@ public Durability getDurability() {
}
/**
- * Method for retrieving the put's familyMap n
+ * Method for retrieving the put's familyMap
*/
  public NavigableMap<byte[], List<Cell>> getFamilyCellMap() {
return this.familyMap;
@@ -296,7 +296,7 @@ public boolean isEmpty() {
}
/**
- * Method for retrieving the delete's row n
+ * Method for retrieving the delete's row
*/
@Override
public byte[] getRow() {
@@ -304,7 +304,7 @@ public byte[] getRow() {
}
/**
- * Method for retrieving the timestamp. n
+ * Method for retrieving the timestamp.
*/
public long getTimestamp() {
return this.ts;
@@ -340,7 +340,7 @@ public List<UUID> getClusterIds() {
}
/**
- * Sets the visibility expression associated with cells in this Mutation. n
+ * Sets the visibility expression associated with cells in this Mutation.
*/
public Mutation setCellVisibility(CellVisibility expression) {
this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@@ -356,8 +356,8 @@ public CellVisibility getCellVisibility() throws DeserializationException {
}
/**
- * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
- * protocol buffer CellVisibility
+ * Create a protocol buffer CellVisibility based on a client CellVisibility.
+ * @return a protocol buffer CellVisibility
*/
static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
@@ -366,8 +366,8 @@ static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibilit
}
/**
- * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
- * client CellVisibility
+ * Convert a protocol buffer CellVisibility to a client CellVisibility
+ * @return the converted client CellVisibility
*/
private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
if (proto == null) return null;
@@ -375,8 +375,8 @@ private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto
}
/**
- * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
- * converted client CellVisibility n
+ * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
+ * @return the converted client CellVisibility
*/
private static CellVisibility toCellVisibility(byte[] protoBytes)
throws DeserializationException {
@@ -483,7 +483,7 @@ public long getTTL() {
/**
* Set the TTL desired for the result of the mutation, in milliseconds.
- * @param ttl the TTL desired for the result of the mutation, in milliseconds n
+ * @param ttl the TTL desired for the result of the mutation, in milliseconds
*/
public Mutation setTTL(long ttl) {
setAttribute(OP_ATTRIBUTE_TTL, Bytes.toBytes(ttl));
@@ -660,8 +660,9 @@ static byte[] checkRow(final byte[] row) {
}
/**
- * @param row Row to check nn * @throws IllegalArgumentException Thrown if row is
- * empty or null or > {@link HConstants#MAX_ROW_LENGTH}
+ * @param row Row to check
+ * @throws IllegalArgumentException Thrown if row is empty or null or >
+ * {@link HConstants#MAX_ROW_LENGTH}
* @return row
*/
static byte[] checkRow(final byte[] row, final int offset, final int length) {
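
A sketch of the Mutation attributes documented above (cell visibility and TTL) on a Put; the visibility expression is a placeholder:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.security.visibility.CellVisibility;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MutationAttrsSketch {
      static Put tagged() {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        put.setCellVisibility(new CellVisibility("(secret|topsecret)&!public"));
        put.setTTL(60_000L); // result of the mutation expires after a minute
        return put;
      }
    }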
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
index a517f0bb43a9..2cad5ef73255 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java
@@ -94,7 +94,7 @@ public String toString(int maxCols) {
/**
* Produces a string representation of this Operation. It defaults to a JSON representation, but
* falls back to a string representation of the fingerprint and details in the case of a JSON
- * encoding failure. n
+ * encoding failure.
*/
@Override
public String toString() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
index e34c9d6eacb4..33c1d853e1ae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java
@@ -106,7 +106,8 @@ protected long getAttributeSize() {
* This method allows you to set an identifier on an operation. The original motivation for this
* was to allow the identifier to be used in slow query logging, but this could obviously be
* useful in other places. One use of this could be to put a class.method identifier in here to
- * see where the slow query is coming from. n * id to set for the scan
+   * see where the slow query is coming from.
+   * @param id id to set for the scan
*/
public OperationWithAttributes setId(String id) {
setAttribute(ID_ATRIBUTE, Bytes.toBytes(id));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
index 5e821f075461..dc470069f90c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -61,7 +61,7 @@ public Put(byte[] row, long ts) {
}
/**
- * We make a copy of the passed in row key to keep local. nnn
+ * We make a copy of the passed in row key to keep local.
*/
public Put(byte[] rowArray, int rowOffset, int rowLength) {
this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@@ -89,7 +89,7 @@ public Put(ByteBuffer row) {
}
/**
- * We make a copy of the passed in row key to keep local. nnnn
+ * We make a copy of the passed in row key to keep local.
*/
public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) {
checkRow(rowArray, rowOffset, rowLength);
@@ -156,7 +156,7 @@ public Put(byte[] row, long ts, NavigableMap<byte[], List<Cell>> familyMap) {
* Add the specified column and value to this Put operation.
* @param family family name
* @param qualifier column qualifier
- * @param value column value n
+ * @param value column value
*/
public Put addColumn(byte[] family, byte[] qualifier, byte[] value) {
return addColumn(family, qualifier, this.ts, value);
@@ -168,7 +168,7 @@ public Put addColumn(byte[] family, byte[] qualifier, byte[] value) {
* @param family family name
* @param qualifier column qualifier
* @param ts version timestamp
- * @param value column value n
+ * @param value column value
*/
public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) {
if (ts < 0) {
@@ -186,7 +186,7 @@ public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) {
* @param family family name
* @param qualifier column qualifier
* @param ts version timestamp
- * @param value column value n
+ * @param value column value
*/
public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
if (ts < 0) {
@@ -201,7 +201,8 @@ public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer va
/**
* Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is
* immutable and its backing array will not be modified for the duration of this Put.
- * @param cell individual cell n * @throws java.io.IOException e
+ * @param cell individual cell
+ * @throws java.io.IOException e
*/
@Override
public Put add(Cell cell) throws IOException {
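
A usage sketch for the Put variants above, assuming an open Table handle:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class PutSketch {
      static void write(Table table) throws java.io.IOException {
        Put put = new Put(Bytes.toBytes("row1")); // row key is copied locally
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), 1234L, Bytes.toBytes("v"));
        table.put(put);
      }
    }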
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index cf892ae0d74b..944a70376829 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -47,9 +47,6 @@ public abstract class Query extends OperationWithAttributes {
  protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
protected Boolean loadColumnFamiliesOnDemand = null;
- /**
- * n
- */
public Filter getFilter() {
return filter;
}
@@ -67,7 +64,7 @@ public Query setFilter(Filter filter) {
}
/**
- * Sets the authorizations to be used by this Query n
+ * Sets the authorizations to be used by this Query
*/
public Query setAuthorizations(Authorizations authorizations) {
this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@@ -133,7 +130,7 @@ public Query setConsistency(Consistency consistency) {
* Specify region replica id where Query will fetch data from. Use this together with
* {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a
* specific replicaId.
- * Expert: This is an advanced API exposed. Only use it if you know what you are doing n
+ * Expert: This is an advanced API exposed. Only use it if you know what you are doing
*/
public Query setReplicaId(int Id) {
this.targetReplicaId = Id;
@@ -209,7 +206,7 @@ public boolean doLoadColumnFamiliesOnDemand() {
* Column Family time ranges take precedence over the global time range.
* @param cf the column family for which you want to restrict
* @param minStamp minimum timestamp value, inclusive
- * @param maxStamp maximum timestamp value, exclusive n
+ * @param maxStamp maximum timestamp value, exclusive
*/
public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
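
The Query knobs above are inherited by Get and Scan; a sketch combining them on a Scan:

    import org.apache.hadoop.hbase.client.Consistency;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.security.visibility.Authorizations;
    import org.apache.hadoop.hbase.util.Bytes;

    final class QuerySketch {
      static Scan restricted() {
        Scan scan = new Scan();
        scan.setAuthorizations(new Authorizations("secret"));
        scan.setConsistency(Consistency.TIMELINE); // allow replica reads
        // The per-family range takes precedence over the global time range.
        scan.setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0L, 1234L);
        return scan;
      }
    }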
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
index 58163a2d74a2..3f353b5799d4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
@@ -59,7 +59,8 @@ public static byte[] getEndKeyForDisplay(RegionInfo ri, Configuration conf) {
}
/**
- * Get the start key for display. Optionally hide the real start key. nn * @return the startkey
+ * Get the start key for display. Optionally hide the real start key.
+ * @return the startkey
*/
public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
@@ -68,15 +69,16 @@ public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) {
}
/**
- * Get the region name for display. Optionally hide the start key. nn * @return region name as
- * String
+ * Get the region name for display. Optionally hide the start key.
+ * @return region name as String
*/
public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuration conf) {
return Bytes.toStringBinary(getRegionNameForDisplay(ri, conf));
}
/**
- * Get the region name for display. Optionally hide the start key. nn * @return region name bytes
+ * Get the region name for display. Optionally hide the start key.
+ * @return region name bytes
*/
public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
index 1d6708b49d14..df1bfd61ca6a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
@@ -84,7 +84,7 @@ public static boolean isDefaultReplica(RegionInfo hri) {
}
/**
- * Removes the non-default replicas from the passed regions collection n
+ * Removes the non-default replicas from the passed regions collection
*/
  public static void removeNonDefaultRegions(Collection<RegionInfo> regions) {
    Iterator<RegionInfo> iterator = regions.iterator();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index a1ab6075fe7b..86cdaaeef3cc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -186,7 +186,7 @@ private Result(Cell[] cells, Boolean exists, boolean stale, boolean mayHaveMoreC
/**
* Method for retrieving the row key that corresponds to the row from which this Result was
- * created. n
+ * created.
*/
public byte[] getRow() {
if (this.row == null) {
@@ -227,8 +227,9 @@ public List<Cell> listCells() {
* or Get) only requested 1 version the list will contain at most 1 entry. If the column did not
* exist in the result set (either the column does not exist or the column was not selected in the
* query) the list will be empty. Also see getColumnLatest which returns just a Cell
- * @param family the family n * @return a list of Cells for this column or empty list if the
- * column did not exist in the result set
+ * @param family the family
+ * @return a list of Cells for this column or empty list if the column did not exist in the result
+ * set
*/
  public List<Cell> getColumnCells(byte[] family, byte[] qualifier) {
    List<Cell> result = new ArrayList<>();
@@ -324,7 +325,7 @@ protected int binarySearch(final Cell[] kvs, final byte[] family, final int foff
}
/**
- * The Cell for the most recent timestamp for a given column. nn *
+ * The Cell for the most recent timestamp for a given column.
* @return the Cell for the column, or null if no value exists in the row or none have been
* selected in the query (Get/Scan)
*/
@@ -677,8 +678,5 @@ public int size() {
}
-  /**
-   * n
-   */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
@@ -800,7 +800,8 @@ public static Result createCompleteResult(Iterable<Result> partialResults) throw
}
/**
- * Get total size of raw cells n * @return Total size.
+ * Get total size of raw cells
+ * @return Total size.
*/
public static long getTotalSizeOfCells(Result result) {
long size = 0;
@@ -816,7 +817,7 @@ public static long getTotalSizeOfCells(Result result) {
/**
* Copy another Result into this one. Needed for the old Mapred framework
* @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT (which is supposed
- * to be immutable). n
+ * to be immutable).
*/
public void copyFrom(Result other) {
checkReadonly();
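
A sketch walking a Result with the accessors above; note listCells() returns null for an empty Result:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    final class ResultSketch {
      static void dump(Result result) {
        if (result.isEmpty()) {
          return; // guard: listCells() would return null
        }
        for (Cell cell : result.listCells()) {
          System.out.println(Bytes.toStringBinary(CellUtil.cloneQualifier(cell))
            + " @ " + cell.getTimestamp());
        }
      }
    }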
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
index 74ff6de6f933..ebb27ceff751 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
@@ -86,7 +86,7 @@ public Result next() {
* setting (or hbase.client.scanner.caching in hbase-site.xml).
* @param nbRows number of rows to return
* @return Between zero and nbRows rowResults. Scan is done if returned array is of zero-length
- * (We never return null). n
+ * (We never return null).
*/
default Result[] next(int nbRows) throws IOException {
    List<Result> resultSets = new ArrayList<>(nbRows);
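
A batching sketch for next(int) above; a zero-length batch signals the end of the scan:

    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    final class ScannerSketch {
      static int countRows(Table table) throws java.io.IOException {
        int rows = 0;
        try (ResultScanner scanner = table.getScanner(new Scan())) {
          for (Result[] batch = scanner.next(100); batch.length > 0;
              batch = scanner.next(100)) {
            rows += batch.length;
          }
        }
        return rows;
      }
    }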
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
index be44c26190b1..0694d9e39e27 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
@@ -38,8 +38,8 @@ public class RowMutations implements Row {
/**
* Create a {@link RowMutations} with the specified mutations.
- * @param mutations the mutations to send n * @throws IOException if any row in mutations is
- * different to another
+ * @param mutations the mutations to send
+ * @throws IOException if any row in mutations is different to another
*/
  public static RowMutations of(List<? extends Mutation> mutations) throws IOException {
if (CollectionUtils.isEmpty(mutations)) {
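
A sketch of RowMutations.of above; all mutations must target the same row or of() throws:

    import java.util.Arrays;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class RowMutationsSketch {
      static void atomicSwap(Table table) throws java.io.IOException {
        byte[] row = Bytes.toBytes("row1");
        Put put = new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("new"),
          Bytes.toBytes("v"));
        Delete delete = new Delete(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("old"));
        table.mutateRow(RowMutations.of(Arrays.asList(put, delete))); // atomic per row
      }
    }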
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index bdca990ca549..b9adefb40cde 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -259,7 +259,7 @@ public boolean isGetScan() {
* Get all columns from the specified family.
*
* Overrides previous calls to addColumn for this family.
- * @param family family name n
+ * @param family family name
*/
public Scan addFamily(byte[] family) {
familyMap.remove(family);
@@ -272,7 +272,7 @@ public Scan addFamily(byte[] family) {
*
* Overrides previous calls to addFamily for this family.
* @param family family name
- * @param qualifier column qualifier n
+ * @param qualifier column qualifier
*/
public Scan addColumn(byte[] family, byte[] qualifier) {
    NavigableSet<byte[]> set = familyMap.get(family);
@@ -294,7 +294,7 @@ public Scan addColumn(byte[] family, byte[] qualifier) {
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @see #readAllVersions()
- * @see #readVersions(int) n
+ * @see #readVersions(int)
*/
public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = TimeRange.between(minStamp, maxStamp);
@@ -307,7 +307,7 @@ public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
   * number of versions beyond the default.
* @param timestamp version timestamp
* @see #readAllVersions()
- * @see #readVersions(int) n
+ * @see #readVersions(int)
*/
public Scan setTimestamp(long timestamp) {
try {
@@ -336,9 +336,9 @@ public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
* {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result
* unexpected or even undefined.
*
- * @param startRow row to start scanner at or after n * @throws IllegalArgumentException if
- * startRow does not meet criteria for a row key (when length exceeds
- * {@link HConstants#MAX_ROW_LENGTH})
+ * @param startRow row to start scanner at or after
+ * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+ * exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStartRow(byte[] startRow) {
return withStartRow(startRow, true);
@@ -355,9 +355,9 @@ public Scan withStartRow(byte[] startRow) {
* unexpected or even undefined.
*
* @param startRow row to start scanner at or after
- * @param inclusive whether we should include the start row when scan n * @throws
- * IllegalArgumentException if startRow does not meet criteria for a row key
- * (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
+ * @param inclusive whether we should include the start row when scan
+ * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+ * exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStartRow(byte[] startRow, boolean inclusive) {
if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
@@ -378,9 +378,9 @@ public Scan withStartRow(byte[] startRow, boolean inclusive) {
* {@link #setStartStopRowForPrefixScan(byte[])}. Doing so will make the scan result
* unexpected or even undefined.
*
- * @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does
- * not meet criteria for a row key (when length exceeds
- * {@link HConstants#MAX_ROW_LENGTH})
+ * @param stopRow row to end at (exclusive)
+ * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+ * exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStopRow(byte[] stopRow) {
return withStopRow(stopRow, false);
@@ -397,9 +397,9 @@ public Scan withStopRow(byte[] stopRow) {
* unexpected or even undefined.
*
* @param stopRow row to end at
- * @param inclusive whether we should include the stop row when scan n * @throws
- * IllegalArgumentException if stopRow does not meet criteria for a row key (when
- * length exceeds {@link HConstants#MAX_ROW_LENGTH})
+ * @param inclusive whether we should include the stop row when scan
+ * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+ * exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStopRow(byte[] stopRow, boolean inclusive) {
if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
@@ -427,11 +427,10 @@ public Scan withStopRow(byte[] stopRow, boolean inclusive) {
* This CANNOT be used in combination with withStartRow and/or withStopRow. Such
* a combination will yield unexpected and even undefined results.
*
- * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) n
- * * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method
- * is considered to be confusing as it does not use a {@link Filter} but uses
- * setting the startRow and stopRow instead. Use
- * {@link #setStartStopRowForPrefixScan(byte[])} instead.
+ * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.)
+ * @deprecated since 2.5.0, will be removed in 4.0.0. The name of this method is considered to be
+ * confusing as it does not use a {@link Filter} but uses setting the startRow and
+ * stopRow instead. Use {@link #setStartStopRowForPrefixScan(byte[])} instead.
*/
@Deprecated
public Scan setRowPrefixFilter(byte[] rowPrefix) {
@@ -454,7 +453,7 @@ public Scan setRowPrefixFilter(byte[] rowPrefix) {
* This CANNOT be used in combination with withStartRow and/or withStopRow. Such
* a combination will yield unexpected and even undefined results.
*
- * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.) n
+ * @param rowPrefix the prefix all rows must start with. (Set null to remove the filter.)
*/
public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) {
if (rowPrefix == null) {
@@ -468,7 +467,7 @@ public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) {
}
/**
- * Get all available versions. n
+ * Get all available versions.
*/
public Scan readAllVersions() {
this.maxVersions = Integer.MAX_VALUE;
@@ -477,7 +476,7 @@ public Scan readAllVersions() {
/**
* Get up to the specified number of versions of each column.
- * @param versions specified number of versions for each column n
+ * @param versions specified number of versions for each column
*/
public Scan readVersions(int versions) {
this.maxVersions = versions;
@@ -555,7 +554,7 @@ public Scan setFilter(Filter filter) {
/**
* Setting the familyMap
- * @param familyMap map of family to qualifier n
+ * @param familyMap map of family to qualifier
*/
  public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
this.familyMap = familyMap;
@@ -563,7 +562,7 @@ public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
}
/**
- * Getting the familyMap n
+ * Getting the familyMap
*/
  public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
return this.familyMap;
@@ -638,16 +637,12 @@ public int getCaching() {
return this.caching;
}
- /**
- * n
- */
+ /** Returns TimeRange */
public TimeRange getTimeRange() {
return this.tr;
}
- /**
- * n
- */
+ /** Returns RowFilter */
@Override
public Filter getFilter() {
return filter;
@@ -682,7 +677,7 @@ public boolean getCacheBlocks() {
* Set whether this scan is a reversed one
*
* This is false by default which means forward(normal) scan.
- * @param reversed if true, scan will be backward order n
+ * @param reversed if true, scan will be backward order
*/
public Scan setReversed(boolean reversed) {
this.reversed = reversed;
@@ -701,7 +696,8 @@ public boolean isReversed() {
* Setting whether the caller wants to see the partial results when server returns
* less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By
* default this value is false and the complete results will be assembled client side before being
- * delivered to the caller. nn * @see Result#mayHaveMoreCellsInRow()
+ * delivered to the caller.
+ * @see Result#mayHaveMoreCellsInRow()
* @see #setBatch(int)
*/
public Scan setAllowPartialResults(final boolean allowPartialResults) {
@@ -725,7 +721,7 @@ public Scan setLoadColumnFamiliesOnDemand(boolean value) {
/**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
- * and aggregation by debugging, logging, and administration tools. n
+ * and aggregation by debugging, logging, and administration tools.
*/
@Override
  public Map<String, Object> getFingerprint() {
@@ -747,7 +743,7 @@ public Map getFingerprint() {
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
- * @param maxCols a limit on the number of columns output prior to truncation n
+ * @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
  public Map<String, Object> toMap(int maxCols) {
@@ -904,7 +900,7 @@ public int getLimit() {
* reaches this value.
*
* This condition will be tested at last, after all other conditions such as stopRow, filter, etc.
- * @param limit the limit of rows for this scan n
+ * @param limit the limit of rows for this scan
*/
public Scan setLimit(int limit) {
this.limit = limit;
@@ -913,7 +909,7 @@ public Scan setLimit(int limit) {
/**
* Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
- * set {@code readType} to {@link ReadType#PREAD}. n
+ * set {@code readType} to {@link ReadType#PREAD}.
*/
public Scan setOneRowLimit() {
return setLimit(1).setReadType(ReadType.PREAD);
@@ -935,7 +931,7 @@ public ReadType getReadType() {
* Set the read type for this scan.
*
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
- * example, we will always use pread if this is a get scan. n
+ * example, we will always use pread if this is a get scan.
*/
public Scan setReadType(ReadType readType) {
this.readType = readType;
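
A sketch chaining the Scan setters documented above:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Scan.ReadType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class ScanSketch {
      static Scan bounded() {
        return new Scan()
          .withStartRow(Bytes.toBytes("row-aaa"), true)  // inclusive start
          .withStopRow(Bytes.toBytes("row-zzz"), false)  // exclusive stop
          .setLimit(10)                                  // tested after filters
          .setReadType(ReadType.PREAD);
      }
    }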
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 53c33a667c3c..7feefc831ca0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -116,7 +116,8 @@ default boolean[] exists(List<Get> gets) throws IOException {
* @param results Empty Object[], same size as actions. Provides access to partial results, in
* case an exception is thrown. A null in the result array means that the call for
* that action failed, even after retries. The order of the objects in the results
- * array corresponds to the order of actions in the request list. n * @since 0.90.0
+ * array corresponds to the order of actions in the request list.
+ * @since 0.90.0
*/
  default void batch(final List<? extends Row> actions, final Object[] results)
throws IOException, InterruptedException {
@@ -264,8 +265,8 @@ default void delete(Delete delete) throws IOException {
* @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
* {@link #put(List)} runs pre-flight validations on the input list on client. Currently
* {@link #delete(List)} doesn't run validations on the client, there is no need
- * currently, but this may change in the future. An * {@link IllegalArgumentException}
- * will be thrown in this case.
+ * currently, but this may change in the future. An {@link IllegalArgumentException} will
+ * be thrown in this case.
*/
  default void delete(List<Delete> deletes) throws IOException {
throw new NotImplementedException("Add an implementation!");
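
A sketch of the batch contract above; a null slot in results marks an action that failed after retries:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class BatchSketch {
      static Object[] mixed(Table table) throws Exception {
        List<Row> actions = Arrays.asList(
          new Get(Bytes.toBytes("row1")),
          new Put(Bytes.toBytes("row2")).addColumn(Bytes.toBytes("cf"),
            Bytes.toBytes("q"), Bytes.toBytes("v")));
        Object[] results = new Object[actions.size()];
        table.batch(actions, results); // results[i] aligns with actions.get(i)
        return results;
      }
    }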
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
index f500a1128a51..1c91819ac4b9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -153,7 +153,7 @@ public interface TableDescriptor {
String getRegionSplitPolicyClassName();
/**
- * Get the name of the table n
+ * Get the name of the table
*/
TableName getTableName();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index d0d3e36aa8ff..43ca935ffa17 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -886,7 +886,7 @@ public Durability getDurability() {
}
/**
- * Get the name of the table n
+ * Get the name of the table
*/
@Override
public TableName getTableName() {
@@ -1299,7 +1299,8 @@ public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) {
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
- * @param className Full class name. n * @return the modifyable TD
+ * @param className Full class name.
+ * @return the modifyable TD
*/
public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
@@ -1347,8 +1348,8 @@ public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) throws
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
- * @param specStr The Coprocessor specification all in in one String n * @return the modifyable
- * TD
+   * @param specStr The Coprocessor specification all in one String
+ * @return the modifyable TD
* @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed
* in HBase 3.0.0.
*/
@@ -1461,8 +1462,8 @@ private byte[] toByteArray() {
/**
* @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix
- * @return An instance of {@link ModifyableTableDescriptor} made from bytes n
- * * @see #toByteArray()
+ * @return An instance of {@link ModifyableTableDescriptor} made from bytes
+ * @see #toByteArray()
*/
private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
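
A sketch of setCoprocessor above; the coprocessor class name is hypothetical, and loadability is only checked at region open:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    final class DescriptorSketch {
      static TableDescriptor build() throws java.io.IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("my_table"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .setCoprocessor("org.example.MyRegionObserver") // hypothetical class
          .build();
      }
    }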
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
index 4e20302be45e..bf54f6e59042 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
@@ -42,7 +42,7 @@ public static enum State {
/**
* Covert from PB version of State
- * @param state convert from n
+ * @param state convert from
*/
public static State convert(HBaseProtos.TableState.State state) {
State ret;
@@ -66,7 +66,7 @@ public static State convert(HBaseProtos.TableState.State state) {
}
/**
- * Covert to PB version of State n
+ * Convert to PB version of State
*/
public HBaseProtos.TableState.State convert() {
HBaseProtos.TableState.State state;
@@ -140,7 +140,7 @@ public State getState() {
}
/**
- * Table name for state n
+ * Table name for state
*/
public TableName getTableName() {
return tableName;
@@ -168,7 +168,7 @@ public boolean inStates(State... states) {
}
/**
- * Covert to PB version of TableState n
+ * Convert to PB version of TableState
*/
public HBaseProtos.TableState convert() {
return HBaseProtos.TableState.newBuilder().setState(this.state.convert()).build();
@@ -177,7 +177,7 @@ public HBaseProtos.TableState convert() {
/**
* Covert from PB version of TableState
* @param tableName table this state of
- * @param tableState convert from n
+ * @param tableState the protobuf state to convert from
*/
public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) {
TableState.State state = State.convert(tableState.getState());
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
index ab5915ec9750..76a0d6addf3c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/backoff/ServerStatistics.java
@@ -33,7 +33,7 @@ public class ServerStatistics {
/**
* Good enough attempt. Last writer wins. It doesn't really matter which one gets to update, as
- * something gets set nn
+ * something gets set.
*/
public void update(byte[] region, RegionLoadStats currentStats) {
RegionStatistics regionStat = this.stats.get(region);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
index 7a266de33453..c705463b62c5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java
@@ -35,8 +35,8 @@ public class ServerSideScanMetrics {
private final Map<String, AtomicLong> counters = new HashMap<>();
/**
- * Create a new counter with the specified name n * @return {@link AtomicLong} instance for the
- * counter with counterName
+ * Create a new counter with the specified name
+ * @return {@link AtomicLong} instance for the counter with counterName
*/
protected AtomicLong createCounter(String counterName) {
AtomicLong c = new AtomicLong(0);
@@ -59,9 +59,6 @@ protected AtomicLong createCounter(String counterName) {
*/
public final AtomicLong countOfRowsScanned = createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME);
- /**
- * nn
- */
public void setCounter(String counterName, long value) {
AtomicLong c = this.counters.get(counterName);
if (c != null) {
@@ -69,23 +66,16 @@ public void setCounter(String counterName, long value) {
}
}
- /**
- * n * @return true if a counter exists with the counterName
- */
+ /** Returns true if a counter exists with the counterName */
public boolean hasCounter(String counterName) {
return this.counters.containsKey(counterName);
}
- /**
- * n * @return {@link AtomicLong} instance for this counter name, null if counter does not exist.
- */
+ /** Returns {@link AtomicLong} instance for this counter name, null if counter does not exist. */
public AtomicLong getCounter(String counterName) {
return this.counters.get(counterName);
}
- /**
- * nn
- */
public void addToCounter(String counterName, long delta) {
AtomicLong c = this.counters.get(counterName);
if (c != null) {
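The counter methods cleaned up above fit together as follows; a sketch that subclasses because createCounter is protected, with a made-up metric name:

    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics;

    public class DemoScanMetrics extends ServerSideScanMetrics {
      // Registers a new named counter in the internal map.
      final AtomicLong demo = createCounter("DEMO_COUNTER");

      static void exercise() {
        DemoScanMetrics m = new DemoScanMetrics();
        m.addToCounter("DEMO_COUNTER", 5); // silently no-ops for unknown names
        if (m.hasCounter("DEMO_COUNTER")) {
          long v = m.getCounter("DEMO_COUNTER").get(); // 5
          m.setCounter("DEMO_COUNTER", v * 2);
        }
      }
    }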
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
index 73e3b53eb369..c8eab212446a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java
@@ -52,36 +52,27 @@
public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Message, R extends Message> {
- /**
- * nnn * @return value of type T n
- */
+ /** Returns value of type T */
public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException;
- /**
- * nn * @return sum or non null value among (if either of them is null); otherwise returns a null.
- */
+ /** Returns the sum of l1 and l2, or the non-null operand if either is null; null if both are null. */
public abstract S add(S l1, S l2);
/**
- * returns the maximum value for this type T n
+ * Returns the maximum value for this type T.
*/
-
public abstract T getMaxValue();
public abstract T getMinValue();
- /**
- * nnn
- */
+ /** Returns the product of o1 and o2 */
public abstract S multiply(S o1, S o2);
- /**
- * nn
- */
+ /** Returns the incremented value of o */
public abstract S increment(S o);
/**
- * provides casting opportunity between the data types. nn
+ * Provides a casting opportunity between the data types.
*/
public abstract S castToReturnType(T o);
@@ -96,7 +87,7 @@ public abstract class ColumnInterpreter clazz, String s) {
super("Coprocessor [" + clazz.getName() + "]: " + s);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
index fd9936dc5025..5f2b98c83707 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ClientExceptionsUtil.java
@@ -150,7 +150,7 @@ public static boolean isConnectionException(Throwable e) {
/**
* Translates exception for preemptive fast fail checks.
* @param t exception to check
- * @return translated exception n
+ * @return translated exception
*/
public static Throwable translatePFFE(Throwable t) throws IOException {
if (t instanceof NoSuchMethodError) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
index ae15777a7f09..00774e37094f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.java
@@ -35,15 +35,9 @@ public FailedSanityCheckException() {
}
- /**
- * n
- */
public FailedSanityCheckException(String message) {
super(message);
}
- /**
- * nn
- */
public FailedSanityCheckException(String message, Throwable cause) {
super(message, cause);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
index e7c06d44aeff..1991100d0daa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
@@ -68,9 +68,7 @@ public ColumnValueFilter(final byte[] family, final byte[] qualifier, final Comp
this.comparator = Preconditions.checkNotNull(comparator, "Comparator should not be null");
}
- /**
- * n
- */
+ /** Returns the compare operator */
public CompareOperator getCompareOperator() {
return op;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
index a5f5efcaba17..8140793fc77a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java
@@ -216,8 +216,9 @@ public static Filter parseFrom(final byte[] pbBytes) throws DeserializationExcep
/**
* Concrete implementers can signal a failure condition in their code by throwing an
- * {@link IOException}. n * @return true if and only if the fields of the filter that are
- * serialized are equal to the corresponding fields in other. Used for testing.
+ * {@link IOException}.
+ * @return true if and only if the fields of the filter that are serialized are equal to the
+ * corresponding fields in other. Used for testing.
*/
abstract boolean areSerializedFieldsEqual(Filter other);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
index ff637c7f0527..713c4acb2700 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java
@@ -134,9 +134,9 @@ public byte[] toByteArray() throws IOException {
}
/**
- * Default implementation so that writers of custom filters aren't forced to implement. n
- * * @return true if and only if the fields of the filter that are serialized are equal to the
- * corresponding fields in other. Used for testing.
+ * Default implementation so that writers of custom filters aren't forced to implement.
+ * @return true if and only if the fields of the filter that are serialized are equal to the
+ * corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter other) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
index 3b7c136c6e1f..cb42072e1d80 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java
@@ -84,7 +84,7 @@ public FilterList(final List<Filter> filters) {
/**
* Constructor that takes a var arg number of {@link Filter}s. The default operator MUST_PASS_ALL
- * is assumed. n
+ * is assumed.
*/
public FilterList(final Filter... filters) {
this(Operator.MUST_PASS_ALL, Arrays.asList(filters));
@@ -108,14 +108,14 @@ public FilterList(final Operator operator, final Filter... filters) {
}
/**
- * Get the operator. n
+ * Get the operator.
*/
public Operator getOperator() {
return operator;
}
/**
- * Get the filters. n
+ * Get the filters.
*/
public List<Filter> getFilters() {
return filterListBase.getFilters();
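The FilterList surface above in one sketch; the child filters are arbitrary and only chosen to show the operator semantics:

    import java.util.List;
    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.FilterList.Operator;
    import org.apache.hadoop.hbase.filter.RandomRowFilter;

    public class FilterListSketch {
      static FilterList build() {
        // MUST_PASS_ONE is a logical OR of the children; the var-args
        // constructor without an operator defaults to MUST_PASS_ALL.
        FilterList list = new FilterList(Operator.MUST_PASS_ONE,
          new RandomRowFilter(0.25f), new RandomRowFilter(0.75f));
        Operator op = list.getOperator();      // MUST_PASS_ONE
        List<Filter> children = list.getFilters();
        return list;
      }
    }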
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
index 4a15af277266..9b0fd99dc94f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterListBase.java
@@ -92,7 +92,7 @@ protected int compareCell(Cell a, Cell b) {
* the current child, we should set the traverse result (transformed cell) of previous node(s) as
* the initial value. (HBASE-18879).
* @param c The cell in question.
- * @return the transformed cell. n
+ * @return the transformed cell.
*/
@Override
public Cell transformCell(Cell c) throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
index 1506eca5df66..2feac5527f7a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ -116,7 +116,8 @@ private void preprocessSearchKey(Pair<byte[], byte[]> p) {
/**
* We need to preprocess mask array, as since we treat 2's as unfixed positions and -1 (0xff) as
- * fixed positions n * @return mask array
+ * fixed positions
+ * @return mask array
*/
private byte[] preprocessMask(byte[] mask) {
if (!UNSAFE_UNALIGNED) {
@@ -588,8 +589,8 @@ static byte[] getNextForFuzzyRule(boolean reverse, byte[] row, int offset, int l
/**
* For forward scanner, next cell hint should not contain any trailing zeroes unless they are part
- * of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01' nn * @param
- * toInc - position of incremented byte
+ * of fuzzyKeyMeta; hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01'.
+ * @param toInc position of the incremented byte
* @return trimmed version of result
*/
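To ground the mask discussion above: in the public constructor a 0 byte marks a fixed position and a non-zero byte an unfixed one; the 2/-1 encoding in the javadoc is the preprocessed internal form. A sketch with a made-up row format:

    import java.util.Collections;
    import org.apache.hadoop.hbase.filter.FuzzyRowFilter;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Pair;

    public class FuzzyRowSketch {
      static FuzzyRowFilter build() {
        // Row format "????_99": first four bytes unfixed, suffix "_99" fixed.
        byte[] key = Bytes.toBytes("0000_99");
        byte[] mask = { 1, 1, 1, 1, 0, 0, 0 };
        return new FuzzyRowFilter(Collections.singletonList(new Pair<>(key, mask)));
      }
    }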
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
index 099f38026feb..1fdf051941a4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
@@ -37,7 +37,7 @@ public class RandomRowFilter extends FilterBase {
protected boolean filterOutRow;
/**
- * Create a new filter with a specified chance for a row to be included. n
+ * Create a new filter with a specified chance for a row to be included.
*/
public RandomRowFilter(float chance) {
this.chance = chance;
@@ -49,7 +49,7 @@ public float getChance() {
}
/**
- * Set the chance that a row is included. n
+ * Set the chance that a row is included.
*/
public void setChance(float chance) {
this.chance = chance;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
index 14bdc04a754c..3293a2106a9d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
@@ -71,7 +71,7 @@ public SingleColumnValueExcludeFilter(byte[] family, byte[] qualifier, CompareOp
}
/**
- * Constructor for protobuf deserialization only. nnnnnn
+ * Constructor for protobuf deserialization only.
*/
protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier,
final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index 7be5ce914058..43b3316db779 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -111,7 +111,7 @@ public SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
}
/**
- * Constructor for protobuf deserialization only. nnnnnn
+ * Constructor for protobuf deserialization only.
*/
protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
index b3f821d75e49..235691ef7cb1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
@@ -54,7 +54,7 @@ public class TimestampsFilter extends FilterBase {
long minTimestamp = Long.MAX_VALUE;
/**
- * Constructor for filter that retains only the specified timestamps in the list. n
+ * Constructor for filter that retains only the specified timestamps in the list.
*/
public TimestampsFilter(List<Long> timestamps) {
this(timestamps, false);
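The TimestampsFilter constructor above in use; a short sketch keeping only cells whose timestamp is 100 or 200:

    import java.util.Arrays;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.TimestampsFilter;

    public class TimestampsSketch {
      static Scan build() {
        return new Scan().setFilter(new TimestampsFilter(Arrays.asList(100L, 200L)));
      }
    }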
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
index b2b3698aa2cb..e7364ca3b429 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CellBlockBuilder.java
@@ -104,9 +104,10 @@ public int size() {
/**
* Puts CellScanner Cells into a cell block using passed in codec and/or
- * compressor. nnn * @return Null or byte buffer filled with a cellblock filled with
- * passed-in Cells encoded using passed in codec and/or compressor; the
- * returned buffer has been flipped and is ready for reading. Use limit to find total size. n
+ * compressor.
+ * @return Null or a byte buffer filled with a cellblock of the passed-in Cells encoded
+ * using the passed-in codec and/or compressor; the returned buffer has been
+ * flipped and is ready for reading. Use limit to find the total size.
*/
public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner) throws IOException {
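The buildCellBlock contract spelled out above, as a usage sketch; the builder, codec and cells are assumed to come from elsewhere, and a null compressor is taken here to mean no compression:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.hbase.CellScanner;
    import org.apache.hadoop.hbase.codec.Codec;
    import org.apache.hadoop.hbase.ipc.CellBlockBuilder;

    public class CellBlockSketch {
      static int encodedSize(CellBlockBuilder builder, Codec codec, CellScanner cells)
          throws IOException {
        ByteBuffer block = builder.buildCellBlock(codec, null, cells);
        // Per the javadoc above: may be null, otherwise flipped and ready to read.
        return block == null ? 0 : block.limit();
      }
    }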
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
index d63f28cdab84..155c721b98a5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/LeaseException.java
@@ -34,8 +34,5 @@ public LeaseException() {
}
- /**
- * n
- */
public LeaseException(String message) {
super(message);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
index 2e2a3a895cea..c0330034810f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedLogCloseException.java
@@ -33,8 +33,5 @@ public FailedLogCloseException() {
}
- /**
- * n
- */
public FailedLogCloseException(String msg) {
super(msg);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
index feab0b07f2f6..a2a43203b647 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FailedSyncBeforeLogCloseException.java
@@ -32,8 +32,5 @@ public FailedSyncBeforeLogCloseException() {
}
- /**
- * n
- */
public FailedSyncBeforeLogCloseException(String msg) {
super(msg);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
index 92ca03945aae..87b2287a6014 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java
@@ -50,7 +50,7 @@ public abstract class AbstractHBaseSaslRpcClient {
* @param token token to use if needed by the authentication method
* @param serverAddr the address of the hbase service
* @param securityInfo the security details for the remote hbase service
- * @param fallbackAllowed does the client allow fallback to simple authentication n
+ * @param fallbackAllowed does the client allow fallback to simple authentication
*/
protected AbstractHBaseSaslRpcClient(Configuration conf,
SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
@@ -66,7 +66,7 @@ protected AbstractHBaseSaslRpcClient(Configuration conf,
* @param serverAddr the address of the hbase service
* @param securityInfo the security details for the remote hbase service
* @param fallbackAllowed does the client allow fallback to simple authentication
- * @param rpcProtection the protection level ("authentication", "integrity" or "privacy") n
+ * @param rpcProtection the protection level ("authentication", "integrity" or "privacy")
*/
protected AbstractHBaseSaslRpcClient(Configuration conf,
SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index 5a816877ba84..6c755f9a94cd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -62,7 +62,7 @@ private EncryptionUtil() {
* @param conf configuration
* @param key the raw key bytes
* @param algorithm the algorithm to use with this key material
- * @return the encrypted key bytes n
+ * @return the encrypted key bytes
*/
public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm)
throws IOException {
@@ -115,7 +115,7 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws
* @param conf configuration
* @param subject subject key alias
* @param value the encrypted key bytes
- * @return the raw key bytes nn
+ * @return the raw key bytes
*/
public static Key unwrapKey(Configuration conf, String subject, byte[] value)
throws IOException, KeyException {
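The wrap/unwrap pair documented above as a round trip; a sketch assuming a key provider is configured (hbase.crypto.keyprovider) and AES is available, with an illustrative subject alias:

    import java.security.Key;
    import javax.crypto.spec.SecretKeySpec;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.security.EncryptionUtil;

    public class KeyWrapSketch {
      static void roundTrip() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Key key = new SecretKeySpec(new byte[16], "AES"); // demo key material
        byte[] wrapped = EncryptionUtil.wrapKey(conf, "table-key-alias", key);
        Key unwrapped = EncryptionUtil.unwrapKey(conf, "table-key-alias", wrapped);
      }
    }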
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
index 93ad9245f656..0394bb0f2a3b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java
@@ -86,7 +86,7 @@ private static void readStatus(DataInputStream inStream) throws IOException {
* Do client side SASL authentication with server via the given InputStream and OutputStream
* @param inS InputStream to use
* @param outS OutputStream to use
- * @return true if connection is set up, or false if needs to switch to simple Auth. n
+ * @return true if connection is set up, or false if needs to switch to simple Auth.
*/
public boolean saslConnect(InputStream inS, OutputStream outS) throws IOException {
DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
@@ -185,7 +185,7 @@ public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, Config
/**
* Get a SASL wrapped InputStream. Can be called only after saslConnect() has been called.
- * @return a SASL wrapped InputStream n
+ * @return a SASL wrapped InputStream
*/
public InputStream getInputStream() throws IOException {
if (!saslClient.isComplete()) {
@@ -248,7 +248,7 @@ private void readNextRpcPacket() throws IOException {
/**
* Get a SASL wrapped OutputStream. Can be called only after saslConnect() has been called.
- * @return a SASL wrapped OutputStream n
+ * @return a SASL wrapped OutputStream
*/
public OutputStream getOutputStream() throws IOException {
if (!saslClient.isComplete()) {
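The ordering the javadoc above insists on (wrap the streams only after saslConnect) in sketch form; client construction and the raw socket streams are assumed to exist:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;

    public class SaslConnectSketch {
      static void connect(HBaseSaslRpcClient client, InputStream rawIn, OutputStream rawOut)
          throws IOException {
        if (client.saslConnect(rawIn, rawOut)) {
          InputStream in = client.getInputStream();   // SASL-wrapped, valid only now
          OutputStream out = client.getOutputStream();
        } // false means: switch to simple authentication
      }
    }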
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
index e30041d46c48..2ea60f8ed571 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
@@ -45,7 +45,7 @@ public class AccessControlClient {
/**
* Return true if authorization is supported and enabled
* @param connection The connection to use
- * @return true if authorization is supported and enabled, false otherwise n
+ * @return true if authorization is supported and enabled, false otherwise
*/
public static boolean isAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
@@ -55,7 +55,7 @@ public static boolean isAuthorizationEnabled(Connection connection) throws IOExc
/**
* Return true if cell authorization is supported and enabled
* @param connection The connection to use
- * @return true if cell authorization is supported and enabled, false otherwise n
+ * @return true if cell authorization is supported and enabled, false otherwise
*/
public static boolean isCellAuthorizationEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
@@ -146,7 +146,7 @@ private static void grant(Connection connection, final String userName,
/**
* Grant global permissions for the specified user. If permissions for the specified user exists,
- * later granted permissions will override previous granted permissions. nnnn
+ * later granted permissions will override previously granted permissions.
*/
public static void grant(Connection connection, final String userName,
final Permission.Action... actions) throws Throwable {
@@ -162,7 +162,7 @@ public static boolean isAccessControllerRunning(Connection connection)
/**
* Revokes the permission on the table
- * @param connection The Connection instance to use nnnnnn
+ * @param connection The Connection instance to use
*/
public static void revoke(Connection connection, final TableName tableName, final String username,
final byte[] family, final byte[] qualifier, final Permission.Action... actions)
@@ -173,7 +173,7 @@ public static void revoke(Connection connection, final TableName tableName, fina
/**
* Revokes the permission on the namespace for the specified user.
- * @param connection The Connection instance to use nnnn
+ * @param connection The Connection instance to use
*/
public static void revoke(Connection connection, final String namespace, final String userName,
final Permission.Action... actions) throws Throwable {
@@ -197,7 +197,7 @@ public static void revoke(Connection connection, final String userName,
* along with the list of superusers would be returned. Else, no rows get returned.
* @param connection The Connection instance to use
* @param tableRegex The regular expression string to match against
- * @return List of UserPermissions n
+ * @return List of UserPermissions
*/
public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex)
throws Throwable {
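The AccessControlClient calls touched above, tied together in one sketch; the user name is illustrative, and the broad signature mirrors the API's declared throws Throwable:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class AclSketch {
      static void exercise() throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          if (AccessControlClient.isAuthorizationEnabled(conn)) {
            AccessControlClient.grant(conn, "demo_user", Permission.Action.READ);
            List<UserPermission> perms = AccessControlClient.getUserPermissions(conn, ".*");
          }
        }
      }
    }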
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
index e0eb79aa0257..970c3f2b04d4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
@@ -450,8 +450,8 @@ public static UserPermission toUserPermission(AccessControlProtos.UserPermission
* It's also called by the shell, in case you want to find references.
* @param protocol the AccessControlService protocol proxy
* @param userShortName the short name of the user to grant permissions
- * @param actions the permissions to be granted n * @deprecated Use
- * {@link Admin#grant(UserPermission, boolean)} instead.
+ * @param actions the permissions to be granted
+ * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
@@ -478,8 +478,8 @@ public static void grant(RpcController controller,
* @param tableName optional table name
* @param f optional column family
* @param q optional qualifier
- * @param actions the permissions to be granted n * @deprecated Use
- * {@link Admin#grant(UserPermission, boolean)} instead.
+ * @param actions the permissions to be granted
+ * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
@@ -504,8 +504,8 @@ public static void grant(RpcController controller,
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
* @param namespace the short name of the user to grant permissions
- * @param actions the permissions to be granted n * @deprecated Use
- * {@link Admin#grant(UserPermission, boolean)} instead.
+ * @param actions the permissions to be granted
+ * @deprecated Use {@link Admin#grant(UserPermission, boolean)} instead.
*/
@Deprecated
public static void grant(RpcController controller,
@@ -621,9 +621,8 @@ public static List<UserPermission> getUserPermissions(RpcController controller,
* A utility used to get user's global permissions based on the specified user name.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
- * @param userName User name, if empty then all user permissions will be retrieved. n
- * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
- * instead.
+ * @param userName User name, if empty then all user permissions will be retrieved.
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -651,8 +650,8 @@ public static List<UserPermission> getUserPermissions(RpcController controller,
* It's also called by the shell, in case you want to find references.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
- * @param t optional table name n * @deprecated Use
- * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+ * @param t optional table name
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -668,9 +667,8 @@ public static List<UserPermission> getUserPermissions(RpcController controller,
* @param t optional table name
* @param columnFamily Column family
* @param columnQualifier Column qualifier
- * @param userName User name, if empty then all user permissions will be retrieved. n
- * * @deprecated Use
- * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+ * @param userName User name, if empty then all user permissions will be retrieved.
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -708,8 +706,8 @@ public static List<UserPermission> getUserPermissions(RpcController controller,
* It's also called by the shell, in case you want to find references.
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
- * @param namespace name of the namespace n * @deprecated Use
- * {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
+ * @param namespace name of the namespace
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -722,9 +720,8 @@ public static List<UserPermission> getUserPermissions(RpcController controller,
* @param controller RpcController
* @param protocol the AccessControlService protocol proxy
* @param namespace name of the namespace
- * @param userName User name, if empty then all user permissions will be retrieved. n
- * * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)}
- * instead.
+ * @param userName User name, if empty then all user permissions will be retrieved.
+ * @deprecated Use {@link Admin#getUserPermissions(GetUserPermissionsRequest)} instead.
*/
@Deprecated
public static List<UserPermission> getUserPermissions(RpcController controller,
@@ -762,8 +759,8 @@ public static List<UserPermission> getUserPermissions(RpcController controller,
* will not be considered if columnFamily is passed as null or empty.
* @param userName User name, it shouldn't be null or empty.
* @param actions Actions
- * @return true if access allowed, otherwise false n * @deprecated Use
- * {@link Admin#hasUserPermissions(String, List)} instead.
+ * @return true if access allowed, otherwise false
+ * @deprecated Use {@link Admin#hasUserPermissions(String, List)} instead.
*/
@Deprecated
public static boolean hasPermission(RpcController controller,
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
index 7bae98d59bab..931f976f2f43 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
@@ -55,7 +55,7 @@ public class VisibilityClient {
/**
* Return true if cell visibility features are supported and enabled
* @param connection The connection to use
- * @return true if cell visibility features are supported and enabled, false otherwise n
+ * @return true if cell visibility features are supported and enabled, false otherwise
*/
public static boolean isCellVisibilityEnabled(Connection connection) throws IOException {
return connection.getAdmin().getSecurityCapabilities()
@@ -63,7 +63,7 @@ public static boolean isCellVisibilityEnabled(Connection connection) throws IOEx
}
/**
- * Utility method for adding label to the system. nnnn
+ * Utility method for adding a label to the system.
*/
public static VisibilityLabelsResponse addLabel(Connection connection, final String label)
throws Throwable {
@@ -71,7 +71,7 @@ public static VisibilityLabelsResponse addLabel(Connection connection, final Str
}
/**
- * Utility method for adding labels to the system. nnnn
+ * Utility method for adding labels to the system.
*/
public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels)
throws Throwable {
@@ -109,7 +109,7 @@ public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOE
}
/**
- * Sets given labels globally authorized for the user. nnnnn
+ * Sets the given labels as globally authorized for the user.
*/
public static VisibilityLabelsResponse setAuths(Connection connection, final String[] auths,
final String user) throws Throwable {
@@ -154,7 +154,7 @@ public GetAuthsResponse call(VisibilityLabelsService service) throws IOException
* Retrieve the list of visibility labels defined in the system.
* @param connection The Connection instance to use.
* @param regex The regular expression to filter which labels are returned.
- * @return labels The list of visibility labels defined in the system. n
+ * @return labels The list of visibility labels defined in the system.
*/
public static ListLabelsResponse listLabels(Connection connection, final String regex)
throws Throwable {
@@ -190,7 +190,7 @@ public ListLabelsResponse call(VisibilityLabelsService service) throws IOExcepti
}
/**
- * Removes given labels from user's globally authorized list of labels. nnnnn
+ * Removes the given labels from the user's globally authorized list of labels.
*/
public static VisibilityLabelsResponse clearAuths(Connection connection, final String[] auths,
final String user) throws Throwable {
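The VisibilityClient utilities above in one sketch; the labels and user are made up, every call declares throws Throwable, and the generated-protobuf import path is an assumption:

    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.ListLabelsResponse;
    import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

    public class VisibilitySketch {
      static void exercise(Connection conn) throws Throwable {
        VisibilityClient.addLabels(conn, new String[] { "secret", "public" });
        VisibilityClient.setAuths(conn, new String[] { "secret" }, "demo_user");
        ListLabelsResponse labels = VisibilityClient.listLabels(conn, null); // null regex = all
        VisibilityClient.clearAuths(conn, new String[] { "secret" }, "demo_user");
      }
    }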
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 35c361be5629..079ddbb4218e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -562,7 +562,7 @@ public static ClientProtos.MutationProto.Durability toDurability(final Durabilit
/**
* Convert a protocol buffer Get to a client Get
* @param proto the protocol buffer Get to convert
- * @return the converted client Get n
+ * @return the converted client Get
*/
public static Get toGet(final ClientProtos.Get proto) throws IOException {
if (proto == null) return null;
@@ -647,7 +647,7 @@ public static ClientProtos.Consistency toConsistency(Consistency consistency) {
/**
* Convert a protocol buffer Mutate to a Put.
* @param proto The protocol buffer MutationProto to convert
- * @return A client Put. n
+ * @return A client Put.
*/
public static Put toPut(final MutationProto proto) throws IOException {
return toPut(proto, null);
@@ -657,7 +657,7 @@ public static Put toPut(final MutationProto proto) throws IOException {
* Convert a protocol buffer Mutate to a Put.
* @param proto The protocol buffer MutationProto to convert
* @param cellScanner If non-null, the Cell data that goes with this proto.
- * @return A client Put. n
+ * @return A client Put.
*/
public static Put toPut(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@@ -741,7 +741,7 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner
/**
* Convert a protocol buffer Mutate to a Delete
* @param proto the protocol buffer Mutate to convert
- * @return the converted client Delete n
+ * @return the converted client Delete
*/
public static Delete toDelete(final MutationProto proto) throws IOException {
return toDelete(proto, null);
@@ -751,7 +751,7 @@ public static Delete toDelete(final MutationProto proto) throws IOException {
* Convert a protocol buffer Mutate to a Delete
* @param proto the protocol buffer Mutate to convert
* @param cellScanner if non-null, the data that goes with this delete.
- * @return the converted client Delete n
+ * @return the converted client Delete
*/
public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@@ -920,7 +920,7 @@ public static Increment toIncrement(final MutationProto proto, final CellScanner
/**
* Convert a MutateRequest to Mutation
* @param proto the protocol buffer Mutate to convert
- * @return the converted Mutation n
+ * @return the converted Mutation
*/
public static Mutation toMutation(final MutationProto proto) throws IOException {
MutationType type = proto.getMutateType();
@@ -968,7 +968,7 @@ public static Scan.ReadType toReadType(ClientProtos.Scan.ReadType readType) {
/**
* Convert a client Scan to a protocol buffer Scan
* @param scan the client Scan to convert
- * @return the converted protocol buffer Scan n
+ * @return the converted protocol buffer Scan
*/
public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
@@ -1062,7 +1062,7 @@ public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
/**
* Convert a protocol buffer Scan to a client Scan
* @param proto the protocol buffer Scan to convert
- * @return the converted client Scan n
+ * @return the converted client Scan
*/
public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
byte[] startRow = HConstants.EMPTY_START_ROW;
@@ -1182,7 +1182,7 @@ public static Cursor toCursor(ClientProtos.Cursor cursor) {
/**
* Create a protocol buffer Get based on a client Get.
* @param get the client Get
- * @return a protocol buffer Get n
+ * @return a protocol buffer Get
*/
public static ClientProtos.Get toGet(final Get get) throws IOException {
ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
@@ -1248,7 +1248,8 @@ public static MutationProto toMutation(final MutationType type, final Mutation m
}
/**
- * Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n
+ * Create a protocol buffer Mutate based on a client Mutation
+ * @return a protobuf'd Mutation
*/
public static MutationProto toMutation(final MutationType type, final Mutation mutation,
final long nonce) throws IOException {
@@ -1297,8 +1298,8 @@ public static MutationProto toMutation(final MutationType type, final Mutation m
/**
* Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
- * Understanding is that the Cell will be transported other than via protobuf. nnn * @return a
- * protobuf'd Mutation n
+ * Understanding is that the Cell will be transported other than via protobuf.
+ * @return a protobuf'd Mutation
*/
public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation,
final MutationProto.Builder builder) throws IOException {
@@ -1307,8 +1308,8 @@ public static MutationProto toMutationNoData(final MutationType type, final Muta
/**
* Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
- * Understanding is that the Cell will be transported other than via protobuf. nn * @return a
- * protobuf'd Mutation n
+ * Understanding is that the Cell will be transported other than via protobuf.
+ * @return a protobuf'd Mutation
*/
public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation)
throws IOException {
@@ -1334,8 +1335,8 @@ public static MutationProto toMutationNoData(final MutationType type, final Muta
/**
* Code shared by {@link #toMutation(MutationType, Mutation)} and
- * {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd
- * Mutation.
+ * {@link #toMutationNoData(MutationType, Mutation)}
+ * @return A partly-filled out protobuf'd Mutation.
*/
private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
final Mutation mutation, MutationProto.Builder builder) {
@@ -1468,7 +1469,7 @@ public static Result toResult(final ClientProtos.Result proto, boolean decodeTag
* Convert a protocol buffer Result to a client Result
* @param proto the protocol buffer Result to convert
* @param scanner Optional cell scanner.
- * @return the converted client Result n
+ * @return the converted client Result
*/
public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner)
throws IOException {
@@ -1583,8 +1584,8 @@ public static FilterProtos.Filter toFilter(Filter filter) throws IOException {
}
/**
- * Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer
- * DeleteType n
+ * Convert a delete KeyValue type to protocol buffer DeleteType.
+ * @return protocol buffer DeleteType
*/
public static DeleteType toDeleteType(KeyValue.Type type) throws IOException {
switch (type) {
@@ -1604,7 +1605,7 @@ public static DeleteType toDeleteType(KeyValue.Type type) throws IOException {
/**
* Convert a protocol buffer DeleteType to delete KeyValue type.
* @param type The DeleteType
- * @return The type. n
+ * @return The type.
*/
public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException {
switch (type) {
@@ -1690,7 +1691,7 @@ public static GetRegionInfoRequest getGetRegionInfoRequest(final byte[] regionNa
}
/**
- * A helper to close a region given a region name using admin protocol. nnn
+ * A helper to close a region given a region name using admin protocol.
*/
public static void closeRegion(final RpcController controller,
final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName)
@@ -1705,7 +1706,7 @@ public static void closeRegion(final RpcController controller,
}
/**
- * A helper to warmup a region given a region name using admin protocol nn *
+ * A helper to warm up a region given a region name using admin protocol.
*/
public static void warmupRegion(final RpcController controller,
final AdminService.BlockingInterface admin,
@@ -1722,7 +1723,7 @@ public static void warmupRegion(final RpcController controller,
}
/**
- * A helper to open a region using admin protocol. nnn
+ * A helper to open a region using admin protocol.
*/
public static void openRegion(final RpcController controller,
final AdminService.BlockingInterface admin, ServerName server,
@@ -1736,8 +1737,8 @@ public static void openRegion(final RpcController controller,
}
/**
- * A helper to get the all the online regions on a region server using admin protocol. n * @return
- * a list of online region info n
+ * A helper to get all the online regions on a region server using admin protocol.
+ * @return a list of online region info
*/
public static List<org.apache.hadoop.hbase.client.RegionInfo>
getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException {
@@ -2069,7 +2070,8 @@ public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType,
/**
* Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to
* NOT print out data both because it can be big but also so we do not have data in our logs. Use
- * judiciously. n * @return toString of passed m
+ * judiciously.
+ * @return toString of passed m
*/
public static String getShortTextFormat(Message m) {
if (m == null) return "null";
@@ -2216,8 +2218,8 @@ public static TableName[] getTableNameArray(List<HBaseProtos.TableName> tableNam
}
/**
- * Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
- * client CellVisibility
+ * Convert a protocol buffer CellVisibility to a client CellVisibility
+ * @return the converted client CellVisibility
*/
public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
if (proto == null) return null;
@@ -2225,8 +2227,8 @@ public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto)
}
/**
- * Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
- * converted client CellVisibility n
+ * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
+ * @return the converted client CellVisibility
*/
public static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
if (protoBytes == null) return null;
@@ -2242,8 +2244,8 @@ public static CellVisibility toCellVisibility(byte[] protoBytes) throws Deserial
}
/**
- * Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
- * protocol buffer CellVisibility
+ * Create a protocol buffer CellVisibility based on a client CellVisibility.
+ * @return a protocol buffer CellVisibility
*/
public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
@@ -2252,8 +2254,8 @@ public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVi
}
/**
- * Convert a protocol buffer Authorizations to a client Authorizations n * @return the converted
- * client Authorizations
+ * Convert a protocol buffer Authorizations to a client Authorizations
+ * @return the converted client Authorizations
*/
public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) {
if (proto == null) return null;
@@ -2261,8 +2263,8 @@ public static Authorizations toAuthorizations(ClientProtos.Authorizations proto)
}
/**
- * Convert a protocol buffer Authorizations bytes to a client Authorizations n * @return the
- * converted client Authorizations n
+ * Convert a protocol buffer Authorizations bytes to a client Authorizations
+ * @return the converted client Authorizations
*/
public static Authorizations toAuthorizations(byte[] protoBytes) throws DeserializationException {
if (protoBytes == null) return null;
@@ -2278,8 +2280,8 @@ public static Authorizations toAuthorizations(byte[] protoBytes) throws Deserial
}
/**
- * Create a protocol buffer Authorizations based on a client Authorizations. n * @return a
- * protocol buffer Authorizations
+ * Create a protocol buffer Authorizations based on a client Authorizations.
+ * @return a protocol buffer Authorizations
*/
public static ClientProtos.Authorizations toAuthorizations(Authorizations authorizations) {
ClientProtos.Authorizations.Builder builder = ClientProtos.Authorizations.newBuilder();
@@ -2290,8 +2292,8 @@ public static ClientProtos.Authorizations toAuthorizations(Authorizations author
}
/**
- * Convert a protocol buffer TimeUnit to a client TimeUnit n * @return the converted client
- * TimeUnit
+ * Convert a protocol buffer TimeUnit to a client TimeUnit
+ * @return the converted client TimeUnit
*/
public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) {
switch (proto) {
@@ -2314,8 +2316,8 @@ public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) {
}
/**
- * Convert a client TimeUnit to a protocol buffer TimeUnit n * @return the converted protocol
- * buffer TimeUnit
+ * Convert a client TimeUnit to a protocol buffer TimeUnit
+ * @return the converted protocol buffer TimeUnit
*/
public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) {
switch (timeUnit) {
@@ -2338,8 +2340,8 @@ public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) {
}
/**
- * Convert a protocol buffer ThrottleType to a client ThrottleType n * @return the converted
- * client ThrottleType
+ * Convert a protocol buffer ThrottleType to a client ThrottleType
+ * @return the converted client ThrottleType
*/
public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) {
switch (proto) {
@@ -2367,8 +2369,8 @@ public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto)
}
/**
- * Convert a client ThrottleType to a protocol buffer ThrottleType n * @return the converted
- * protocol buffer ThrottleType
+ * Convert a client ThrottleType to a protocol buffer ThrottleType
+ * @return the converted protocol buffer ThrottleType
*/
public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) {
switch (type) {
@@ -2396,8 +2398,8 @@ public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType ty
}
/**
- * Convert a protocol buffer QuotaScope to a client QuotaScope n * @return the converted client
- * QuotaScope
+ * Convert a protocol buffer QuotaScope to a client QuotaScope
+ * @return the converted client QuotaScope
*/
public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) {
switch (proto) {
@@ -2410,8 +2412,8 @@ public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) {
}
/**
- * Convert a client QuotaScope to a protocol buffer QuotaScope n * @return the converted protocol
- * buffer QuotaScope
+ * Convert a client QuotaScope to a protocol buffer QuotaScope
+ * @return the converted protocol buffer QuotaScope
*/
public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) {
switch (scope) {
@@ -2424,8 +2426,8 @@ public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) {
}
/**
- * Convert a protocol buffer QuotaType to a client QuotaType n * @return the converted client
- * QuotaType
+ * Convert a protocol buffer QuotaType to a client QuotaType
+ * @return the converted client QuotaType
*/
public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) {
switch (proto) {
@@ -2438,8 +2440,8 @@ public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) {
}
/**
- * Convert a client QuotaType to a protocol buffer QuotaType n * @return the converted protocol
- * buffer QuotaType
+ * Convert a client QuotaType to a protocol buffer QuotaType
+ * @return the converted protocol buffer QuotaType
*/
public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) {
switch (type) {
@@ -2566,7 +2568,7 @@ public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableN
* This version of protobuf's mergeDelimitedFrom avoid the hard-coded 64MB limit for decoding
* buffers
* @param builder current message builder
- * @param in Inputsream with delimited protobuf data n
+ * @param in InputStream with delimited protobuf data
*/
public static void mergeDelimitedFrom(Message.Builder builder, InputStream in)
throws IOException {
@@ -2588,7 +2590,7 @@ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in)
* where the message size is known
* @param builder current message builder
* @param in InputStream containing protobuf data
- * @param size known size of protobuf data n
+ * @param size known size of protobuf data
*/
public static void mergeFrom(Message.Builder builder, InputStream in, int size)
throws IOException {
@@ -2602,7 +2604,7 @@ public static void mergeFrom(Message.Builder builder, InputStream in, int size)
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers
* where the message size is not known
* @param builder current message builder
- * @param in InputStream containing protobuf data n
+ * @param in InputStream containing protobuf data
*/
public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException {
final CodedInputStream codedInput = CodedInputStream.newInstance(in);
@@ -2615,7 +2617,7 @@ public static void mergeFrom(Message.Builder builder, InputStream in) throws IOE
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with ByteStrings
* @param builder current message builder
- * @param bs ByteString containing the n
+ * @param bs ByteString containing the protobuf data
*/
public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException {
final CodedInputStream codedInput = bs.newCodedInput();
@@ -2628,7 +2630,7 @@ public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOEx
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with byte arrays
* @param builder current message builder
- * @param b byte array n
+ * @param b byte array
*/
public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@@ -2641,7 +2643,7 @@ public static void mergeFrom(Message.Builder builder, byte[] b) throws IOExcepti
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with byte arrays
* @param builder current message builder
- * @param b byte array nnn
+ * @param b byte array
*/
public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length)
throws IOException {
@@ -2821,7 +2823,7 @@ public static TableDescriptor toTableDescriptor(final TableSchema ts) {
/**
* Creates {@link CompactionState} from {@link GetRegionInfoResponse.CompactionState} state
- * @param state the protobuf CompactionState n
+ * @param state the protobuf CompactionState
*/
public static CompactionState createCompactionState(GetRegionInfoResponse.CompactionState state) {
return CompactionState.valueOf(state.toString());
@@ -2833,7 +2835,7 @@ public static GetRegionInfoResponse.CompactionState createCompactionState(Compac
/**
* Creates {@link CompactionState} from {@link RegionLoad.CompactionState} state
- * @param state the protobuf CompactionState n
+ * @param state the protobuf CompactionState
*/
public static CompactionState
createCompactionStateForRegionLoad(RegionLoad.CompactionState state) {
@@ -2938,9 +2940,7 @@ public static RegionLoadStats createRegionLoadStats(ClientProtos.RegionLoadStats
stats.getCompactionPressure());
}
- /**
- * n * @return A String version of the passed in msg
- */
+ /** Returns a String version of the passed-in msg */
public static String toText(Message msg) {
return TextFormat.shortDebugString(msg);
}
@@ -2950,7 +2950,7 @@ public static byte[] toBytes(ByteString bs) {
}
/**
- * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it. n
+ * Contain ServiceException inside here. Take a callable that is doing our pb rpc and run it.
*/
public static <T> T call(Callable<T> callable) throws IOException {
try {
@@ -3061,7 +3061,7 @@ public static RegionState parseMetaRegionStateFrom(final byte[] data, int replic
* magic and that is then followed by a protobuf that has a serialized
* {@link ServerName} in it.
* @return Returns null if data is null else converts passed data to a ServerName
- * instance. n
+ * instance.
*/
public static ServerName parseServerNameFrom(final byte[] data) throws DeserializationException {
if (data == null || data.length <= 0) return null;
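The mergeFrom helpers above exist to dodge protobuf's hard-coded 64MB decode limit; a round-trip sketch through the Scan converters also cleaned up in this file:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

    public class MergeFromSketch {
      static Scan roundTrip(Scan scan) throws IOException {
        byte[] bytes = ProtobufUtil.toScan(scan).toByteArray(); // client -> PB -> bytes
        ClientProtos.Scan.Builder builder = ClientProtos.Scan.newBuilder();
        ProtobufUtil.mergeFrom(builder, bytes); // no 64MB ceiling, per the javadoc
        return ProtobufUtil.toScan(builder.build()); // PB -> client
      }
    }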
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index f678a43986d7..9c88b61fd678 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -195,7 +195,7 @@ public static GetRequest buildGetRequest(final byte[] regionName, final Get get)
/**
* Create a protocol buffer MutateRequest for a conditioned put/delete/increment/append
- * @return a mutate request n
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final byte[] row,
final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value,
@@ -215,7 +215,7 @@ public static MutateRequest buildMutateRequest(final byte[] regionName, final by
/**
* Create a protocol buffer MultiRequest for conditioned row mutations
- * @return a multi request n
+ * @return a multi request
*/
public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName,
final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op,
@@ -272,7 +272,8 @@ private static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionNa
}
/**
- * Create a protocol buffer MutateRequest for a put nn * @return a mutate request n
+ * Create a protocol buffer MutateRequest for a put
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Put put)
throws IOException {
@@ -284,7 +285,8 @@ public static MutateRequest buildMutateRequest(final byte[] regionName, final Pu
}
/**
- * Create a protocol buffer MutateRequest for an append nn * @return a mutate request n
+ * Create a protocol buffer MutateRequest for an append
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Append append,
long nonceGroup, long nonce) throws IOException {
@@ -300,7 +302,8 @@ public static MutateRequest buildMutateRequest(final byte[] regionName, final Ap
}
/**
- * Create a protocol buffer MutateRequest for a client increment nn * @return a mutate request
+ * Create a protocol buffer MutateRequest for a client increment
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Increment increment,
final long nonceGroup, final long nonce) throws IOException {
@@ -316,7 +319,8 @@ public static MutateRequest buildMutateRequest(final byte[] regionName, final In
}
/**
- * Create a protocol buffer MutateRequest for a delete nn * @return a mutate request n
+ * Create a protocol buffer MutateRequest for a delete
+ * @return a mutate request
*/
public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete)
throws IOException {
@@ -336,7 +340,8 @@ public static RegionAction.Builder getRegionActionBuilderWithRegion(
}
/**
- * Create a protocol buffer ScanRequest for a client Scan nnnn * @return a scan request n
+ * Create a protocol buffer ScanRequest for a client Scan
+ * @return a scan request
*/
public static ScanRequest buildScanRequest(byte[] regionName, Scan scan, int numberOfRows,
boolean closeScanner) throws IOException {
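
These builders are thin wrappers over the generated protobuf builders. A hedged usage sketch of the `buildScanRequest(byte[], Scan, int, boolean)` overload above; it assumes hbase-client on the classpath, and the region name is a placeholder since RequestConverter is internal API:

```java
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanRequestExample {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan().withStartRow(Bytes.toBytes("row-000"));
    byte[] regionName = Bytes.toBytes("placeholder-region"); // hypothetical name
    // Matches the buildScanRequest(byte[], Scan, int, boolean) overload above.
    ScanRequest request = RequestConverter.buildScanRequest(regionName, scan, 100, false);
    System.out.println(request.getNumberOfRows()); // 100
  }
}
```
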
@@ -356,7 +361,8 @@ public static ScanRequest buildScanRequest(byte[] regionName, Scan scan, int num
}
/**
- * Create a protocol buffer ScanRequest for a scanner id nnn * @return a scan request
+ * Create a protocol buffer ScanRequest for a scanner id
+ * @return a scan request
*/
public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
boolean trackMetrics) {
@@ -371,7 +377,8 @@ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boo
}
/**
- * Create a protocol buffer ScanRequest for a scanner id nnnn * @return a scan request
+ * Create a protocol buffer ScanRequest for a scanner id
+ * @return a scan request
*/
public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boolean closeScanner,
long nextCallSeq, boolean trackMetrics, boolean renew, int limitOfRows) {
@@ -391,7 +398,8 @@ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boo
}
/**
- * Create a protocol buffer bulk load request nnnnnn * @return a bulk load request
+ * Create a protocol buffer bulk load request
+ * @return a bulk load request
*/
public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
@@ -457,7 +465,7 @@ public static BulkLoadHFileRequest buildBulkLoadHFileRequest(
* @param mutationBuilder mutationBuilder to be used to build mutation.
* @param nonceGroup nonceGroup to be applied.
* @param indexMap Map of created RegionAction to the original index for a
- * RowMutations/CheckAndMutate within the original list of actions n
+ * RowMutations/CheckAndMutate within the original list of actions
*/
public static void buildNoDataRegionActions(final byte[] regionName,
final Iterable<Action> actions, final List<CellScannable> cells,
@@ -825,7 +833,8 @@ public static WarmupRegionRequest buildWarmupRegionRequest(final RegionInfo regi
/**
* Create a CompactRegionRequest for a given region name
* @param regionName the name of the region to get info
- * @param major indicator if it is a major compaction n * @return a CompactRegionRequest
+ * @param major indicator if it is a major compaction
+ * @return a CompactRegionRequest
*/
public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, boolean major,
byte[] columnFamily) {
@@ -883,7 +892,8 @@ public static RegionSpecifier buildRegionSpecifier(final RegionSpecifierType typ
}
/**
- * Create a protocol buffer AddColumnRequest nn * @return an AddColumnRequest
+ * Create a protocol buffer AddColumnRequest
+ * @return an AddColumnRequest
*/
public static AddColumnRequest buildAddColumnRequest(final TableName tableName,
final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -896,7 +906,8 @@ public static AddColumnRequest buildAddColumnRequest(final TableName tableName,
}
/**
- * Create a protocol buffer DeleteColumnRequest nn * @return a DeleteColumnRequest
+ * Create a protocol buffer DeleteColumnRequest
+ * @return a DeleteColumnRequest
*/
public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName,
final byte[] columnName, final long nonceGroup, final long nonce) {
@@ -909,7 +920,8 @@ public static DeleteColumnRequest buildDeleteColumnRequest(final TableName table
}
/**
- * Create a protocol buffer ModifyColumnRequest nn * @return an ModifyColumnRequest
+ * Create a protocol buffer ModifyColumnRequest
+ * @return a ModifyColumnRequest
*/
public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName,
final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) {
@@ -935,7 +947,8 @@ public static ModifyColumnStoreFileTrackerRequest buildModifyColumnStoreFileTrac
}
/**
- * Create a protocol buffer MoveRegionRequest nn * @return A MoveRegionRequest
+ * Create a protocol buffer MoveRegionRequest
+ * @return A MoveRegionRequest
*/
public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName,
ServerName destServerName) {
@@ -976,7 +989,8 @@ public static SplitTableRegionRequest buildSplitTableRegionRequest(final RegionI
}
/**
- * Create a protocol buffer AssignRegionRequest n * @return an AssignRegionRequest
+ * Create a protocol buffer AssignRegionRequest
+ * @return an AssignRegionRequest
*/
public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) {
AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder();
@@ -985,7 +999,8 @@ public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionNa
}
/**
- * Creates a protocol buffer UnassignRegionRequest n * @return an UnassignRegionRequest
+ * Creates a protocol buffer UnassignRegionRequest
+ * @return an UnassignRegionRequest
*/
public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) {
UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder();
@@ -994,7 +1009,8 @@ public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regi
}
/**
- * Creates a protocol buffer OfflineRegionRequest n * @return an OfflineRegionRequest
+ * Creates a protocol buffer OfflineRegionRequest
+ * @return an OfflineRegionRequest
*/
public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) {
OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder();
@@ -1003,7 +1019,8 @@ public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] region
}
/**
- * Creates a protocol buffer DeleteTableRequest n * @return a DeleteTableRequest
+ * Creates a protocol buffer DeleteTableRequest
+ * @return a DeleteTableRequest
*/
public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
@@ -1031,7 +1048,8 @@ public static TruncateTableRequest buildTruncateTableRequest(final TableName tab
}
/**
- * Creates a protocol buffer EnableTableRequest n * @return an EnableTableRequest
+ * Creates a protocol buffer EnableTableRequest
+ * @return an EnableTableRequest
*/
public static EnableTableRequest buildEnableTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
@@ -1043,7 +1061,8 @@ public static EnableTableRequest buildEnableTableRequest(final TableName tableNa
}
/**
- * Creates a protocol buffer DisableTableRequest n * @return a DisableTableRequest
+ * Creates a protocol buffer DisableTableRequest
+ * @return a DisableTableRequest
*/
public static DisableTableRequest buildDisableTableRequest(final TableName tableName,
final long nonceGroup, final long nonce) {
@@ -1055,7 +1074,8 @@ public static DisableTableRequest buildDisableTableRequest(final TableName table
}
/**
- * Creates a protocol buffer CreateTableRequest nn * @return a CreateTableRequest
+ * Creates a protocol buffer CreateTableRequest
+ * @return a CreateTableRequest
*/
public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor,
final byte[][] splitKeys, final long nonceGroup, final long nonce) {
@@ -1072,7 +1092,8 @@ public static CreateTableRequest buildCreateTableRequest(final TableDescriptor t
}
/**
- * Creates a protocol buffer ModifyTableRequest nn * @return a ModifyTableRequest
+ * Creates a protocol buffer ModifyTableRequest
+ * @return a ModifyTableRequest
*/
public static ModifyTableRequest buildModifyTableRequest(final TableName tableName,
final TableDescriptor tableDesc, final long nonceGroup, final long nonce) {
@@ -1096,7 +1117,8 @@ public static ModifyTableStoreFileTrackerRequest buildModifyTableStoreFileTracke
}
/**
- * Creates a protocol buffer GetTableDescriptorsRequest n * @return a GetTableDescriptorsRequest
+ * Creates a protocol buffer GetTableDescriptorsRequest
+ * @return a GetTableDescriptorsRequest
*/
public static GetTableDescriptorsRequest
buildGetTableDescriptorsRequest(final List<TableName> tableNames) {
@@ -1193,7 +1215,8 @@ public static IsMasterRunningRequest buildIsMasterRunningRequest() {
}
/**
- * Creates a protocol buffer SetBalancerRunningRequest nn * @return a SetBalancerRunningRequest
+ * Creates a protocol buffer SetBalancerRunningRequest
+ * @return a SetBalancerRunningRequest
*/
public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on,
boolean synchronous) {
@@ -1278,8 +1301,8 @@ public static IsCleanerChoreEnabledRequest buildIsCleanerChoreEnabledRequest() {
}
/**
- * Creates a request for querying the master the last flushed sequence Id for a region n * @return
- * A {@link GetLastFlushedSequenceIdRequest}
+ * Creates a request for querying the master for the last flushed sequence Id of a region
+ * @return A {@link GetLastFlushedSequenceIdRequest}
*/
public static GetLastFlushedSequenceIdRequest
buildGetLastFlushedSequenceIdRequest(byte[] regionName) {
@@ -1330,7 +1353,8 @@ public static IsNormalizerEnabledRequest buildIsNormalizerEnabledRequest() {
}
/**
- * Creates a protocol buffer SetNormalizerRunningRequest n * @return a SetNormalizerRunningRequest
+ * Creates a protocol buffer SetNormalizerRunningRequest
+ * @return a SetNormalizerRunningRequest
*/
public static SetNormalizerRunningRequest buildSetNormalizerRunningRequest(boolean on) {
return SetNormalizerRunningRequest.newBuilder().setOn(on).build();
@@ -1438,7 +1462,8 @@ public static ListReplicationPeersRequest buildListReplicationPeersRequest(Patte
}
/**
- * Creates a protocol buffer CreateNamespaceRequest n * @return a CreateNamespaceRequest
+ * Creates a protocol buffer CreateNamespaceRequest
+ * @return a CreateNamespaceRequest
*/
public static CreateNamespaceRequest
buildCreateNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1448,7 +1473,8 @@ public static ListReplicationPeersRequest buildListReplicationPeersRequest(Patte
}
/**
- * Creates a protocol buffer ModifyNamespaceRequest n * @return a ModifyNamespaceRequest
+ * Creates a protocol buffer ModifyNamespaceRequest
+ * @return a ModifyNamespaceRequest
*/
public static ModifyNamespaceRequest
buildModifyNamespaceRequest(final NamespaceDescriptor descriptor) {
@@ -1458,7 +1484,8 @@ public static ListReplicationPeersRequest buildListReplicationPeersRequest(Patte
}
/**
- * Creates a protocol buffer DeleteNamespaceRequest n * @return a DeleteNamespaceRequest
+ * Creates a protocol buffer DeleteNamespaceRequest
+ * @return a DeleteNamespaceRequest
*/
public static DeleteNamespaceRequest buildDeleteNamespaceRequest(final String name) {
DeleteNamespaceRequest.Builder builder = DeleteNamespaceRequest.newBuilder();
@@ -1467,8 +1494,8 @@ public static DeleteNamespaceRequest buildDeleteNamespaceRequest(final String na
}
/**
- * Creates a protocol buffer GetNamespaceDescriptorRequest n * @return a
- * GetNamespaceDescriptorRequest
+ * Creates a protocol buffer GetNamespaceDescriptorRequest
+ * @return a GetNamespaceDescriptorRequest
*/
public static GetNamespaceDescriptorRequest
buildGetNamespaceDescriptorRequest(final String name) {
@@ -1592,7 +1619,7 @@ public static SetSnapshotCleanupRequest buildSetSnapshotCleanupRequest(final boo
/**
* Creates IsSnapshotCleanupEnabledRequest to determine if auto snapshot cleanup based on TTL
- * expiration is turned on n
+ * expiration is turned on
*/
public static IsSnapshotCleanupEnabledRequest buildIsSnapshotCleanupEnabledRequest() {
return IsSnapshotCleanupEnabledRequest.newBuilder().build();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
index 440891382e7a..09cbc460f22e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
@@ -90,7 +90,7 @@ public static SingleResponse getResult(final ClientProtos.MutateRequest request,
* @param request the original protocol buffer MultiRequest
* @param response the protocol buffer MultiResponse to convert
* @param cells Cells to go with the passed in proto. Can be null.
- * @return the results that were in the MultiResponse (a Result or an Exception). n
+ * @return the results that were in the MultiResponse (a Result or an Exception).
*/
public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
final MultiResponse response, final CellScanner cells) throws IOException {
@@ -103,7 +103,7 @@ public static org.apache.hadoop.hbase.client.MultiResponse getResults(final Mult
* @param indexMap Used to support RowMutations/CheckAndMutate in batch
* @param response the protocol buffer MultiResponse to convert
* @param cells Cells to go with the passed in proto. Can be null.
- * @return the results that were in the MultiResponse (a Result or an Exception). n
+ * @return the results that were in the MultiResponse (a Result or an Exception).
*/
public static org.apache.hadoop.hbase.client.MultiResponse getResults(final MultiRequest request,
final Map<Integer, Integer> indexMap, final MultiResponse response, final CellScanner cells)
@@ -247,7 +247,8 @@ public static CheckAndMutateResult getCheckAndMutateResult(
}
/**
- * Wrap a throwable to an action result. n * @return an action result builder
+ * Wrap a throwable to an action result.
+ * @return an action result builder
*/
public static ResultOrException.Builder buildActionResult(final Throwable t) {
ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -256,7 +257,8 @@ public static ResultOrException.Builder buildActionResult(final Throwable t) {
}
/**
- * Wrap a throwable to an action result. n * @return an action result builder
+ * Wrap a result to an action result.
+ * @return an action result builder
*/
public static ResultOrException.Builder buildActionResult(final ClientProtos.Result r) {
ResultOrException.Builder builder = ResultOrException.newBuilder();
@@ -264,9 +266,7 @@ public static ResultOrException.Builder buildActionResult(final ClientProtos.Res
return builder;
}
- /**
- * n * @return NameValuePair of the exception name to stringified version os exception.
- */
+ /** Returns NameBytesPair of the exception name to stringified version of the exception. */
public static NameBytesPair buildException(final Throwable t) {
NameBytesPair.Builder parameterBuilder = NameBytesPair.newBuilder();
parameterBuilder.setName(t.getClass().getName());
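
A hedged sketch of the exception wrapper above, illustrating that the pair carries the exception class name plus a stringified form (ResponseConverter is internal API):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair;

public class BuildExceptionExample {
  public static void main(String[] args) {
    NameBytesPair pair = ResponseConverter.buildException(new IOException("boom"));
    System.out.println(pair.getName()); // java.io.IOException
  }
}
```
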
@@ -307,7 +307,8 @@ public static boolean isClosed(final CloseRegionResponse proto) {
}
/**
- * A utility to build a GetServerInfoResponse. nn * @return the response
+ * A utility to build a GetServerInfoResponse.
+ * @return the response
*/
public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName serverName,
final int webuiPort) {
@@ -322,7 +323,8 @@ public static GetServerInfoResponse buildGetServerInfoResponse(final ServerName
}
/**
- * A utility to build a GetOnlineRegionResponse. n * @return the response
+ * A utility to build a GetOnlineRegionResponse.
+ * @return the response
*/
public static GetOnlineRegionResponse
buildGetOnlineRegionResponse(final List<RegionInfo> regions) {
@@ -405,7 +407,7 @@ public static IOException getControllerException(RpcController controller) throw
}
/**
- * Create Results from the cells using the cells meta data. nnn
+ * Create Results from the cells using the cells' metadata.
*/
public static Result[] getResults(CellScanner cellScanner, ScanResponse response)
throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
index 091515c325ed..2787b5ab7f9d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
@@ -91,7 +91,7 @@ public static byte[] getBytes(final Writable... ws) throws IOException {
* @return The passed Writable after its readFields has been called fed by the passed
* bytes array or IllegalArgumentException if passed null or an empty
* bytes array.
- * @throws IOException e n
+ * @throws IOException e
*/
public static Writable getWritable(final byte[] bytes, final Writable w) throws IOException {
return getWritable(bytes, 0, bytes.length, w);
@@ -107,7 +107,7 @@ public static Writable getWritable(final byte[] bytes, final Writable w) throws
* @return The passed Writable after its readFields has been called fed by the passed
* bytes array or IllegalArgumentException if passed null or an empty
* bytes array.
- * @throws IOException e n
+ * @throws IOException e
*/
public static Writable getWritable(final byte[] bytes, final int offset, final int length,
final Writable w) throws IOException {
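
A small round-trip sketch for the two Writables helpers above, using hadoop-common's Text as the sample Writable:

```java
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

public class WritablesRoundTrip {
  public static void main(String[] args) throws Exception {
    byte[] bytes = Writables.getBytes(new Text("hello"));
    // getWritable feeds the bytes to the passed Writable's readFields.
    Text copy = (Text) Writables.getWritable(bytes, new Text());
    System.out.println(copy); // hello
  }
}
```
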
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
index 97c8302b2211..8d3fcd2c342d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
@@ -144,7 +144,7 @@ public String getZNodeForReplica(int replicaId) {
/**
* Parses the meta replicaId from the passed path.
- * @param path the name of the full path which includes baseZNode. n
+ * @param path the name of the full path which includes baseZNode.
*/
public int getMetaReplicaIdFromPath(String path) {
// Extract the znode from path. The prefix is of the following format.
@@ -155,7 +155,7 @@ public int getMetaReplicaIdFromPath(String path) {
/**
* Parse the meta replicaId from the passed znode
- * @param znode the name of the znode, does not include baseZNode n
+ * @param znode the name of the znode, does not include baseZNode
*/
public int getMetaReplicaIdFromZNode(String znode) {
return znode.equals(metaZNodePrefix)
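
A hedged sketch of the replica-id parsing described above. It assumes a public ZNodePaths(Configuration) constructor and the default znode layout, in which meta replica N lives under a '-N' suffixed znode:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

public class MetaReplicaIdExample {
  public static void main(String[] args) {
    // Assumed constructor; builds paths from the hbase.zookeeper.* defaults.
    ZNodePaths paths = new ZNodePaths(HBaseConfiguration.create());
    String znodeForReplicaOne = paths.getZNodeForReplica(1);
    System.out.println(paths.getMetaReplicaIdFromPath(znodeForReplicaOne)); // 1
  }
}
```
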
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
index cc329cd3d03a..cce3ba4e4e3f 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java
@@ -42,7 +42,7 @@ public class TestDeleteTimeStamp {
private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier");
/*
- * Test for verifying that the timestamp in delete object is being honored. n
+ * Test for verifying that the timestamp in the delete object is being honored.
*/
@Test
public void testTimeStamp() {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index 27cf51e7c9f9..d7eef52a4f9f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -100,7 +100,8 @@ private AuthUtil() {
/**
* For kerberized cluster, return login user (from kinit or from keytab if specified). For
* non-kerberized cluster, return system user.
- * @param conf configuartion file n * @throws IOException login exception
+ * @param conf configuration file
+ * @throws IOException login exception
*/
@InterfaceAudience.Private
public static User loginClient(Configuration conf) throws IOException {
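
A hedged sketch of the client login helper above (AuthUtil is IA.Private; on a non-kerberized cluster it simply returns the system user):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.User;

public class LoginClientExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Without kerberos configured this falls through to the system user.
    User user = AuthUtil.loginClient(conf);
    System.out.println(user.getName());
  }
}
```
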
@@ -160,7 +161,8 @@ private static User loginFromKeytabAndReturnUser(UserProvider provider) throws I
*
* NOT recommend to use to method unless you're sure what you're doing, it is for canary only.
* Please use User#loginClient.
- * @param conf configuration file n * @throws IOException login exception
+ * @param conf configuration file
+ * @throws IOException login exception
*/
private static User loginClientAsService(Configuration conf) throws IOException {
UserProvider provider = UserProvider.instantiate(conf);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
index e5050b864ca8..a29a98a8c091 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
@@ -54,7 +54,7 @@ public ByteBufferKeyOnlyKeyValue(ByteBuffer buf, int offset, int length) {
/**
* A setter that helps to avoid object creation every time and whenever there is a need to create
- * new OffheapKeyOnlyKeyValue. nnn
+ * new OffheapKeyOnlyKeyValue.
*/
public void setKey(ByteBuffer key, int offset, int length) {
setKey(key, offset, length, ByteBufferUtils.toShort(key, offset));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
index 28128ee37c6c..677ed2295cea 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
@@ -48,7 +48,7 @@ public interface CellBuilder {
Cell build();
/**
- * Remove all internal elements from builder. n
+ * Remove all internal elements from builder.
*/
CellBuilder clear();
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index b4d3b5549dbd..2c19c0f1043d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -661,9 +661,8 @@ static int compareRows(final Cell left, int leftRowLength, final Cell right, int
/**
* Compares the row part of the cell with a simple plain byte[] like the stopRow in Scan. This
* should be used with context where for hbase:meta cells the
- * {{@link MetaCellComparator#META_COMPARATOR} should be used n * the cell to be compared n * the
- * kv serialized byte[] to be compared with n * the offset in the byte[] n * the length in the
- * byte[]
+ * {@link MetaCellComparator#META_COMPARATOR} should be used. Takes the cell to be compared, the
+ * kv serialized byte[] to be compared with, the offset in the byte[], and the length in the
+ * byte[].
* @return 0 if both cell and the byte[] are equal, 1 if the cell is bigger than byte[], -1
* otherwise
*/
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index c28d0d87525e..80dcf8c505db 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -77,7 +77,8 @@ public static byte[] cloneValue(Cell cell) {
/**
* Makes a column in family:qualifier form from separate byte arrays.
*
- * Not recommended for usage as this is old-style API. nn * @return family:qualifier
+ * Not recommended for usage as this is old-style API.
+ * @return family:qualifier
*/
public static byte[] makeColumn(byte[] family, byte[] qualifier) {
return Bytes.add(family, COLUMN_FAMILY_DELIM_ARRAY, qualifier);
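
A tiny usage sketch of the helper above:

```java
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class MakeColumnExample {
  public static void main(String[] args) {
    byte[] column = CellUtil.makeColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    System.out.println(Bytes.toString(column)); // cf:q
  }
}
```
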
@@ -292,9 +293,7 @@ public static int copyValueTo(Cell cell, ByteBuffer destination, int destination
return destinationOffset + vlen;
}
- /**
- * n * @return CellScanner interface over cellIterables
- */
+ /** Returns CellScanner interface over cellIterables */
public static CellScanner
createCellScanner(final List<? extends CellScannable> cellScannerables) {
return new CellScanner() {
@@ -320,17 +319,15 @@ public boolean advance() throws IOException {
};
}
- /**
- * n * @return CellScanner interface over cellIterable
- */
+ /** Returns CellScanner interface over cellIterable */
public static CellScanner createCellScanner(final Iterable<Cell> cellIterable) {
if (cellIterable == null) return null;
return createCellScanner(cellIterable.iterator());
}
/**
- * n * @return CellScanner interface over cellIterable or null if cells
- * is null
+ * Returns CellScanner interface over cellIterable or null if cells is
+ * null
*/
public static CellScanner createCellScanner(final Iterator<Cell> cells) {
if (cells == null) return null;
@@ -352,9 +349,7 @@ public boolean advance() {
};
}
- /**
- * n * @return CellScanner interface over cellArray
- */
+ /** Returns CellScanner interface over cellArray */
public static CellScanner createCellScanner(final Cell[] cellArray) {
return new CellScanner() {
private final Cell[] cells = cellArray;
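
A hedged iteration sketch for the createCellScanner factories above, using CellScanner's advance()/current() protocol:

```java
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class CellScannerExample {
  public static void main(String[] args) throws Exception {
    Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
      .setRow(Bytes.toBytes("r")).setFamily(Bytes.toBytes("f"))
      .setQualifier(Bytes.toBytes("q")).setTimestamp(1L)
      .setType(Cell.Type.Put).setValue(Bytes.toBytes("v")).build();
    CellScanner scanner = CellUtil.createCellScanner(new Cell[] { cell });
    while (scanner.advance()) { // advance() first, then read current()
      System.out.println(Bytes.toString(CellUtil.cloneRow(scanner.current())));
    }
  }
}
```
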
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
index ddbf71cac137..432556d26421 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CompoundConfiguration.java
@@ -145,7 +145,7 @@ public CompoundConfiguration add(final Configuration conf) {
/**
* Add Bytes map to config list. This map is generally created by HTableDescriptor or
* HColumnDescriptor, but can be abstractly used. The added configuration overrides the previous
- * ones if there are name collisions. n * Bytes map
+ * ones if there are name collisions.
+ * @param map Bytes map
* @return this, for builder pattern
*/
public CompoundConfiguration addBytesMap(final Map<Bytes, Bytes> map) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
index b3b7a1c5e57e..28e648ec466e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
@@ -41,7 +41,7 @@ public interface ExtendedCell extends RawCell, HeapSize {
* <tags>
* @param out Stream to which cell has to be written
* @param withTags Whether to write tags.
- * @return how many bytes are written. n
+ * @return how many bytes are written.
*/
// TODO remove the boolean param once HBASE-16706 is done.
default int write(OutputStream out, boolean withTags) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
index 5d428d0b434e..5fc030581dad 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java
@@ -182,7 +182,7 @@ public static boolean isShowConfInServlet() {
* @param conf configuration instance for accessing the passwords
* @param alias the name of the password element
* @param defPass the default password
- * @return String password or default password n
+ * @return String password or default password
*/
public static String getPassword(Configuration conf, String alias, String defPass)
throws IOException {
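
A hedged usage sketch of getPassword above. The alias name is hypothetical; with no hadoop CredentialProvider configured, the default password is returned:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class GetPasswordExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "example.keystore.password" is a hypothetical alias used for the demo.
    String pass = HBaseConfiguration.getPassword(conf, "example.keystore.password", "changeit");
    System.out.println(pass); // changeit, unless a provider supplies the alias
  }
}
```
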
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 59c54b0c0319..3661c063e88c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -1840,8 +1840,8 @@ public int compareIgnoringPrefix(int commonPrefix, byte[] left, int loffset, int
* Compare columnFamily, qualifier, timestamp, and key type (everything except the row). This
* method is used both in the normal comparator and the "same-prefix" comparator. Note that we
* are assuming that row portions of both KVs have already been parsed and found identical, and
- * we don't validate that assumption here. n * the length of the common prefix of the two
- * key-values being compared, including row length and row
+ * we don't validate that assumption here.
+ * @param commonPrefix the length of the common prefix of the two key-values being compared,
+ * including row length and row
*/
private int compareWithoutRow(int commonPrefix, byte[] left, int loffset, int llength,
byte[] right, int roffset, int rlength, short rowlength) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
index 4291d904fe89..ed3687e9ed4d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueTestUtil.java
@@ -55,8 +55,8 @@ public static ByteBuffer toByteBufferAndRewind(final Iterable<? extends KeyValue
/**
* Checks whether KeyValues from kvCollection2 are contained in kvCollection1. The comparison is
- * made without distinguishing MVCC version of the KeyValues nn * @return true if KeyValues from
- * kvCollection2 are contained in kvCollection1
+ * made without distinguishing MVCC version of the KeyValues
+ * @return true if KeyValues from kvCollection2 are contained in kvCollection1
*/
public static boolean containsIgnoreMvccVersion(Collection<? extends Cell> kvCollection1,
Collection<? extends Cell> kvCollection2) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index bdf77d511af6..71f1da9a8a67 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -58,8 +58,8 @@ public static int length(short rlen, byte flen, int qlen, int vlen, int tlen, bo
/**
* Returns number of bytes this cell's key part would have been used if serialized as in
- * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type. n * @return the
- * key length
+ * {@link KeyValue}. Key includes rowkey, family, qualifier, timestamp and type.
+ * @return the key length
*/
public static int keyLength(final Cell cell) {
return keyLength(cell.getRowLength(), cell.getFamilyLength(), cell.getQualifierLength());
@@ -96,8 +96,8 @@ public static KeyValue copyToNewKeyValue(final Cell cell) {
}
/**
- * The position will be set to the beginning of the new ByteBuffer n * @return the Bytebuffer
- * containing the key part of the cell
+ * The position will be set to the beginning of the new ByteBuffer
+ * @return the ByteBuffer containing the key part of the cell
*/
public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
byte[] bytes = new byte[keyLength(cell)];
@@ -107,8 +107,8 @@ public static ByteBuffer copyKeyToNewByteBuffer(final Cell cell) {
}
/**
- * Copies the key to a new KeyValue n * @return the KeyValue that consists only the key part of
- * the incoming cell
+ * Copies the key to a new KeyValue
+ * @return the KeyValue that consists only the key part of the incoming cell
*/
public static KeyValue toNewKeyCell(final Cell cell) {
byte[] bytes = new byte[keyLength(cell)];
@@ -203,7 +203,7 @@ public static void appendToByteBuffer(final ByteBuffer bb, final KeyValue kv,
/**
* Creates a new KeyValue object positioned in the supplied ByteBuffer and sets the ByteBuffer's
- * position to the start of the next KeyValue. Does not allocate a new array or copy data. nnn
+ * position to the start of the next KeyValue. Does not allocate a new array or copy data.
*/
public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includesMvccVersion,
boolean includesTags) {
@@ -236,7 +236,8 @@ public static KeyValue nextShallowCopy(final ByteBuffer bb, final boolean includ
/**
* Decrement the timestamp. For tests (currently wasteful) Remember timestamps are sorted reverse
- * chronologically. n * @return previous key
+ * chronologically.
+ * @return previous key
*/
public static KeyValue previousKey(final KeyValue in) {
return createFirstOnRow(CellUtil.cloneRow(in), CellUtil.cloneFamily(in),
@@ -246,9 +247,8 @@ public static KeyValue previousKey(final KeyValue in) {
/**
* Create a KeyValue for the specified row, family and qualifier that would be larger than or
* equal to all other possible KeyValues that have the same row, family, qualifier. Used for
- * reseeking. Should NEVER be returned to a client. n * row key n * row offset n * row length n *
- * family name n * family offset n * family length n * column qualifier n * qualifier offset n *
- * qualifier length
+ * reseeking. Should NEVER be returned to a client. Takes the row key, row offset, row length,
+ * family name, family offset, family length, column qualifier, qualifier offset, and qualifier
+ * length.
* @return Last possible key on passed row, family, qualifier.
*/
public static KeyValue createLastOnRow(final byte[] row, final int roffset, final int rlength,
@@ -408,11 +408,11 @@ public static KeyValue createFirstOnRow(byte[] buffer, final int boffset, final
/*************** misc **********************************/
/**
- * n * @return cell if it is an object of class {@link KeyValue} else we will return
- * a new {@link KeyValue} instance made from cell Note: Even if the cell is an object
- * of any of the subclass of {@link KeyValue}, we will create a new {@link KeyValue} object
- * wrapping same buffer. This API is used only with MR based tools which expect the type to be
- * exactly KeyValue. That is the reason for doing this way.
+ * @return cell if it is an object of class {@link KeyValue}, else we will return a
+ * new {@link KeyValue} instance made from cell. Note: Even if the cell is an
+ * object of any subclass of {@link KeyValue}, we will create a new
+ * {@link KeyValue} object wrapping the same buffer. This API is used only with MR-based
+ * tools which expect the type to be exactly KeyValue. That is the reason for doing it this way.
* @deprecated without any replacement.
*/
@Deprecated
@@ -444,8 +444,9 @@ public KeyValue apply(Cell arg0) {
}
/**
- * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable. nn
- * * @return Length written on stream n * @see #create(DataInput) for the inverse function
+ * Write out a KeyValue in the manner in which we used to when KeyValue was a Writable.
+ * @return Length written on stream
+ * @see #create(DataInput) for the inverse function
*/
public static long write(final KeyValue kv, final DataOutput out) throws IOException {
// This is how the old Writables write used to serialize KVs. Need to figure
@@ -639,7 +640,7 @@ private static int checkKeyValueTagBytes(byte[] buf, int offset, int length, int
* @param in inputStream to read.
* @param withTags whether the keyvalue should include tags are not
* @return Created KeyValue OR if we find a length of zero, we will return null which can be
- * useful marking a stream as done. n
+ * useful for marking a stream as done.
*/
public static KeyValue createKeyValueFromInputStream(InputStream in, boolean withTags)
throws IOException {
@@ -663,24 +664,24 @@ public static KeyValue createKeyValueFromInputStream(InputStream in, boolean wit
}
/**
- * n * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
- * hfile index members to KeyValues.
+ * Returns a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+ * index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final byte[] b) {
return createKeyValueFromKey(b, 0, b.length);
}
/**
- * n * @return A KeyValue made of a byte buffer that holds the key-only part. Needed to convert
- * hfile index members to KeyValues.
+ * Returns a KeyValue made of a byte buffer that holds the key-only part. Needed to convert hfile
+ * index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final ByteBuffer bb) {
return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit());
}
/**
- * nnn * @return A KeyValue made of a byte array that holds the key-only part. Needed to convert
- * hfile index members to KeyValues.
+ * Returns a KeyValue made of a byte array that holds the key-only part. Needed to convert hfile
+ * index members to KeyValues.
*/
public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) {
byte[] newb = new byte[l + KeyValue.ROW_OFFSET];
@@ -691,19 +692,19 @@ public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final
}
/**
- * n * Where to read bytes from. Creates a byte array to hold the KeyValue backing bytes copied
- * from the steam.
+ * @param in Where to read bytes from. Creates a byte array to hold the KeyValue backing bytes
+ * copied from the stream.
* @return KeyValue created by deserializing from in OR if we find a length of zero,
- * we will return null which can be useful marking a stream as done. n
+ * we will return null which can be useful for marking a stream as done.
*/
public static KeyValue create(final DataInput in) throws IOException {
return create(in.readInt(), in);
}
/**
- * Create a KeyValue reading length from in nn * @return Created
- * KeyValue OR if we find a length of zero, we will return null which can be useful marking a
- * stream as done. n
+ * Create a KeyValue reading length from in
+ * @return Created KeyValue OR if we find a length of zero, we will return null which can be
+ * useful for marking a stream as done.
*/
public static KeyValue create(int length, final DataInput in) throws IOException {
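
A hedged round-trip sketch for the Writable-style write/create pair documented above:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyValueRoundTrip {
  public static void main(String[] args) throws Exception {
    KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), Bytes.toBytes("v"));
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    KeyValueUtil.write(kv, new DataOutputStream(bos)); // int length, then bytes
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    KeyValue copy = KeyValueUtil.create(in);
    System.out.println(copy); // same key as kv
  }
}
```
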
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
index 1b035966da2b..58c4b2d1cf15 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
@@ -1046,7 +1046,7 @@ public static void writeRowKeyExcludingCommon(Cell cell, short rLen, int commonP
* Writes the row from the given cell to the output stream excluding the common prefix
* @param out The dataoutputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param rlength the row length n
+ * @param rlength the row length
*/
public static void writeRowSkippingBytes(DataOutputStream out, Cell cell, short rlength,
int commonPrefix) throws IOException {
@@ -1234,7 +1234,6 @@ public static final int compareKeyBasedOnColHint(CellComparator comparator, Cell
/**
* Compares only the key portion of a cell. It does not include the sequence id/mvcc of the cell
- * nn
* @return an int greater than 0 if left > than right lesser than 0 if left < than right
* equal to 0 if left is equal to right
*/
@@ -2195,7 +2194,7 @@ public Type getType() {
/**
* Writes the Cell's key part as it would have serialized in a KeyValue. The format is <2 bytes
* rk len><rk><1 byte cf len><cf><qualifier><8 bytes
- * timestamp><1 byte type> nnn
+ * timestamp><1 byte type>
*/
public static void writeFlatKey(Cell cell, DataOutput out) throws IOException {
short rowLen = cell.getRowLength();
@@ -2227,7 +2226,7 @@ public static void writeFlatKey(Cell cell, DataOutput out) throws IOException {
/**
* Deep clones the given cell if the cell supports deep cloning
* @param cell the cell to be cloned
- * @return the cloned cell n
+ * @return the cloned cell
*/
public static Cell deepClone(Cell cell) throws CloneNotSupportedException {
if (cell instanceof ExtendedCell) {
@@ -2241,7 +2240,7 @@ public static Cell deepClone(Cell cell) throws CloneNotSupportedException {
* @param cell the cell to be written
* @param out the outputstream
* @param withTags if tags are to be written or not
- * @return the total bytes written n
+ * @return the total bytes written
*/
public static int writeCell(Cell cell, OutputStream out, boolean withTags) throws IOException {
if (cell instanceof ExtendedCell) {
@@ -2316,8 +2315,8 @@ public static int writeFlatKey(Cell cell, OutputStream out) throws IOException {
/**
* Sets the given seqId to the cell. Marked as audience Private as of 1.2.0. Setting a Cell
- * sequenceid is an internal implementation detail not for general public use. nn * @throws
- * IOException when the passed cell is not of type {@link ExtendedCell}
+ * sequenceid is an internal implementation detail not for general public use.
+ * @throws IOException when the passed cell is not of type {@link ExtendedCell}
*/
public static void setSequenceId(Cell cell, long seqId) throws IOException {
if (cell instanceof ExtendedCell) {
@@ -2329,8 +2328,8 @@ public static void setSequenceId(Cell cell, long seqId) throws IOException {
}
/**
- * Sets the given timestamp to the cell. nn * @throws IOException when the passed cell is not of
- * type {@link ExtendedCell}
+ * Sets the given timestamp to the cell.
+ * @throws IOException when the passed cell is not of type {@link ExtendedCell}
*/
public static void setTimestamp(Cell cell, long ts) throws IOException {
if (cell instanceof ExtendedCell) {
@@ -2386,7 +2385,7 @@ public static boolean updateLatestStamp(Cell cell, byte[] ts) throws IOException
* Writes the row from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param rlength the row length n
+ * @param rlength the row length
*/
public static void writeRow(OutputStream out, Cell cell, short rlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2401,7 +2400,7 @@ public static void writeRow(OutputStream out, Cell cell, short rlength) throws I
* Writes the family from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param flength the family length n
+ * @param flength the family length
*/
public static void writeFamily(OutputStream out, Cell cell, byte flength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2416,7 +2415,7 @@ public static void writeFamily(OutputStream out, Cell cell, byte flength) throws
* Writes the qualifier from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param qlength the qualifier length n
+ * @param qlength the qualifier length
*/
public static void writeQualifier(OutputStream out, Cell cell, int qlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2432,7 +2431,7 @@ public static void writeQualifier(OutputStream out, Cell cell, int qlength) thro
* Writes the qualifier from the given cell to the output stream excluding the common prefix
* @param out The dataoutputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param qlength the qualifier length n
+ * @param qlength the qualifier length
*/
public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell, int qlength,
int commonPrefix) throws IOException {
@@ -2451,7 +2450,7 @@ public static void writeQualifierSkippingBytes(DataOutputStream out, Cell cell,
* Writes the value from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param vlength the value length n
+ * @param vlength the value length
*/
public static void writeValue(OutputStream out, Cell cell, int vlength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2466,7 +2465,7 @@ public static void writeValue(OutputStream out, Cell cell, int vlength) throws I
* Writes the tag from the given cell to the output stream
* @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
- * @param tagsLength the tag length n
+ * @param tagsLength the tag length
*/
public static void writeTags(OutputStream out, Cell cell, int tagsLength) throws IOException {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2499,7 +2498,8 @@ public static boolean equalsIgnoreMvccVersion(Cell a, Cell b) {
}
/**
- * Converts the rowkey bytes of the given cell into an int value n * @return rowkey as int
+ * Converts the rowkey bytes of the given cell into an int value
+ * @return rowkey as int
*/
public static int getRowAsInt(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2510,7 +2510,8 @@ public static int getRowAsInt(Cell cell) {
}
/**
- * Converts the value bytes of the given cell into a long value n * @return value as long
+ * Converts the value bytes of the given cell into a long value
+ * @return value as long
*/
public static long getValueAsLong(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2521,7 +2522,8 @@ public static long getValueAsLong(Cell cell) {
}
/**
- * Converts the value bytes of the given cell into a int value n * @return value as int
+ * Converts the value bytes of the given cell into an int value
+ * @return value as int
*/
public static int getValueAsInt(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2532,7 +2534,8 @@ public static int getValueAsInt(Cell cell) {
}
/**
- * Converts the value bytes of the given cell into a double value n * @return value as double
+ * Converts the value bytes of the given cell into a double value
+ * @return value as double
*/
public static double getValueAsDouble(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2543,7 +2546,8 @@ public static double getValueAsDouble(Cell cell) {
}
/**
- * Converts the value bytes of the given cell into a BigDecimal n * @return value as BigDecimal
+ * Converts the value bytes of the given cell into a BigDecimal
+ * @return value as BigDecimal
*/
public static BigDecimal getValueAsBigDecimal(Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
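
A hedged sketch of the typed accessors above (PrivateCellUtil is IA.Private, so this is illustrative only):

```java
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class TypedValueExample {
  public static void main(String[] args) {
    Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
      .setRow(Bytes.toBytes(42)) // 4-byte row, readable via getRowAsInt
      .setFamily(Bytes.toBytes("f")).setQualifier(Bytes.toBytes("q"))
      .setTimestamp(1L).setType(Cell.Type.Put)
      .setValue(Bytes.toBytes(123L)) // 8-byte value, readable via getValueAsLong
      .build();
    System.out.println(PrivateCellUtil.getRowAsInt(cell));    // 42
    System.out.println(PrivateCellUtil.getValueAsLong(cell)); // 123
  }
}
```
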
@@ -2764,8 +2768,9 @@ public byte getTypeByte() {
/**
* Estimate based on keyvalue's serialization format in the RPC layer. Note that there is an extra
* SIZEOF_INT added to the size here that indicates the actual length of the cell for cases where
- * cell's are serialized in a contiguous format (For eg in RPCs). n * @return Estimate of the
- * cell size in bytes plus an extra SIZEOF_INT indicating the actual cell length.
+ * cells are serialized in a contiguous format (for example, in RPCs).
+ * @return Estimate of the cell size in bytes plus an extra SIZEOF_INT indicating the
+ * actual cell length.
*/
public static int estimatedSerializedSizeOf(final Cell cell) {
return cell.getSerializedSize() + Bytes.SIZEOF_INT;
@@ -2785,9 +2790,9 @@ public static int estimatedSerializedSizeOfKey(final Cell cell) {
/**
* This method exists just to encapsulate how we serialize keys. To be replaced by a factory that
* we query to figure what the Cell implementation is and then, what serialization engine to use
- * and further, how to serialize the key for inclusion in hfile index. TODO. n * @return The key
- * portion of the Cell serialized in the old-school KeyValue way or null if passed a null
- * cell
+ * and further, how to serialize the key for inclusion in hfile index. TODO.
+ * @return The key portion of the Cell serialized in the old-school KeyValue way or null if passed
+ * a null cell
*/
public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) {
if (cell == null) return null;
@@ -2797,8 +2802,8 @@ public static byte[] getCellKeySerializedAsKeyValueKey(final Cell cell) {
}
/**
- * Create a Cell that is smaller than all other possible Cells for the given Cell's row. n
- * * @return First possible Cell on passed Cell's row.
+ * Create a Cell that is smaller than all other possible Cells for the given Cell's row.
+ * @return First possible Cell on passed Cell's row.
*/
public static Cell createFirstOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2862,8 +2867,8 @@ public static Cell createFirstOnNextRow(final Cell cell) {
/**
* Create a Cell that is smaller than all other possible Cells for the given Cell's rk:cf and
- * passed qualifier. nnnn * @return Last possible Cell on passed Cell's rk:cf and passed
- * qualifier.
+ * passed qualifier.
+ * @return First possible Cell on passed Cell's rk:cf and passed qualifier.
*/
public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffest, int qlength) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2883,7 +2888,7 @@ public static Cell createFirstOnRowCol(final Cell cell, byte[] qArray, int qoffe
* Creates the first cell with the row/family/qualifier of this cell and the given timestamp. Uses
* the "maximum" type that guarantees that the new cell is the lowest possible for this
* combination of row, family, qualifier, and timestamp. This cell's own timestamp is ignored.
- * @param cell - cell n
+ * @param cell - cell
*/
public static Cell createFirstOnRowColTS(Cell cell, long ts) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2901,8 +2906,8 @@ public static Cell createFirstOnRowColTS(Cell cell, long ts) {
}
/**
- * Create a Cell that is larger than all other possible Cells for the given Cell's row. n
- * * @return Last possible Cell on passed Cell's row.
+ * Create a Cell that is larger than all other possible Cells for the given Cell's row.
+ * @return Last possible Cell on passed Cell's row.
*/
public static Cell createLastOnRow(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
@@ -2919,7 +2924,8 @@ public static Cell createLastOnRow(final byte[] row) {
/**
* Create a Cell that is larger than all other possible Cells for the given Cell's rk:cf:q. Used
* in creating "fake keys" for the multi-column Bloom filter optimization to skip the row/column
- * we already know is not in the file. n * @return Last possible Cell on passed Cell's rk:cf:q.
+ * we already know is not in the file.
+ * @return Last possible Cell on passed Cell's rk:cf:q.
*/
public static Cell createLastOnRowCol(final Cell cell) {
if (cell instanceof ByteBufferExtendedCell) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
index be8e4e769ba0..9a2a29356b14 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
@@ -95,7 +95,7 @@ protected InputStream getInputStream() {
/**
* Extract a Cell.
* @return a parsed Cell or throws an Exception. EOFException or a generic IOException maybe
- * thrown if EOF is reached prematurely. Does not return null. n
+ * thrown if EOF is reached prematurely. Does not return null.
*/
@NonNull
protected abstract Cell parseCell() throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
index e7facdbfbf23..f4552c038267 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
@@ -62,7 +62,7 @@ public void write(Cell cell) throws IOException {
}
/**
- * Write int length followed by array bytes. nnnn
+ * Write int length followed by array bytes.
*/
private void write(final byte[] bytes, final int offset, final int length) throws IOException {
// TODO add BB backed os check and do for write. Pass Cell
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
index 75e3d48d9faa..07bfb53d5df7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodecWithTags.java
@@ -65,7 +65,7 @@ public void write(Cell cell) throws IOException {
}
/**
- * Write int length followed by array bytes. nnnn
+ * Write int length followed by array bytes.
*/
private void write(final byte[] bytes, final int offset, final int length) throws IOException {
this.out.write(Bytes.toBytes(length));
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
index 86a2fefae7a3..2b21546a72a4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
@@ -82,7 +82,7 @@ private static ByteBuffer allocate(final int capacity, final boolean useDirectBy
}
/**
- * This flips the underlying BB so be sure to use it _last_! n
+ * This flips the underlying BB so be sure to use it _last_!
*/
public ByteBuffer getByteBuffer() {
curBuf.flip();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
index 1613bd563d0d..d1310137e8ce 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
@@ -38,14 +38,14 @@ public interface CellOutputStream {
* Implementation must copy the entire state of the Cell. If the written Cell is modified
* immediately after the write method returns, the modifications must have absolutely no effect on
* the copy of the Cell that was added in the write.
- * @param cell Cell to write out n
+ * @param cell Cell to write out
*/
void write(Cell cell) throws IOException;
/**
* Let the implementation decide what to do. Usually means writing accumulated data into a byte[]
* that can then be read from the implementation to be sent to disk, put in the block cache, or
- * sent over the network. n
+ * sent over the network.
*/
void flush() throws IOException;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
index 593802bf3b68..08942426f875 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ImmutableBytesWritable.java
@@ -154,8 +154,9 @@ public int compareTo(ImmutableBytesWritable that) {
}
/**
- * Compares the bytes in this object to the specified byte array n * @return Positive if left is
- * bigger than right, 0 if they are equal, and negative if left is smaller than right.
+ * Compares the bytes in this object to the specified byte array
+ * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
+ * smaller than right.
*/
public int compareTo(final byte[] that) {
return WritableComparator.compareBytes(this.bytes, this.offset, this.length, that, 0,
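
A tiny sketch of the byte[] compareTo overload above:

```java
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareToExample {
  public static void main(String[] args) {
    ImmutableBytesWritable left = new ImmutableBytesWritable(Bytes.toBytes("abc"));
    System.out.println(left.compareTo(Bytes.toBytes("abd")) < 0); // true
  }
}
```
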
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
index af9126d942d2..74b0f2db108c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
@@ -57,7 +57,7 @@ public void clear() {
* @param out Stream to which the compressed tags to be written
* @param in Source where tags are available
* @param offset Offset for the tags bytes
- * @param length Length of all tag bytes n
+ * @param length Length of all tag bytes
*/
public void compressTags(OutputStream out, byte[] in, int offset, int length) throws IOException {
int pos = offset;
@@ -76,7 +76,7 @@ public void compressTags(OutputStream out, byte[] in, int offset, int length) th
* @param out Stream to which the compressed tags to be written
* @param in Source buffer where tags are available
* @param offset Offset for the tags byte buffer
- * @param length Length of all tag bytes n
+ * @param length Length of all tag bytes
*/
public void compressTags(OutputStream out, ByteBuffer in, int offset, int length)
throws IOException {
@@ -101,7 +101,7 @@ public void compressTags(OutputStream out, ByteBuffer in, int offset, int length
* @param src Stream where the compressed tags are available
* @param dest Destination array where to write the uncompressed tags
* @param offset Offset in destination where tags to be written
- * @param length Length of all tag bytes n
+ * @param length Length of all tag bytes
*/
public void uncompressTags(InputStream src, byte[] dest, int offset, int length)
throws IOException {
@@ -133,7 +133,7 @@ public void uncompressTags(InputStream src, byte[] dest, int offset, int length)
* @param dest Destination array where to write the uncompressed tags
* @param offset Offset in destination where tags to be written
* @param length Length of all tag bytes
- * @return bytes count read from source to uncompress all tags. n
+ * @return bytes count read from source to uncompress all tags.
*/
public int uncompressTags(ByteBuff src, byte[] dest, int offset, int length) throws IOException {
int srcBeginPos = src.position();
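A round-trip sketch of the compress/uncompress pair documented above, assuming the usual dictionary-backed constructor of TagCompressionContext:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.io.TagCompressionContext;
import org.apache.hadoop.hbase.io.util.LRUDictionary;

public class TagRoundTrip {
  public static void main(String[] args) throws Exception {
    TagCompressionContext ctx = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
    byte[] tags = new byte[] {}; // empty here; real input is a cell's serialized tag block
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    ctx.compressTags(compressed, tags, 0, tags.length);
    ctx.clear(); // reset dictionary state before the read side replays the stream
    byte[] restored = new byte[tags.length];
    ctx.uncompressTags(new ByteArrayInputStream(compressed.toByteArray()), restored, 0,
      tags.length);
  }
}
```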
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
index 09647b4ce912..f01529681627 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Cipher.java
@@ -86,7 +86,7 @@ public CipherProvider getProvider() {
* @param out the output stream to wrap
* @param context the encryption context
* @param iv initialization vector
- * @return the encrypting wrapper n
+ * @return the encrypting wrapper
*/
public abstract OutputStream createEncryptionStream(OutputStream out, Context context, byte[] iv)
throws IOException;
@@ -95,7 +95,7 @@ public abstract OutputStream createEncryptionStream(OutputStream out, Context co
* Create an encrypting output stream given an initialized encryptor
* @param out the output stream to wrap
* @param encryptor the encryptor
- * @return the encrypting wrapper n
+ * @return the encrypting wrapper
*/
public abstract OutputStream createEncryptionStream(OutputStream out, Encryptor encryptor)
throws IOException;
@@ -105,7 +105,7 @@ public abstract OutputStream createEncryptionStream(OutputStream out, Encryptor
* @param in the input stream to wrap
* @param context the encryption context
* @param iv initialization vector
- * @return the decrypting wrapper n
+ * @return the decrypting wrapper
*/
public abstract InputStream createDecryptionStream(InputStream in, Context context, byte[] iv)
throws IOException;
@@ -114,7 +114,7 @@ public abstract InputStream createDecryptionStream(InputStream in, Context conte
* Create a decrypting output stream given an initialized decryptor
* @param in the input stream to wrap
* @param decryptor the decryptor
- * @return the decrypting wrapper n
+ * @return the decrypting wrapper
*/
public abstract InputStream createDecryptionStream(InputStream in, Decryptor decryptor)
throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
index 0d29fe990b92..938227845945 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Decryptor.java
@@ -28,7 +28,7 @@
public interface Decryptor {
/**
- * Set the secret key n
+ * Set the secret key
*/
public void setKey(Key key);
@@ -45,12 +45,12 @@ public interface Decryptor {
public int getBlockSize();
/**
- * Set the initialization vector n
+ * Set the initialization vector
*/
public void setIv(byte[] iv);
/**
- * Create a stream for decryption n
+ * Create a stream for decryption
*/
public InputStream createDecryptionStream(InputStream in);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 761fe04d6fce..13e335b82ee3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -318,7 +318,7 @@ private static byte[] generateSecretKey(String algorithm, int keyLengthBytes, ch
*
* The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
* @param out ciphertext
- * @param src plaintext nnnn
+ * @param src plaintext
*/
public static void encrypt(OutputStream out, byte[] src, int offset, int length, Encryptor e)
throws IOException {
@@ -333,7 +333,7 @@ public static void encrypt(OutputStream out, byte[] src, int offset, int length,
/**
* Encrypt a block of plaintext
* @param out ciphertext
- * @param src plaintext nnnnn
+ * @param src plaintext
*/
public static void encrypt(OutputStream out, byte[] src, int offset, int length, Context context,
byte[] iv) throws IOException {
@@ -349,7 +349,7 @@ public static void encrypt(OutputStream out, byte[] src, int offset, int length,
*
* The encryptor's state will be finalized. It should be reinitialized or returned to the pool.
* @param out ciphertext
- * @param in plaintext nn
+ * @param in plaintext
*/
public static void encrypt(OutputStream out, InputStream in, Encryptor e) throws IOException {
OutputStream cout = e.createEncryptionStream(out);
@@ -363,7 +363,7 @@ public static void encrypt(OutputStream out, InputStream in, Encryptor e) throws
/**
* Encrypt a stream of plaintext given a context and IV
* @param out ciphertext
- * @param in plaintet nnn
+ * @param in plaintext
*/
public static void encrypt(OutputStream out, InputStream in, Context context, byte[] iv)
throws IOException {
@@ -378,7 +378,6 @@ public static void encrypt(OutputStream out, InputStream in, Context context, by
* Decrypt a block of ciphertext read in from a stream with the given cipher and context
*
* The decryptor's state will be finalized. It should be reinitialized or returned to the pool.
- * nnnnnn
*/
public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize, Decryptor d)
throws IOException {
@@ -391,7 +390,7 @@ public static void decrypt(byte[] dest, int destOffset, InputStream in, int dest
}
/**
- * Decrypt a block of ciphertext from a stream given a context and IV nnnnnnn
+ * Decrypt a block of ciphertext from a stream given a context and IV
*/
public static void decrypt(byte[] dest, int destOffset, InputStream in, int destSize,
Context context, byte[] iv) throws IOException {
@@ -402,7 +401,7 @@ public static void decrypt(byte[] dest, int destOffset, InputStream in, int dest
}
/**
- * Decrypt a stream of ciphertext given a decryptor nnnnn
+ * Decrypt a stream of ciphertext given a decryptor
*/
public static void decrypt(OutputStream out, InputStream in, int outLen, Decryptor d)
throws IOException {
@@ -425,7 +424,7 @@ public static void decrypt(OutputStream out, InputStream in, int outLen, Decrypt
}
/**
- * Decrypt a stream of ciphertext given a context and IV nnnnnn
+ * Decrypt a stream of ciphertext given a context and IV
*/
public static void decrypt(OutputStream out, InputStream in, int outLen, Context context,
byte[] iv) throws IOException {
@@ -436,7 +435,8 @@ public static void decrypt(OutputStream out, InputStream in, int outLen, Context
}
/**
- * Resolves a key for the given subject nn * @return a key for the given subject
+ * Resolves a key for the given subject
+ * @return a key for the given subject
* @throws IOException if the key is not found
*/
public static Key getSecretKeyForSubject(String subject, Configuration conf) throws IOException {
@@ -460,7 +460,7 @@ public static Key getSecretKeyForSubject(String subject, Configuration conf) thr
* @param in plaintext
* @param conf configuration
* @param cipher the encryption algorithm
- * @param iv the initialization vector, can be null n
+ * @param iv the initialization vector, can be null
*/
public static void encryptWithSubjectKey(OutputStream out, InputStream in, String subject,
Configuration conf, Cipher cipher, byte[] iv) throws IOException {
@@ -482,7 +482,7 @@ public static void encryptWithSubjectKey(OutputStream out, InputStream in, Strin
* @param subject the subject's key alias
* @param conf configuration
* @param cipher the encryption algorithm
- * @param iv the initialization vector, can be null n
+ * @param iv the initialization vector, can be null
*/
public static void decryptWithSubjectKey(OutputStream out, InputStream in, int outLen,
String subject, Configuration conf, Cipher cipher, byte[] iv) throws IOException {
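The two subject-key helpers whose javadoc is repaired above pair up as a round trip. A sketch, assuming conf carries a key provider that can resolve the subject alias and that the cipher came from Encryption.getCipher(conf, "AES"):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.security.SecureRandom;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;

class SubjectKeyRoundTrip {
  static byte[] roundTrip(Configuration conf, Cipher cipher, byte[] plaintext) throws IOException {
    byte[] iv = new byte[cipher.getIvLength()];
    new SecureRandom().nextBytes(iv); // a random IV; both calls must see the same one
    ByteArrayOutputStream ciphertext = new ByteArrayOutputStream();
    Encryption.encryptWithSubjectKey(ciphertext, new ByteArrayInputStream(plaintext), "subject",
      conf, cipher, iv);
    ByteArrayOutputStream recovered = new ByteArrayOutputStream();
    Encryption.decryptWithSubjectKey(recovered, new ByteArrayInputStream(ciphertext.toByteArray()),
      plaintext.length, "subject", conf, cipher, iv);
    return recovered.toByteArray();
  }
}
```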
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
index f030de3e1746..34f0fa4c0f7c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryptor.java
@@ -28,7 +28,7 @@
public interface Encryptor {
/**
- * Set the secret key n
+ * Set the secret key
*/
public void setKey(Key key);
@@ -50,12 +50,12 @@ public interface Encryptor {
public byte[] getIv();
/**
- * Set the initialization vector n
+ * Set the initialization vector
*/
public void setIv(byte[] iv);
/**
- * Create a stream for encryption n
+ * Create a stream for encryption
*/
public OutputStream createEncryptionStream(OutputStream out);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
index 6c6ec5dd7596..0852bc7f13f4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyProvider.java
@@ -31,13 +31,13 @@ public interface KeyProvider {
public static final String PASSWORDFILE = "passwordfile";
/**
- * Initialize the key provider n
+ * Initialize the key provider
*/
public void init(String params);
/**
- * Retrieve the key for a given key aliase n * @return the keys corresponding to the supplied
- * alias, or null if a key is not found
+ * Retrieve the key for a given key alias
+ * @return the key corresponding to the supplied alias, or null if a key is not found
*/
public Key getKey(String alias);
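A sketch of the lookup contract documented above; the concrete provider and its init parameter format are assumptions here, not part of the patch:

```java
import java.security.Key;
import org.apache.hadoop.hbase.io.crypto.KeyProvider;
import org.apache.hadoop.hbase.io.crypto.KeyStoreKeyProvider;

class KeyLookup {
  static Key lookup(String params, String alias) {
    KeyProvider provider = new KeyStoreKeyProvider(); // one concrete provider
    provider.init(params); // e.g. a keystore URI; the format is provider-specific
    Key key = provider.getKey(alias);
    if (key == null) {
      // per the contract above: null means the alias was not found
    }
    return key;
  }
}
```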
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
index 7f13b2c6f665..52825b6c683d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java
@@ -90,9 +90,9 @@ ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingContext dec
EncodedSeeker createSeeker(HFileBlockDecodingContext decodingCtx);
/**
- * Creates a encoder specific encoding context n * store configuration n * encoding strategy used
- * n * header bytes to be written, put a dummy header here if the header is unknown n * HFile meta
- * data
+ * Creates an encoder specific encoding context, given the store configuration, the encoding
+ * strategy used, the header bytes to be written (put a dummy header here if the header is
+ * unknown), and the HFile meta data.
* @return a newly created encoding context
*/
HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf,
@@ -100,7 +99,9 @@ HFileBlockEncodingContext newDataBlockEncodingContext(Configuration conf,
/**
* Creates an encoder specific decoding context, which will prepare the data before actual
- * decoding n * store configuration n * HFile meta data
+ * decoding
+ * @param conf store configuration
+ * @param meta HFile meta data
* @return a newly created decoding context
*/
HFileBlockDecodingContext newDataBlockDecodingContext(Configuration conf, HFileContext meta);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
index 21f6c92ef358..4eba8fd854eb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.java
@@ -99,7 +99,7 @@ public void writeIdInBytes(OutputStream stream) throws IOException {
/**
* Writes id bytes to the given array starting from offset.
* @param dest output array
- * @param offset starting offset of the output array n
+ * @param offset starting offset of the output array
*/
// System.arraycopy is static native. Nothing we can do this until we have minimum JDK 9.
@SuppressWarnings("UnsafeFinalization")
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
index 3948aee35aed..68b300ae60fe 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
@@ -181,7 +181,7 @@ public int getSize() {
* @param inputBuffer Array to be compressed.
* @param offset Offset to beginning of the data.
* @param length Length to be compressed.
- * @return Size of compressed data in bytes. n
+ * @return Size of compressed data in bytes.
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH_EXCEPTION",
justification = "No sure what findbugs wants but looks to me like no NPE")
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
index 6835a8bac3ce..63f173c38cc1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDecodingContext.java
@@ -32,9 +32,12 @@ public interface HFileBlockDecodingContext {
/**
* Perform all actions that need to be done before the encoder's real decoding process.
* Decompression needs to be done if {@link HFileContext#getCompression()} returns a valid
- * compression algorithm. n * numBytes after block and encoding headers n * numBytes without
- * header required to store the block after decompressing (not decoding) n * ByteBuffer pointed
- * after the header but before the data n * on disk data to be decoded
+ * compression algorithm.
+ * @param onDiskSizeWithoutHeader numBytes after block and encoding headers
+ * @param uncompressedSizeWithoutHeader numBytes without header required to store the block after
+ *          decompressing (not decoding)
+ * @param blockBufferWithoutHeader ByteBuffer pointed after the header but before the data
+ * @param onDiskBlock on disk data to be decoded
*/
void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
ByteBuff blockBufferWithoutHeader, ByteBuff onDiskBlock) throws IOException;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
index ed97147ac9bb..ad193cad613f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/IndexBlockEncoding.java
@@ -91,7 +91,7 @@ public void writeIdInBytes(OutputStream stream) throws IOException {
/**
* Writes id bytes to the given array starting from offset.
* @param dest output array
- * @param offset starting offset of the output array n
+ * @param offset starting offset of the output array
*/
public void writeIdInBytes(byte[] dest, int offset) throws IOException {
System.arraycopy(idInBytes, 0, dest, offset, ID_SIZE);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index 5a61622101bb..a2e63b9fda0d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -93,7 +93,8 @@ public int read(byte[] b, int off, int len) throws IOException {
/**
* Read bytes starting from the specified position. This requires rawStream is an instance of
- * {@link PositionedReadable}. nnnn * @return the number of bytes read
+ * {@link PositionedReadable}.
+ * @return the number of bytes read
*/
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
if (!(rawStream instanceof PositionedReadable)) {
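The positioned read documented above only works when the wrapped stream supports it. A sketch, with the rate-capped constructor assumed:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;

class ThrottledRead {
  static int readAt(FSDataInputStream in, long pos) throws IOException {
    // FSDataInputStream implements PositionedReadable, so positioned reads are legal here
    ThrottledInputStream throttled = new ThrottledInputStream(in, 1024 * 1024); // ~1 MB/s cap
    byte[] buf = new byte[4096];
    return throttled.read(pos, buf, 0, buf.length); // returns the number of bytes read
  }
}
```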
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
index 157df98a9b07..b1ab8a9b28d4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java
@@ -80,7 +80,7 @@ public interface Dictionary {
* @param data the data to be written in byte[]
* @param offset the offset
* @param length length to be written
- * @param dict the dictionary whose contents are to written n
+ * @param dict the dictionary whose contents are to be written
*/
public static void write(OutputStream out, byte[] data, int offset, int length, Dictionary dict)
throws IOException {
@@ -103,7 +103,7 @@ public static void write(OutputStream out, byte[] data, int offset, int length,
* @param data the data to be written in ByteBuffer
* @param offset the offset
* @param length length to be written
- * @param dict the dictionary whose contents are to written n
+ * @param dict the dictionary whose contents are to be written
*/
public static void write(OutputStream out, ByteBuffer data, int offset, int length,
Dictionary dict) throws IOException {
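A sketch of the static write helper documented above; the dictionary setup follows the interface's init contract:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.io.util.Dictionary;
import org.apache.hadoop.hbase.io.util.LRUDictionary;

class DictWrite {
  static byte[] writeCompressed(byte[] data) throws IOException {
    Dictionary dict = new LRUDictionary();
    dict.init(Byte.MAX_VALUE); // capacity of the LRU dictionary
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // Emits a short dictionary index when `data` is already known, otherwise the
    // literal bytes, adding the entry to the dictionary for the next occurrence.
    Dictionary.write(out, data, 0, data.length, dict);
    return out.toByteArray();
  }
}
```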
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
index 7cfa007478f4..97e1e9d3345a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java
@@ -118,8 +118,8 @@ public static int readRawVarint32(ByteBuff input) throws IOException {
}
/**
- * Reads a varInt value stored in an array. n * Input array where the varInt is available n *
- * Offset in the input array where varInt is available
+ * Reads a varInt value stored in an array, given the input array where the varInt is available
+ * and the offset in the input array where the varInt is available.
* @return A pair of integers in which first value is the actual decoded varInt value and second
* value as number of bytes taken by this varInt for it's storage in the input array.
* @throws IOException When varint is malformed and not able to be read correctly
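The Pair-returning decoder described above is used like this; the sketch assumes the byte[] overload of readRawVarint32 alongside the ByteBuff one visible in the hunk header:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.io.util.StreamUtils;
import org.apache.hadoop.hbase.util.Pair;

class VarIntDecode {
  // Returns { decoded value, offset just past the varInt } so the caller can keep scanning.
  static int[] decode(byte[] input, int offset) throws IOException {
    Pair<Integer, Integer> p = StreamUtils.readRawVarint32(input, offset);
    // first = the decoded varInt value, second = number of bytes it occupied in `input`
    return new int[] { p.getFirst(), offset + p.getSecond() };
  }
}
```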
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
index 27eca9479d6a..9e77bfcd04bb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java
@@ -85,7 +85,8 @@ public boolean release() {
public abstract int position();
/**
- * Sets this ByteBuff's position to the given value. n * @return this object
+ * Sets this ByteBuff's position to the given value.
+ * @return this object
*/
public abstract ByteBuff position(int position);
@@ -184,7 +185,7 @@ public boolean release() {
public abstract byte get();
/**
- * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
+ * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
* @return the byte at the given index
*/
public abstract byte get(int index);
@@ -244,7 +245,7 @@ public boolean release() {
public abstract ByteBuff put(byte[] src, int offset, int length);
/**
- * Copies from the given byte[] to this ByteBuff n * @return this ByteBuff
+ * Copies from the given byte[] to this ByteBuff
* @param src source byte array
* @return this ByteBuff
*/
@@ -269,14 +271,15 @@ public boolean release() {
* Fetches the short value at the given index. Does not change position of the underlying
* ByteBuffers. The caller is sure that the index will be after the current position of this
* ByteBuff. So even if the current short does not fit in the current item we can safely move to
- * the next item and fetch the remaining bytes forming the short n * @return the short value at
- * the given index
+ * the next item and fetch the remaining bytes forming the short
+ * @return the short value at the given index
*/
public abstract short getShort(int index);
/**
* Fetches the short value at the given offset from current position. Does not change position of
- * the underlying ByteBuffers. n * @return the short value at the given index.
+ * the underlying ByteBuffers.
+ * @return the short value at the given index.
*/
public abstract short getShortAfterPosition(int offset);
@@ -319,13 +322,15 @@ public boolean release() {
* Fetches the long at the given index. Does not change position of the underlying ByteBuffers.
* The caller is sure that the index will be after the current position of this ByteBuff. So even
* if the current long does not fit in the current item we can safely move to the next item and
- * fetch the remaining bytes forming the long n * @return the long value at the given index
+ * fetch the remaining bytes forming the long
+ * @return the long value at the given index
*/
public abstract long getLong(int index);
/**
* Fetches the long value at the given offset from current position. Does not change position of
- * the underlying ByteBuffers. n * @return the long value at the given index.
+ * the underlying ByteBuffers.
+ * @return the long value at the given index.
*/
public abstract long getLongAfterPosition(int offset);
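The index-based getters fixed above never move the read position, unlike their relative counterparts. A sketch using the single-buffer implementation:

```java
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class ByteBuffDemo {
  public static void main(String[] args) {
    ByteBuff bb = new SingleByteBuff(ByteBuffer.allocate(16));
    bb.putLong(42L);         // relative write: advances position to 8
    long a = bb.getLong(0);  // absolute read: position stays at 8
    bb.position(0);
    long b = bb.getLong();   // relative read: position moves back to 8
    System.out.println(a == b); // true
  }
}
```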
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
index c55ee021bd00..ddd567eb4b92 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/MultiByteBuff.java
@@ -149,8 +149,8 @@ public int capacity() {
}
/**
- * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the byte at the given index
+ * Fetches the byte at the given index. Does not change position of the underlying ByteBuffers
+ * @return the byte at the given index
*/
@Override
public byte get(int index) {
@@ -201,8 +201,8 @@ private int getItemIndexFromCurItemIndex(int elemIndex) {
}
/**
- * Fetches the int at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the int value at the given index
+ * Fetches the int at the given index. Does not change position of the underlying ByteBuffers
+ * @return the int value at the given index
*/
@Override
public int getInt(int index) {
@@ -235,8 +235,8 @@ public int getIntAfterPosition(int offset) {
}
/**
- * Fetches the short at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the short value at the given index
+ * Fetches the short at the given index. Does not change position of the underlying ByteBuffers
+ * @return the short value at the given index
*/
@Override
public short getShort(int index) {
@@ -347,8 +347,8 @@ private long getLong(int index, int itemIndex) {
}
/**
- * Fetches the long at the given index. Does not change position of the underlying ByteBuffers n
- * * @return the long value at the given index
+ * Fetches the long at the given index. Does not change position of the underlying ByteBuffers
+ * @return the long value at the given index
*/
@Override
public long getLong(int index) {
@@ -388,7 +388,8 @@ public int position() {
}
/**
- * Sets this MBB's position to the given value. n * @return this object
+ * Sets this MBB's position to the given value.
+ * @return this object
*/
@Override
public MultiByteBuff position(int position) {
@@ -569,7 +570,7 @@ public long getLong() {
/**
* Copies the content from this MBB's current position to the byte array and fills it. Also
- * advances the position of the MBB by the length of the byte[]. n
+ * advances the position of the MBB by the length of the byte[].
*/
@Override
public void get(byte[] dst) {
@@ -615,7 +616,8 @@ public void get(int sourceOffset, byte[] dst, int offset, int length) {
}
/**
- * Marks the limit of this MBB. n * @return This MBB
+ * Marks the limit of this MBB.
+ * @return This MBB
*/
@Override
public MultiByteBuff limit(int limit) {
@@ -686,8 +688,8 @@ public MultiByteBuff duplicate() {
}
/**
- * Writes a byte to this MBB at the current position and increments the position n * @return this
- * object
+ * Writes a byte to this MBB at the current position and increments the position
+ * @return this object
*/
@Override
public MultiByteBuff put(byte b) {
@@ -960,7 +962,7 @@ private static byte long0(long x) {
}
/**
- * Jumps the current position of this MBB by specified length. n
+ * Jumps the current position of this MBB by specified length.
*/
@Override
public MultiByteBuff skip(int length) {
@@ -982,7 +984,7 @@ public MultiByteBuff skip(int length) {
}
/**
- * Jumps back the current position of this MBB by specified length. n
+ * Jumps back the current position of this MBB by specified length.
*/
@Override
public MultiByteBuff moveBack(int length) {
@@ -1109,8 +1111,9 @@ public void get(ByteBuffer out, int sourceOffset, int length) {
}
/**
- * Copy the content from this MBB to a byte[] based on the given offset and length n * the
- * position from where the copy should start n * the length upto which the copy has to be done
+ * Copy the content from this MBB to a byte[] based on the given offset and length
+ * @param offset the position from where the copy should start
+ * @param length the length up to which the copy has to be done
* @return byte[] with the copied contents from this MBB.
*/
@Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
index 9ef9e2ddc175..e2cac4b6b567 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java
@@ -179,7 +179,7 @@ public static <T> T runAsLoginUser(PrivilegedExceptionAction<T> action) throws I
/**
* Wraps an underlying {@code UserGroupInformation} instance.
- * @param ugi The base Hadoop user n
+ * @param ugi The base Hadoop user
*/
public static User create(UserGroupInformation ugi) {
if (ugi == null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
index fcf6cc648969..436b5bbc69a0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/UserProvider.java
@@ -181,7 +181,7 @@ public User getCurrent() throws IOException {
/**
* Wraps an underlying {@code UserGroupInformation} instance.
- * @param ugi The base Hadoop user n
+ * @param ugi The base Hadoop user
*/
public User create(UserGroupInformation ugi) {
if (ugi == null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index 179074ef00c0..88ee9c9666a3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -69,7 +69,8 @@ public PositionedByteRange set(byte[] bytes, int offset, int length) {
/**
* Update the beginning of this range. {@code offset + length} may not be greater than
- * {@code bytes.length}. Resets {@code position} to 0. n * the new start of this range.
+ * {@code bytes.length}. Resets {@code position} to 0.
+ * @param offset the new start of this range.
* @return this.
*/
@Override
@@ -82,7 +82,8 @@ public PositionedByteRange setOffset(int offset) {
/**
* Update the length of this range. {@code offset + length} should not be greater than
* {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
- * {@code position} to {@code length}. n * The new length of this range.
+ * {@code position} to {@code length}.
+ * @param length The new length of this range.
* @return this.
*/
@Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
index 32c6779bc04d..be1868b70d7f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java
@@ -865,7 +865,7 @@ public static boolean equals(ByteBuffer buf1, int o1, int l1, ByteBuffer buf2, i
}
/**
- * n * ByteBuffer to hash n * offset to start from n * length to hash
+ * @param buf ByteBuffer to hash
+ * @param offset offset to start from
+ * @param length length to hash
*/
public static int hashCode(ByteBuffer buf, int offset, int length) {
int hash = 1;
@@ -980,7 +980,8 @@ public static int toInt(ByteBuffer buffer, int offset) {
* @param buf The ByteBuffer
* @param offset Offset to int value
* @param length Number of bytes used to store the int value.
- * @return the int value n * if there's not enough bytes left in the buffer after the given offset
+ * @return the int value
+ * @throws IllegalArgumentException if there are not enough bytes left in the buffer after the given offset
*/
public static int readAsInt(ByteBuffer buf, int offset, final int length) {
if (offset + length > buf.limit()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
index 64bd5cb3b6c8..4addf9057e2f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java
@@ -63,14 +63,13 @@ public interface ByteRange extends Comparable<ByteRange> {
/**
* Nullifies this ByteRange. That is, it becomes a husk, being a range over no byte[] whatsoever.
- * n
*/
public ByteRange unset();
/**
* Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
* is set to {@code capacity}.
- * @param capacity the size of a new byte[]. n
+ * @param capacity the size of a new byte[].
*/
public ByteRange set(int capacity);
@@ -78,7 +77,7 @@ public interface ByteRange extends Comparable {
* Reuse this {@code ByteRange} over a new byte[]. {@code offset} is set to 0 and {@code length}
* is set to {@code bytes.length}. A null {@code bytes} IS supported, in which case this method
* will behave equivalently to {@link #unset()}.
- * @param bytes the array to wrap. n
+ * @param bytes the array to wrap.
*/
public ByteRange set(byte[] bytes);
@@ -188,21 +187,21 @@ public interface ByteRange extends Comparable {
/**
* Store the short value at {@code index}
* @param index the index in the range where {@code val} is stored
- * @param val the value to store n
+ * @param val the value to store
*/
public ByteRange putShort(int index, short val);
/**
* Store the int value at {@code index}
* @param index the index in the range where {@code val} is stored
- * @param val the value to store n
+ * @param val the value to store
*/
public ByteRange putInt(int index, int val);
/**
* Store the long value at {@code index}
* @param index the index in the range where {@code val} is stored
- * @param val the value to store n
+ * @param val the value to store
*/
public ByteRange putLong(int index, long val);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index d66625060402..0203cc390fe8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -231,8 +231,9 @@ public int compareTo(Bytes that) {
}
/**
+ * Compares the bytes in this object to the specified byte array
+ * @return Positive if left is bigger than right, 0 if they are equal, and negative if left is
+ *         smaller than right.
*/
public int compareTo(final byte[] that) {
return BYTES_RAWCOMPARATOR.compare(this.bytes, this.offset, this.length, that, 0, that.length);
@@ -534,7 +535,8 @@ public static String toString(final byte[] b, int off, int len) {
/**
* Write a printable representation of a byte array.
- * @param b byte array n * @see #toStringBinary(byte[], int, int)
+ * @param b byte array
+ * @see #toStringBinary(byte[], int, int)
*/
public static String toStringBinary(final byte[] b) {
if (b == null) return "null";
@@ -2055,7 +2057,7 @@ public static byte[] copy(byte[] bytes) {
* Copy the byte array given in parameter and return an instance of a new byte array with the same
* length and the same content.
* @param bytes the byte array to copy from
- * @return a copy of the given designated byte array nn
+ * @return a copy of the given designated byte array
*/
public static byte[] copy(byte[] bytes, final int offset, final int length) {
if (bytes == null) return null;
@@ -2236,7 +2238,7 @@ public static void zero(byte[] b) {
}
/**
- * Fill given array with zeros at the specified position. nnn
+ * Fill given array with zeros at the specified position.
*/
public static void zero(byte[] b, int offset, int length) {
checkPositionIndex(offset, b.length, "offset");
@@ -2319,7 +2321,8 @@ public static byte[] createMaxByteArray(int maxByteCount) {
}
/**
- * Create a byte array which is multiple given bytes nn * @return byte array
+ * Create a byte array which repeats the source byte array {@code multiNum} times
+ * @return byte array
*/
public static byte[] multiple(byte[] srcBytes, int multiNum) {
if (multiNum <= 0) {
@@ -2374,7 +2377,7 @@ private static byte hexCharsToByte(char c1, char c2) {
/**
* Create a byte array from a string of hash digits. The length of the string must be a multiple
- * of 2 n
+ * of 2
*/
public static byte[] fromHex(String hex) {
checkArgument(hex.length() % 2 == 0, "length must be a multiple of 2");
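A few of the Bytes helpers touched above, exercised together:

```java
import org.apache.hadoop.hbase.util.Bytes;

public class BytesDemo {
  public static void main(String[] args) {
    byte[] b = Bytes.fromHex("cafebabe");  // 4 bytes; "cafebabe".length() is a multiple of 2
    byte[] twice = Bytes.multiple(b, 2);   // b repeated twice -> 8 bytes
    Bytes.zero(twice, 0, 4);               // zero only the first 4 bytes, in place
    System.out.println(Bytes.toStringBinary(twice)); // printable form of the result
  }
}
```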
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
index d943803fb2fa..dc810834a660 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ChecksumType.java
@@ -85,8 +85,8 @@ public byte getCode() {
}
/**
- * Cannot rely on enum ordinals . They change if item is removed or moved. Do our own codes. n
- * * @return Type associated with passed code.
+ * Cannot rely on enum ordinals. They change if item is removed or moved. Do our own codes.
+ * @return Type associated with passed code.
*/
public static ChecksumType codeToType(final byte b) {
for (ChecksumType t : ChecksumType.values()) {
@@ -98,8 +98,8 @@ public static ChecksumType codeToType(final byte b) {
}
/**
- * Map a checksum name to a specific type. Do our own names. n * @return Type associated with
- * passed code.
+ * Map a checksum name to a specific type. Do our own names.
+ * @return Type associated with passed code.
*/
public static ChecksumType nameToType(final String name) {
for (ChecksumType t : ChecksumType.values()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
index 84e70873727e..1b3eef180a57 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Classes.java
@@ -27,10 +27,11 @@ public class Classes {
/**
* Equivalent of {@link Class#forName(String)} which also returns classes for primitives like
- * boolean, etc. n * The name of the class to retrieve. Can be either a normal class
- * or a primitive class.
- * @return The class specified by className n * If the requested class can not be
- * found.
+ * boolean, etc.
+ * @param className The name of the class to retrieve. Can be either a normal class or a
+ *          primitive class.
+ * @return The class specified by className
+ * @throws ClassNotFoundException If the requested class can not be found.
*/
public static Class<?> extendedForName(String className) throws ClassNotFoundException {
Class> valueType;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index ca8d27d8eebc..800764954569 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -328,7 +328,7 @@ public static Path getWALRootDir(final Configuration c) throws IOException {
* Returns the URI in the string format
* @param c configuration
* @param p path
- * @return - the URI's to string format n
+ * @return the URI in string format
*/
public static String getDirUri(final Configuration c, Path p) throws IOException {
if (p.toUri().getScheme() != null) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
index 531d12085fe1..a5e6a65efc9b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CoprocessorClassLoader.java
@@ -215,7 +215,7 @@ public static void clearCache() {
* @param parent the parent class loader for exempted classes
* @param pathPrefix a prefix used in temp path name to store the jar file locally
* @param conf the configuration used to create the class loader, if needed
- * @return a CoprocessorClassLoader for the coprocessor jar path n
+ * @return a CoprocessorClassLoader for the coprocessor jar path
*/
public static CoprocessorClassLoader getClassLoader(final Path path, final ClassLoader parent,
final String pathPrefix, final Configuration conf) throws IOException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
index 276e436ed13c..0cd1b41c5022 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java
@@ -58,7 +58,7 @@ public ReentrantLock createObject(K key) {
}, NB_CONCURRENT_LOCKS);
/**
- * Return a lock for the given key. The lock is already locked. n
+ * Return a lock for the given key. The lock is already locked.
*/
public ReentrantLock acquireLock(K key) {
if (key == null) throw new IllegalArgumentException("key must not be null");
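The acquire-already-locked contract clarified above implies the canonical usage below:

```java
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.hbase.util.KeyLocker;

class PerKeyWork {
  private final KeyLocker<String> locker = new KeyLocker<>();

  void doWork(String key) {
    ReentrantLock lock = locker.acquireLock(key); // the returned lock is already held
    try {
      // critical section guarded per key
    } finally {
      lock.unlock(); // the caller is responsible for releasing it
    }
  }
}
```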
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
index d967f5d53a77..7e143e15de28 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MD5Hash.java
@@ -29,8 +29,8 @@
public class MD5Hash {
/**
- * Given a byte array, returns in MD5 hash as a hex string. n * @return SHA1 hash as a 32
- * character hex string.
+ * Given a byte array, returns its MD5 hash as a hex string.
+ * @return MD5 hash as a 32 character hex string.
*/
public static String getMD5AsHex(byte[] key) {
return getMD5AsHex(key, 0, key.length);
@@ -39,8 +39,8 @@ public static String getMD5AsHex(byte[] key) {
/**
* Given a byte array, returns its MD5 hash as a hex string. Only "length" number of bytes
* starting at "offset" within the byte array are used.
- * @param key the key to hash (variable length byte array) nn * @return MD5 hash as a 32 character
- * hex string.
+ * @param key the key to hash (variable length byte array)
+ * @return MD5 hash as a 32 character hex string.
*/
public static String getMD5AsHex(byte[] key, int offset, int length) {
try {
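Usage of the helper documented above is one line; the output really is an MD5 (not SHA1) hex string, per the fix:

```java
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MD5Hash;

public class Md5Demo {
  public static void main(String[] args) {
    String hex = MD5Hash.getMD5AsHex(Bytes.toBytes("row-key"));
    System.out.println(hex.length()); // 32 hex characters
  }
}
```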
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
index dd8eb4f18584..fe8d111dfbe9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Pair.java
@@ -74,14 +74,14 @@ public void setSecond(T2 b) {
}
/**
- * Return the first element stored in the pair. n
+ * Return the first element stored in the pair.
*/
public T1 getFirst() {
return first;
}
/**
- * Return the second element stored in the pair. n
+ * Return the second element stored in the pair.
*/
public T2 getSecond() {
return second;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
index 44bc2b81dc06..ef44fc4e0436 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PairOfSameType.java
@@ -42,14 +42,14 @@ public PairOfSameType(T a, T b) {
}
/**
- * Return the first element stored in the pair. n
+ * Return the first element stored in the pair.
*/
public T getFirst() {
return first;
}
/**
- * Return the second element stored in the pair. n
+ * Return the second element stored in the pair.
*/
public T getSecond() {
return second;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
index efa52612be63..cb61cfbe246d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PositionedByteRange.java
@@ -156,12 +156,12 @@ public interface PositionedByteRange extends ByteRange {
public PositionedByteRange put(byte[] val, int offset, int length);
/**
- * Limits the byte range upto a specified value. Limit cannot be greater than capacity nn
+ * Limits the byte range up to a specified value. Limit cannot be greater than capacity
*/
public PositionedByteRange setLimit(int limit);
/**
- * Return the current limit n
+ * Return the current limit
*/
public int getLimit();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
index c3d4d82f6bdd..f73064f70a8e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java
@@ -77,8 +77,8 @@ public static String format(final String value, final Unit unit) {
/**
* Convert a human readable string to its value.
- * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit) nn * @return the value
- * corresponding to the human readable string
+ * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
+ * @return the value corresponding to the human readable string
*/
public static String valueOf(final String pretty, final Unit unit) throws HBaseException {
StringBuilder value = new StringBuilder();
@@ -155,7 +155,8 @@ private static String humanReadableTTL(final long interval) {
* Convert a human readable time interval to seconds. Examples of the human readable time
* intervals are: 50 DAYS 1 HOUR 30 MINUTES , 25000 SECONDS etc. The units of time specified can
* be in uppercase as well as lowercase. Also, if a single number is specified without any time
- * unit, it is assumed to be in seconds. n * @return value in seconds
+ * unit, it is assumed to be in seconds.
+ * @return value in seconds
*/
private static long humanReadableIntervalToSec(final String humanReadableInterval)
throws HBaseException {
@@ -261,7 +262,7 @@ private static String humanReadableByte(final long size) {
* KB , 25000 B etc. The units of size specified can be in uppercase as well as lowercase. Also,
* if a single number is specified without any time unit, it is assumed to be in bytes.
* @param humanReadableSize human readable size
- * @return value in bytes n
+ * @return value in bytes
*/
private static long humanReadableSizeToBytes(final String humanReadableSize)
throws HBaseException {
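The interval parsing described above feeds PrettyPrinter.valueOf; a sketch using the TIME_INTERVAL unit:

```java
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.util.PrettyPrinter;

public class IntervalDemo {
  public static void main(String[] args) throws HBaseException {
    // "1 HOUR 30 MINUTES" -> "5400" (seconds); a bare number is taken as seconds already
    String seconds = PrettyPrinter.valueOf("1 HOUR 30 MINUTES", PrettyPrinter.Unit.TIME_INTERVAL);
    System.out.println(seconds);
  }
}
```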
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
index 24b9f2d997b0..868c731e0a89 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimpleMutableByteRange.java
@@ -66,22 +66,26 @@ public SimpleMutableByteRange() {
/**
* Create a new {@code ByteRange} over a new backing array of size {@code capacity}. The range's
- * offset and length are 0 and {@code capacity}, respectively. n * the size of the backing array.
+ * offset and length are 0 and {@code capacity}, respectively.
+ * @param capacity the size of the backing array.
*/
public SimpleMutableByteRange(int capacity) {
this(new byte[capacity]);
}
/**
- * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap.
+ * Create a new {@code ByteRange} over the provided {@code bytes}.
+ * @param bytes The array to wrap.
*/
public SimpleMutableByteRange(byte[] bytes) {
set(bytes);
}
/**
- * Create a new {@code ByteRange} over the provided {@code bytes}. n * The array to wrap. n * The
- * offset into {@code bytes} considered the beginning of this range. n * The length of this range.
+ * Create a new {@code ByteRange} over the provided {@code bytes}.
+ * @param bytes The array to wrap.
+ * @param offset The offset into {@code bytes} considered the beginning of this range.
+ * @param length The length of this range.
*/
public SimpleMutableByteRange(byte[] bytes, int offset, int length) {
set(bytes, offset, length);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
index d91fd712f37c..68e99c3053bc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/SimplePositionedMutableByteRange.java
@@ -70,7 +70,8 @@ public SimplePositionedMutableByteRange() {
/**
* Create a new {@code PositionedByteRange} over a new backing array of size {@code capacity}. The
- * range's offset and length are 0 and {@code capacity}, respectively. n * the size of the backing
+ * range's offset and length are 0 and {@code capacity}, respectively.
+ * @param capacity the size of the backing
* array.
*/
public SimplePositionedMutableByteRange(int capacity) {
@@ -78,17 +78,18 @@ public SimplePositionedMutableByteRange(int capacity) {
}
/**
- * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
- * wrap.
+ * Create a new {@code PositionedByteRange} over the provided {@code bytes}.
+ * @param bytes The array to wrap.
*/
public SimplePositionedMutableByteRange(byte[] bytes) {
set(bytes);
}
/**
- * Create a new {@code PositionedByteRange} over the provided {@code bytes}. n * The array to
- * wrap. n * The offset into {@code bytes} considered the beginning of this range. n * The length
- * of this range.
+ * Create a new {@code PositionedByteRange} over the provided {@code bytes}.
+ * @param bytes The array to wrap.
+ * @param offset The offset into {@code bytes} considered the beginning of this range.
+ * @param length The length of this range.
*/
public SimplePositionedMutableByteRange(byte[] bytes, int offset, int length) {
set(bytes, offset, length);
@@ -130,7 +128,8 @@ public PositionedByteRange set(byte[] bytes, int offset, int length) {
/**
* Update the beginning of this range. {@code offset + length} may not be greater than
- * {@code bytes.length}. Resets {@code position} to 0. n * the new start of this range.
+ * {@code bytes.length}. Resets {@code position} to 0.
+ * @param offset the new start of this range.
* @return this.
*/
@Override
@@ -143,7 +141,8 @@ public PositionedByteRange setOffset(int offset) {
/**
* Update the length of this range. {@code offset + length} should not be greater than
* {@code bytes.length}. If {@code position} is greater than the new {@code length}, sets
- * {@code position} to {@code length}. n * The new length of this range.
+ * {@code position} to {@code length}.
+ * @param length The new length of this range.
* @return this.
*/
@Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
index 0caecf649ce9..e23c62045fab 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/TimeMeasurable.java
@@ -27,7 +27,7 @@
public interface TimeMeasurable {
/**
- * Measure elapsed time. n
+ * Measure elapsed time.
*/
T measure();
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
index 48b60a496164..3aa8a6ec123f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAccess.java
@@ -193,7 +193,7 @@ public static int toInt(ByteBuffer buf, int offset) {
/**
* Reads a int value at the given Object's offset considering it was written in big-endian format.
- * nn * @return int value at offset
+ * @return int value at offset
*/
public static int toInt(Object ref, long offset) {
if (LITTLE_ENDIAN) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
index 154bc0e42dbb..2c600e3c5fd7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/WindowMovingAverage.java
@@ -73,7 +73,7 @@ protected int getNumberOfStatistics() {
/**
* Get statistics at index.
- * @param index index of bar n
+ * @param index index of bar
*/
protected long getStatisticsAtIndex(int index) {
if (index < 0 || index >= getNumberOfStatistics()) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
index de0cbdfa918a..32cfde410d56 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java
@@ -106,8 +106,8 @@ private static Properties makeZKPropsFromHbaseConfig(Configuration conf) {
}
/**
- * Return the ZK Quorum servers string given the specified configuration n * @return Quorum
- * servers String
+ * Return the ZK Quorum servers string given the specified configuration
+ * @return Quorum servers String
*/
private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf) {
String defaultClientPort = Integer.toString(
@@ -191,8 +191,8 @@ public static void validateClusterKey(String key) throws IOException {
/**
* Separate the given key into the three configurations it should contain: hbase.zookeeper.quorum,
- * hbase.zookeeper.client.port and zookeeper.znode.parent n * @return the three configuration in
- * the described order n
+ * hbase.zookeeper.client.port and zookeeper.znode.parent
+ * @return the three configurations in the described order
*/
public static ZKClusterKey transformClusterKey(String key) throws IOException {
List<String> parts = Splitter.on(':').splitToList(key);
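The three-part cluster key format documented above parses as below; the accessor names on ZKClusterKey are assumptions, not shown in the patch:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;

public class ClusterKeyDemo {
  public static void main(String[] args) throws IOException {
    // hbase.zookeeper.quorum : hbase.zookeeper.client.port : zookeeper.znode.parent
    ZKConfig.ZKClusterKey key = ZKConfig.transformClusterKey("zk1,zk2,zk3:2181:/hbase");
    System.out.println(key.getQuorumString()); // "zk1,zk2,zk3" (accessor name assumed)
    System.out.println(key.getClientPort());   // 2181
    System.out.println(key.getZnodeParent());  // "/hbase"
  }
}
```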
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
index 6bb93c09c091..e959f77a7220 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestHBaseConfiguration.java
@@ -258,7 +258,7 @@ private Method loadMethod(Class<?> clz, String name, Class<?>... classes) throws
}
/**
- * Wrapper to fetch the configured {@code List}s. n * Configuration with
+ * Wrapper to fetch the configured {@code List}s, given a Configuration with
* GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS defined
* @return List of CredentialProviders, or null if they could not be loaded
*/
@@ -283,8 +283,8 @@ protected List