diff --git a/dev-support/pmd/pmd-ruleset.xml b/dev-support/pmd/pmd-ruleset.xml
index e3af967ff3f1..83a44946ba97 100644
--- a/dev-support/pmd/pmd-ruleset.xml
+++ b/dev-support/pmd/pmd-ruleset.xml
@@ -42,5 +42,12 @@
+
+
+
+
+
+
+
.*/generated-sources/.*
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java
index 379ea32b8537..61fbcf03568a 100644
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java
@@ -22,10 +22,10 @@
*/
public final class NativeConstants {
+ public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME = "ozone_rocksdb_tools";
+ public static final String ROCKS_TOOLS_NATIVE_PROPERTY = "rocks_tools_native";
+
private NativeConstants() {
}
-
- public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME = "ozone_rocksdb_tools";
- public static final String ROCKS_TOOLS_NATIVE_PROPERTY = "rocks_tools_native";
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java
index 336045e10baa..8ee48e43f903 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java
@@ -39,10 +39,6 @@ public final class CompactionLogEntry implements
CompactionLogEntry::getProtobuf,
CompactionLogEntry.class);
- public static Codec getCodec() {
- return CODEC;
- }
-
private final long dbSequenceNumber;
private final long compactionTime;
private final List inputFileInfoList;
@@ -62,6 +58,10 @@ public CompactionLogEntry(long dbSequenceNumber,
this.compactionReason = compactionReason;
}
+ public static Codec getCodec() {
+ return CODEC;
+ }
+
public List getInputFileInfoList() {
return inputFileInfoList;
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/PrintableGraph.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/PrintableGraph.java
index cb76da6d144a..e51f9b312e5f 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/PrintableGraph.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/graph/PrintableGraph.java
@@ -38,26 +38,6 @@
*/
public class PrintableGraph {
- /**
- * Enum to print different type of node's name in the graph image.
- */
- public enum GraphType {
- /**
- * To use SST file name as node name.
- */
- FILE_NAME,
-
- /**
- * To use SST file name and total key in the file as node name.
- */
- KEY_SIZE,
-
- /**
- * To use SST file name and cumulative key as node name.
- */
- CUMULATIVE_SIZE
- }
-
private final Graph graph;
public PrintableGraph(MutableGraph guavaGraph,
@@ -119,4 +99,24 @@ private String getVertex(CompactionNode node, GraphType graphType) {
return node.getFileName();
}
}
+
+ /**
+ * Enum to print different type of node's name in the graph image.
+ */
+ public enum GraphType {
+ /**
+ * To use SST file name as node name.
+ */
+ FILE_NAME,
+
+ /**
+ * To use SST file name and total key in the file as node name.
+ */
+ KEY_SIZE,
+
+ /**
+ * To use SST file name and cumulative key as node name.
+ */
+ CUMULATIVE_SIZE
+ }
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index 0444e4244c36..c9f8c726d284 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -179,6 +179,35 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
public static final Set COLUMN_FAMILIES_TO_TRACK_IN_DAG =
ImmutableSet.of("keyTable", "directoryTable", "fileTable");
+ // Hash table to track CompactionNode for a given SST File.
+ private final ConcurrentHashMap compactionNodeMap =
+ new ConcurrentHashMap<>();
+
+ // We are maintaining a two way DAG. This allows easy traversal from
+ // source snapshot to destination snapshot as well as the other direction.
+
+ private final MutableGraph forwardCompactionDAG =
+ GraphBuilder.directed().build();
+
+ private final MutableGraph backwardCompactionDAG =
+ GraphBuilder.directed().build();
+
+ public static final Integer DEBUG_DAG_BUILD_UP = 2;
+ public static final Integer DEBUG_DAG_TRAVERSAL = 3;
+ public static final Integer DEBUG_DAG_LIVE_NODES = 4;
+ public static final Integer DEBUG_READ_ALL_DB_KEYS = 5;
+ private static final HashSet DEBUG_LEVEL = new HashSet<>();
+
+ static {
+ addDebugLevel(DEBUG_DAG_BUILD_UP);
+ addDebugLevel(DEBUG_DAG_TRAVERSAL);
+ addDebugLevel(DEBUG_DAG_LIVE_NODES);
+ }
+
+ static {
+ RocksDB.loadLibrary();
+ }
+
/**
* This is a package private constructor and should not be used other than
* testing. Caller should use RocksDBCheckpointDifferHolder#getInstance() to
@@ -309,35 +338,6 @@ public void close() {
}
}
- // Hash table to track CompactionNode for a given SST File.
- private final ConcurrentHashMap compactionNodeMap =
- new ConcurrentHashMap<>();
-
- // We are maintaining a two way DAG. This allows easy traversal from
- // source snapshot to destination snapshot as well as the other direction.
-
- private final MutableGraph forwardCompactionDAG =
- GraphBuilder.directed().build();
-
- private final MutableGraph backwardCompactionDAG =
- GraphBuilder.directed().build();
-
- public static final Integer DEBUG_DAG_BUILD_UP = 2;
- public static final Integer DEBUG_DAG_TRAVERSAL = 3;
- public static final Integer DEBUG_DAG_LIVE_NODES = 4;
- public static final Integer DEBUG_READ_ALL_DB_KEYS = 5;
- private static final HashSet DEBUG_LEVEL = new HashSet<>();
-
- static {
- addDebugLevel(DEBUG_DAG_BUILD_UP);
- addDebugLevel(DEBUG_DAG_TRAVERSAL);
- addDebugLevel(DEBUG_DAG_LIVE_NODES);
- }
-
- static {
- RocksDB.loadLibrary();
- }
-
public static void addDebugLevel(Integer level) {
DEBUG_LEVEL.add(level);
}
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ListOptions.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ListOptions.java
index e5ba46dd9340..3b0d7f117f8f 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ListOptions.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ListOptions.java
@@ -26,19 +26,6 @@ public class ListOptions {
@CommandLine.ArgGroup(exclusive = true)
private ExclusiveLimit exclusiveLimit = new ExclusiveLimit();
-
- static class ExclusiveLimit {
- @CommandLine.Option(names = {"--length", "-l"},
- description = "Maximum number of items to list",
- defaultValue = "100",
- showDefaultValue = CommandLine.Help.Visibility.ALWAYS)
- private int limit;
-
- @CommandLine.Option(names = {"--all", "-a"},
- description = "List all results",
- defaultValue = "false")
- private boolean all;
- }
@CommandLine.Option(names = {"--start", "-s"},
description = "The item to start the listing from.\n" +
@@ -72,4 +59,17 @@ public String getStartItem() {
public String getPrefix() {
return prefix;
}
+
+ static class ExclusiveLimit {
+ @CommandLine.Option(names = {"--length", "-l"},
+ description = "Maximum number of items to list",
+ defaultValue = "100",
+ showDefaultValue = CommandLine.Help.Visibility.ALWAYS)
+ private int limit;
+
+ @CommandLine.Option(names = {"--all", "-a"},
+ description = "List all results",
+ defaultValue = "false")
+ private boolean all;
+ }
}
diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
index a8bf348eed9c..e74fb0b1b431 100644
--- a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
+++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
@@ -57,28 +57,6 @@ public class CreateBucketHandler extends BucketHandler {
" user if not specified")
private String ownerName;
- private static class LayoutConverter implements CommandLine.ITypeConverter {
- @Override
- public BucketLayout convert(String value) {
- if (value == null) {
- return null;
- }
- switch (value) {
- case "fso":
- return BucketLayout.FILE_SYSTEM_OPTIMIZED;
- case "obs":
- return BucketLayout.OBJECT_STORE;
- default:
- for (BucketLayout candidate : BucketLayout.values()) {
- if (candidate.name().equalsIgnoreCase(value)) {
- return candidate;
- }
- }
- throw new IllegalArgumentException("Unknown bucket layout: " + value);
- }
- }
- }
-
@Option(names = { "--layout", "-l" }, converter = LayoutConverter.class,
description = "Allowed Bucket Layouts: fso (for file system optimized buckets FILE_SYSTEM_OPTIMIZED), " +
"obs (for object store optimized OBJECT_STORE) and legacy (LEGACY is Deprecated)")
@@ -149,4 +127,26 @@ public void execute(OzoneClient client, OzoneAddress address)
printObjectAsJson(bucket);
}
}
+
+ private static class LayoutConverter implements CommandLine.ITypeConverter {
+ @Override
+ public BucketLayout convert(String value) {
+ if (value == null) {
+ return null;
+ }
+ switch (value) {
+ case "fso":
+ return BucketLayout.FILE_SYSTEM_OPTIMIZED;
+ case "obs":
+ return BucketLayout.OBJECT_STORE;
+ default:
+ for (BucketLayout candidate : BucketLayout.values()) {
+ if (candidate.name().equalsIgnoreCase(value)) {
+ return candidate;
+ }
+ }
+ throw new IllegalArgumentException("Unknown bucket layout: " + value);
+ }
+ }
+ }
}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 705ac1745c39..87a3443aa77d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -50,14 +50,13 @@ public final class OzoneClientFactory {
private static final Logger LOG = LoggerFactory.getLogger(
OzoneClientFactory.class);
+ private static final LeakDetector OZONE_CLIENT_LEAK_DETECTOR = new LeakDetector("OzoneClientObject");
+
/**
* Private constructor, class is not meant to be initialized.
*/
private OzoneClientFactory() { }
- private static final LeakDetector OZONE_CLIENT_LEAK_DETECTOR =
- new LeakDetector("OzoneClientObject");
-
public static UncheckedAutoCloseable track(AutoCloseable object) {
final Class> clazz = object.getClass();
final StackTraceElement[] stackTrace = HddsUtils.getStackTrace(LOG);
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java
index f413e27fc931..12eee917a789 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java
@@ -39,14 +39,14 @@ public class ReplicatedBlockChecksumComputer extends
private static final Logger LOG =
LoggerFactory.getLogger(ReplicatedBlockChecksumComputer.class);
+ private final List chunkInfoList;
+
static MD5Hash digest(ByteBuffer data) {
final MessageDigest digester = MD5Hash.getDigester();
digester.update(data);
return new MD5Hash(digester.digest());
}
- private final List chunkInfoList;
-
public ReplicatedBlockChecksumComputer(
List chunkInfoList) {
this.chunkInfoList = chunkInfoList;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
index d712c2f4c0f7..193b4d078f5c 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java
@@ -55,6 +55,9 @@
*/
public final class ECKeyOutputStream extends KeyOutputStream
implements KeyMetadataAware {
+
+ private static final Logger LOG = LoggerFactory.getLogger(KeyOutputStream.class);
+
private OzoneClientConfig config;
private ECChunkBuffers ecChunkBufferCache;
private final BlockingQueue ecStripeQueue;
@@ -75,14 +78,6 @@ public final class ECKeyOutputStream extends KeyOutputStream
*/
private boolean atomicKeyCreation;
- private enum StripeWriteStatus {
- SUCCESS,
- FAILED
- }
-
- private static final Logger LOG =
- LoggerFactory.getLogger(KeyOutputStream.class);
-
private volatile boolean closed;
private volatile boolean closing;
// how much of data is actually written yet to underlying stream
@@ -730,4 +725,9 @@ private void releaseBuffers(ByteBuffer[] buffers) {
}
}
}
+
+ private enum StripeWriteStatus {
+ SUCCESS,
+ FAILED
+ }
}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
index 7e4b59ec7db1..dedc36af919c 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
@@ -58,18 +58,10 @@
public class KeyDataStreamOutput extends AbstractDataStreamOutput
implements KeyMetadataAware {
- private OzoneClientConfig config;
-
- /**
- * Defines stream action while calling handleFlushOrClose.
- */
- enum StreamAction {
- FLUSH, HSYNC, CLOSE, FULL
- }
-
private static final Logger LOG =
LoggerFactory.getLogger(KeyDataStreamOutput.class);
+ private OzoneClientConfig config;
private boolean closed;
// how much of data is actually written yet to underlying stream
@@ -560,4 +552,11 @@ private void checkNotClosed() throws IOException {
+ blockDataStreamOutputEntryPool.getKeyName());
}
}
+
+ /**
+ * Defines stream action while calling handleFlushOrClose.
+ */
+ enum StreamAction {
+ FLUSH, HSYNC, CLOSE, FULL
+ }
}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 9103f30b96e0..c9e5a312ca90 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -75,18 +75,11 @@
public class KeyOutputStream extends OutputStream
implements Syncable, KeyMetadataAware {
- private final ReplicationConfig replication;
-
- /**
- * Defines stream action while calling handleFlushOrClose.
- */
- enum StreamAction {
- FLUSH, HSYNC, CLOSE, FULL
- }
-
private static final Logger LOG =
LoggerFactory.getLogger(KeyOutputStream.class);
+ private final ReplicationConfig replication;
+
private boolean closed;
private final Map, RetryPolicy> retryPolicyMap;
private int retryCount;
@@ -850,4 +843,11 @@ private void checkNotClosed() throws IOException {
+ blockOutputStreamEntryPool.getKeyName());
}
}
+
+ /**
+ * Defines stream action while calling handleFlushOrClose.
+ */
+ enum StreamAction {
+ FLUSH, HSYNC, CLOSE, FULL
+ }
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 200c63028b75..89b6e2a10312 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -31,12 +31,6 @@ public final class OMConfigKeys {
"ozone.om.snapshot.load.native.lib";
public static final boolean OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT = true;
- /**
- * Never constructed.
- */
- private OMConfigKeys() {
- }
-
public static final String OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY =
"ozone.filesystem.snapshot.enabled";
public static final boolean OZONE_FILESYSTEM_SNAPSHOT_ENABLED_DEFAULT = true;
@@ -646,4 +640,10 @@ private OMConfigKeys() {
= "ozone.om.compaction.service.columnfamilies";
public static final String OZONE_OM_COMPACTION_SERVICE_COLUMNFAMILIES_DEFAULT =
"keyTable,fileTable,directoryTable,deletedTable,deletedDirectoryTable,multipartInfoTable";
+
+ /**
+ * Never constructed.
+ */
+ private OMConfigKeys() {
+ }
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java
index d4a1c3d87ff7..128939a87146 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java
@@ -174,11 +174,6 @@ enum ACLIdentityType {
// TODO: Add support for acl checks based on CLIENT_IP.
- @Override
- public String toString() {
- return value;
- }
-
/**
* String value for this Enum.
*/
@@ -192,6 +187,11 @@ public String toString() {
ACLIdentityType(String val) {
value = val;
}
+
+ @Override
+ public String toString() {
+ return value;
+ }
}
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java
index 47b586b8dd05..ad1dc50c3d5f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java
@@ -26,45 +26,6 @@
*/
public class SnapshotDiffResponse {
- /**
- * Snapshot diff job status enum.
- */
- public enum JobStatus {
- QUEUED,
- IN_PROGRESS,
- DONE,
- REJECTED,
- FAILED,
- CANCELLED;
-
- public JobStatusProto toProtobuf() {
- return JobStatusProto.valueOf(this.name());
- }
-
- public static JobStatus fromProtobuf(JobStatusProto jobStatusProto) {
- return JobStatus.valueOf(jobStatusProto.name());
- }
- }
-
- /**
- * Snapshot diff job sub-status enum.
- */
- public enum SubStatus {
- SST_FILE_DELTA_DAG_WALK,
- SST_FILE_DELTA_FULL_DIFF,
- OBJECT_ID_MAP_GEN_OBS,
- OBJECT_ID_MAP_GEN_FSO,
- DIFF_REPORT_GEN;
-
- public static SubStatus fromProtoBuf(OzoneManagerProtocolProtos.SnapshotDiffResponse.SubStatus subStatusProto) {
- return SubStatus.valueOf(subStatusProto.name());
- }
-
- public OzoneManagerProtocolProtos.SnapshotDiffResponse.SubStatus toProtoBuf() {
- return OzoneManagerProtocolProtos.SnapshotDiffResponse.SubStatus.valueOf(this.name());
- }
- }
-
private final SnapshotDiffReportOzone snapshotDiffReport;
private final JobStatus jobStatus;
private final long waitTimeInMs;
@@ -154,4 +115,43 @@ public String toString() {
}
return str.toString();
}
+
+ /**
+ * Snapshot diff job status enum.
+ */
+ public enum JobStatus {
+ QUEUED,
+ IN_PROGRESS,
+ DONE,
+ REJECTED,
+ FAILED,
+ CANCELLED;
+
+ public JobStatusProto toProtobuf() {
+ return JobStatusProto.valueOf(this.name());
+ }
+
+ public static JobStatus fromProtobuf(JobStatusProto jobStatusProto) {
+ return JobStatus.valueOf(jobStatusProto.name());
+ }
+ }
+
+ /**
+ * Snapshot diff job sub-status enum.
+ */
+ public enum SubStatus {
+ SST_FILE_DELTA_DAG_WALK,
+ SST_FILE_DELTA_FULL_DIFF,
+ OBJECT_ID_MAP_GEN_OBS,
+ OBJECT_ID_MAP_GEN_FSO,
+ DIFF_REPORT_GEN;
+
+ public static SubStatus fromProtoBuf(OzoneManagerProtocolProtos.SnapshotDiffResponse.SubStatus subStatusProto) {
+ return SubStatus.valueOf(subStatusProto.name());
+ }
+
+ public OzoneManagerProtocolProtos.SnapshotDiffResponse.SubStatus toProtoBuf() {
+ return OzoneManagerProtocolProtos.SnapshotDiffResponse.SubStatus.valueOf(this.name());
+ }
+ }
}
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java
index 4623dee4832a..262b9fa69455 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java
@@ -98,10 +98,6 @@
@InterfaceAudience.Private
public class HttpFSServer {
- enum AccessMode {
- READWRITE, WRITEONLY, READONLY;
- }
-
private static final Logger AUDIT_LOG
= LoggerFactory.getLogger("httpfsaudit");
private static final Logger LOG = LoggerFactory.getLogger(HttpFSServer.class);
@@ -109,10 +105,6 @@ enum AccessMode {
private static final HttpFSParametersProvider PARAMETERS_PROVIDER =
new HttpFSParametersProvider();
- private Parameters getParams(HttpServletRequest request) {
- return PARAMETERS_PROVIDER.get(request);
- }
-
private AccessMode accessMode = AccessMode.READWRITE;
public HttpFSServer() {
@@ -129,6 +121,10 @@ public HttpFSServer() {
}
}
+ private Parameters getParams(HttpServletRequest request) {
+ return PARAMETERS_PROVIDER.get(request);
+ }
+
/**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem
* for the effective user.
@@ -1245,4 +1241,8 @@ private Response handleCreate(InputStream is,
}
return response;
}
+
+ enum AccessMode {
+ READWRITE, WRITEONLY, READONLY;
+ }
}
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/JsonUtil.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/JsonUtil.java
index 829ac339c30a..9a6d57b0c6d1 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/JsonUtil.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/JsonUtil.java
@@ -28,16 +28,15 @@
/** JSON Utilities. */
final class JsonUtil {
-
- private JsonUtil() {
- }
-
// Reuse ObjectMapper instance for improving performance.
// ObjectMapper is thread safe as long as we always configure instance
// before use. We don't have a re-entrant call pattern in WebHDFS,
// so we just need to worry about thread-safety.
private static final ObjectMapper MAPPER = new ObjectMapper();
+ private JsonUtil() {
+ }
+
private static String toJsonString(final Class> clazz, final Object value) {
return toJsonString(clazz.getSimpleName(), value);
}
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/XException.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/XException.java
index d9e014798799..ff35fbcc4f8a 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/XException.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/lang/XException.java
@@ -27,23 +27,6 @@
*/
@InterfaceAudience.Private
public class XException extends Exception {
-
- /**
- * Interface to define error codes.
- */
- public interface ERROR {
-
- /**
- * Returns the template for the error.
- *
- * @return the template for the error, the template must be in JDK
- * MessageFormat syntax (using {#} positional
- * parameters).
- */
- String getTemplate();
-
- }
-
private ERROR error;
/**
@@ -124,4 +107,17 @@ private static Throwable getCause(Object... params) {
return throwable;
}
+ /**
+ * Interface to define error codes.
+ */
+ public interface ERROR {
+ /**
+ * Returns the template for the error.
+ *
+ * @return the template for the error, the template must be in JDK
+ * MessageFormat syntax (using {#} positional
+ * parameters).
+ */
+ String getTemplate();
+ }
}
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Server.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Server.java
index 82534ac73855..1a74e55ffd32 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Server.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/server/Server.java
@@ -95,44 +95,6 @@ public class Server {
*/
public static final String CONF_STARTUP_STATUS = "startup.status";
- /**
- * Enumeration that defines the server status.
- */
- @InterfaceAudience.Private
- public enum Status {
- UNDEF(false, false),
- BOOTING(false, true),
- HALTED(true, true),
- ADMIN(true, true),
- NORMAL(true, true),
- SHUTTING_DOWN(false, true),
- SHUTDOWN(false, false);
-
- private boolean settable;
- private boolean operational;
-
- /**
- * Status constructor.
- *
- * @param settable indicates if the status is settable.
- * @param operational indicates if the server is operational
- * when in this status.
- */
- Status(boolean settable, boolean operational) {
- this.settable = settable;
- this.operational = operational;
- }
-
- /**
- * Returns if this server status is operational.
- *
- * @return if this server status is operational.
- */
- public boolean isOperational() {
- return operational;
- }
- }
-
/**
* Name of the log4j configuration file the Server will load from the
* classpath if the #SERVER#-log4j.properties is not defined
@@ -836,4 +798,41 @@ public void setService(Class extends Service> klass)
}
}
+ /**
+ * Enumeration that defines the server status.
+ */
+ @InterfaceAudience.Private
+ public enum Status {
+ UNDEF(false, false),
+ BOOTING(false, true),
+ HALTED(true, true),
+ ADMIN(true, true),
+ NORMAL(true, true),
+ SHUTTING_DOWN(false, true),
+ SHUTDOWN(false, false);
+
+ private boolean settable;
+ private boolean operational;
+
+ /**
+ * Status constructor.
+ *
+ * @param settable indicates if the status is settable.
+ * @param operational indicates if the server is operational
+ * when in this status.
+ */
+ Status(boolean settable, boolean operational) {
+ this.settable = settable;
+ this.operational = operational;
+ }
+
+ /**
+ * Returns if this server status is operational.
+ *
+ * @return if this server status is operational.
+ */
+ public boolean isOperational() {
+ return operational;
+ }
+ }
}
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
index c4dfc28df47a..8ae106ad2184 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/lib/service/hadoop/FileSystemAccessService.java
@@ -83,6 +83,21 @@ public class FileSystemAccessService extends BaseService
public static final String FILE_SYSTEM_SERVICE_CREATED
= "FileSystemAccessService.created";
+ private static final String HTTPFS_FS_USER = "httpfs.fs.user";
+
+ private Collection nameNodeWhitelist;
+
+ private Configuration serviceHadoopConf;
+
+ private Configuration fileSystemConf;
+
+ private AtomicInteger unmanagedFileSystems = new AtomicInteger();
+
+ private ConcurrentHashMap fsCache =
+ new ConcurrentHashMap();
+
+ private long purgeTimeout;
+
private static class CachedFileSystem {
private FileSystem fs;
private long lastUse;
@@ -141,21 +156,6 @@ public FileSystemAccessService() {
super(PREFIX);
}
- private Collection nameNodeWhitelist;
-
- // Suppressed because serviceHadoopConf only used in this class and in the
- // tests, which will be removed later.
- @SuppressWarnings("checkstyle:VisibilityModifier")
- Configuration serviceHadoopConf;
- private Configuration fileSystemConf;
-
- private AtomicInteger unmanagedFileSystems = new AtomicInteger();
-
- private ConcurrentHashMap fsCache =
- new ConcurrentHashMap();
-
- private long purgeTimeout;
-
@Override
protected void init() throws ServiceException {
LOG.info("Using FileSystemAccess JARs version [{}]",
@@ -325,8 +325,6 @@ protected void setRequiredServiceHadoopConf(Configuration conf) {
conf.set("fs.hdfs.impl.disable.cache", "true");
}
- private static final String HTTPFS_FS_USER = "httpfs.fs.user";
-
protected FileSystem createFileSystem(Configuration namenodeConf)
throws IOException {
String user = UserGroupInformation.getCurrentUser().getShortUserName();
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
index 6760cbd05f81..1644100f8fb5 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
@@ -43,10 +43,6 @@ public final class OmPrefixInfo extends WithObjectID implements CopyObject getCodec() {
- return CODEC;
- }
-
private final String name;
private final CopyOnWriteArrayList acls;
@@ -56,6 +52,10 @@ private OmPrefixInfo(Builder b) {
acls = new CopyOnWriteArrayList<>(b.acls);
}
+ public static Codec getCodec() {
+ return CODEC;
+ }
+
/**
* Returns the ACL's associated with this prefix.
* @return {@literal List}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java
index eed4e7f0beb8..3e6a4b937f47 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java
@@ -35,6 +35,37 @@ public final class DeletingServiceMetrics {
DeletingServiceMetrics.class.getSimpleName();
private MetricsRegistry registry;
+ /*
+ * Total directory deletion metrics across all iterations of DirectoryDeletingService since last restart.
+ */
+ @Metric("Total no. of deleted directories sent for purge")
+ private MutableGaugeLong numDirsSentForPurge;
+ @Metric("Total no. of sub-directories sent for purge")
+ private MutableGaugeLong numSubDirsSentForPurge;
+ @Metric("Total no. of sub-files sent for purge")
+ private MutableGaugeLong numSubFilesSentForPurge;
+ /*
+ * Total key deletion metrics across all iterations of KeyDeletingService since last restart.
+ */
+ @Metric("Total no. of keys processed")
+ private MutableGaugeLong numKeysProcessed;
+ @Metric("Total no. of deleted keys sent for purge")
+ private MutableGaugeLong numKeysSentForPurge;
+ /*
+ * Directory purge request metrics.
+ */
+ @Metric("Total no. of directories purged")
+ private MutableGaugeLong numDirsPurged;
+ @Metric("Total no. of subFiles moved to deletedTable")
+ private MutableGaugeLong numSubFilesMovedToDeletedTable;
+ @Metric("Total no. of subDirectories moved to deletedDirTable")
+ private MutableGaugeLong numSubDirsMovedToDeletedDirTable;
+ /*
+ * Key purge request metrics.
+ */
+ @Metric("Total no. of keys purged")
+ private MutableGaugeLong numKeysPurged;
+
private DeletingServiceMetrics() {
this.registry = new MetricsRegistry(METRICS_SOURCE_NAME);
}
@@ -57,16 +88,6 @@ public static void unregister() {
DefaultMetricsSystem.instance().unregisterSource(METRICS_SOURCE_NAME);
}
- /*
- * Total directory deletion metrics across all iterations of DirectoryDeletingService since last restart.
- */
- @Metric("Total no. of deleted directories sent for purge")
- private MutableGaugeLong numDirsSentForPurge;
- @Metric("Total no. of sub-directories sent for purge")
- private MutableGaugeLong numSubDirsSentForPurge;
- @Metric("Total no. of sub-files sent for purge")
- private MutableGaugeLong numSubFilesSentForPurge;
-
public void incrNumDirsSentForPurge(long dirDel) {
numDirsSentForPurge.incr(dirDel);
}
@@ -97,14 +118,6 @@ public long getNumSubFilesSentForPurge() {
return numSubFilesSentForPurge.value();
}
- /*
- * Total key deletion metrics across all iterations of KeyDeletingService since last restart.
- */
- @Metric("Total no. of keys processed")
- private MutableGaugeLong numKeysProcessed;
- @Metric("Total no. of deleted keys sent for purge")
- private MutableGaugeLong numKeysSentForPurge;
-
public void incrNumKeysProcessed(long keysProcessed) {
this.numKeysProcessed.incr(keysProcessed);
}
@@ -113,16 +126,6 @@ public void incrNumKeysSentForPurge(long keysPurge) {
this.numKeysSentForPurge.incr(keysPurge);
}
- /*
- * Directory purge request metrics.
- */
- @Metric("Total no. of directories purged")
- private MutableGaugeLong numDirsPurged;
- @Metric("Total no. of subFiles moved to deletedTable")
- private MutableGaugeLong numSubFilesMovedToDeletedTable;
- @Metric("Total no. of subDirectories moved to deletedDirTable")
- private MutableGaugeLong numSubDirsMovedToDeletedDirTable;
-
public void incrNumDirPurged(long dirPurged) {
this.numDirsPurged.incr(dirPurged);
}
@@ -147,12 +150,6 @@ public long getNumSubDirsMovedToDeletedDirTable() {
return numSubDirsMovedToDeletedDirTable.value();
}
- /*
- * Key purge request metrics.
- */
- @Metric("Total no. of keys purged")
- private MutableGaugeLong numKeysPurged;
-
public void incrNumKeysPurged(long keysPurged) {
this.numKeysPurged.incr(keysPurged);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java
index 3fe8c154cd3d..65cb1d567323 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotMetrics.java
@@ -35,9 +35,6 @@ public final class OmSnapshotMetrics implements OmMetadataReaderMetrics {
private static final String SOURCE_NAME =
OmSnapshotMetrics.class.getSimpleName();
- private OmSnapshotMetrics() {
- }
-
private static final Supplier<OmSnapshotMetrics> SUPPLIER =
MemoizedSupplier.valueOf(() -> {
MetricsSystem ms = DefaultMetricsSystem.instance();
@@ -46,15 +43,31 @@ private OmSnapshotMetrics() {
new OmSnapshotMetrics());
});
+ private @Metric MutableCounterLong numKeyLookup;
+ private @Metric MutableCounterLong numKeyLookupFails;
+ private @Metric MutableCounterLong numGetKeyInfo;
+ private @Metric MutableCounterLong numGetKeyInfoFails;
+ private @Metric MutableCounterLong numListStatus;
+ private @Metric MutableCounterLong numListStatusFails;
+ private @Metric MutableCounterLong numGetFileStatus;
+ private @Metric MutableCounterLong numGetFileStatusFails;
+ private @Metric MutableCounterLong numLookupFile;
+ private @Metric MutableCounterLong numLookupFileFails;
+ private @Metric MutableCounterLong numKeyLists;
+ private @Metric MutableCounterLong numKeyListFails;
+ private @Metric MutableCounterLong numGetAcl;
+ private @Metric MutableCounterLong numKeyOps;
+ private @Metric MutableCounterLong numFSOps;
+ private @Metric MutableCounterLong numGetObjectTagging;
+ private @Metric MutableCounterLong numGetObjectTaggingFails;
+
+ private OmSnapshotMetrics() {
+ }
+
public static OmSnapshotMetrics getInstance() {
return SUPPLIER.get();
}
- private @Metric
- MutableCounterLong numKeyLookup;
- private @Metric
- MutableCounterLong numKeyLookupFails;
-
@Override
public void incNumKeyLookups() {
numKeyOps.incr();
@@ -66,11 +79,6 @@ public void incNumKeyLookupFails() {
numKeyLookupFails.incr();
}
- private @Metric
- MutableCounterLong numGetKeyInfo;
- private @Metric
- MutableCounterLong numGetKeyInfoFails;
-
@Override
public void incNumGetKeyInfo() {
numKeyOps.incr();
@@ -82,11 +90,6 @@ public void incNumGetKeyInfoFails() {
numGetKeyInfoFails.incr();
}
- private @Metric
- MutableCounterLong numListStatus;
- private @Metric
- MutableCounterLong numListStatusFails;
-
@Override
public void incNumListStatus() {
numKeyOps.incr();
@@ -99,11 +102,6 @@ public void incNumListStatusFails() {
numListStatusFails.incr();
}
- private @Metric
- MutableCounterLong numGetFileStatus;
- private @Metric
- MutableCounterLong numGetFileStatusFails;
-
@Override
public void incNumGetFileStatus() {
numKeyOps.incr();
@@ -116,11 +114,6 @@ public void incNumGetFileStatusFails() {
numGetFileStatusFails.incr();
}
- private @Metric
- MutableCounterLong numLookupFile;
- private @Metric
- MutableCounterLong numLookupFileFails;
-
@Override
public void incNumLookupFile() {
numKeyOps.incr();
@@ -133,12 +126,6 @@ public void incNumLookupFileFails() {
numLookupFileFails.incr();
}
- private @Metric
- MutableCounterLong numKeyLists;
-
- private @Metric
- MutableCounterLong numKeyListFails;
-
@Override
public void incNumKeyLists() {
numKeyLists.incr();
@@ -149,24 +136,11 @@ public void incNumKeyListFails() {
numKeyListFails.incr();
}
- private @Metric
- MutableCounterLong numGetAcl;
-
@Override
public void incNumGetAcl() {
numGetAcl.incr();
}
- private @Metric
- MutableCounterLong numKeyOps;
- private @Metric
- MutableCounterLong numFSOps;
-
- private @Metric
- MutableCounterLong numGetObjectTagging;
- private @Metric
- MutableCounterLong numGetObjectTaggingFails;
-
@Override
public void incNumGetObjectTagging() {
numGetObjectTagging.incr();
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java
index 504a25643d14..45f4e31d3348 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java
@@ -90,14 +90,14 @@ public enum Statistic {
}
}
+ private final String symbol;
+ private final String description;
+
Statistic(String symbol, String description) {
this.symbol = symbol;
this.description = description;
}
- private final String symbol;
- private final String description;
-
public String getSymbol() {
return symbol;
}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/ContainerSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/ContainerSchemaDefinition.java
index 0fffeb9edeff..8a28d0eee098 100644
--- a/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/ContainerSchemaDefinition.java
+++ b/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/ContainerSchemaDefinition.java
@@ -43,20 +43,6 @@ public class ContainerSchemaDefinition implements ReconSchemaDefinition {
private static final Logger LOG =
LoggerFactory.getLogger(ContainerSchemaDefinition.class);
- /**
- * ENUM describing the allowed container states which can be stored in the
- * unhealthy containers table.
- */
- public enum UnHealthyContainerStates {
- MISSING,
- EMPTY_MISSING,
- UNDER_REPLICATED,
- OVER_REPLICATED,
- MIS_REPLICATED,
- ALL_REPLICAS_BAD,
- NEGATIVE_SIZE // Added new state to track containers with negative sizes
- }
-
private static final String CONTAINER_ID = "container_id";
private static final String CONTAINER_STATE = "container_state";
private final DataSource dataSource;
@@ -104,4 +90,18 @@ public DSLContext getDSLContext() {
public DataSource getDataSource() {
return dataSource;
}
+
+ /**
+ * ENUM describing the allowed container states which can be stored in the
+ * unhealthy containers table.
+ */
+ public enum UnHealthyContainerStates {
+ MISSING,
+ EMPTY_MISSING,
+ UNDER_REPLICATED,
+ OVER_REPLICATED,
+ MIS_REPLICATED,
+ ALL_REPLICAS_BAD,
+ NEGATIVE_SIZE // Added new state to track containers with negative sizes
+ }
}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/ReconSqlDbConfig.java b/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/ReconSqlDbConfig.java
index 49afac6c37da..7acd93490379 100644
--- a/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/ReconSqlDbConfig.java
+++ b/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/ReconSqlDbConfig.java
@@ -38,14 +38,6 @@ public class ReconSqlDbConfig {
)
private String driverClass;
- public String getDriverClass() {
- return driverClass;
- }
-
- public void setDriverClass(String driverClass) {
- this.driverClass = driverClass;
- }
-
@Config(key = "jdbc.url",
type = ConfigType.STRING,
defaultValue = "jdbc:derby:${ozone.recon.db.dir}/ozone_recon_derby.db",
@@ -54,14 +46,6 @@ public void setDriverClass(String driverClass) {
)
private String jdbcUrl;
- public String getJdbcUrl() {
- return jdbcUrl;
- }
-
- public void setJdbcUrl(String jdbcUrl) {
- this.jdbcUrl = jdbcUrl;
- }
-
@Config(key = "username",
type = ConfigType.STRING,
defaultValue = "",
@@ -70,14 +54,6 @@ public void setJdbcUrl(String jdbcUrl) {
)
private String username;
- public String getUsername() {
- return username;
- }
-
- public void setUsername(String username) {
- this.username = username;
- }
-
@Config(key = "password",
type = ConfigType.STRING,
defaultValue = "",
@@ -86,14 +62,6 @@ public void setUsername(String username) {
)
private String password;
- public String getPassword() {
- return password;
- }
-
- public void setPassword(String password) {
- this.password = password;
- }
-
@Config(key = "auto.commit",
type = ConfigType.BOOLEAN,
defaultValue = "true",
@@ -103,14 +71,6 @@ public void setPassword(String password) {
)
private boolean autoCommit;
- public boolean isAutoCommit() {
- return autoCommit;
- }
-
- public void setAutoCommit(boolean autoCommit) {
- this.autoCommit = autoCommit;
- }
-
@Config(key = "conn.timeout",
type = ConfigType.TIME,
defaultValue = "30000ms",
@@ -120,14 +80,6 @@ public void setAutoCommit(boolean autoCommit) {
)
private long connectionTimeout;
- public long getConnectionTimeout() {
- return connectionTimeout;
- }
-
- public void setConnectionTimeout(long connectionTimeout) {
- this.connectionTimeout = connectionTimeout;
- }
-
@Config(key = "conn.max.active",
type = ConfigType.INT,
defaultValue = "5",
@@ -136,14 +88,6 @@ public void setConnectionTimeout(long connectionTimeout) {
)
private int maxActiveConnections;
- public int getMaxActiveConnections() {
- return maxActiveConnections;
- }
-
- public void setMaxActiveConnections(int maxActiveConnections) {
- this.maxActiveConnections = maxActiveConnections;
- }
-
@Config(key = "conn.max.age",
type = ConfigType.TIME, timeUnit = SECONDS,
defaultValue = "1800s",
@@ -152,14 +96,6 @@ public void setMaxActiveConnections(int maxActiveConnections) {
)
private long connectionMaxAge;
- public long getConnectionMaxAge() {
- return connectionMaxAge;
- }
-
- public void setConnectionMaxAge(long connectionMaxAge) {
- this.connectionMaxAge = connectionMaxAge;
- }
-
@Config(key = "conn.idle.max.age",
type = ConfigType.TIME, timeUnit = SECONDS,
defaultValue = "3600s",
@@ -168,14 +104,6 @@ public void setConnectionMaxAge(long connectionMaxAge) {
)
private long connectionIdleMaxAge;
- public long getConnectionIdleMaxAge() {
- return connectionIdleMaxAge;
- }
-
- public void setConnectionIdleMaxAge(long connectionIdleMaxAge) {
- this.connectionIdleMaxAge = connectionIdleMaxAge;
- }
-
@Config(key = "conn.idle.test.period",
type = ConfigType.TIME, timeUnit = SECONDS,
defaultValue = "60s",
@@ -184,14 +112,6 @@ public void setConnectionIdleMaxAge(long connectionIdleMaxAge) {
)
private long connectionIdleTestPeriod;
- public long getConnectionIdleTestPeriod() {
- return connectionIdleTestPeriod;
- }
-
- public void setConnectionIdleTestPeriod(long connectionIdleTestPeriod) {
- this.connectionIdleTestPeriod = connectionIdleTestPeriod;
- }
-
@Config(key = "conn.idle.test",
type = ConfigType.STRING,
defaultValue = "SELECT 1",
@@ -201,14 +121,6 @@ public void setConnectionIdleTestPeriod(long connectionIdleTestPeriod) {
)
private String idleTestQuery;
- public String getIdleTestQuery() {
- return idleTestQuery;
- }
-
- public void setIdleTestQuery(String idleTestQuery) {
- this.idleTestQuery = idleTestQuery;
- }
-
@Config(key = "jooq.dialect",
type = ConfigType.STRING,
defaultValue = "DERBY",
@@ -220,6 +132,94 @@ public void setIdleTestQuery(String idleTestQuery) {
)
private String sqlDbDialect;
+ public String getDriverClass() {
+ return driverClass;
+ }
+
+ public void setDriverClass(String driverClass) {
+ this.driverClass = driverClass;
+ }
+
+ public String getJdbcUrl() {
+ return jdbcUrl;
+ }
+
+ public void setJdbcUrl(String jdbcUrl) {
+ this.jdbcUrl = jdbcUrl;
+ }
+
+ public String getUsername() {
+ return username;
+ }
+
+ public void setUsername(String username) {
+ this.username = username;
+ }
+
+ public String getPassword() {
+ return password;
+ }
+
+ public void setPassword(String password) {
+ this.password = password;
+ }
+
+ public boolean isAutoCommit() {
+ return autoCommit;
+ }
+
+ public void setAutoCommit(boolean autoCommit) {
+ this.autoCommit = autoCommit;
+ }
+
+ public long getConnectionTimeout() {
+ return connectionTimeout;
+ }
+
+ public void setConnectionTimeout(long connectionTimeout) {
+ this.connectionTimeout = connectionTimeout;
+ }
+
+ public int getMaxActiveConnections() {
+ return maxActiveConnections;
+ }
+
+ public void setMaxActiveConnections(int maxActiveConnections) {
+ this.maxActiveConnections = maxActiveConnections;
+ }
+
+ public long getConnectionMaxAge() {
+ return connectionMaxAge;
+ }
+
+ public void setConnectionMaxAge(long connectionMaxAge) {
+ this.connectionMaxAge = connectionMaxAge;
+ }
+
+ public long getConnectionIdleMaxAge() {
+ return connectionIdleMaxAge;
+ }
+
+ public void setConnectionIdleMaxAge(long connectionIdleMaxAge) {
+ this.connectionIdleMaxAge = connectionIdleMaxAge;
+ }
+
+ public long getConnectionIdleTestPeriod() {
+ return connectionIdleTestPeriod;
+ }
+
+ public void setConnectionIdleTestPeriod(long connectionIdleTestPeriod) {
+ this.connectionIdleTestPeriod = connectionIdleTestPeriod;
+ }
+
+ public String getIdleTestQuery() {
+ return idleTestQuery;
+ }
+
+ public void setIdleTestQuery(String idleTestQuery) {
+ this.idleTestQuery = idleTestQuery;
+ }
+
public String getSqlDbDialect() {
return sqlDbDialect;
}
diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/SqlDbUtils.java b/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/SqlDbUtils.java
index 0ec998d5b8fc..22c00cb9685b 100644
--- a/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/SqlDbUtils.java
+++ b/hadoop-ozone/recon-codegen/src/main/java/org/apache/ozone/recon/schema/SqlDbUtils.java
@@ -47,6 +47,21 @@ public final class SqlDbUtils {
private static final Logger LOG =
LoggerFactory.getLogger(SqlDbUtils.class);
+ /**
+ * Helper function to check if table exists through JOOQ.
+ */
+ public static final BiPredicate<Connection, String> TABLE_EXISTS_CHECK =
+ (conn, tableName) -> {
+ try {
+ DSL.using(conn).select(count()).from(tableName).execute();
+ } catch (DataAccessException ex) {
+ LOG.debug(ex.getMessage());
+ return false;
+ }
+ LOG.info("{} table already exists, skipping creation.", tableName);
+ return true;
+ };
+
private SqlDbUtils() {
}
@@ -82,21 +97,6 @@ public void write(int b) throws IOException {
};
}
- /**
- * Helper function to check if table exists through JOOQ.
- */
- public static final BiPredicate<Connection, String> TABLE_EXISTS_CHECK =
- (conn, tableName) -> {
- try {
- DSL.using(conn).select(count()).from(tableName).execute();
- } catch (DataAccessException ex) {
- LOG.debug(ex.getMessage());
- return false;
- }
- LOG.info("{} table already exists, skipping creation.", tableName);
- return true;
- };
-
/**
* Utility method to list all user-defined tables in the database.
*
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
index a7a2a1daf80d..00950cfd716b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
@@ -24,10 +24,6 @@
*/
public final class ReconConstants {
- private ReconConstants() {
- // Never Constructed
- }
-
public static final String RECON_CONTAINER_KEY_DB = "recon-container-key.db";
public static final String CONTAINER_COUNT_KEY = "containerCount";
@@ -97,6 +93,10 @@ private ReconConstants() {
public static final AtomicBoolean CONTAINER_KEY_TABLES_TRUNCATED = new AtomicBoolean(false);
+ private ReconConstants() {
+ // Never Constructed
+ }
+
/**
* Resets the table truncated flag for the given tables. This should be called once per reprocess cycle,
* for example by the OM task controller, before the tasks run.
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index 86035c45df10..2999c21e61dd 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -96,14 +96,14 @@
@Singleton
public class ReconUtils {
- public ReconUtils() {
- }
-
private static Logger log = LoggerFactory.getLogger(
ReconUtils.class);
private static AtomicBoolean rebuildTriggered = new AtomicBoolean(false);
+ public ReconUtils() {
+ }
+
public static File getReconScmDbDir(ConfigurationSource conf) {
return new ReconUtils().getReconDbDir(conf, OZONE_RECON_SCM_DB_DIR);
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefixImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefixImpl.java
index dc5ecdd89bde..33d7114f1697 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefixImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefixImpl.java
@@ -27,10 +27,6 @@
*/
final class ContainerKeyPrefixImpl
implements ContainerKeyPrefix, KeyPrefixContainer {
- static ContainerKeyPrefixImpl get(long containerId, String keyPrefix,
- long keyVersion) {
- return new ContainerKeyPrefixImpl(containerId, keyPrefix, keyVersion);
- }
private final long containerId;
private final String keyPrefix;
@@ -43,6 +39,11 @@ private ContainerKeyPrefixImpl(long containerId, String keyPrefix,
this.keyVersion = keyVersion;
}
+ static ContainerKeyPrefixImpl get(long containerId, String keyPrefix,
+ long keyVersion) {
+ return new ContainerKeyPrefixImpl(containerId, keyPrefix, keyVersion);
+ }
+
@Override
public long getContainerId() {
return containerId;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java
index 830ebe9773c9..b515ff14756d 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java
@@ -31,14 +31,6 @@
*/
public final class KeyEntityInfoProtoWrapper {
- public static Codec<KeyEntityInfoProtoWrapper> getCodec() {
- return new DelegatedCodec<>(
- Proto2Codec.get(OzoneManagerProtocolProtos.KeyInfo.getDefaultInstance()),
- KeyEntityInfoProtoWrapper::getFromProtobuf,
- KeyEntityInfoProtoWrapper::toProtobuf,
- KeyEntityInfoProtoWrapper.class);
- }
-
private final OzoneManagerProtocolProtos.KeyInfo keyInfoProto;
/** This is key table key of rocksDB and will help UI to implement pagination
@@ -63,6 +55,14 @@ private KeyEntityInfoProtoWrapper(OzoneManagerProtocolProtos.KeyInfo proto) {
this.replicatedSize = QuotaUtil.getReplicatedSize(getSize(), getReplicationConfig());
}
+ public static Codec<KeyEntityInfoProtoWrapper> getCodec() {
+ return new DelegatedCodec<>(
+ Proto2Codec.get(OzoneManagerProtocolProtos.KeyInfo.getDefaultInstance()),
+ KeyEntityInfoProtoWrapper::getFromProtobuf,
+ KeyEntityInfoProtoWrapper::toProtobuf,
+ KeyEntityInfoProtoWrapper.class);
+ }
+
public static KeyEntityInfoProtoWrapper getFromProtobuf(OzoneManagerProtocolProtos.KeyInfo keyInfo) {
return new KeyEntityInfoProtoWrapper(keyInfo);
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
index 9541f51c80f2..1d5b33cbcc65 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -39,10 +39,6 @@ public final class NSSummaryCodec implements Codec {
private static final Codec<NSSummary> INSTANCE = new NSSummaryCodec();
- public static Codec<NSSummary> get() {
- return INSTANCE;
- }
-
private final Codec<Integer> integerCodec = IntegerCodec.get();
private final Codec<Short> shortCodec = ShortCodec.get();
private final Codec<Long> longCodec = LongCodec.get();
@@ -56,6 +52,10 @@ private NSSummaryCodec() {
// singleton
}
+ public static Codec<NSSummary> get() {
+ return INSTANCE;
+ }
+
@Override
public Class<NSSummary> getTypeClass() {
return NSSummary.class;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ContainerHealthMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ContainerHealthMetrics.java
index d9531fafcc5f..f013f8670afe 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ContainerHealthMetrics.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/ContainerHealthMetrics.java
@@ -35,9 +35,23 @@ public final class ContainerHealthMetrics {
private static final String SOURCE_NAME =
ContainerHealthMetrics.class.getSimpleName();
+ @Metric(about = "Number of missing containers detected in Recon.")
+ private MutableGaugeLong missingContainerCount;
+
+ @Metric(about = "Number of under replicated containers detected in Recon.")
+ private MutableGaugeLong underReplicatedContainerCount;
+
+ @Metric(about = "Number of replica mismatch containers detected in Recon.")
+ private MutableGaugeLong replicaMisMatchContainerCount;
+
private ContainerHealthMetrics() {
}
+ public void unRegister() {
+ MetricsSystem ms = DefaultMetricsSystem.instance();
+ ms.unregisterSource(SOURCE_NAME);
+ }
+
public static ContainerHealthMetrics create() {
MetricsSystem ms = DefaultMetricsSystem.instance();
return ms.register(SOURCE_NAME,
@@ -45,20 +59,6 @@ public static ContainerHealthMetrics create() {
new ContainerHealthMetrics());
}
- public void unRegister() {
- MetricsSystem ms = DefaultMetricsSystem.instance();
- ms.unregisterSource(SOURCE_NAME);
- }
-
- @Metric(about = "Number of missing containers detected in Recon.")
- private MutableGaugeLong missingContainerCount;
-
- @Metric(about = "Number of under replicated containers detected in Recon.")
- private MutableGaugeLong underReplicatedContainerCount;
-
- @Metric(about = "Number of replica mismatch containers detected in Recon.")
- private MutableGaugeLong replicaMisMatchContainerCount;
-
public void setMissingContainerCount(long missingContainerCount) {
this.missingContainerCount.set(missingContainerCount);
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java
index c4df2a5e98d4..403a08b5b098 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java
@@ -38,21 +38,6 @@ public final class OzoneManagerSyncMetrics {
private static final String SOURCE_NAME =
OzoneManagerSyncMetrics.class.getSimpleName();
- private OzoneManagerSyncMetrics() {
- }
-
- public static OzoneManagerSyncMetrics create() {
- MetricsSystem ms = DefaultMetricsSystem.instance();
- return ms.register(SOURCE_NAME,
- "Recon Ozone Manager Sync Metrics",
- new OzoneManagerSyncMetrics());
- }
-
- public void unRegister() {
- MetricsSystem ms = DefaultMetricsSystem.instance();
- ms.unregisterSource(SOURCE_NAME);
- }
-
@Metric(about = "Number of OM snapshot requests made by Recon.")
private MutableCounterLong numSnapshotRequests;
@@ -78,6 +63,21 @@ public void unRegister() {
@Metric(about = "The lag of sequence number between Recon and OM")
private MutableGaugeLong sequenceNumberLag;
+ private OzoneManagerSyncMetrics() {
+ }
+
+ public static OzoneManagerSyncMetrics create() {
+ MetricsSystem ms = DefaultMetricsSystem.instance();
+ return ms.register(SOURCE_NAME,
+ "Recon Ozone Manager Sync Metrics",
+ new OzoneManagerSyncMetrics());
+ }
+
+ public void unRegister() {
+ MetricsSystem ms = DefaultMetricsSystem.instance();
+ ms.unregisterSource(SOURCE_NAME);
+ }
+
public void incrNumSnapshotRequests() {
this.numSnapshotRequests.incr();
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java
index a31300785b2f..6c43630ad228 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ContainerReplicaHistoryList.java
@@ -38,10 +38,6 @@ public class ContainerReplicaHistoryList {
ContainerReplicaHistoryList::toProto,
ContainerReplicaHistoryList.class);
- public static Codec<ContainerReplicaHistoryList> getCodec() {
- return CODEC;
- }
-
private List<ContainerReplicaHistory> replicaHistories;
public ContainerReplicaHistoryList(
@@ -49,6 +45,10 @@ public ContainerReplicaHistoryList(
this.replicaHistories = new ArrayList<>(replicaHistories);
}
+ public static Codec<ContainerReplicaHistoryList> getCodec() {
+ return CODEC;
+ }
+
public List<ContainerReplicaHistory> asList() {
return Collections.unmodifiableList(replicaHistories);
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java
index 56171a835128..76fd43e1dff6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPolicyProvider.java
@@ -37,6 +37,13 @@ public final class ReconPolicyProvider extends PolicyProvider {
private static final Supplier<ReconPolicyProvider> SUPPLIER =
MemoizedSupplier.valueOf(ReconPolicyProvider::new);
+ private static final List<Service> RECON_SERVICES =
+ Collections.singletonList(
+ new Service(
+ OZONE_RECON_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL,
+ ReconDatanodeProtocol.class)
+ );
+
private ReconPolicyProvider() {
}
@@ -46,13 +53,6 @@ public static ReconPolicyProvider getInstance() {
return SUPPLIER.get();
}
- private static final List<Service> RECON_SERVICES =
- Collections.singletonList(
- new Service(
- OZONE_RECON_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL,
- ReconDatanodeProtocol.class)
- );
-
@Override
public Service[] getServices() {
return RECON_SERVICES.toArray(new Service[0]);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java
index 9b88343a7e48..52a6998ca1a7 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/KeyPrefixContainerCodec.java
@@ -35,6 +35,8 @@ public final class KeyPrefixContainerCodec
private static final Codec<KeyPrefixContainer> INSTANCE =
new KeyPrefixContainerCodec();
+ private static final String KEY_DELIMITER = "_";
+
public static Codec<KeyPrefixContainer> get() {
return INSTANCE;
}
@@ -43,8 +45,6 @@ private KeyPrefixContainerCodec() {
// singleton
}
- private static final String KEY_DELIMITER = "_";
-
@Override
public Class<KeyPrefixContainer> getTypeClass() {
return KeyPrefixContainer.class;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java
index 83cc90adc614..a5d6cd914537 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconDBDefinition.java
@@ -36,11 +36,6 @@ public class ReconDBDefinition extends DBDefinition.WithMap {
private final String dbName;
- public ReconDBDefinition(String dbName) {
- super(COLUMN_FAMILIES);
- this.dbName = dbName;
- }
-
public static final DBColumnFamilyDefinition<ContainerKeyPrefix, Integer>
CONTAINER_KEY =
new DBColumnFamilyDefinition<>(
@@ -92,6 +87,11 @@ public ReconDBDefinition(String dbName) {
REPLICA_HISTORY,
REPLICA_HISTORY_V2);
+ public ReconDBDefinition(String dbName) {
+ super(COLUMN_FAMILIES);
+ this.dbName = dbName;
+ }
+
@Override
public String getName() {
return dbName;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java
index dc6c676662e9..9ccab5b083e0 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java
@@ -38,14 +38,6 @@ public class ReconTaskConfig {
)
private Duration pipelineSyncTaskInterval = Duration.ofMinutes(5);
- public Duration getPipelineSyncTaskInterval() {
- return pipelineSyncTaskInterval;
- }
-
- public void setPipelineSyncTaskInterval(Duration interval) {
- this.pipelineSyncTaskInterval = interval;
- }
-
@Config(key = "missingcontainer.interval",
type = ConfigType.TIME,
defaultValue = "300s",
@@ -56,14 +48,6 @@ public void setPipelineSyncTaskInterval(Duration interval) {
)
private Duration missingContainerTaskInterval = Duration.ofMinutes(5);
- public Duration getMissingContainerTaskInterval() {
- return missingContainerTaskInterval;
- }
-
- public void setMissingContainerTaskInterval(Duration interval) {
- this.missingContainerTaskInterval = interval;
- }
-
@Config(key = "safemode.wait.threshold",
type = ConfigType.TIME,
defaultValue = "300s",
@@ -74,14 +58,6 @@ public void setMissingContainerTaskInterval(Duration interval) {
)
private Duration safeModeWaitThreshold = Duration.ofMinutes(5);
- public Duration getSafeModeWaitThreshold() {
- return safeModeWaitThreshold;
- }
-
- public void setSafeModeWaitThreshold(Duration safeModeWaitThreshold) {
- this.safeModeWaitThreshold = safeModeWaitThreshold;
- }
-
@Config(key = "containercounttask.interval",
type = ConfigType.TIME,
defaultValue = "60s",
@@ -91,6 +67,30 @@ public void setSafeModeWaitThreshold(Duration safeModeWaitThreshold) {
)
private Duration containerSizeCountTaskInterval = Duration.ofMinutes(1);
+ public Duration getPipelineSyncTaskInterval() {
+ return pipelineSyncTaskInterval;
+ }
+
+ public void setPipelineSyncTaskInterval(Duration interval) {
+ this.pipelineSyncTaskInterval = interval;
+ }
+
+ public Duration getMissingContainerTaskInterval() {
+ return missingContainerTaskInterval;
+ }
+
+ public void setMissingContainerTaskInterval(Duration interval) {
+ this.missingContainerTaskInterval = interval;
+ }
+
+ public Duration getSafeModeWaitThreshold() {
+ return safeModeWaitThreshold;
+ }
+
+ public void setSafeModeWaitThreshold(Duration safeModeWaitThreshold) {
+ this.safeModeWaitThreshold = safeModeWaitThreshold;
+ }
+
public Duration getContainerSizeCountTaskInterval() {
return containerSizeCountTaskInterval;
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 21718295b9c1..1b11d47a166e 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -176,6 +176,9 @@ public class ObjectEndpoint extends EndpointBase {
private boolean datastreamEnabled;
private long datastreamMinLength;
+ @Inject
+ private OzoneConfiguration ozoneConfiguration;
+
public ObjectEndpoint() {
overrideQueryParameter = ImmutableMap.builder()
.put("Content-Type", "response-content-type")
@@ -187,9 +190,6 @@ public ObjectEndpoint() {
.build();
}
- @Inject
- private OzoneConfiguration ozoneConfiguration;
-
@Override
@PostConstruct
public void init() {
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
index de92c315a82a..63b9923947b5 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java
@@ -63,10 +63,6 @@ enum ACLType {
// Allows grantee above all permissions on the bucket
FULL_CONTROL("FULL_CONTROL");
- public String getValue() {
- return value;
- }
-
/**
* String value for this Enum.
*/
@@ -79,6 +75,10 @@ public String getValue() {
value = val;
}
+ public String getValue() {
+ return value;
+ }
+
public static ACLType getType(String typeStr) {
for (ACLType type: ACLType.values()) {
if (type.getValue().equals(typeStr)) {
@@ -97,14 +97,6 @@ enum ACLIdentityType {
GROUP("Group", false, "url"),
USER_BY_EMAIL("AmazonCustomerByEmail", false, "emailAddress");
- public String getGranteeType() {
- return granteeType;
- }
-
- public String getHeaderType() {
- return granteeInHeader;
- }
-
/**
* Grantee type in body XML.
*/
@@ -131,6 +123,14 @@ public String getHeaderType() {
granteeInHeader = headerType;
}
+ public String getGranteeType() {
+ return granteeType;
+ }
+
+ public String getHeaderType() {
+ return granteeInHeader;
+ }
+
boolean isSupported() {
return supported;
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
index ed10f2894f0b..b09ba5c9547f 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java
@@ -40,10 +40,6 @@ public final class S3ErrorTable {
private static final Logger LOG = LoggerFactory.getLogger(
S3ErrorTable.class);
- private S3ErrorTable() {
- //No one should construct this object.
- }
-
public static final OS3Exception INVALID_URI = new OS3Exception("InvalidURI",
"Couldn't parse the specified URI.", HTTP_BAD_REQUEST);
@@ -154,6 +150,13 @@ private S3ErrorTable() {
HTTP_FORBIDDEN
);
+ private static Function generateInternalError =
+ e -> new OS3Exception("InternalError", e.getMessage(), HTTP_INTERNAL_ERROR);
+
+ private S3ErrorTable() {
+ //No one should construct this object.
+ }
+
public static OS3Exception newError(OS3Exception e, String resource) {
return newError(e, resource, null);
}
@@ -178,9 +181,6 @@ public static OS3Exception newError(OS3Exception e, String resource,
return err;
}
- private static Function generateInternalError = e ->
- new OS3Exception("InternalError", e.getMessage(), HTTP_INTERNAL_ERROR);
-
public static OS3Exception getInternalError(Exception e) {
return generateInternalError.apply(e);
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java
index 539154384419..8a33c7d7fd8c 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java
@@ -39,10 +39,6 @@
* golang clients.
*/
public final class RFC1123Util {
-
- private RFC1123Util() {
- }
-
/**
* An RFC-1123 compatible file format which always use two digits for the
* days.
@@ -95,4 +91,7 @@ private RFC1123Util() {
.appendOffset("+HHMM", OzoneConsts.OZONE_TIME_ZONE)
.toFormatter();
}
+
+ private RFC1123Util() {
+ }
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
index 3204abeebab3..1060f2568c80 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
@@ -26,11 +26,6 @@
@InterfaceAudience.Private
public final class S3Consts {
- //Never Constructed
- private S3Consts() {
-
- }
-
public static final String COPY_SOURCE_HEADER = "x-amz-copy-source";
public static final String COPY_SOURCE_HEADER_RANGE =
"x-amz-copy-source-range";
@@ -92,6 +87,11 @@ private S3Consts() {
public static final Pattern TAG_REGEX_PATTERN = Pattern.compile("^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$");
public static final String MP_PARTS_COUNT = "x-amz-mp-parts-count";
+ //Never Constructed
+ private S3Consts() {
+
+ }
+
/**
* Copy directive for metadata and tags.
*/
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java
index 5c6d744a4522..e34e72d95483 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java
@@ -37,10 +37,6 @@
*/
public final class NSSummaryCLIUtils {
- private NSSummaryCLIUtils() {
-
- }
-
private static final String OFS_PREFIX = "ofs://";
public static String makeHttpCall(StringBuilder url, String path,
@@ -154,4 +150,8 @@ public static String parseInputPath(String path) {
}
return path.substring(idx);
}
+
+ private NSSummaryCLIUtils() {
+
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java
index 9717a3856789..c9b8d7fbe534 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetFailedDeletedBlocksTxnSubcommand.java
@@ -46,18 +46,6 @@ public class GetFailedDeletedBlocksTxnSubcommand extends ScmSubcommand {
@CommandLine.ArgGroup(multiplicity = "1")
private TransactionsOption group;
- static class TransactionsOption {
- @CommandLine.Option(names = {"-a", "--all"},
- description = "Get all the failed transactions.")
- private boolean getAll;
-
- @CommandLine.Option(names = {"-c", "--count"},
- defaultValue = "20",
- description = "Get at most the count number of the" +
- " failed transactions.")
- private int count;
- }
-
@CommandLine.Option(names = {"-s", "--startTxId", "--start-tx-id"},
defaultValue = "0",
description = "The least transaction ID to start with, default 0." +
@@ -89,4 +77,16 @@ public void execute(ScmClient client) throws IOException {
System.out.println(result);
}
}
+
+ static class TransactionsOption {
+ @CommandLine.Option(names = {"-a", "--all"},
+ description = "Get all the failed transactions.")
+ private boolean getAll;
+
+ @CommandLine.Option(names = {"-c", "--count"},
+ defaultValue = "20",
+ description = "Get at most the count number of the" +
+ " failed transactions.")
+ private int count;
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/DBConsts.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/DBConsts.java
index 7f8bb903ae70..d4b3c5a32043 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/DBConsts.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/DBConsts.java
@@ -22,10 +22,6 @@
*/
public final class DBConsts {
- private DBConsts() {
- //Never constructed
- }
-
public static final String DRIVER = "org.sqlite.JDBC";
public static final String CONNECTION_PREFIX = "jdbc:sqlite:";
public static final String DATABASE_NAME = "container_datanode.db";
@@ -35,4 +31,7 @@ private DBConsts() {
public static final String DATANODE_CONTAINER_LOG_TABLE_NAME = "DatanodeContainerLogTable";
public static final String CONTAINER_LOG_TABLE_NAME = "ContainerLogTable";
+ private DBConsts() {
+ //Never constructed
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
index 0e3b3cfadde7..5b082ea49803 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBDefinitionFactory.java
@@ -46,20 +46,20 @@
*/
public final class DBDefinitionFactory {
- private DBDefinitionFactory() {
- }
-
private static final AtomicReference DATANODE_DB_SCHEMA_VERSION = new AtomicReference<>();
private static final Map DB_MAP;
static {
final Map map = new HashMap<>();
Arrays.asList(SCMDBDefinition.get(), OMDBDefinition.get(), ReconSCMDBDefinition.get(),
- WitnessedContainerDBDefinition.get())
+ WitnessedContainerDBDefinition.get())
.forEach(dbDefinition -> map.put(dbDefinition.getName(), dbDefinition));
DB_MAP = Collections.unmodifiableMap(map);
}
+ private DBDefinitionFactory() {
+ }
+
public static DBDefinition getDefinition(String dbName) {
// OM snapshot DB name starts with this prefix.
if (!dbName.equals(OM_DB_NAME) && dbName.startsWith(OM_DB_NAME)) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/audit/parser/common/DatabaseHelper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/audit/parser/common/DatabaseHelper.java
index 7d6271aca3c8..63f3fa53cde5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/audit/parser/common/DatabaseHelper.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/audit/parser/common/DatabaseHelper.java
@@ -45,9 +45,6 @@
* Database helper for ozone audit parser tool.
*/
public final class DatabaseHelper {
- private DatabaseHelper() {
- //Never constructed
- }
static {
loadProperties();
@@ -57,6 +54,10 @@ private DatabaseHelper() {
LoggerFactory.getLogger(DatabaseHelper.class);
private static Map properties;
+ private DatabaseHelper() {
+ //Never constructed
+ }
+
public static boolean setup(String dbName, String logs) throws Exception {
if (createAuditTable(dbName)) {
return insertAudits(dbName, logs);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/audit/parser/common/ParserConsts.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/audit/parser/common/ParserConsts.java
index ee93ce5aa0c8..1265a7fca523 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/audit/parser/common/ParserConsts.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/audit/parser/common/ParserConsts.java
@@ -21,11 +21,6 @@
* Constants used for ozone audit parser.
*/
public final class ParserConsts {
-
- private ParserConsts() {
- //Never constructed
- }
-
public static final String DRIVER = "org.sqlite.JDBC";
public static final String CONNECTION_PREFIX = "jdbc:sqlite:";
public static final String DATE_REGEX = "^\\d{4}-\\d{2}-\\d{2}.*$";
@@ -33,4 +28,7 @@ private ParserConsts() {
public static final String INSERT_AUDITS = "insertAuditEntry";
public static final String CREATE_AUDIT_TABLE = "createAuditTable";
+ private ParserConsts() {
+ //Never constructed
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java
index a9ba5961b517..31b14754eeb0 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java
@@ -48,18 +48,6 @@
description = "Parse prefix contents")
public class PrefixParser implements Callable {
- /**
- * Types to represent the level or path component type.
- */
- public enum Types {
- VOLUME,
- BUCKET,
- FILE,
- DIRECTORY,
- INTERMEDIATE_DIRECTORY,
- NON_EXISTENT_DIRECTORY,
- }
-
private final int[] parserStats = new int[Types.values().length];
@CommandLine.Option(names = {"--db"},
@@ -248,4 +236,16 @@ private static MetadataKeyFilters.KeyPrefixFilter getPrefixFilter(
public int getParserStats(Types type) {
return parserStats[type.ordinal()];
}
+
+ /**
+ * Types to represent the level or path component type.
+ */
+ public enum Types {
+ VOLUME,
+ BUCKET,
+ FILE,
+ DIRECTORY,
+ INTERMEDIATE_DIRECTORY,
+ NON_EXISTENT_DIRECTORY,
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasVerify.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasVerify.java
index 2aa72fa56700..2f1a30ed26d9 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasVerify.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasVerify.java
@@ -57,15 +57,6 @@ public class ReplicasVerify extends Handler {
@CommandLine.ArgGroup(exclusive = false, multiplicity = "1")
private Verification verification;
- static class Verification {
- @CommandLine.Option(names = "--checksums",
- description = "Do client side data checksum validation of all replicas.",
- // value will be true only if the "--checksums" option was specified on the CLI
- defaultValue = "false")
- private boolean doExecuteChecksums;
-
- }
-
private List replicaVerifiers;
@Override
@@ -126,4 +117,13 @@ void checkBucket(OzoneBucket bucket) throws IOException {
void processKey(OzoneKeyDetails keyDetails) {
replicaVerifiers.forEach(verifier -> verifier.verifyKey(keyDetails));
}
+
+ static class Verification {
+ @CommandLine.Option(names = "--checksums",
+ description = "Do client side data checksum validation of all replicas.",
+ // value will be true only if the "--checksums" option was specified on the CLI
+ defaultValue = "false")
+ private boolean doExecuteChecksums;
+
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java
index e89c71ed111a..852ed68930f1 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java
@@ -73,26 +73,6 @@
public class OmMetadataGenerator extends BaseFreonGenerator
implements Callable {
- enum Operation {
- CREATE_FILE,
- CREATE_STREAM_FILE,
- LOOKUP_FILE,
- READ_FILE,
- LIST_STATUS,
- LIST_STATUS_LIGHT,
- CREATE_KEY,
- CREATE_STREAM_KEY,
- LOOKUP_KEY,
- GET_KEYINFO,
- HEAD_KEY,
- READ_KEY,
- LIST_KEYS,
- LIST_KEYS_LIGHT,
- INFO_BUCKET,
- INFO_VOLUME,
- MIXED,
- }
-
@Option(names = {"-v", "--volume"},
description = "Name of the volume which contains the test data. Will be"
+ " created if missing.",
@@ -464,4 +444,24 @@ private Void performReadOperation(ReadOperation readOp, byte[] buffer) throws IO
public boolean allowEmptyPrefix() {
return true;
}
+
+ enum Operation {
+ CREATE_FILE,
+ CREATE_STREAM_FILE,
+ LOOKUP_FILE,
+ READ_FILE,
+ LIST_STATUS,
+ LIST_STATUS_LIGHT,
+ CREATE_KEY,
+ CREATE_STREAM_KEY,
+ LOOKUP_KEY,
+ GET_KEYINFO,
+ HEAD_KEY,
+ READ_KEY,
+ LIST_KEYS,
+ LIST_KEYS_LIGHT,
+ INFO_BUCKET,
+ INFO_VOLUME,
+ MIXED,
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java
index a820bbe9c7a4..a41252e848a0 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java
@@ -144,15 +144,6 @@ public class OzoneClientKeyReadWriteListOps extends BaseFreonGenerator
private static final AtomicLong NEXT_NUMBER = new AtomicLong();
- /**
- * Task type of read task, or write task.
- */
- public enum TaskType {
- READ_TASK,
- WRITE_TASK,
- LIST_TASK
- }
-
private KeyGeneratorUtil kg;
@Override
@@ -278,4 +269,13 @@ public String getKeyName() {
public boolean allowEmptyPrefix() {
return true;
}
+
+ /**
+ * Task type of read task, or write task.
+ */
+ public enum TaskType {
+ READ_TASK,
+ WRITE_TASK,
+ LIST_TASK
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
index db335ac23c1a..3c2e364668e5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
@@ -94,13 +94,6 @@ public final class RandomKeyGenerator implements Callable, FreonSubcommand
@ParentCommand
private Freon freon;
- enum FreonOps {
- VOLUME_CREATE,
- BUCKET_CREATE,
- KEY_CREATE,
- KEY_WRITE
- }
-
private static final String DURATION_FORMAT = "HH:mm:ss,SSS";
private static final int QUANTILES = 10;
@@ -1218,4 +1211,11 @@ public void run() {
public int getThreadPoolSize() {
return threadPoolSize;
}
+
+ enum FreonOps {
+ VOLUME_CREATE,
+ BUCKET_CREATE,
+ KEY_CREATE,
+ KEY_WRITE
+ }
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
index 70fd469f915b..64c8d1bddf68 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
@@ -116,15 +116,6 @@ public final class SCMThroughputBenchmark implements Callable, FreonSubcom
private static final Logger LOG =
LoggerFactory.getLogger(SCMThroughputBenchmark.class);
- /**
- * Type of benchmarks.
- */
- public enum BenchmarkType {
- AllocateBlocks,
- AllocateContainers,
- ProcessReports,
- }
-
@CommandLine.ParentCommand
private Freon freon;
@@ -921,4 +912,13 @@ private static ContainerReportsProto createContainerReport() {
private static PipelineReportsProto createPipelineReport() {
return PipelineReportsProto.newBuilder().build();
}
+
+ /**
+ * Type of benchmarks.
+ */
+ public enum BenchmarkType {
+ AllocateBlocks,
+ AllocateContainers,
+ ProcessReports,
+ }
}