diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 68d73f3c41fb..9b5c89e1f73e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -58,15 +58,6 @@ public class ContainerSet implements Iterable<Container<?>> {
 
   private static final Logger LOG = LoggerFactory.getLogger(ContainerSet.class);
 
-  public static ContainerSet newReadOnlyContainerSet(long recoveringTimeout) {
-    return new ContainerSet(null, recoveringTimeout);
-  }
-
-  public static ContainerSet newRwContainerSet(Table<Long, String> containerIdsTable, long recoveringTimeout) {
-    Objects.requireNonNull(containerIdsTable, "containerIdsTable == null");
-    return new ContainerSet(containerIdsTable, recoveringTimeout);
-  }
-
   private final ConcurrentSkipListMap<Long, Container<?>> containerMap =
       new ConcurrentSkipListMap<>();
   private final ConcurrentSkipListSet<Long> missingContainerSet =
@@ -77,6 +68,15 @@ public static ContainerSet newRwContainerSet(Table<Long, String> containe
   private long recoveringTimeout;
   private final Table<Long, String> containerIdsTable;
 
+  public static ContainerSet newReadOnlyContainerSet(long recoveringTimeout) {
+    return new ContainerSet(null, recoveringTimeout);
+  }
+
+  public static ContainerSet newRwContainerSet(Table<Long, String> containerIdsTable, long recoveringTimeout) {
+    Objects.requireNonNull(containerIdsTable, "containerIdsTable == null");
+    return new ContainerSet(containerIdsTable, recoveringTimeout);
+  }
+
   private ContainerSet(Table<Long, String> continerIdsTable, long recoveringTimeout) {
     this(continerIdsTable, recoveringTimeout, null);
   }
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java
index 92737377630a..ebf45e88dda3 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java
@@ -26,12 +26,12 @@
  */
 public final class CoderUtil {
 
+  private static byte[] emptyChunk = new byte[4096];
+
   private CoderUtil() {
-    // No called
+    // Not called
   }
 
-  private static byte[] emptyChunk = new byte[4096];
-
   /**
    * Make sure to return an empty chunk buffer for the desired length.
    * @param leastLength
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
index 65a855776589..0524cea0922c 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
@@ -26,8 +26,6 @@
 @InterfaceAudience.Private
 public final class GF256 {
 
-  private GF256() { }
-
   private static final byte[] GF_BASE = new byte[]{
       (byte) 0x01, (byte) 0x02, (byte) 0x04, (byte) 0x08, (byte) 0x10,
       (byte) 0x20, (byte) 0x40, (byte) 0x80, (byte) 0x1d, (byte) 0x3a,
@@ -153,6 +151,9 @@ private GF256() { }
     }
   }
 
+  private GF256() {
+  }
+
   /**
    * Get the big GF multiply table so utilize it efficiently.
   * @return the big GF multiply table
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java
index 86911cb81c86..ce5531fe0d11 100644
--- a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java
@@ -26,15 +26,14 @@
  */
 @InterfaceAudience.Private
 public final class RSUtil {
-
-  private RSUtil() {
-  }
-
   // We always use the byte system (with symbol size 8, field size 256,
   // primitive polynomial 285, and primitive root 2).
   public static final GaloisField GF = GaloisField.getInstance();
   public static final int PRIMITIVE_ROOT = 2;
 
+  private RSUtil() {
+  }
+
   public static int[] getPrimitivePower(int numDataUnits, int numParityUnits) {
     int[] primitivePower = new int[numDataUnits + numParityUnits];
     // compute powers of the primitive root
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java
index 0947dce34831..ee8de31f760f 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectMetrics.java
@@ -37,12 +37,6 @@ public class ManagedRocksObjectMetrics {
   private static final String SOURCE_NAME =
       ManagedRocksObjectMetrics.class.getSimpleName();
 
-  private static ManagedRocksObjectMetrics create() {
-    return DefaultMetricsSystem.instance().register(SOURCE_NAME,
-        "OzoneManager DoubleBuffer Metrics",
-        new ManagedRocksObjectMetrics());
-  }
-
   @Metric(about = "Total number of managed RocksObjects that are not " +
       "closed before being GCed.")
   private MutableCounterLong totalLeakObjects;
@@ -74,4 +68,10 @@ long totalLeakObjects() {
   long totalManagedObjects() {
     return totalManagedObjects.value();
   }
+
+  private static ManagedRocksObjectMetrics create() {
+    return DefaultMetricsSystem.instance().register(SOURCE_NAME,
+        "OzoneManager DoubleBuffer Metrics",
+        new ManagedRocksObjectMetrics());
+  }
 }
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
index eef1f286d6c6..e60508033e51 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
@@ -34,8 +34,6 @@
  * Utilities to help assert RocksObject closures.
  */
 public final class ManagedRocksObjectUtils {
-  private ManagedRocksObjectUtils() {
-  }
 
   static final Logger LOG =
       LoggerFactory.getLogger(ManagedRocksObjectUtils.class);
@@ -44,6 +42,9 @@ private ManagedRocksObjectUtils() {
   private static final LeakDetector LEAK_DETECTOR =
       new LeakDetector("ManagedRocksObject");
 
+  private ManagedRocksObjectUtils() {
+  }
+
   static UncheckedAutoCloseable track(AutoCloseable object) {
     ManagedRocksObjectMetrics.INSTANCE.increaseManagedObject();
     final Class<?> clazz = object.getClass();
diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
index 97f7a7a49e79..461d9ddc8ab9 100644
--- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
+++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java
@@ -32,6 +32,12 @@
  */
 public class ManagedRawSSTFileReader implements Closeable {
 
+  private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class);
+
+  private final String fileName;
+  // Native address of pointer to the object.
+  private final long nativeHandle;
+
   public static boolean tryLoadLibrary() {
     try {
       loadLibrary();
@@ -50,11 +56,6 @@ public static boolean loadLibrary() throws NativeLibraryNotLoadedException {
     return true;
   }
 
-  private final String fileName;
-  // Native address of pointer to the object.
-  private final long nativeHandle;
-  private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class);
-
   public ManagedRawSSTFileReader(final ManagedOptions options, final String fileName, final int readAheadSize) {
     this.fileName = fileName;
     this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize);
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
index 2243d0f0d2cc..75a58ceb4b19 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java
@@ -49,6 +49,8 @@
 
 public class DecommissionStatusSubCommand extends ScmSubcommand {
 
+  private String errorMessage = "Error getting pipeline and container metrics for ";
+
   @CommandLine.Option(names = { "--id" },
       description = "Show info by datanode UUID",
       defaultValue = "")
@@ -122,8 +124,6 @@ public void execute(ScmClient scmClient) throws IOException {
     }
   }
 
-  private String errorMessage = "Error getting pipeline and container metrics for ";
-
   public String getErrorMessage() {
     return errorMessage;
   }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
index 514be5a62065..2d486dab495a 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
@@ -60,27 +60,6 @@ public class UsageInfoSubcommand extends ScmSubcommand {
   @CommandLine.ArgGroup(multiplicity = "1")
   private ExclusiveArguments exclusiveArguments;
 
-  private static class ExclusiveArguments {
-    @CommandLine.Option(names = {"--address"}, paramLabel = "ADDRESS",
-        description = "Show info by datanode ip or hostname address.",
-        defaultValue = "")
-    private String address;
-
-    @CommandLine.Option(names = {"--uuid"}, paramLabel = "UUID", description =
-        "Show info by datanode UUID.", defaultValue = "")
-    private String uuid;
-
-    @CommandLine.Option(names = {"-m", "--most-used"},
-        description = "Show the most used datanodes.",
-        defaultValue = "false")
-    private boolean mostUsed;
-
-    @CommandLine.Option(names = {"-l", "--least-used"},
-        description = "Show the least used datanodes.",
-        defaultValue = "false")
-    private boolean leastUsed;
-  }
-
   @CommandLine.Option(names = {"-c", "--count"}, description = "Number of " +
       "datanodes to display (Default: ${DEFAULT-VALUE}).",
       paramLabel = "NUMBER OF NODES", defaultValue = "3")
@@ -288,4 +267,25 @@ public long getPipelineCount() {
       return pipelineCount;
     }
   }
+
+  private static class ExclusiveArguments {
+    @CommandLine.Option(names = {"--address"}, paramLabel = "ADDRESS",
+        description = "Show info by datanode ip or hostname address.",
+        defaultValue = "")
+    private String address;
+
+    @CommandLine.Option(names = {"--uuid"}, paramLabel = "UUID", description =
+        "Show info by datanode UUID.", defaultValue = "")
+    private String uuid;
+
+    @CommandLine.Option(names = {"-m", "--most-used"},
+        description = "Show the most used datanodes.",
+        defaultValue = "false")
+    private boolean mostUsed;
+
+    @CommandLine.Option(names = {"-l", "--least-used"},
+        description = "Show the least used datanodes.",
+        defaultValue = "false")
+    private boolean leastUsed;
+  }
 }
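Every hunk above makes the same mechanical change: members are reordered so each class declares static fields first, then instance fields, then constructors, then methods. This is consistent with Checkstyle's DeclarationOrder rule, which checks exactly that sequence (it does not constrain ordering among the methods themselves, which is why the moved static factories land in different spots per file). A minimal sketch of the target layout, using a hypothetical class rather than code from the patch:

```java
// Hypothetical example (not from the patch) of the declaration order
// these hunks converge on: static fields, instance fields, constructors,
// then methods.
public final class Example {

  // 1. Static fields and constants come first.
  private static final int DEFAULT_SIZE = 4096;

  // 2. Instance fields follow.
  private final int size;

  // 3. Constructors come after all fields.
  private Example(int size) {
    this.size = size;
  }

  // 4. Methods come last; static factory methods move here even when
  //    they were previously declared above the fields.
  public static Example create() {
    return new Example(DEFAULT_SIZE);
  }

  public int size() {
    return size;
  }
}
```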