diff --git a/dev-support/pmd/pmd-ruleset.xml b/dev-support/pmd/pmd-ruleset.xml
index 2c7115237bce..8d977ddb03e7 100644
--- a/dev-support/pmd/pmd-ruleset.xml
+++ b/dev-support/pmd/pmd-ruleset.xml
@@ -29,6 +29,7 @@
+
.*/generated-sources/.*
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 4c33c362b323..c584e4681e5d 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -628,7 +628,7 @@ CompletableFuture executePutBlock(boolean close,
// never reach, just to make compiler happy.
return null;
}
- return flushFuture.thenApply(r -> new PutBlockResult(flushPos, asyncReply.getLogIndex(), r));
+ return flushFuture.thenApply(r -> new PutBlockResult(asyncReply.getLogIndex(), r));
}
@Override
@@ -997,7 +997,7 @@ private CompletableFuture writeChunkToContainer(
// never reach.
return null;
}
- return validateFuture.thenApply(x -> new PutBlockResult(flushPos, asyncReply.getLogIndex(), x));
+ return validateFuture.thenApply(x -> new PutBlockResult(asyncReply.getLogIndex(), x));
}
private void handleSuccessfulPutBlock(
@@ -1227,12 +1227,10 @@ public int getReplicationIndex() {
}
static class PutBlockResult {
- private final long flushPosition;
private final long commitIndex;
private final ContainerCommandResponseProto response;
- PutBlockResult(long flushPosition, long commitIndex, ContainerCommandResponseProto response) {
- this.flushPosition = flushPosition;
+ PutBlockResult(long commitIndex, ContainerCommandResponseProto response) {
this.commitIndex = commitIndex;
this.response = response;
}
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java
index 868c5fd75d8c..0d772dfe77fa 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java
@@ -306,7 +306,7 @@ public CompletableFuture executePutBlock(boolean close,
return null;
}
this.putBlkRspFuture = flushFuture;
- return flushFuture.thenApply(r -> new PutBlockResult(0, 0, r));
+ return flushFuture.thenApply(r -> new PutBlockResult(0, r));
}
/**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index bdccd7647ac7..a11eb204aa2c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -103,7 +103,6 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
private final Map handlers;
private final ConfigurationSource conf;
private final ContainerSet containerSet;
- private final VolumeSet volumeSet;
private final StateContext context;
private final float containerCloseThreshold;
private final ProtocolMessageMetrics protocolMetrics;
@@ -125,7 +124,6 @@ public HddsDispatcher(ConfigurationSource config, ContainerSet contSet,
TokenVerifier tokenVerifier) {
this.conf = config;
this.containerSet = contSet;
- this.volumeSet = volumes;
this.context = context;
this.handlers = handlers;
this.metrics = metrics;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index fafc77aa6956..37867dbff6b4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -71,26 +71,12 @@ public class HeartbeatEndpointTask
public static final Logger LOG =
LoggerFactory.getLogger(HeartbeatEndpointTask.class);
private final EndpointStateMachine rpcEndpoint;
- private final ConfigurationSource conf;
private DatanodeDetailsProto datanodeDetailsProto;
private StateContext context;
private int maxContainerActionsPerHB;
private int maxPipelineActionsPerHB;
private HDDSLayoutVersionManager layoutVersionManager;
- /**
- * Constructs a SCM heart beat.
- *
- * @param rpcEndpoint rpc Endpoint
- * @param conf Config.
- * @param context State context
- */
- public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint,
- ConfigurationSource conf, StateContext context) {
- this(rpcEndpoint, conf, context,
- context.getParent().getLayoutVersionManager());
- }
-
/**
* Constructs a SCM heart beat.
*
@@ -103,7 +89,6 @@ public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint,
ConfigurationSource conf, StateContext context,
HDDSLayoutVersionManager versionManager) {
this.rpcEndpoint = rpcEndpoint;
- this.conf = conf;
this.context = context;
this.maxContainerActionsPerHB = conf.getInt(HDDS_CONTAINER_ACTION_MAX_LIMIT,
HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 5a39b10a30db..9b538f187cfc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -24,7 +24,6 @@
import java.io.IOException;
import java.util.UUID;
import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -48,8 +47,6 @@ public final class RegisterEndpointTask implements
static final Logger LOG = LoggerFactory.getLogger(RegisterEndpointTask.class);
private final EndpointStateMachine rpcEndPoint;
- private final ConfigurationSource conf;
- private Future result;
private DatanodeDetails datanodeDetails;
private final OzoneContainer datanodeContainerManager;
private StateContext stateContext;
@@ -59,34 +56,15 @@ public final class RegisterEndpointTask implements
* Creates a register endpoint task.
*
* @param rpcEndPoint - endpoint
- * @param conf - conf
- * @param ozoneContainer - container
- * @param context - State context
- */
- @VisibleForTesting
- public RegisterEndpointTask(EndpointStateMachine rpcEndPoint,
- ConfigurationSource conf,
- OzoneContainer ozoneContainer,
- StateContext context) {
- this(rpcEndPoint, conf, ozoneContainer, context,
- context.getParent().getLayoutVersionManager());
- }
-
- /**
- * Creates a register endpoint task.
- *
- * @param rpcEndPoint - endpoint
- * @param conf - conf
* @param ozoneContainer - container
* @param context - State context
* @param versionManager - layout version Manager
*/
@VisibleForTesting
public RegisterEndpointTask(EndpointStateMachine rpcEndPoint,
- ConfigurationSource conf, OzoneContainer ozoneContainer,
+ OzoneContainer ozoneContainer,
StateContext context, HDDSLayoutVersionManager versionManager) {
this.rpcEndPoint = rpcEndPoint;
- this.conf = conf;
this.datanodeContainerManager = ozoneContainer;
this.stateContext = context;
if (versionManager != null) {
@@ -305,7 +283,7 @@ public RegisterEndpointTask build() {
}
RegisterEndpointTask task = new RegisterEndpointTask(this
- .endPointStateMachine, this.conf, this.container, this.context,
+ .endPointStateMachine, this.container, this.context,
this.versionManager);
task.setDatanodeDetails(datanodeDetails);
return task;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index f6157f21a537..f43f83dcfa81 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -70,7 +70,6 @@
public final class XceiverServerGrpc implements XceiverServerSpi {
private static final Logger
LOG = LoggerFactory.getLogger(XceiverServerGrpc.class);
- private static final String COMPONENT = "dn";
private int port;
private UUID id;
private Server server;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCacheMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCacheMetrics.java
index 4483763f95c9..c4ca78ffb41c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCacheMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCacheMetrics.java
@@ -28,9 +28,6 @@
*/
public final class ContainerCacheMetrics {
- private final String name;
- private final MetricsSystem ms;
-
@Metric("Rate to measure the db open latency")
private MutableRate dbOpenLatency;
@@ -52,16 +49,14 @@ public final class ContainerCacheMetrics {
@Metric("Number of Container Cache Evictions")
private MutableCounterLong numCacheEvictions;
- private ContainerCacheMetrics(String name, MetricsSystem ms) {
- this.name = name;
- this.ms = ms;
+ private ContainerCacheMetrics() {
}
public static ContainerCacheMetrics create() {
MetricsSystem ms = DefaultMetricsSystem.instance();
String name = "ContainerCacheMetrics";
- return ms.register(name, "null", new ContainerCacheMetrics(name, ms));
+ return ms.register(name, "null", new ContainerCacheMetrics());
}
public void incNumDbGetOps() {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 6ac3b62e8bc1..6663786d7e07 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -79,7 +79,6 @@ public class MutableVolumeSet implements VolumeSet {
private final ReentrantReadWriteLock volumeSetRWLock;
private final String datanodeUuid;
- private String clusterID;
private final StorageVolumeChecker volumeChecker;
private CheckedRunnable failedVolumeListener;
@@ -100,7 +99,6 @@ public MutableVolumeSet(String dnUuid, String clusterID,
) throws IOException {
this.context = context;
this.datanodeUuid = dnUuid;
- this.clusterID = clusterID;
this.conf = conf;
this.volumeSetRWLock = new ReentrantReadWriteLock();
this.volumeChecker = volumeChecker;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
index 095a229b4193..14ee0940d310 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java
@@ -215,18 +215,12 @@ private static final class LastCheckResult {
@Nullable
private final V result;
- /**
- * Exception thrown by the check. null if it returned a result.
- */
- private final Throwable exception; // null on success.
-
/**
* Initialize with a result.
* @param result
*/
private LastCheckResult(V result, long completedAt) {
this.result = result;
- this.exception = null;
this.completedAt = completedAt;
}
@@ -237,7 +231,6 @@ private LastCheckResult(V result, long completedAt) {
*/
private LastCheckResult(Throwable t, long completedAt) {
this.result = null;
- this.exception = t;
this.completedAt = completedAt;
}
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
index 586fdc402f37..e668c427607c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
@@ -55,12 +54,11 @@ public class ChunkManagerDispatcher implements ChunkManager {
private final Map handlers
= new EnumMap<>(ContainerLayoutVersion.class);
- ChunkManagerDispatcher(boolean sync, BlockManager manager,
- VolumeSet volSet) {
+ ChunkManagerDispatcher(boolean sync, BlockManager manager) {
handlers.put(FILE_PER_CHUNK,
- new FilePerChunkStrategy(sync, manager, volSet));
+ new FilePerChunkStrategy(sync, manager));
handlers.put(FILE_PER_BLOCK,
- new FilePerBlockStrategy(sync, manager, volSet));
+ new FilePerBlockStrategy(sync, manager));
}
@Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
index 5394ce468151..6c3fc8c79cdd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
@@ -74,6 +74,6 @@ public static ChunkManager createChunkManager(ConfigurationSource conf,
return new ChunkManagerDummyImpl();
}
- return new ChunkManagerDispatcher(sync, manager, volSet);
+ return new ChunkManagerDispatcher(sync, manager);
}
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 0621be4aae27..a402c7a2b32e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -50,7 +50,6 @@
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
@@ -76,10 +75,8 @@ public class FilePerBlockStrategy implements ChunkManager {
private final MappedBufferManager mappedBufferManager;
private final boolean readNettyChunkedNioFile;
- private final VolumeSet volumeSet;
- public FilePerBlockStrategy(boolean sync, BlockManager manager,
- VolumeSet volSet) {
+ public FilePerBlockStrategy(boolean sync, BlockManager manager) {
doSyncWrite = sync;
this.defaultReadBufferCapacity = manager == null ? 0 :
manager.getDefaultReadBufferCapacity();
@@ -88,7 +85,6 @@ public FilePerBlockStrategy(boolean sync, BlockManager manager,
this.readMappedBufferMaxCount = manager == null ? 0
: manager.getReadMappedBufferMaxCount();
LOG.info("ozone.chunk.read.mapped.buffer.max.count is load with {}", readMappedBufferMaxCount);
- this.volumeSet = volSet;
if (this.readMappedBufferMaxCount > 0) {
mappedBufferManager = new MappedBufferManager(this.readMappedBufferMaxCount);
} else {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
index aedb8402b0c6..4cd1b1ff5600 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
@@ -42,7 +42,6 @@
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
@@ -67,10 +66,8 @@ public class FilePerChunkStrategy implements ChunkManager {
private final MappedBufferManager mappedBufferManager;
private final boolean readNettyChunkedNioFile;
- private final VolumeSet volumeSet;
- public FilePerChunkStrategy(boolean sync, BlockManager manager,
- VolumeSet volSet) {
+ public FilePerChunkStrategy(boolean sync, BlockManager manager) {
doSyncWrite = sync;
blockManager = manager;
this.defaultReadBufferCapacity = manager == null ? 0 :
@@ -80,7 +77,6 @@ public FilePerChunkStrategy(boolean sync, BlockManager manager,
this.readMappedBufferMaxCount = manager == null ? 0
: manager.getReadMappedBufferMaxCount();
LOG.info("ozone.chunk.read.mapped.buffer.max.count is load with {}", readMappedBufferMaxCount);
- this.volumeSet = volSet;
if (this.readMappedBufferMaxCount > 0) {
mappedBufferManager = new MappedBufferManager(this.readMappedBufferMaxCount);
} else {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java
index 1470b0db81da..8186bdb029f2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/MappedBufferManager.java
@@ -38,12 +38,10 @@ public class MappedBufferManager {
new ConcurrentHashMap>();
private static final Logger LOG = LoggerFactory.getLogger(MappedBufferManager.class);
private final Semaphore semaphore;
- private final int capacity;
private final AtomicBoolean cleanupInProgress = new AtomicBoolean(false);
private final Striped lock;
public MappedBufferManager(int capacity) {
- this.capacity = capacity;
this.semaphore = new Semaphore(capacity);
this.lock = Striped.lazyWeakLock(1024);
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 34a5553f311c..60117d25609e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -164,9 +164,9 @@ private KeyValueContainerData createToDeleteBlocks(ContainerSet containerSet,
int numOfBlocksPerContainer, int numOfChunksPerBlock) throws IOException {
ChunkManager chunkManager;
if (layout == FILE_PER_BLOCK) {
- chunkManager = new FilePerBlockStrategy(true, null, null);
+ chunkManager = new FilePerBlockStrategy(true, null);
} else {
- chunkManager = new FilePerChunkStrategy(true, null, null);
+ chunkManager = new FilePerChunkStrategy(true, null);
}
byte[] arr = randomAlphanumeric(1048576).getBytes(UTF_8);
ChunkBuffer buffer = ChunkBuffer.wrap(ByteBuffer.wrap(arr));
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
index 1b7cf2ae71ce..3d8932010e1d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
@@ -129,7 +129,7 @@ public void setup() throws Exception {
StorageVolume.VolumeType.DATA_VOLUME, null);
blockManager = new BlockManagerImpl(conf);
- chunkManager = new FilePerBlockStrategy(true, blockManager, volumeSet);
+ chunkManager = new FilePerBlockStrategy(true, blockManager);
containerSet = new ContainerSet(1000);
keyValueHandler = new KeyValueHandler(conf, datanodeUuid,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java
index a6f340bb6349..4d635e67fc2c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java
@@ -68,7 +68,7 @@ public void updateConfig(OzoneConfiguration config) {
FILE_PER_CHUNK {
@Override
public ChunkManager createChunkManager(boolean sync, BlockManager manager) {
- return new FilePerChunkStrategy(sync, manager, null);
+ return new FilePerChunkStrategy(sync, manager);
}
@Override
@@ -85,7 +85,7 @@ public ContainerLayoutVersion getLayout() {
FILE_PER_BLOCK {
@Override
public ChunkManager createChunkManager(boolean sync, BlockManager manager) {
- return new FilePerBlockStrategy(sync, null, null);
+ return new FilePerBlockStrategy(sync, null);
}
@Override
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
index b3e89d387e55..a01a93eead8a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java
@@ -61,7 +61,6 @@
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashSet;
@@ -74,8 +73,6 @@
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -122,8 +119,6 @@ public abstract class DefaultCertificateClient implements CertificateClient {
private String rootCaCertId;
private String component;
private final String threadNamePrefix;
- private List pemEncodedCACerts = null;
- private Lock pemEncodedCACertsLock = new ReentrantLock();
private ReloadingX509KeyManager keyManager;
private ReloadingX509TrustManager trustManager;
@@ -1466,6 +1461,5 @@ public synchronized void setCACertificate(X509Certificate cert)
String pemCert = CertificateCodec.getPEMEncodedString(cert);
certificateMap.put(caCertId,
CertificateCodec.getCertPathFromPemEncodedString(pemCert));
- pemEncodedCACerts = Arrays.asList(pemCert);
}
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java
index e8a085799c07..b08bad4a7ace 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java
@@ -42,17 +42,12 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Common test cases for {@link ShortLivedTokenVerifier} implementations.
*/
public abstract class TokenVerifierTests {
- private static final Logger LOG =
- LoggerFactory.getLogger(TokenVerifierTests.class);
-
protected static final UUID SECRET_KEY_ID = UUID.randomUUID();
/**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index b1c13244b106..487546ac237d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -115,7 +115,7 @@ public DeletedBlockLogImpl(ConfigurationSource conf,
this.metrics = metrics;
this.transactionStatusManager =
new SCMDeletedBlockTransactionStatusManager(deletedBlockLogStateManager,
- containerManager, this.scmContext, metrics, scmCommandTimeoutMs);
+ containerManager, metrics, scmCommandTimeoutMs);
}
@Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java
index 835735f5f0d4..49b0cc99bc5e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMDeletedBlockTransactionStatusManager.java
@@ -45,7 +45,6 @@
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -67,7 +66,6 @@ public class SCMDeletedBlockTransactionStatusManager {
private final DeletedBlockLogStateManager deletedBlockLogStateManager;
private final ContainerManager containerManager;
private final ScmBlockDeletingServiceMetrics metrics;
- private final SCMContext scmContext;
private final long scmCommandTimeoutMs;
/**
@@ -82,13 +80,12 @@ public class SCMDeletedBlockTransactionStatusManager {
public SCMDeletedBlockTransactionStatusManager(
DeletedBlockLogStateManager deletedBlockLogStateManager,
- ContainerManager containerManager, SCMContext scmContext,
+ ContainerManager containerManager,
ScmBlockDeletingServiceMetrics metrics, long scmCommandTimeoutMs) {
// maps transaction to dns which have committed it.
this.deletedBlockLogStateManager = deletedBlockLogStateManager;
this.metrics = metrics;
this.containerManager = containerManager;
- this.scmContext = scmContext;
this.scmCommandTimeoutMs = scmCommandTimeoutMs;
this.transactionToDNsCommitMap = new ConcurrentHashMap<>();
this.transactionToRetryCountMap = new ConcurrentHashMap<>();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
index 19a41d9befa7..c9c5bcf592fa 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
@@ -86,8 +86,6 @@ public class ContainerManagerImpl implements ContainerManager {
@SuppressWarnings("java:S2245") // no need for secure random
private final Random random = new Random();
- private int maxCountOfContainerList;
-
/**
*
*/
@@ -117,10 +115,6 @@ public ContainerManagerImpl(
.getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
- this.maxCountOfContainerList = conf
- .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT,
- ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT);
-
this.scmContainerManagerMetrics = SCMContainerManagerMetrics.create();
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index d18ea6c064a0..5cc70f652285 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -72,7 +72,6 @@
*/
public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
- private OzoneConfiguration conf;
private EventPublisher eventQueue;
private NodeManager nodeManager;
private ReplicationManager replicationManager;
@@ -144,7 +143,6 @@ public DatanodeAdminMonitorImpl(
EventPublisher eventQueue,
NodeManager nodeManager,
ReplicationManager replicationManager) {
- this.conf = conf;
this.eventQueue = eventQueue;
this.nodeManager = nodeManager;
this.replicationManager = replicationManager;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
index 860c48ffb09f..dd2aeb471d3b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.scm.node;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.ha.SCMService.Event;
@@ -38,16 +37,13 @@ public class NewNodeHandler implements EventHandler {
private final PipelineManager pipelineManager;
private final NodeDecommissionManager decommissionManager;
- private final ConfigurationSource conf;
private final SCMServiceManager serviceManager;
public NewNodeHandler(PipelineManager pipelineManager,
NodeDecommissionManager decommissionManager,
- ConfigurationSource conf,
SCMServiceManager serviceManager) {
this.pipelineManager = pipelineManager;
this.decommissionManager = decommissionManager;
- this.conf = conf;
this.serviceManager = serviceManager;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
index acdf49efdf0b..874835389587 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
@@ -22,16 +22,12 @@
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Handles Node Reports from datanode.
*/
public class NodeReportHandler implements EventHandler {
- private static final Logger LOGGER = LoggerFactory
- .getLogger(NodeReportHandler.class);
private final NodeManager nodeManager;
public NodeReportHandler(NodeManager nodeManager) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/ReadOnlyHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/ReadOnlyHealthyToHealthyNodeHandler.java
index 2faf2de0fa5e..eba89247a196 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/ReadOnlyHealthyToHealthyNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/ReadOnlyHealthyToHealthyNodeHandler.java
@@ -17,8 +17,6 @@
package org.apache.hadoop.hdds.scm.node;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.ha.SCMService.Event;
import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
@@ -36,12 +34,9 @@ public class ReadOnlyHealthyToHealthyNodeHandler
private static final Logger LOG =
LoggerFactory.getLogger(ReadOnlyHealthyToHealthyNodeHandler.class);
- private final ConfigurationSource conf;
private final SCMServiceManager serviceManager;
- public ReadOnlyHealthyToHealthyNodeHandler(
- OzoneConfiguration conf, SCMServiceManager serviceManager) {
- this.conf = conf;
+ public ReadOnlyHealthyToHealthyNodeHandler(SCMServiceManager serviceManager) {
this.serviceManager = serviceManager;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index b4301e7bc6e7..3f556e85d44e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -29,7 +29,6 @@
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
-import javax.management.ObjectName;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
@@ -52,8 +51,6 @@ public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean {
private final double criticalUtilizationThreshold;
private final Map> scmNodeStorageReportMap;
- // NodeStorageInfo MXBean
- private ObjectName scmNodeStorageInfoBean;
/**
* constructs the scmNodeStorageReportMap object.
*/
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
index f17d4dfa2920..60b88e94973e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
@@ -19,8 +19,6 @@
import java.io.IOException;
import java.util.Set;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
@@ -38,13 +36,11 @@ public class StaleNodeHandler implements EventHandler {
private final NodeManager nodeManager;
private final PipelineManager pipelineManager;
- private final ConfigurationSource conf;
public StaleNodeHandler(NodeManager nodeManager,
- PipelineManager pipelineManager, OzoneConfiguration conf) {
+ PipelineManager pipelineManager) {
this.nodeManager = nodeManager;
this.pipelineManager = pipelineManager;
- this.conf = conf;
}
@Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
index b79fad539b44..f6103ee4b8a6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.hdds.scm.pipeline;
import java.io.IOException;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
@@ -47,13 +45,11 @@ public class PipelineActionHandler
private final PipelineManager pipelineManager;
private final SCMContext scmContext;
- private final ConfigurationSource ozoneConf;
public PipelineActionHandler(PipelineManager pipelineManager,
- SCMContext scmContext, OzoneConfiguration conf) {
+ SCMContext scmContext) {
this.pipelineManager = pipelineManager;
this.scmContext = scmContext;
- this.ozoneConf = conf;
}
@Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index 908d29ed2246..251661eb47f7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -53,7 +53,6 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
LoggerFactory.getLogger(PipelinePlacementPolicy.class);
private final NodeManager nodeManager;
private final PipelineStateManager stateManager;
- private final ConfigurationSource conf;
private final int heavyNodeCriteria;
private static final int REQUIRED_RACKS = 2;
@@ -75,7 +74,6 @@ public PipelinePlacementPolicy(final NodeManager nodeManager,
final ConfigurationSource conf) {
super(nodeManager, conf);
this.nodeManager = nodeManager;
- this.conf = conf;
this.stateManager = stateManager;
String dnLimit = conf.get(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT);
this.heavyNodeCriteria = dnLimit == null ? 0 : Integer.parseInt(dnLimit);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index 317504f357c9..dbdb366a9119 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -50,7 +50,6 @@ public class PipelineReportHandler implements
private static final Logger LOGGER = LoggerFactory.getLogger(
PipelineReportHandler.class);
private final PipelineManager pipelineManager;
- private final ConfigurationSource conf;
private final SafeModeManager scmSafeModeManager;
private final SCMContext scmContext;
private final boolean pipelineAvailabilityCheck;
@@ -64,7 +63,6 @@ public PipelineReportHandler(SafeModeManager scmSafeModeManager,
this.scmSafeModeManager = scmSafeModeManager;
this.pipelineManager = pipelineManager;
this.scmContext = scmContext;
- this.conf = conf;
this.metrics = SCMPipelineMetrics.create();
this.pipelineAvailabilityCheck = conf.getBoolean(
HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index fe9fb65e0d93..461124157162 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -21,17 +21,12 @@
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Utility class for Ratis pipelines.
*/
public final class RatisPipelineUtils {
- private static final Logger LOG =
- LoggerFactory.getLogger(RatisPipelineUtils.class);
-
private RatisPipelineUtils() {
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java
index 771c32274af7..b816bc4de7f4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java
@@ -44,7 +44,7 @@ public WritableContainerFactory(StorageContainerManager scm) {
ConfigurationSource conf = scm.getConfiguration();
this.ratisProvider = new WritableRatisContainerProvider(
- conf, scm.getPipelineManager(),
+ scm.getPipelineManager(),
scm.getContainerManager(), scm.getPipelineChoosePolicy());
this.standaloneProvider = ratisProvider;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java
index 6ac344ab1a73..93b4c3b2f6c5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableRatisContainerProvider.java
@@ -22,7 +22,6 @@
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.PipelineRequestInformation;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -41,16 +40,14 @@ public class WritableRatisContainerProvider
private static final Logger LOG = LoggerFactory
.getLogger(WritableRatisContainerProvider.class);
- private final ConfigurationSource conf;
private final PipelineManager pipelineManager;
private final PipelineChoosePolicy pipelineChoosePolicy;
private final ContainerManager containerManager;
- public WritableRatisContainerProvider(ConfigurationSource conf,
+ public WritableRatisContainerProvider(
PipelineManager pipelineManager,
ContainerManager containerManager,
PipelineChoosePolicy pipelineChoosePolicy) {
- this.conf = conf;
this.pipelineManager = pipelineManager;
this.containerManager = containerManager;
this.pipelineChoosePolicy = pipelineChoosePolicy;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
index 76b30feaad3a..ced9d2c31ee0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
@@ -91,7 +91,6 @@ public class SCMSafeModeManager implements SafeModeManager {
private Set preCheckRules = new HashSet<>(1);
private ConfigurationSource config;
private static final String CONT_EXIT_RULE = "ContainerSafeModeRule";
- private static final String DN_EXIT_RULE = "DataNodeSafeModeRule";
private static final String HEALTHY_PIPELINE_EXIT_RULE =
"HealthyPipelineSafeModeRule";
private static final String ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE =
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index 4c42bff28c55..0c3627e5034a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -95,7 +95,6 @@ public class SCMBlockProtocolServer implements
new AuditLogger(AuditLoggerType.SCMLOGGER);
private final StorageContainerManager scm;
- private final OzoneConfiguration conf;
private final RPC.Server blockRpcServer;
private final InetSocketAddress blockRpcAddress;
private final ProtocolMessageMetrics
@@ -108,7 +107,6 @@ public class SCMBlockProtocolServer implements
public SCMBlockProtocolServer(OzoneConfiguration conf,
StorageContainerManager scm) throws IOException {
this.scm = scm;
- this.conf = conf;
this.perfMetrics = getPerfMetrics();
final int handlerCount = conf.getInt(OZONE_SCM_BLOCK_HANDLER_COUNT_KEY,
OZONE_SCM_HANDLER_COUNT_KEY, OZONE_SCM_HANDLER_COUNT_DEFAULT,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 7e898acc0ed1..116145da8d4f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -469,18 +469,18 @@ private void initializeEventHandlers() {
new CommandStatusReportHandler();
NewNodeHandler newNodeHandler = new NewNodeHandler(pipelineManager,
- scmDecommissionManager, configuration, serviceManager);
+ scmDecommissionManager, serviceManager);
NodeAddressUpdateHandler nodeAddressUpdateHandler =
new NodeAddressUpdateHandler(pipelineManager,
scmDecommissionManager, serviceManager);
StaleNodeHandler staleNodeHandler =
- new StaleNodeHandler(scmNodeManager, pipelineManager, configuration);
+ new StaleNodeHandler(scmNodeManager, pipelineManager);
DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
pipelineManager, containerManager);
StartDatanodeAdminHandler datanodeStartAdminHandler =
new StartDatanodeAdminHandler(scmNodeManager, pipelineManager);
ReadOnlyHealthyToHealthyNodeHandler readOnlyHealthyToHealthyNodeHandler =
- new ReadOnlyHealthyToHealthyNodeHandler(configuration, serviceManager);
+ new ReadOnlyHealthyToHealthyNodeHandler(serviceManager);
HealthyReadOnlyNodeHandler
healthyReadOnlyNodeHandler =
new HealthyReadOnlyNodeHandler(scmNodeManager,
@@ -495,7 +495,7 @@ private void initializeEventHandlers() {
new IncrementalContainerReportHandler(
scmNodeManager, containerManager, scmContext);
PipelineActionHandler pipelineActionHandler =
- new PipelineActionHandler(pipelineManager, scmContext, configuration);
+ new PipelineActionHandler(pipelineManager, scmContext);
eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager);
eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, scmNodeManager);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index d52283c517f4..244f86e79540 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -102,7 +102,6 @@ public class MockNodeManager implements NodeManager {
private final List deadNodes;
private final Map nodeMetricMap;
private final SCMNodeStat aggregateStat;
- private boolean safemode;
private final Map> commandMap;
private Node2PipelineMap node2PipelineMap;
private final Node2ContainerMap node2ContainerMap;
@@ -142,7 +141,6 @@ public MockNodeManager(NetworkTopologyImpl clusterMap,
populateNodeMetric(dd, x);
}
}
- safemode = false;
this.commandMap = new HashMap<>();
numHealthyDisksPerDatanode = 1;
numRaftLogDisksPerDatanode = 1;
@@ -169,7 +167,6 @@ public MockNodeManager(List nodes)
"be empty");
}
- safemode = false;
this.commandMap = new HashMap<>();
numHealthyDisksPerDatanode = 1;
numRaftLogDisksPerDatanode = 1;
@@ -205,7 +202,6 @@ public MockNodeManager(
" empty");
}
- safemode = false;
this.commandMap = new HashMap<>();
numHealthyDisksPerDatanode = 1;
numRaftLogDisksPerDatanode = 1;
@@ -242,15 +238,6 @@ private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) {
}
- /**
- * Sets the safe mode value.
- * @param safemode boolean
- */
- public void setSafemode(boolean safemode) {
- this.safemode = safemode;
- }
-
-
/**
* Gets all Live Datanodes that is currently communicating with SCM.
*
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
index c4f66267657d..da837db16502 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
@@ -49,7 +49,7 @@ public void testPipelineActionHandlerForValidPipeline() throws IOException {
final PipelineManager manager = mock(PipelineManager.class);
final EventQueue queue = mock(EventQueue.class);
final PipelineActionHandler actionHandler = new PipelineActionHandler(
- manager, SCMContext.emptyContext(), null);
+ manager, SCMContext.emptyContext());
final Pipeline pipeline = HddsTestUtils.getRandomPipeline();
actionHandler.onMessage(getPipelineActionsFromDatanode(
@@ -64,7 +64,7 @@ public void testPipelineActionHandlerForValidPipelineInFollower()
final EventQueue queue = mock(EventQueue.class);
final SCMContext context = SCMContext.emptyContext();
final PipelineActionHandler actionHandler = new PipelineActionHandler(
- manager, context, null);
+ manager, context);
final Pipeline pipeline = HddsTestUtils.getRandomPipeline();
context.updateLeaderAndTerm(false, 1);
@@ -80,7 +80,7 @@ public void testPipelineActionHandlerForUnknownPipeline() throws IOException {
final PipelineManager manager = mock(PipelineManager.class);
final EventQueue queue = mock(EventQueue.class);
final PipelineActionHandler actionHandler = new PipelineActionHandler(
- manager, SCMContext.emptyContext(), null);
+ manager, SCMContext.emptyContext());
final Pipeline pipeline = HddsTestUtils.getRandomPipeline();
doThrow(new PipelineNotFoundException())
@@ -99,7 +99,7 @@ public void testPipelineActionHandlerForUnknownPipelineInFollower()
final EventQueue queue = mock(EventQueue.class);
final SCMContext context = SCMContext.emptyContext();
final PipelineActionHandler actionHandler = new PipelineActionHandler(
- manager, context, null);
+ manager, context);
final Pipeline pipeline = HddsTestUtils.getRandomPipeline();
context.updateLeaderAndTerm(false, 1);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index ebbd5d12dfab..4007b7cd3058 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -868,7 +868,7 @@ public void testWaitForAllocatedPipeline()
doThrow(SCMException.class).when(pipelineManagerSpy)
.createPipeline(any(), any(), anyList());
provider = new WritableRatisContainerProvider(
- conf, pipelineManagerSpy, containerManager, pipelineChoosingPolicy);
+ pipelineManagerSpy, containerManager, pipelineChoosingPolicy);
// Add a single pipeline to manager, (in the allocated state)
allocatedPipeline = pipelineManager.createPipeline(repConfig);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java
index aa4ef1f3c37d..a1ba81d0a70a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java
@@ -157,7 +157,7 @@ private void throwWhenCreatePipeline() throws IOException {
}
private WritableRatisContainerProvider createSubject() {
- return new WritableRatisContainerProvider(conf,
+ return new WritableRatisContainerProvider(
pipelineManager, containerManager, policy);
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 05a091399177..64f903f45057 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -424,7 +424,7 @@ private RegisterEndpointTask getRegisterEndpointTask(boolean clearDatanodeDetail
when(versionManager.getSoftwareLayoutVersion())
.thenReturn(maxLayoutVersion());
RegisterEndpointTask endpointTask =
- new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer,
+ new RegisterEndpointTask(rpcEndPoint, ozoneContainer,
mock(StateContext.class), versionManager);
if (!clearDatanodeDetails) {
DatanodeDetails datanodeDetails = randomDatanodeDetails();
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
index d6d4b4ea9a95..30f426823806 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
@@ -135,7 +135,7 @@ public void setup() throws Exception {
containerSet = new ContainerSet(1000);
blockManager = new BlockManagerImpl(CONF);
- chunkManager = new FilePerBlockStrategy(true, blockManager, null);
+ chunkManager = new FilePerBlockStrategy(true, blockManager);
}
@BeforeAll
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index 836e021c944a..b3804f2589ae 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -27,7 +27,6 @@
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.io.Text;
@@ -64,7 +63,6 @@ public class ObjectStore {
private static final Logger LOG =
LoggerFactory.getLogger(ObjectStore.class);
- private final ConfigurationSource conf;
/**
* The proxy used for connecting to the cluster and perform
* client operations.
@@ -76,7 +74,6 @@ public class ObjectStore {
* Cache size to be used for listVolume calls.
*/
private int listCacheSize;
- private final String defaultS3Volume;
private BucketLayout s3BucketLayout;
/**
@@ -85,10 +82,8 @@ public class ObjectStore {
* @param proxy ClientProtocol proxy.
*/
public ObjectStore(ConfigurationSource conf, ClientProtocol proxy) {
- this.conf = conf;
this.proxy = TracingUtil.createProxy(proxy, ClientProtocol.class, conf);
this.listCacheSize = HddsClientUtils.getListCacheSize(conf);
- defaultS3Volume = HddsClientUtils.getDefaultS3VolumeName(conf);
s3BucketLayout = OmUtils.validateBucketLayout(
conf.getTrimmed(
OzoneConfigKeys.OZONE_S3G_DEFAULT_BUCKET_LAYOUT_KEY,
@@ -98,9 +93,7 @@ public ObjectStore(ConfigurationSource conf, ClientProtocol proxy) {
@VisibleForTesting
protected ObjectStore() {
// For the unit test
- this.conf = new OzoneConfiguration();
proxy = null;
- defaultS3Volume = HddsClientUtils.getDefaultS3VolumeName(conf);
}
@VisibleForTesting
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index 60dc354e0a28..9b5e727292bc 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -56,7 +56,6 @@ public class BlockDataStreamOutputEntryPool implements KeyMetadataAware {
private final OzoneManagerProtocol omClient;
private final OmKeyArgs keyArgs;
private final XceiverClientFactory xceiverClientFactory;
- private final String requestID;
private OmMultipartCommitUploadPartInfo commitUploadPartInfo;
private final long openID;
private final ExcludeList excludeList;
@@ -67,7 +66,7 @@ public class BlockDataStreamOutputEntryPool implements KeyMetadataAware {
public BlockDataStreamOutputEntryPool(
OzoneClientConfig config,
OzoneManagerProtocol omClient,
- String requestId, ReplicationConfig replicationConfig,
+ ReplicationConfig replicationConfig,
String uploadID, int partNumber,
boolean isMultipart, OmKeyInfo info,
boolean unsafeByteBufferConversion,
@@ -84,7 +83,6 @@ public BlockDataStreamOutputEntryPool(
.setIsMultipartKey(isMultipart).setMultipartUploadID(uploadID)
.setMultipartUploadPartNumber(partNumber)
.setSortDatanodesInPipeline(true).build();
- this.requestID = requestId;
this.openID = openID;
this.excludeList = createExcludeList();
this.bufferList = new ArrayList<>();
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
index 484b21352bf1..0128443caa31 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
@@ -128,7 +128,7 @@ public KeyDataStreamOutput(
new BlockDataStreamOutputEntryPool(
config,
omClient,
- requestId, replicationConfig,
+ replicationConfig,
uploadID, partNumber,
isMultipart, info,
unsafeByteBufferConversion,
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMNotLeaderException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMNotLeaderException.java
index b24fd5a20fc5..bd7caf0071d3 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMNotLeaderException.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMNotLeaderException.java
@@ -28,14 +28,12 @@
*/
public class OMNotLeaderException extends IOException {
- private final String currentPeerId;
private final String leaderPeerId;
private final String leaderAddress;
public OMNotLeaderException(RaftPeerId currentPeerId) {
super("OM:" + currentPeerId + " is not the leader. Could not " +
"determine the leader node.");
- this.currentPeerId = currentPeerId.toString();
this.leaderPeerId = null;
this.leaderAddress = null;
}
@@ -49,14 +47,12 @@ public OMNotLeaderException(RaftPeerId currentPeerId,
RaftPeerId suggestedLeaderPeerId, String suggestedLeaderAddress) {
super("OM:" + currentPeerId + " is not the leader. Suggested leader is" +
" OM:" + suggestedLeaderPeerId + "[" + suggestedLeaderAddress + "].");
- this.currentPeerId = currentPeerId.toString();
this.leaderPeerId = suggestedLeaderPeerId.toString();
this.leaderAddress = suggestedLeaderAddress;
}
public OMNotLeaderException(String msg) {
super(msg);
- this.currentPeerId = null;
this.leaderPeerId = null;
this.leaderAddress = null;
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java
index 5c8cd55ca859..8a0f62bfdd07 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/HadoopRpcOMFailoverProxyProvider.java
@@ -51,7 +51,6 @@ public class HadoopRpcOMFailoverProxyProvider extends
private final Text delegationTokenService;
private Map omProxyInfos;
- private List retryExceptions = new ArrayList<>();
// HadoopRpcOMFailoverProxyProvider, on encountering certain exception,
// tries each OM once in a round robin fashion. After that it waits
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
index 0116d56c6838..ed0cc62ce6ad 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProviderBase.java
@@ -66,7 +66,6 @@ public abstract class OMFailoverProxyProviderBase implements
LoggerFactory.getLogger(OMFailoverProxyProviderBase.class);
private final ConfigurationSource conf;
- private final String omServiceId;
private final Class protocolClass;
// Map of OMNodeID to its proxy
@@ -100,7 +99,6 @@ public OMFailoverProxyProviderBase(ConfigurationSource configuration,
this.conf = configuration;
this.protocolClass = protocol;
this.performFailoverDone = true;
- this.omServiceId = omServiceId;
this.ugi = ugi;
waitBetweenRetries = conf.getLong(
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
index 19d1a88213e1..13967fa2fba7 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/GrpcOmTransport.java
@@ -75,11 +75,9 @@ public class GrpcOmTransport implements OmTransport {
// gRPC specific
private static List caCerts = null;
- private OzoneManagerServiceGrpc.OzoneManagerServiceBlockingStub client;
private Map clients;
private Map channels;
- private int lastVisited = -1;
private ConfigurationSource conf;
private AtomicReference host;
@@ -91,7 +89,6 @@ public static void setCaCerts(List x509Certificates) {
caCerts = x509Certificates;
}
- private List oms;
private RetryPolicy retryPolicy;
private int failoverCount = 0;
private GrpcOMFailoverProxyProvider
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/Hadoop3OmTransport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/Hadoop3OmTransport.java
index d0fe51948258..52ee5b552d2e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/Hadoop3OmTransport.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/Hadoop3OmTransport.java
@@ -34,8 +34,6 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Full-featured Hadoop RPC implementation with failover support.
@@ -47,9 +45,6 @@ public class Hadoop3OmTransport implements OmTransport {
*/
private static final RpcController NULL_RPC_CONTROLLER = null;
- private static final Logger LOG =
- LoggerFactory.getLogger(Hadoop3OmTransport.class);
-
private final HadoopRpcOMFailoverProxyProvider omFailoverProxyProvider;
private final OzoneManagerProtocolPB rpcProxy;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OMInterServiceProtocolClientSideImpl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OMInterServiceProtocolClientSideImpl.java
index 31cc0f812e02..4cc650496d60 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OMInterServiceProtocolClientSideImpl.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OMInterServiceProtocolClientSideImpl.java
@@ -36,8 +36,6 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerInterServiceProtocolProtos.BootstrapOMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerInterServiceProtocolProtos.ErrorCode;
import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Protocol implementation for Inter OM communication.
@@ -50,9 +48,6 @@ public class OMInterServiceProtocolClientSideImpl implements
*/
private static final RpcController NULL_RPC_CONTROLLER = null;
- private static final Logger LOG =
- LoggerFactory.getLogger(OMInterServiceProtocolClientSideImpl.class);
-
private final HadoopRpcOMFailoverProxyProvider omFailoverProxyProvider;
private final OMInterServiceProtocolPB rpcProxy;
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
index f07e71785c13..9541635baca7 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
@@ -48,9 +48,6 @@ public class TestOzoneAclUtil {
private static final OzoneAcl USER1 = new OzoneAcl(USER, "user1",
ACCESS, ACLType.READ_ACL);
- private static final OzoneAcl USER2 = new OzoneAcl(USER, "user2",
- ACCESS, ACLType.WRITE);
-
private static final OzoneAcl GROUP1 = new OzoneAcl(GROUP, "group1",
ACCESS, ACLType.ALL);
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java
index eef48daa3c9d..91d5d23ee3ea 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java
@@ -19,16 +19,12 @@
import java.nio.ByteBuffer;
import org.apache.commons.lang3.RandomUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Random load generator which writes, read and deletes keys from
* the bucket.
*/
public class RandomLoadGenerator extends LoadGenerator {
- private static final Logger LOG =
- LoggerFactory.getLogger(RandomLoadGenerator.class);
private final LoadBucket ozoneBucket;
private final DataBuffer dataBuffer;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
index 8ca6929e3c2c..c9be029a805a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
@@ -28,15 +28,11 @@
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Test append -if supported.
*/
public abstract class AbstractContractAppendTest extends AbstractFSContractTestBase {
- private static final Logger LOG =
- LoggerFactory.getLogger(AbstractContractAppendTest.class);
private Path testPath;
private Path target;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
index 6a12f3d41932..08659c9f9f6f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
@@ -28,15 +28,11 @@
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Test concat -if supported.
*/
public abstract class AbstractContractConcatTest extends AbstractFSContractTestBase {
- private static final Logger LOG =
- LoggerFactory.getLogger(AbstractContractConcatTest.class);
private Path testPath;
private Path srcFile;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
index e3948a1bd07b..5cd71e7ecc1d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
@@ -23,16 +23,12 @@
import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Test setTimes -if supported.
*/
public abstract class AbstractContractSetTimesTest extends
AbstractFSContractTestBase {
- private static final Logger LOG =
- LoggerFactory.getLogger(AbstractContractSetTimesTest.class);
private Path testPath;
private Path target;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index 0cb8e77e27ac..fda3e47c64af 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -189,7 +189,7 @@ public void testPipelineCloseWithPipelineAction() throws Exception {
// send closing action for pipeline
final PipelineActionHandler pipelineActionHandler =
new PipelineActionHandler(pipelineManager,
- SCMContext.emptyContext(), conf);
+ SCMContext.emptyContext());
pipelineActionHandler.onMessage(
pipelineActionsFromDatanode, new EventQueue());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 1614596eb0d8..2ad7391edf81 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -46,7 +46,6 @@
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
-import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
@@ -124,7 +123,6 @@ public class TestContainerStateMachineFailures {
private static String volumeName;
private static String bucketName;
private static XceiverClientManager xceiverClientManager;
- private static Random random;
/**
* Create a MiniDFSCluster for testing.
@@ -184,7 +182,6 @@ public static void init() throws Exception {
bucketName = volumeName;
objectStore.createVolume(volumeName);
objectStore.getVolume(volumeName).createBucket(bucketName);
- random = new Random();
}
/**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
index 56583f80dd64..9cf9d252c48e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
@@ -46,7 +46,6 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -81,7 +80,6 @@ public class TestValidateBCSIDOnRestart {
private static ObjectStore objectStore;
private static String volumeName;
private static String bucketName;
- private static XceiverClientManager xceiverClientManager;
/**
* Create a MiniDFSCluster for testing.
@@ -133,7 +131,6 @@ public static void init() throws Exception {
//the easiest way to create an open container is creating a key
client = OzoneClientFactory.getRpcClient(conf);
objectStore = client.getObjectStore();
- xceiverClientManager = new XceiverClientManager(conf);
volumeName = "testcontainerstatemachinefailures";
bucketName = volumeName;
objectStore.createVolume(volumeName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
index bebc639c57fe..d1b842e680d4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java
@@ -72,8 +72,6 @@
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Tests the EC recovery and over replication processing.
@@ -92,8 +90,6 @@ public class TestECContainerRecovery {
private static int dataBlocks = 3;
private static byte[][] inputChunks = new byte[dataBlocks][chunkSize];
- private static final Logger LOG =
- LoggerFactory.getLogger(TestECContainerRecovery.class);
/**
* Create a MiniDFSCluster for testing.
*/
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
index 0624c0e56d1a..021bb251d7c2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java
@@ -77,8 +77,6 @@
*/
public class TestOzoneDebugShell {
- private static String omServiceId;
-
private static MiniOzoneCluster cluster = null;
private static OzoneClient client;
private static OzoneDebug ozoneDebugShell;
@@ -87,7 +85,6 @@ public class TestOzoneDebugShell {
protected static void startCluster() throws Exception {
// Init HA cluster
- omServiceId = "om-service-test1";
final int numDNs = 5;
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(numDNs)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
index 7ba450917761..c41bf75fdc03 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
@@ -150,8 +150,6 @@ protected int getTestTimeoutMillis() {
private Path outputFile4;
- private Path outputFile5;
-
private Path inputDirUnderOutputDir;
@Override
@@ -218,7 +216,6 @@ protected void initOutputFields(final Path path) {
outputFile3 = new Path(outputSubDir2, "file3");
outputSubDir4 = new Path(inputDirUnderOutputDir, "subDir4/subDir4");
outputFile4 = new Path(outputSubDir4, "file4");
- outputFile5 = new Path(outputSubDir4, "file5");
}
/**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
index abf89343f440..3334925a957a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
@@ -148,9 +148,7 @@ public void init(OzoneManagerProtocolServerSideTranslatorPB omTranslator,
.channelType(NioServerSocketChannel.class)
.executor(readExecutors)
.addService(ServerInterceptors.intercept(
- new OzoneManagerServiceGrpc(omTranslator,
- delegationTokenMgr,
- omServerConfig),
+ new OzoneManagerServiceGrpc(omTranslator),
new ClientAddressServerInterceptor(),
new GrpcMetricsServerResponseInterceptor(omS3gGrpcMetrics),
new GrpcMetricsServerRequestInterceptor(omS3gGrpcMetrics)))
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
index 1249eee04927..1bd739c730dd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
@@ -93,7 +93,6 @@ public class OMDBCheckpointServlet extends DBCheckpointServlet {
private static final long serialVersionUID = 1L;
private transient BootstrapStateHandler.Lock lock;
private long maxTotalSstSize = 0;
- private static final AtomicLong PAUSE_COUNTER = new AtomicLong(0);
@Override
public void init() throws ServletException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 79e688f03363..d448417b6600 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -311,7 +311,6 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
private Table snapshotRenamedTable;
private Table compactionLogTable;
- private boolean ignorePipelineinKey;
private Table deletedDirTable;
private OzoneManager ozoneManager;
@@ -349,9 +348,6 @@ public OmMetadataManagerImpl(OzoneConfiguration conf,
}
this.lock = new OzoneManagerLock(conf);
this.omEpoch = OmUtils.getOMEpoch();
- // For test purpose only
- ignorePipelineinKey = conf.getBoolean(
- "ozone.om.ignore.pipeline", Boolean.TRUE);
start(conf);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java
index 7c79c5340449..50e430471690 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerServiceGrpc.java
@@ -21,15 +21,12 @@
import io.grpc.Status;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.security.SecurityConfig;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerServiceGrpc.OzoneManagerServiceImplBase;
import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
-import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager;
import org.apache.hadoop.ozone.util.UUIDUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -45,16 +42,10 @@ public class OzoneManagerServiceGrpc extends OzoneManagerServiceImplBase {
*/
private static final RpcController NULL_RPC_CONTROLLER = null;
private final OzoneManagerProtocolServerSideTranslatorPB omTranslator;
- private final OzoneDelegationTokenSecretManager delegationTokenMgr;
- private final SecurityConfig secConfig;
OzoneManagerServiceGrpc(
- OzoneManagerProtocolServerSideTranslatorPB omTranslator,
- OzoneDelegationTokenSecretManager delegationTokenMgr,
- OzoneConfiguration configuration) {
+ OzoneManagerProtocolServerSideTranslatorPB omTranslator) {
this.omTranslator = omTranslator;
- this.delegationTokenMgr = delegationTokenMgr;
- this.secConfig = new SecurityConfig(configuration);
}
@Override
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java
index 35c40947b556..e2fb89e16199 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerUtils.java
@@ -31,8 +31,6 @@
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
import org.apache.hadoop.security.token.Token;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Ozone Manager utility class.
@@ -42,9 +40,6 @@ public final class OzoneManagerUtils {
private OzoneManagerUtils() {
}
- private static final Logger LOG = LoggerFactory
- .getLogger(OzoneManagerUtils.class);
-
/**
* All the client requests are executed through
* OzoneManagerStateMachine#runCommand function and ensures sequential
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index f025db64ea9b..651d44ad5f87 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@ -71,8 +71,6 @@ public class TrashOzoneFileSystem extends FileSystem {
private final OzoneManager ozoneManager;
- private final String userName;
-
private final AtomicLong runCount;
private static final ClientId CLIENT_ID = ClientId.randomId();
@@ -84,8 +82,6 @@ public class TrashOzoneFileSystem extends FileSystem {
public TrashOzoneFileSystem(OzoneManager ozoneManager) throws IOException {
this.ozoneManager = ozoneManager;
- this.userName =
- UserGroupInformation.getCurrentUser().getShortUserName();
this.runCount = new AtomicLong(0);
setConf(ozoneManager.getConfiguration());
ozoneConfiguration = OzoneConfiguration.of(getConf());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 627d333e3721..7a2c66e81d2d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -24,7 +24,6 @@
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
-import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import java.io.File;
import java.io.IOException;
@@ -117,7 +116,6 @@
public final class OzoneManagerRatisUtils {
private static final Logger LOG = LoggerFactory
.getLogger(OzoneManagerRatisUtils.class);
- private static final RpcController NULL_RPC_CONTROLLER = null;
private OzoneManagerRatisUtils() {
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
index 24a44fd5ef04..738fb860fbbe 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java
@@ -39,8 +39,6 @@
import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponseWithFSO;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Handle Multipart upload complete request.
@@ -48,9 +46,6 @@
public class S3MultipartUploadCompleteRequestWithFSO
extends S3MultipartUploadCompleteRequest {
- private static final Logger LOG =
- LoggerFactory.getLogger(S3MultipartUploadCompleteRequestWithFSO.class);
-
public S3MultipartUploadCompleteRequestWithFSO(OMRequest omRequest,
BucketLayout bucketLayout) {
super(omRequest, bucketLayout);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java
index c892cbd50eb6..f446eee769e6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java
@@ -40,8 +40,6 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveDeletedKeysRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Handles OMSnapshotMoveDeletedKeys Request.
@@ -49,9 +47,6 @@
*/
public class OMSnapshotMoveDeletedKeysRequest extends OMClientRequest {
- private static final Logger LOG =
- LoggerFactory.getLogger(OMSnapshotMoveDeletedKeysRequest.class);
-
public OMSnapshotMoveDeletedKeysRequest(OMRequest omRequest) {
super(omRequest);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java
index 04eea15e93d4..0ac0257c4173 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java
@@ -45,8 +45,6 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Handles OMSnapshotMoveTableKeysRequest Request.
@@ -54,8 +52,6 @@
*/
public class OMSnapshotMoveTableKeysRequest extends OMClientRequest {
- private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotMoveTableKeysRequest.class);
-
public OMSnapshotMoveTableKeysRequest(OMRequest omRequest) {
super(omRequest);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
index d5776af37765..74ccae327b13 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java
@@ -30,17 +30,12 @@
import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Defines common methods required for volume requests.
*/
public abstract class OMVolumeRequest extends OMClientRequest {
- private static final Logger LOG =
- LoggerFactory.getLogger(OMVolumeRequest.class);
-
public OMVolumeRequest(OMRequest omRequest) {
super(omRequest);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java
index da776e496dff..e73d5ef4253e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java
@@ -58,7 +58,6 @@ public class SnapshotDiffCleanupService extends BackgroundService {
private final AtomicBoolean suspended;
private final AtomicLong runCount;
private final AtomicLong successRunCount;
- private final OzoneManager ozoneManager;
private final ManagedRocksDB db;
private final ColumnFamilyHandle snapDiffJobCfh;
private final ColumnFamilyHandle snapDiffPurgedJobCfh;
@@ -91,7 +90,6 @@ public SnapshotDiffCleanupService(long interval,
this.suspended = new AtomicBoolean(false);
this.runCount = new AtomicLong(0);
this.successRunCount = new AtomicLong(0);
- this.ozoneManager = ozoneManager;
this.db = db;
this.snapDiffJobCfh = snapDiffJobCfh;
this.snapDiffPurgedJobCfh = snapDiffPurgedJobCfh;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
index 43cb7593437d..10f3a7b98029 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
@@ -65,16 +65,6 @@ public class TestOzoneTokenIdentifier {
private static final Logger LOG = LoggerFactory
.getLogger(TestOzoneTokenIdentifier.class);
- private static String sslConfsDir;
- private static final String EXCLUDE_CIPHERS =
- "TLS_ECDHE_RSA_WITH_RC4_128_SHA,"
- + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n"
- + "SSL_RSA_WITH_DES_CBC_SHA,"
- + "SSL_DHE_RSA_WITH_DES_CBC_SHA, "
- + "SSL_RSA_EXPORT_WITH_RC4_40_MD5,\t \n"
- + "SSL_RSA_EXPORT_WITH_DES40_CBC_SHA,"
- + "SSL_RSA_WITH_RC4_128_MD5";
-
@Test
public void testSignToken(@TempDir Path baseDir) throws GeneralSecurityException, IOException {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
index ee24592830de..38038acb6e17 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
@@ -25,7 +25,6 @@
import java.util.List;
import java.util.Objects;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.OmConfig;
@@ -54,15 +53,11 @@ public abstract class BucketHandler {
private final ReconOMMetadataManager omMetadataManager;
- private final ContainerManager containerManager;
-
public BucketHandler(
ReconNamespaceSummaryManager reconNamespaceSummaryManager,
- ReconOMMetadataManager omMetadataManager,
- OzoneStorageContainerManager reconSCM) {
+ ReconOMMetadataManager omMetadataManager) {
this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
this.omMetadataManager = omMetadataManager;
- this.containerManager = reconSCM.getContainerManager();
}
public ReconOMMetadataManager getOmMetadataManager() {
@@ -174,22 +169,22 @@ public static BucketHandler getBucketHandler(
if (bucketInfo.getBucketLayout()
.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
return new FSOBucketHandler(reconNamespaceSummaryManager,
- omMetadataManager, reconSCM, bucketInfo);
+ omMetadataManager, bucketInfo);
} else if (bucketInfo.getBucketLayout().equals(BucketLayout.LEGACY)) {
// Choose handler based on enableFileSystemPaths flag for legacy layout.
// If enableFileSystemPaths is false, then the legacy bucket is treated
// as an OBS bucket.
if (enableFileSystemPaths) {
return new LegacyBucketHandler(reconNamespaceSummaryManager,
- omMetadataManager, reconSCM, bucketInfo);
+ omMetadataManager, bucketInfo);
} else {
return new OBSBucketHandler(reconNamespaceSummaryManager,
- omMetadataManager, reconSCM, bucketInfo);
+ omMetadataManager, bucketInfo);
}
} else if (bucketInfo.getBucketLayout()
.equals(BucketLayout.OBJECT_STORE)) {
return new OBSBucketHandler(reconNamespaceSummaryManager,
- omMetadataManager, reconSCM, bucketInfo);
+ omMetadataManager, bucketInfo);
} else {
LOG.error("Unsupported bucket layout: " +
bucketInfo.getBucketLayout());
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
index 9c94ffea210f..845e27b5bde6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
@@ -25,7 +25,6 @@
import java.util.Iterator;
import java.util.List;
import java.util.Set;
-import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -37,25 +36,19 @@
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Class for handling FSO buckets NameSpaceSummaries.
*/
public class FSOBucketHandler extends BucketHandler {
- private static final Logger LOG =
- LoggerFactory.getLogger(FSOBucketHandler.class);
private final long volumeId;
private final long bucketId;
public FSOBucketHandler(
ReconNamespaceSummaryManager reconNamespaceSummaryManager,
ReconOMMetadataManager omMetadataManager,
- OzoneStorageContainerManager reconSCM,
OmBucketInfo bucketInfo) throws IOException {
- super(reconNamespaceSummaryManager, omMetadataManager,
- reconSCM);
+ super(reconNamespaceSummaryManager, omMetadataManager);
String vol = bucketInfo.getVolumeName();
String bucket = bucketInfo.getBucketName();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
index da197ee00970..1673d76282fb 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
@@ -23,7 +23,6 @@
import java.io.IOException;
import java.util.List;
import java.util.Set;
-import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -36,17 +35,12 @@
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Class for handling Legacy buckets NameSpaceSummaries.
*/
public class LegacyBucketHandler extends BucketHandler {
- private static final Logger LOG = LoggerFactory.getLogger(
- LegacyBucketHandler.class);
-
private final String vol;
private final String bucket;
private final OmBucketInfo omBucketInfo;
@@ -54,10 +48,8 @@ public class LegacyBucketHandler extends BucketHandler {
public LegacyBucketHandler(
ReconNamespaceSummaryManager reconNamespaceSummaryManager,
ReconOMMetadataManager omMetadataManager,
- OzoneStorageContainerManager reconSCM,
OmBucketInfo bucketInfo) {
- super(reconNamespaceSummaryManager, omMetadataManager,
- reconSCM);
+ super(reconNamespaceSummaryManager, omMetadataManager);
this.omBucketInfo = bucketInfo;
this.vol = omBucketInfo.getVolumeName();
this.bucket = omBucketInfo.getBucketName();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java
index 870f030219f9..8b535f626f9b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java
@@ -21,7 +21,6 @@
import java.io.IOException;
import java.util.List;
-import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -46,10 +45,8 @@ public class OBSBucketHandler extends BucketHandler {
public OBSBucketHandler(
ReconNamespaceSummaryManager reconNamespaceSummaryManager,
ReconOMMetadataManager omMetadataManager,
- OzoneStorageContainerManager reconSCM,
OmBucketInfo bucketInfo) {
- super(reconNamespaceSummaryManager, omMetadataManager,
- reconSCM);
+ super(reconNamespaceSummaryManager, omMetadataManager);
this.omBucketInfo = bucketInfo;
this.vol = omBucketInfo.getVolumeName();
this.bucket = omBucketInfo.getBucketName();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
index c14bdae4535b..0553f87bca88 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
@@ -50,7 +50,6 @@ public class ReconSafeModeMgrTask {
private ReconSafeModeManager safeModeManager;
private List<DatanodeDetails> allNodes;
private List<ContainerInfo> containers;
- private OzoneConfiguration ozoneConfiguration;
private final long interval;
private final long dnHBInterval;
@@ -65,7 +64,6 @@ public ReconSafeModeMgrTask(
this.nodeManager = nodeManager;
this.allNodes = nodeManager.getAllNodes();
this.containers = containerManager.getContainers();
- this.ozoneConfiguration = ozoneConfiguration;
interval = reconTaskConfig.getSafeModeWaitThreshold().toMillis();
dnHBInterval = ozoneConfiguration.getTimeDuration(HDDS_HEARTBEAT_INTERVAL,
HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapServiceImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapServiceImpl.java
index d10ae7fb7cad..1e2c5fd883dc 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapServiceImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapServiceImpl.java
@@ -58,7 +58,7 @@ public HeatMapServiceImpl(OzoneConfiguration ozoneConfiguration,
this.reconSCM = reconSCM;
heatMapUtil =
new HeatMapUtil(reconNamespaceSummaryManager, omMetadataManager,
- reconSCM, ozoneConfiguration);
+ reconSCM);
initializeProvider();
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
index e7534cdc6904..e1cb7bd4061b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
@@ -26,7 +26,6 @@
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.collections.CollectionUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
@@ -44,7 +43,6 @@
public class HeatMapUtil {
private static final Logger LOG =
LoggerFactory.getLogger(HeatMapUtil.class);
- private OzoneConfiguration ozoneConfiguration;
private final ReconNamespaceSummaryManager reconNamespaceSummaryManager;
private final ReconOMMetadataManager omMetadataManager;
private final OzoneStorageContainerManager reconSCM;
@@ -53,12 +51,10 @@ public class HeatMapUtil {
public HeatMapUtil(ReconNamespaceSummaryManager
namespaceSummaryManager,
ReconOMMetadataManager omMetadataManager,
- OzoneStorageContainerManager reconSCM,
- OzoneConfiguration ozoneConfiguration) {
+ OzoneStorageContainerManager reconSCM) {
this.reconNamespaceSummaryManager = namespaceSummaryManager;
this.omMetadataManager = omMetadataManager;
this.reconSCM = reconSCM;
- this.ozoneConfiguration = ozoneConfiguration;
}
private long getEntitySize(String path) throws IOException {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
index 5d72eb27fb99..adbe00b8fb9b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
@@ -24,17 +24,12 @@
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Recon's container report handler.
*/
public class ReconContainerReportHandler extends ContainerReportHandler {
- private static final Logger LOG =
- LoggerFactory.getLogger(ReconContainerReportHandler.class);
-
public ReconContainerReportHandler(NodeManager nodeManager,
ContainerManager containerManager) {
super(nodeManager, containerManager);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java
index 44308aa6cd09..56441a37c5ba 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java
@@ -28,7 +28,6 @@
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.recon.fsck.ContainerHealthTask;
import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
-import org.apache.hadoop.ozone.recon.tasks.ContainerSizeCountTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -43,22 +42,17 @@ public class ReconDeadNodeHandler extends DeadNodeHandler {
private StorageContainerServiceProvider scmClient;
private ContainerHealthTask containerHealthTask;
private PipelineSyncTask pipelineSyncTask;
- private ContainerSizeCountTask containerSizeCountTask;
- private ContainerManager containerManager;
public ReconDeadNodeHandler(NodeManager nodeManager,
PipelineManager pipelineManager,
ContainerManager containerManager,
StorageContainerServiceProvider scmClient,
ContainerHealthTask containerHealthTask,
- PipelineSyncTask pipelineSyncTask,
- ContainerSizeCountTask containerSizeCountTask) {
+ PipelineSyncTask pipelineSyncTask) {
super(nodeManager, pipelineManager, containerManager);
this.scmClient = scmClient;
- this.containerManager = containerManager;
this.containerHealthTask = containerHealthTask;
this.pipelineSyncTask = pipelineSyncTask;
- this.containerSizeCountTask = containerSizeCountTask;
}
@Override
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStaleNodeHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStaleNodeHandler.java
index c4d1d96c4c81..6a2591667e90 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStaleNodeHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStaleNodeHandler.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.ozone.recon.scm;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.StaleNodeHandler;
@@ -37,9 +36,8 @@ public class ReconStaleNodeHandler extends StaleNodeHandler {
public ReconStaleNodeHandler(NodeManager nodeManager,
PipelineManager pipelineManager,
- OzoneConfiguration conf,
PipelineSyncTask pipelineSyncTask) {
- super(nodeManager, pipelineManager, conf);
+ super(nodeManager, pipelineManager);
this.pipelineSyncTask = pipelineSyncTask;
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index ff7a56d7979a..165306081e3e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -263,7 +263,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf,
pipelineManager, scmContext, conf, scmServiceProvider);
PipelineActionHandler pipelineActionHandler =
- new PipelineActionHandler(pipelineManager, scmContext, conf);
+ new PipelineActionHandler(pipelineManager, scmContext);
ReconTaskConfig reconTaskConfig = conf.getObject(ReconTaskConfig.class);
PipelineSyncTask pipelineSyncTask = new PipelineSyncTask(pipelineManager, nodeManager,
@@ -273,17 +273,16 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf,
containerHealthSchemaManager, containerPlacementPolicy,
reconTaskConfig, reconContainerMetadataManager, conf, taskStatusUpdaterManager);
- this.containerSizeCountTask = new ContainerSizeCountTask(containerManager, scmServiceProvider,
+ this.containerSizeCountTask = new ContainerSizeCountTask(containerManager,
reconTaskConfig, containerCountBySizeDao, utilizationSchemaDefinition, taskStatusUpdaterManager);
this.dataSource = dataSource;
StaleNodeHandler staleNodeHandler =
- new ReconStaleNodeHandler(nodeManager, pipelineManager, conf,
- pipelineSyncTask);
+ new ReconStaleNodeHandler(nodeManager, pipelineManager, pipelineSyncTask);
DeadNodeHandler deadNodeHandler = new ReconDeadNodeHandler(nodeManager,
pipelineManager, containerManager, scmServiceProvider,
- containerHealthTask, pipelineSyncTask, containerSizeCountTask);
+ containerHealthTask, pipelineSyncTask);
ContainerReportHandler containerReportHandler =
new ReconContainerReportHandler(nodeManager, containerManager);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java
index 16452d5ef5da..da8002e88c04 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java
@@ -32,10 +32,8 @@
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.scm.ReconScmTask;
-import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater;
import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager;
-import org.apache.ozone.recon.schema.ContainerSchemaDefinition;
import org.apache.ozone.recon.schema.UtilizationSchemaDefinition;
import org.apache.ozone.recon.schema.generated.tables.daos.ContainerCountBySizeDao;
import org.apache.ozone.recon.schema.generated.tables.pojos.ContainerCountBySize;
@@ -54,26 +52,21 @@ public class ContainerSizeCountTask extends ReconScmTask {
private static final Logger LOG =
LoggerFactory.getLogger(ContainerSizeCountTask.class);
- private StorageContainerServiceProvider scmClient;
private ContainerManager containerManager;
private final long interval;
private ContainerCountBySizeDao containerCountBySizeDao;
private DSLContext dslContext;
private HashMap<ContainerID, Long> processedContainers = new HashMap<>();
- private Map<ContainerSchemaDefinition.UnHealthyContainerStates, Map<String, Long>>
- unhealthyContainerStateStatsMap;
private ReadWriteLock lock = new ReentrantReadWriteLock(true);
private final ReconTaskStatusUpdater taskStatusUpdater;
public ContainerSizeCountTask(
ContainerManager containerManager,
- StorageContainerServiceProvider scmClient,
ReconTaskConfig reconTaskConfig,
ContainerCountBySizeDao containerCountBySizeDao,
UtilizationSchemaDefinition utilizationSchemaDefinition,
ReconTaskStatusUpdaterManager taskStatusUpdaterManager) {
super(taskStatusUpdaterManager);
- this.scmClient = scmClient;
this.containerManager = containerManager;
this.containerCountBySizeDao = containerCountBySizeDao;
this.dslContext = utilizationSchemaDefinition.getDSLContext();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
index 2b3445dce9a0..aa0193dc1fb5 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java
@@ -90,13 +90,12 @@ public NSSummaryTask(ReconNamespaceSummaryManager
this.nsSummaryTaskWithFSO = new NSSummaryTaskWithFSO(
reconNamespaceSummaryManager, reconOMMetadataManager,
- ozoneConfiguration, nsSummaryFlushToDBMaxThreshold);
+ nsSummaryFlushToDBMaxThreshold);
this.nsSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy(
reconNamespaceSummaryManager, reconOMMetadataManager,
ozoneConfiguration, nsSummaryFlushToDBMaxThreshold);
this.nsSummaryTaskWithOBS = new NSSummaryTaskWithOBS(
- reconNamespaceSummaryManager, reconOMMetadataManager,
- ozoneConfiguration, nsSummaryFlushToDBMaxThreshold);
+ reconNamespaceSummaryManager, reconOMMetadataManager, nsSummaryFlushToDBMaxThreshold);
}
@Override
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
index 4b0b8514904d..23bee1574d98 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
@@ -17,12 +17,9 @@
package org.apache.hadoop.ozone.recon.tasks;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
-import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT;
import java.io.IOException;
import java.util.Map;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -45,19 +42,12 @@ public class NSSummaryTaskDbEventHandler {
private ReconNamespaceSummaryManager reconNamespaceSummaryManager;
private ReconOMMetadataManager reconOMMetadataManager;
- private final long nsSummaryFlushToDBMaxThreshold;
-
public NSSummaryTaskDbEventHandler(ReconNamespaceSummaryManager
reconNamespaceSummaryManager,
ReconOMMetadataManager
- reconOMMetadataManager,
- OzoneConfiguration
- ozoneConfiguration) {
+ reconOMMetadataManager) {
this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
this.reconOMMetadataManager = reconOMMetadataManager;
- nsSummaryFlushToDBMaxThreshold = ozoneConfiguration.getLong(
- OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD,
- OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT);
}
public ReconNamespaceSummaryManager getReconNamespaceSummaryManager() {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java
index 6ebc36331a5e..9de9bfaab38c 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java
@@ -28,7 +28,6 @@
import java.util.Map;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -55,11 +54,9 @@ public NSSummaryTaskWithFSO(ReconNamespaceSummaryManager
reconNamespaceSummaryManager,
ReconOMMetadataManager
reconOMMetadataManager,
- OzoneConfiguration
- ozoneConfiguration,
long nsSummaryFlushToDBMaxThreshold) {
super(reconNamespaceSummaryManager,
- reconOMMetadataManager, ozoneConfiguration);
+ reconOMMetadataManager);
this.nsSummaryFlushToDBMaxThreshold = nsSummaryFlushToDBMaxThreshold;
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
index a14600391765..1fa5a272f833 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
@@ -64,7 +64,7 @@ public NSSummaryTaskWithLegacy(ReconNamespaceSummaryManager
ozoneConfiguration,
long nsSummaryFlushToDBMaxThreshold) {
super(reconNamespaceSummaryManager,
- reconOMMetadataManager, ozoneConfiguration);
+ reconOMMetadataManager);
// true if FileSystemPaths enabled
enableFileSystemPaths = ozoneConfiguration
.getBoolean(OmConfig.Keys.ENABLE_FILESYSTEM_PATHS,
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java
index e15cc2836fb2..1c53e6271128 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java
@@ -25,7 +25,6 @@
import java.util.Map;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -55,10 +54,9 @@ public class NSSummaryTaskWithOBS extends NSSummaryTaskDbEventHandler {
public NSSummaryTaskWithOBS(
ReconNamespaceSummaryManager reconNamespaceSummaryManager,
ReconOMMetadataManager reconOMMetadataManager,
- OzoneConfiguration ozoneConfiguration,
long nsSummaryFlushToDBMaxThreshold) {
super(reconNamespaceSummaryManager,
- reconOMMetadataManager, ozoneConfiguration);
+ reconOMMetadataManager);
this.nsSummaryFlushToDBMaxThreshold = nsSummaryFlushToDBMaxThreshold;
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index 264f3d38e5bb..9353f406a779 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -477,7 +477,7 @@ public void testGetKeysForContainer() throws IOException {
setUpFSOData();
NSSummaryTaskWithFSO nSSummaryTaskWithFso =
new NSSummaryTaskWithFSO(reconNamespaceSummaryManager,
- reconOMMetadataManager, new OzoneConfiguration(), 10);
+ reconOMMetadataManager, 10);
nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
// Reprocess the container key mapper to ensure the latest mapping is used
reprocessContainerKeyMapper();
@@ -565,7 +565,7 @@ public void testGetKeysForContainerWithPrevKey() throws IOException {
reprocessContainerKeyMapper();
NSSummaryTaskWithFSO nSSummaryTaskWithFso =
new NSSummaryTaskWithFSO(reconNamespaceSummaryManager,
- reconOMMetadataManager, new OzoneConfiguration(), 10);
+ reconOMMetadataManager, 10);
nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
response = containerEndpoint.getKeysForContainer(20L, -1, "/0/1/2/file7");
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java
index ab43d6ae23e0..be2b0a799547 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestDeletedKeysSearchEndpoint.java
@@ -80,7 +80,6 @@ public class TestDeletedKeysSearchEndpoint extends AbstractReconSqlDBTest {
private ReconOMMetadataManager reconOMMetadataManager;
private OMDBInsightEndpoint omdbInsightEndpoint;
private OzoneConfiguration ozoneConfiguration;
- private static final String ROOT_PATH = "/";
private OMMetadataManager omMetadataManager;
@BeforeEach
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java
index 84d0807a3404..3fffbd62c8d6 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java
@@ -106,7 +106,7 @@ public void setUp() throws Exception {
populateOMDB();
NSSummaryTaskWithFSO nSSummaryTaskWithFso =
new NSSummaryTaskWithFSO(reconNamespaceSummaryManager,
- reconOMMetadataManager, ozoneConfiguration, 10);
+ reconOMMetadataManager, 10);
nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
index 77d5bb69a57a..d5ebf03a8e1c 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
@@ -389,7 +389,7 @@ public void setUp() throws Exception {
populateOMDB();
NSSummaryTaskWithFSO nSSummaryTaskWithFso =
new NSSummaryTaskWithFSO(reconNamespaceSummaryManager,
- reconOMMetadataManager, ozoneConfiguration, 10);
+ reconOMMetadataManager, 10);
nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
commonUtils = new CommonUtils();
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
index a162f48d8258..96fb41272f39 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
@@ -304,18 +304,10 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_ONE;
private static final String KEY_TWO_PATH =
ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_TWO;
- private static final String KEY_THREE_PATH =
- ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE;
- private static final String KEY_FOUR_PATH =
- ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR;
private static final String KEY_FIVE_PATH =
ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FIVE;
private static final String KEY_EIGHT_PATH =
ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_EIGHT;
- private static final String KEY_NINE_PATH =
- ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_NINE;
- private static final String KEY_TEN_PATH =
- ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_TEN;
private static final String KEY_ELEVEN_PATH =
ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR + ROOT_PATH + KEY_ELEVEN;
private static final String KEY4_PATH =
@@ -382,7 +374,7 @@ public void setUp() throws Exception {
populateOMDB();
NSSummaryTaskWithOBS nsSummaryTaskWithOBS =
new NSSummaryTaskWithOBS(reconNamespaceSummaryManager,
- reconOMMetadataManager, conf, 10);
+ reconOMMetadataManager, 10);
nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager);
NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy =
new NSSummaryTaskWithLegacy(reconNamespaceSummaryManager,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
index c251ccd8c27c..aea63b41000e 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
@@ -153,13 +153,6 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest {
private static final String KEY_TWELVE = "key12";
private static final String KEY_THIRTEEN = "dir4/dir7/key13";
- private static final String KEY_FOURTEEN = "dir8/key14";
- private static final String KEY_FIFTEEN = "dir8/key15";
- private static final String KEY_SIXTEEN = "dir9/key16";
- private static final String KEY_SEVENTEEN = "dir9/key17";
- private static final String KEY_EIGHTEEN = "dir8/key18";
- private static final String KEY_NINETEEN = "dir8/key19";
-
private static final String FILE_EIGHT = "key8";
private static final String FILE_NINE = "key9";
private static final String FILE_TEN = "key10";
@@ -167,13 +160,6 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest {
private static final String FILE_TWELVE = "key12";
private static final String FILE_THIRTEEN = "key13";
- private static final String FILE_FOURTEEN = "key14";
- private static final String FILE_FIFTEEN = "key15";
- private static final String FILE_SIXTEEN = "key16";
- private static final String FILE_SEVENTEEN = "key17";
- private static final String FILE_EIGHTEEN = "key18";
- private static final String FILE_NINETEEN = "key19";
-
private static final long PARENT_OBJECT_ID_ZERO = 0L;
private static final long VOLUME_ONE_OBJECT_ID = 1L;
@@ -217,7 +203,6 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest {
private static final long KEY_TWENTY_TWO_OBJECT_ID = 37L;
private static final long KEY_TWENTY_THREE_OBJECT_ID = 38L;
private static final long KEY_TWENTY_FOUR_OBJECT_ID = 39L;
- private static final long KEY_TWENTY_FIVE_OBJECT_ID = 42L;
private static final long EMPTY_OBS_BUCKET_OBJECT_ID = 40L;
private static final long EMPTY_FSO_BUCKET_OBJECT_ID = 41L;
@@ -243,7 +228,6 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest {
private static final long KEY_SEVENTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
private static final long KEY_EIGHTEEN_SIZE = OzoneConsts.KB + 1; // bin 1
private static final long KEY_NINETEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
- private static final long KEY_TWENTY_SIZE = OzoneConsts.KB + 1; // bin 1
private static final String OBS_BUCKET_PATH = "/volume1/obs-bucket";
private static final String FSO_BUCKET_PATH = "/volume1/fso-bucket";
@@ -324,10 +308,10 @@ public void setUp() throws Exception {
reconOMMetadataManager, ozoneConfiguration, 10);
nsSummaryTaskWithOBS = new NSSummaryTaskWithOBS(
reconNamespaceSummaryManager,
- reconOMMetadataManager, ozoneConfiguration, 10);
+ reconOMMetadataManager, 10);
nsSummaryTaskWithFSO = new NSSummaryTaskWithFSO(
reconNamespaceSummaryManager,
- reconOMMetadataManager, ozoneConfiguration, 10);
+ reconOMMetadataManager, 10);
reconNamespaceSummaryManager.clearNSSummaryTable();
nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager);
nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java
index 5123619416bd..0897f548630a 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenKeysSearchEndpoint.java
@@ -116,7 +116,7 @@ public void setUp() throws Exception {
populateOMDB();
NSSummaryTaskWithFSO nSSummaryTaskWithFso =
new NSSummaryTaskWithFSO(reconNamespaceSummaryManager,
- reconOMMetadataManager, ozoneConfiguration, 10);
+ reconOMMetadataManager, 10);
nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java
index ff2fbb872fc4..032b948233eb 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest;
-import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater;
import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager;
import org.apache.ozone.recon.schema.UtilizationSchemaDefinition;
@@ -53,7 +52,6 @@
public class TestContainerSizeCountTask extends AbstractReconSqlDBTest {
private ContainerManager containerManager;
- private StorageContainerServiceProvider scmClient;
private ReconTaskConfig reconTaskConfig;
private ReconTaskStatusUpdaterManager reconTaskStatusUpdaterManager;
private ContainerCountBySizeDao containerCountBySizeDao;
@@ -77,10 +75,8 @@ public void setUp() {
when(reconTaskStatusUpdaterManager.getTaskStatusUpdater(anyString())).thenReturn(new ReconTaskStatusUpdater(
getDao(ReconTaskStatusDao.class), "mockedTask-" + System.currentTimeMillis()));
containerManager = mock(ContainerManager.class);
- scmClient = mock(StorageContainerServiceProvider.class);
task = new ContainerSizeCountTask(
containerManager,
- scmClient,
reconTaskConfig,
containerCountBySizeDao,
utilizationSchemaDefinition,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
index 83b722240510..3294ab4fae81 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java
@@ -152,7 +152,7 @@ void setUp(@TempDir File tmpDir) throws Exception {
OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, 3);
nSSummaryTaskWithFso = new NSSummaryTaskWithFSO(
reconNamespaceSummaryManager, reconOMMetadataManager,
- ozoneConfiguration, nsSummaryFlushToDBMaxThreshold);
+ nsSummaryFlushToDBMaxThreshold);
}
/**
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java
index 386a5539f125..0b41162f837f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java
@@ -133,7 +133,7 @@ void setUp(@TempDir File tmpDir) throws Exception {
OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD_DEFAULT);
nSSummaryTaskWithOBS = new NSSummaryTaskWithOBS(
reconNamespaceSummaryManager,
- reconOMMetadataManager, omConfiguration,
+ reconOMMetadataManager,
nsSummaryFlushToDBMaxThreshold);
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
index 3effcb549b5b..e889195a9583 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
@@ -49,7 +49,6 @@
import java.util.List;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TypedTable;
@@ -72,8 +71,6 @@
import org.junit.jupiter.api.io.TempDir;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* This test class is designed for the OM Table Insight Task. It conducts tests
@@ -89,7 +86,6 @@ public class TestOmTableInsightTask extends AbstractReconSqlDBTest {
private boolean isSetupDone = false;
private static ReconOMMetadataManager reconOMMetadataManager;
private static NSSummaryTaskWithFSO nSSummaryTaskWithFso;
- private static OzoneConfiguration ozoneConfiguration;
private static ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager;
// Object names in FSO-enabled format
@@ -127,15 +123,11 @@ public class TestOmTableInsightTask extends AbstractReconSqlDBTest {
@Mock
private Table nsSummaryTable;
- private static final Logger LOG =
- LoggerFactory.getLogger(TestOmTableInsightTask.class);
-
public TestOmTableInsightTask() {
super();
}
private void initializeInjector() throws IOException {
- ozoneConfiguration = new OzoneConfiguration();
reconOMMetadataManager = getTestReconOmMetadataManager(
initializeNewOmMetadataManager(Files.createDirectory(
temporaryFolder.resolve("JunitOmDBDir")).toFile()),
@@ -154,8 +146,7 @@ private void initializeInjector() throws IOException {
omTableInsightTask = new OmTableInsightTask(
globalStatsDao, getConfiguration(), reconOMMetadataManager);
nSSummaryTaskWithFso = new NSSummaryTaskWithFSO(
- reconNamespaceSummaryManager, reconOMMetadataManager,
- ozoneConfiguration, 10);
+ reconNamespaceSummaryManager, reconOMMetadataManager, 10);
dslContext = getDslContext();
omTableInsightTask.setTables(omTableInsightTask.getTaskTables());
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
index e8b03bd4bf15..d2c3cb5bcdaa 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
@@ -20,17 +20,12 @@
import java.io.IOException;
import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
import org.apache.hadoop.hdds.server.http.BaseHttpServer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Http server to provide S3-compatible API.
*/
public class S3GatewayHttpServer extends BaseHttpServer {
- private static final Logger LOG =
- LoggerFactory.getLogger(S3GatewayHttpServer.class);
-
/**
* Default offset between two filters.
*/
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4HeaderParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4HeaderParser.java
index 19240122d62a..cee828fa8466 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4HeaderParser.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4HeaderParser.java
@@ -31,17 +31,12 @@
import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.ozone.s3.signature.SignatureInfo.Version;
import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Class to parse v4 auth information from header.
*/
public class AuthorizationV4HeaderParser implements SignatureParser {
- private static final Logger LOG =
- LoggerFactory.getLogger(AuthorizationV4HeaderParser.class);
-
private static final String CREDENTIAL = "Credential=";
private static final String SIGNEDHEADERS = "SignedHeaders=";
private static final String SIGNATURE = "Signature=";
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4QueryParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4QueryParser.java
index 438bf51220d2..5c716d88e0c5 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4QueryParser.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AuthorizationV4QueryParser.java
@@ -32,8 +32,6 @@
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.ozone.s3.signature.SignatureInfo.Version;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Parser for getting auth info from query parameters.
@@ -43,9 +41,6 @@
*/
public class AuthorizationV4QueryParser implements SignatureParser {
- private static final Logger LOG =
- LoggerFactory.getLogger(AuthorizationV4QueryParser.class);
-
private final Map queryParameters;
private static final Long X_AMZ_EXPIRES_MIN = 1L;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
index 5f0cb4a84618..fab1718d93e6 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java
@@ -17,16 +17,12 @@
package org.apache.hadoop.ozone.s3.signature;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
* Credential in the AWS authorization header.
* Ref: https://docs.aws.amazon.com/AmazonS3/latest/API/
* sigv4-auth-using-authorization-header.html
*/
public class Credential {
- private static final Logger LOG = LoggerFactory.getLogger(Credential.class);
private String accessKeyID;
private String date;