getPipelineToOpenContainer() {
return pipelineToOpenContainer;
}
+ @VisibleForTesting
+ public StorageContainerServiceProvider getScmClient() {
+ return scmClient;
+ }
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportQueue.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportQueue.java
new file mode 100644
index 000000000000..8d5f92eda4ca
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportQueue.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.recon.scm;
+
+import org.apache.hadoop.hdds.scm.server.ContainerReportQueue;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReport;
+
+import java.util.List;
+
+/**
+ * Customized queue to handle multiple ICR reports together.
+ */
+public class ReconContainerReportQueue extends ContainerReportQueue {
+
+ public ReconContainerReportQueue(int queueSize) {
+ super(queueSize);
+ }
+
+ @Override
+ protected boolean mergeIcr(ContainerReport val,
+ List<ContainerReport> dataList) {
+ if (!dataList.isEmpty()) {
+ if (SCMDatanodeHeartbeatDispatcher.ContainerReportType.ICR
+ == dataList.get(dataList.size() - 1).getType()) {
+ dataList.get(dataList.size() - 1).mergeReport(val);
+ return true;
+ }
+ }
+ return false;
+ }
+}
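
Not part of the patch: since ReconContainerReportQueue only changes how back-to-back ICRs are folded into one queued report, the intended behaviour can be illustrated with a small same-package test sketch. This assumes ContainerReport is mockable and exposes getType()/mergeReport() exactly as used in mergeIcr() above; the class and test names are hypothetical.

```java
package org.apache.hadoop.ozone.recon.scm;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReport;
import org.junit.jupiter.api.Test;

/** Illustrative sketch only; not part of the patch. */
public class TestReconContainerReportQueueSketch {

  @Test
  public void consecutiveIcrReportsAreMerged() {
    ReconContainerReportQueue queue = new ReconContainerReportQueue(10);

    // Tail of the queue backlog is an ICR, so the next ICR should merge into it.
    ContainerReport tail = mock(ContainerReport.class);
    when(tail.getType()).thenReturn(
        SCMDatanodeHeartbeatDispatcher.ContainerReportType.ICR);
    ContainerReport incoming = mock(ContainerReport.class);

    List<ContainerReport> backlog = new ArrayList<>();
    backlog.add(tail);

    // The new ICR is folded into the queued one instead of being enqueued.
    assertTrue(queue.mergeIcr(incoming, backlog));
    verify(tail).mergeReport(incoming);
    assertEquals(1, backlog.size());
  }
}
```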
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
index 18d995d053aa..1f2b1d5cf249 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
@@ -24,8 +24,8 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -69,24 +69,33 @@ public void onMessage(final IncrementalContainerReportFromDatanode report,
ReconContainerManager containerManager =
(ReconContainerManager) getContainerManager();
+ try {
+ containerManager.checkAndAddNewContainerBatch(
+ report.getReport().getReportList());
+ } catch (Exception ioEx) {
+ LOG.error("Exception while checking and adding new container.", ioEx);
+ return;
+ }
boolean success = true;
for (ContainerReplicaProto replicaProto :
report.getReport().getReportList()) {
+ ContainerID id = ContainerID.valueOf(replicaProto.getContainerID());
+ ContainerInfo container = null;
try {
- final ContainerID id = ContainerID.valueOf(
- replicaProto.getContainerID());
try {
- containerManager.checkAndAddNewContainer(id, replicaProto.getState(),
- report.getDatanodeDetails());
- } catch (Exception ioEx) {
- LOG.error("Exception while checking and adding new container.", ioEx);
- return;
+ container = getContainerManager().getContainer(id);
+ // Ensure we reuse the same ContainerID instance in containerInfo
+ id = container.containerID();
+ } finally {
+ if (replicaProto.getState().equals(
+ ContainerReplicaProto.State.DELETED)) {
+ getNodeManager().removeContainer(dd, id);
+ } else {
+ getNodeManager().addContainer(dd, id);
+ }
}
- getNodeManager().addContainer(dd, id);
processContainerReplica(dd, replicaProto, publisher);
- } catch (ContainerNotFoundException e) {
- success = false;
- LOG.warn("Container {} not found!", replicaProto.getContainerID());
+ success = true;
} catch (NodeNotFoundException ex) {
success = false;
LOG.error("Received ICR from unknown datanode {}.",
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index 464ec1a5ee85..556c6194192f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -99,11 +99,21 @@
import org.apache.hadoop.ozone.recon.tasks.ContainerSizeCountTask;
import org.apache.hadoop.ozone.recon.tasks.ReconTaskConfig;
import com.google.inject.Inject;
+
import static org.apache.hadoop.hdds.recon.ReconConfigKeys.RECON_SCM_CONFIG_PREFIX;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_REPORT_EXEC_WAIT_THRESHOLD_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_REPORT_QUEUE_WAIT_THRESHOLD_DEFAULT;
import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.buildRpcServerStartMessage;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_SCM_CLIENT_MAX_RETRY_TIMEOUT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_SCM_CLIENT_RPC_TIME_OUT;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_DEFAULT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_KEY;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CLIENT_MAX_RETRY_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CLIENT_MAX_RETRY_TIMEOUT_KEY;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CLIENT_RPC_TIME_OUT_DEFAULT;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CLIENT_RPC_TIME_OUT_KEY;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_TASK_INITIAL_DELAY;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT;
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_TASK_INTERVAL_DEFAULT;
@@ -182,6 +192,23 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf,
.setSCM(this)
.build();
this.ozoneConfiguration = getReconScmConfiguration(conf);
+ long scmClientRPCTimeOut = conf.getTimeDuration(
+ OZONE_RECON_SCM_CLIENT_RPC_TIME_OUT_KEY,
+ OZONE_RECON_SCM_CLIENT_RPC_TIME_OUT_DEFAULT,
+ TimeUnit.MILLISECONDS);
+ long scmClientMaxRetryTimeOut = conf.getTimeDuration(
+ OZONE_RECON_SCM_CLIENT_MAX_RETRY_TIMEOUT_KEY,
+ OZONE_RECON_SCM_CLIENT_MAX_RETRY_TIMEOUT_DEFAULT,
+ TimeUnit.MILLISECONDS);
+ int scmClientFailOverMaxRetryCount = conf.getInt(
+ OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_KEY,
+ OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_DEFAULT);
+
+ conf.setLong(HDDS_SCM_CLIENT_RPC_TIME_OUT, scmClientRPCTimeOut);
+ conf.setLong(HDDS_SCM_CLIENT_MAX_RETRY_TIMEOUT, scmClientMaxRetryTimeOut);
+ conf.setLong(HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY,
+ scmClientFailOverMaxRetryCount);
+
this.scmStorageConfig = new ReconStorageConfig(conf, reconUtils);
this.clusterMap = new NetworkTopologyImpl(conf);
this.dbStore = DBStoreBuilder
@@ -283,7 +310,7 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf,
ScmUtils.getContainerReportConfPrefix() + ".execute.wait.threshold",
OZONE_SCM_EVENT_REPORT_EXEC_WAIT_THRESHOLD_DEFAULT);
List<BlockingQueue<SCMDatanodeHeartbeatDispatcher.ContainerReport>> queues
- = ScmUtils.initContainerReportQueue(ozoneConfiguration);
+ = ReconUtils.initContainerReportQueue(ozoneConfiguration);
List<ThreadPoolExecutor> executors
= FixedThreadPoolWithAffinityExecutor.initializeExecutorPool(
threadNamePrefix, queues);
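
Not part of the patch: switching from ScmUtils.initContainerReportQueue to a ReconUtils variant presumably exists so the queues wired into the executor pool here are ReconContainerReportQueue instances (which merge consecutive ICRs) rather than the SCM default. A hypothetical shape for such a helper, with hard-coded placeholder sizes standing in for the real configuration lookups:

```java
  // Sketch only; imagined as a static method on ReconUtils, and assuming
  // ReconContainerReportQueue is usable as a BlockingQueue<ContainerReport>.
  public static List<BlockingQueue<SCMDatanodeHeartbeatDispatcher.ContainerReport>>
      initContainerReportQueue(OzoneConfiguration configuration) {
    int queueCount = 1;          // placeholder; real code would read this from config
    int queueCapacity = 100000;  // placeholder; real code would read this from config
    List<BlockingQueue<SCMDatanodeHeartbeatDispatcher.ContainerReport>> queues =
        new ArrayList<>();
    for (int i = 0; i < queueCount; i++) {
      queues.add(new ReconContainerReportQueue(queueCapacity));
    }
    return queues;
  }
```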
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index cb11d7060d78..f50acc09258f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -30,7 +30,9 @@
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
@@ -66,18 +68,28 @@ public class TestReconIncrementalContainerReportHandler
private HDDSLayoutVersionManager versionManager;
@Test
- public void testProcessICR() throws IOException, NodeNotFoundException {
+ public void testProcessICR()
+ throws IOException, NodeNotFoundException, TimeoutException {
ContainerID containerID = ContainerID.valueOf(100L);
DatanodeDetails datanodeDetails = randomDatanodeDetails();
IncrementalContainerReportFromDatanode reportMock =
mock(IncrementalContainerReportFromDatanode.class);
when(reportMock.getDatanodeDetails()).thenReturn(datanodeDetails);
+
+ ContainerWithPipeline containerWithPipeline = getTestContainer(
+ containerID.getId(), OPEN);
+ List<ContainerWithPipeline> containerWithPipelineList = new ArrayList<>();
+ containerWithPipelineList.add(containerWithPipeline);
+ ReconContainerManager containerManager = getContainerManager();
IncrementalContainerReportProto containerReport =
getIncrementalContainerReportProto(containerID,
State.OPEN,
datanodeDetails.getUuidString());
when(reportMock.getReport()).thenReturn(containerReport);
+ when(getContainerManager().getScmClient()
+ .getExistContainerWithPipelinesInBatch(any(
+ ArrayList.class))).thenReturn(containerWithPipelineList);
final String path =
GenericTestUtils.getTempPath(UUID.randomUUID().toString());
@@ -99,7 +111,6 @@ public void testProcessICR() throws IOException, NodeNotFoundException {
nodeManager.register(datanodeDetails, null, null);
- ReconContainerManager containerManager = getContainerManager();
ReconIncrementalContainerReportHandler reconIcr =
new ReconIncrementalContainerReportHandler(nodeManager,
containerManager, SCMContext.emptyContext());