diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index a3db139b96ce..73a138910415 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -35,12 +35,15 @@
  * - UUID of the DataNode.
  * - IP and Hostname details.
  * - Port details to which the DataNode will be listening.
+ * and may also include some extra info, such as:
+ * - version of the DataNode
+ * - setup time, etc.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeDetails extends NodeImpl implements
     Comparable<DatanodeDetails> {
-/**
+  /**
    * DataNode's unique identifier in the cluster.
    */
   private final UUID uuid;
@@ -225,17 +228,34 @@ public static DatanodeDetails getFromProtoBuf(
     if (datanodeDetailsProto.hasNetworkLocation()) {
       builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation());
     }
-    if (datanodeDetailsProto.hasVersion()) {
-      builder.setVersion(datanodeDetailsProto.getVersion());
+    return builder.build();
+  }
+
+  /**
+   * Returns a DatanodeDetails instance built from an
+   * ExtendedDatanodeDetailsProto protocol buffers message.
+   *
+   * @param extendedDetailsProto - protoBuf Message
+   * @return DatanodeDetails
+   */
+  public static DatanodeDetails getFromProtoBuf(
+      HddsProtos.ExtendedDatanodeDetailsProto extendedDetailsProto) {
+    DatanodeDetails.Builder builder = newBuilder();
+    if (extendedDetailsProto.hasDatanodeDetails()) {
+      DatanodeDetails datanodeDetails = getFromProtoBuf(
+          extendedDetailsProto.getDatanodeDetails());
+      builder.setDatanodeDetails(datanodeDetails);
+    }
+    if (extendedDetailsProto.hasVersion()) {
+      builder.setVersion(extendedDetailsProto.getVersion());
     }
-    if (datanodeDetailsProto.hasSetupTime()) {
-      builder.setSetupTime(datanodeDetailsProto.getSetupTime());
+    if (extendedDetailsProto.hasSetupTime()) {
+      builder.setSetupTime(extendedDetailsProto.getSetupTime());
     }
-    if (datanodeDetailsProto.hasRevision()) {
-      builder.setRevision(datanodeDetailsProto.getRevision());
+    if (extendedDetailsProto.hasRevision()) {
+      builder.setRevision(extendedDetailsProto.getRevision());
     }
-    if (datanodeDetailsProto.hasBuildDate()) {
-      builder.setBuildDate(datanodeDetailsProto.getBuildDate());
+    if (extendedDetailsProto.hasBuildDate()) {
+      builder.setBuildDate(extendedDetailsProto.getBuildDate());
     }
     return builder.build();
   }
@@ -279,20 +299,34 @@ public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
           .build());
     }
 
+    return builder.build();
+  }
+
+  /**
+   * Returns an ExtendedDatanodeDetailsProto message from a datanode ID.
+   *
+   * @return HddsProtos.ExtendedDatanodeDetailsProto
+   */
+  public HddsProtos.ExtendedDatanodeDetailsProto getExtendedProtoBufMessage() {
+    HddsProtos.DatanodeDetailsProto datanodeDetailsProto = getProtoBufMessage();
+
+    HddsProtos.ExtendedDatanodeDetailsProto.Builder extendedBuilder =
+        HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
+            .setDatanodeDetails(datanodeDetailsProto);
+
     if (!Strings.isNullOrEmpty(getVersion())) {
-      builder.setVersion(getVersion());
+      extendedBuilder.setVersion(getVersion());
     }
-    builder.setSetupTime(getSetupTime());
+
+    extendedBuilder.setSetupTime(getSetupTime());
+
     if (!Strings.isNullOrEmpty(getRevision())) {
-      builder.setRevision(getRevision());
+      extendedBuilder.setRevision(getRevision());
     }
     if (!Strings.isNullOrEmpty(getBuildDate())) {
-      builder.setBuildDate(getBuildDate());
+      extendedBuilder.setBuildDate(getBuildDate());
     }
-    return builder.build();
+
+    return extendedBuilder.build();
   }
 
   @Override
@@ -357,6 +391,27 @@ private Builder() {
       ports = new ArrayList<>();
     }
 
+    /**
+     * Initializes this builder with the values of an existing
+     * DatanodeDetails instance.
+     *
+     * @param details DatanodeDetails
+     * @return DatanodeDetails.Builder
+     */
+    public Builder setDatanodeDetails(DatanodeDetails details) {
+      this.id = details.getUuid();
+      this.ipAddress = details.getIpAddress();
+      this.hostName = details.getHostName();
+      this.networkName = details.getNetworkName();
+      this.networkLocation = details.getNetworkLocation();
+      this.ports = details.getPorts();
+      this.certSerialId = details.getCertSerialId();
+      this.version = details.getVersion();
+      this.setupTime = details.getSetupTime();
+      this.revision = details.getRevision();
+      this.buildDate = details.getBuildDate();
+      return this;
+    }
+
     /**
      * Sets the DatanodeUuid.
      *
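
Usage sketch (illustrative, not part of the patch): the round trip enabled by the two additions above. MockDatanodeDetails is test-scope and is used here only for brevity; it is touched later in this patch.

```java
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public final class ExtendedProtoRoundTrip {
  public static void main(String[] args) {
    DatanodeDetails original = MockDatanodeDetails.randomDatanodeDetails();

    // Serialize: identity and network fields travel in the nested basic
    // proto; version/setupTime/revision/buildDate sit in the wrapper.
    HddsProtos.ExtendedDatanodeDetailsProto extended =
        original.getExtendedProtoBufMessage();

    // Deserialize: the new overload unwraps the nested DatanodeDetailsProto
    // first, then applies the extra fields.
    DatanodeDetails restored = DatanodeDetails.getFromProtoBuf(extended);

    System.out.println(restored.getUuid().equals(original.getUuid())); // true
  }
}
```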
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java
index 579271943c35..06a1bf0f8678 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java
@@ -44,7 +44,7 @@ public static DatanodeDetails randomDatanodeDetails() {
    * @return DatanodeDetails
    */
   public static DatanodeDetails createDatanodeDetails(String hostname,
-          String loc) {
+      String loc) {
     Random random = ThreadLocalRandom.current();
     String ipAddress = random.nextInt(256)
         + "." + random.nextInt(256)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 425074d6888a..1951c8e097c5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -90,7 +90,7 @@ public class DatanodeStateMachine implements Closeable {
 
   /**
    * Constructs a a datanode state machine.
-   * @param datanodeDetails - DatanodeDetails used to identify a datanode
+   * @param datanodeDetails - DatanodeDetails used to identify a datanode
    * @param conf - Configuration.
    * @param certClient - Datanode Certificate client, required if security is
    *                     enabled
@@ -135,7 +135,7 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails,
         dnConf.getReplicationMaxStreams());
 
     // When we add new handlers just adding a new handler here should do the
-    // trick.
+    // trick.
     commandDispatcher = CommandDispatcher.newBuilder()
         .addHandler(new CloseContainerCommandHandler())
         .addHandler(new DeleteBlocksCommandHandler(container.getContainerSet(),
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index be95f011407c..60d2bb23f22a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -22,7 +22,7 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.ozone.container.common.statemachine
     .EndpointStateMachine;
 import org.apache.hadoop.hdds.protocol.proto
@@ -119,8 +119,8 @@ public EndpointStateMachine.EndPointStates call() throws Exception {
           datanodeContainerManager.getPipelineReport();
       // TODO : Add responses to the command Queue.
       SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint()
-          .register(datanodeDetails.getProtoBufMessage(), nodeReport,
-              containerReport, pipelineReportsProto);
+          .register(datanodeDetails.getExtendedProtoBufMessage(),
+              nodeReport, containerReport, pipelineReportsProto);
       Preconditions.checkState(UUID.fromString(response.getDatanodeUUID())
               .equals(datanodeDetails.getUuid()),
           "Unexpected datanode ID in the response.");
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index b62f712bc769..64f294388944 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -17,9 +17,9 @@
 package org.apache.hadoop.ozone.protocol;
 
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -73,15 +73,15 @@ SCMHeartbeatResponseProto sendHeartbeat(SCMHeartbeatRequestProto heartbeat)
 
   /**
    * Register Datanode.
-   * @param datanodeDetails - Datanode Details.
+   * @param extendedDatanodeDetailsProto - extended Datanode Details.
    * @param nodeReport - Node Report.
    * @param containerReportsRequestProto - Container Reports.
    * @return SCM Command.
    */
   SCMRegisteredResponseProto register(
-      DatanodeDetailsProto datanodeDetails,
-      NodeReportProto nodeReport,
-      ContainerReportsProto containerReportsRequestProto,
-      PipelineReportsProto pipelineReports) throws IOException;
+      ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
+      NodeReportProto nodeReport,
+      ContainerReportsProto containerReportsRequestProto,
+      PipelineReportsProto pipelineReports) throws IOException;
 }
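
The receiving side of this contract change, sketched below (illustrative, not part of the patch). RegisterHandlerSketch is hypothetical; the two unwrapping patterns it shows are the ones the real implementations (SCMDatanodeProtocolServer and ScmTestMock, both later in this patch) use.

```java
import java.io.IOException;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public class RegisterHandlerSketch {
  public void onRegister(
      HddsProtos.ExtendedDatanodeDetailsProto extendedDetails)
      throws IOException {
    // Pattern 1: unwrap only the lean identity message for bookkeeping.
    HddsProtos.DatanodeDetailsProto basic =
        extendedDetails.getDatanodeDetails();

    // Pattern 2: build the full Java object, extra fields included, via
    // the new getFromProtoBuf overload added in this patch.
    DatanodeDetails details = DatanodeDetails.getFromProtoBuf(extendedDetails);

    System.out.println("register: " + basic.getUuid()
        + ", version=" + extendedDetails.getVersion());
  }
}
```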
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index 9b446666e5d1..4da8b2754559 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -18,9 +18,10 @@
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos
+    .ExtendedDatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -153,20 +154,21 @@ public SCMHeartbeatResponseProto sendHeartbeat(
   /**
    * Register Datanode.
    *
-   * @param datanodeDetailsProto - Datanode Details
+   * @param extendedDatanodeDetailsProto - extended Datanode Details
    * @param nodeReport - Node Report.
    * @param containerReportsRequestProto - Container Reports.
    * @return SCM Command.
    */
   @Override
   public SCMRegisteredResponseProto register(
-      DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
+      ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
+      NodeReportProto nodeReport,
       ContainerReportsProto containerReportsRequestProto,
       PipelineReportsProto pipelineReportsProto) throws IOException {
     SCMRegisterRequestProto.Builder req =
         SCMRegisterRequestProto.newBuilder();
-    req.setDatanodeDetails(datanodeDetailsProto);
+    req.setExtendedDatanodeDetails(extendedDatanodeDetailsProto);
     req.setContainerReport(containerReportsRequestProto);
     req.setPipelineReports(pipelineReportsProto);
     req.setNodeReport(nodeReport);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index e99cbae9f027..f27f4f3cf38c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -68,7 +68,7 @@ public SCMRegisteredResponseProto register(
         .getContainerReport();
     NodeReportProto dnNodeReport = request.getNodeReport();
     PipelineReportsProto pipelineReport = request.getPipelineReports();
-    return impl.register(request.getDatanodeDetails(), dnNodeReport,
+    return impl.register(request.getExtendedDatanodeDetails(), dnNodeReport,
         containerRequestProto, pipelineReport);
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index c4b29ba2722d..534f9efdabaa 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -17,6 +17,8 @@
 package org.apache.hadoop.ozone.container.common;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos
+    .ExtendedDatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
@@ -211,17 +213,20 @@ private void sleepIfNeeded() {
   /**
    * Register Datanode.
    *
-   * @param datanodeDetailsProto DatanodDetailsProto.
+   * @param extendedDatanodeDetailsProto ExtendedDatanodeDetailsProto.
    * @return SCM Command.
    */
   @Override
   public StorageContainerDatanodeProtocolProtos
       .SCMRegisteredResponseProto register(
-      DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
+      ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
+      NodeReportProto nodeReport,
       ContainerReportsProto containerReportsRequestProto,
       PipelineReportsProto pipelineReportsProto) throws IOException {
     rpcCount.incrementAndGet();
+    DatanodeDetailsProto datanodeDetailsProto =
+        extendedDatanodeDetailsProto.getDatanodeDetails();
     updateNodeReport(datanodeDetailsProto, nodeReport);
     updateContainerReport(containerReportsRequestProto, datanodeDetailsProto);
     sleepIfNeeded();
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 3eeb3a321927..0c9b26142558 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -43,14 +43,23 @@ message DatanodeDetailsProto {
     // network name, can be Ip address or host name, depends
     optional string networkName = 6;
     optional string networkLocation = 7; // Network topology location
-    optional string version = 8; // Datanode version
-    optional int64 setupTime = 9;
-    optional string revision = 10;
-    optional string buildDate = 11;
     // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required
     optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode.
 }
 
+/**
+    DatanodeDetailsProto is a basic type that is shared by many other proto
+    messages. To keep the number of fields transferred by ProtoBuf small,
+    the extra fields live in ExtendedDatanodeDetailsProto, which is only
+    needed when registering a DataNode with SCM and Recon.
+*/
+message ExtendedDatanodeDetailsProto {
+    required DatanodeDetailsProto datanodeDetails = 1;
+    optional string version = 2;
+    optional int64 setupTime = 3;
+    optional string revision = 4;
+    optional string buildDate = 5;
+}
+
 /**
     Proto message encapsulating information required to uniquely identify a
     OzoneManager.
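
For wire-level users, a sketch of building the new message with the generated Java builders (illustrative, not part of the patch; the literal values are placeholders, and the same shape appears in TestEndpoints at the end of this patch):

```java
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public final class BuildExtendedProto {
  public static HddsProtos.ExtendedDatanodeDetailsProto sample() {
    HddsProtos.DatanodeDetailsProto basic =
        HddsProtos.DatanodeDetailsProto.newBuilder()
            .setUuid(UUID.randomUUID().toString())
            .setHostName("dn-1.example.com")   // placeholder
            .setIpAddress("10.0.0.1")          // placeholder
            .build();

    return HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
        .setDatanodeDetails(basic)             // the only required field
        .setVersion("0.6.0")
        .setSetupTime(System.currentTimeMillis())
        .setRevision("3346f49")                // placeholder git revision
        .setBuildDate("2020-08-01T08:50Z")
        .build();
  }
}
```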
diff --git a/hadoop-hdds/interface-client/src/main/resources/proto.lock b/hadoop-hdds/interface-client/src/main/resources/proto.lock
index b27896c655e3..581ffafc4ef6 100644
--- a/hadoop-hdds/interface-client/src/main/resources/proto.lock
+++ b/hadoop-hdds/interface-client/src/main/resources/proto.lock
@@ -1530,16 +1530,6 @@
               "name": "networkLocation",
               "type": "string"
             },
-            {
-              "id": 8,
-              "name": "version",
-              "type": "string"
-            },
-            {
-              "id": 9,
-              "name": "setupTime",
-              "type": "int64"
-            },
             {
               "id": 100,
               "name": "uuid128",
               "type": "UUID"
             }
@@ -1547,6 +1537,36 @@
             }
           ]
         },
+        {
+          "name": "ExtendedDatanodeDetailsProto",
+          "fields": [
+            {
+              "id": 1,
+              "name": "datanodeDetails",
+              "type": "DatanodeDetailsProto"
+            },
+            {
+              "id": 2,
+              "name": "version",
+              "type": "string"
+            },
+            {
+              "id": 3,
+              "name": "setupTime",
+              "type": "int64"
+            },
+            {
+              "id": 4,
+              "name": "revision",
+              "type": "string"
+            },
+            {
+              "id": 5,
+              "name": "buildDate",
+              "type": "string"
+            }
+          ]
+        },
         {
           "name": "OzoneManagerDetailsProto",
           "fields": [
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 00c8fdbf3fb4..1dc4bcd4d249 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -88,7 +88,7 @@ message SCMVersionResponseProto {
 }
 
 message SCMRegisterRequestProto {
-  required DatanodeDetailsProto datanodeDetails = 1;
+  required ExtendedDatanodeDetailsProto extendedDatanodeDetails = 1;
   required NodeReportProto nodeReport = 2;
   required ContainerReportsProto containerReport = 3;
   required PipelineReportsProto pipelineReports = 4;
diff --git a/hadoop-hdds/interface-server/src/main/resources/proto.lock b/hadoop-hdds/interface-server/src/main/resources/proto.lock
index 5492e00b10a3..5022d48be971 100644
--- a/hadoop-hdds/interface-server/src/main/resources/proto.lock
+++ b/hadoop-hdds/interface-server/src/main/resources/proto.lock
@@ -310,8 +310,8 @@
           "fields": [
             {
               "id": 1,
-              "name": "datanodeDetails",
-              "type": "DatanodeDetailsProto"
+              "name": "extendedDatanodeDetails",
+              "type": "ExtendedDatanodeDetailsProto"
             },
             {
               "id": 2,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index b6248aa817d0..d51961f7d471 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -564,18 +564,18 @@ private void checkNodesHealth() {
    * >>-->> time-line >>-->>
    *
    * Here is the logic of computing the health of a node.
-     *
-     * 1. We get the current time and look back that the time
-     *  when we got a heartbeat from a node.
-     *
-     * 2. If the last heartbeat was within the window of healthy node we mark
-     *  it as healthy.
-     *
-     * 3. If the last HB Time stamp is longer and falls within the window of
-     *  Stale Node time, we will mark it as Stale.
-     *
-     * 4. If the last HB time is older than the Stale Window, then the node is
-     * marked as dead.
+   *
+   * 1. We get the current time and look back to the time
+   *    when we got a heartbeat from a node.
+   *
+   * 2. If the last heartbeat was within the healthy-node window, we mark
+   *    it as healthy.
+   *
+   * 3. If the last heartbeat timestamp is older and falls within the
+   *    stale-node window, we mark it as stale.
+   *
+   * 4. If the last heartbeat is older than the stale window, then the node
+   *    is marked as dead.
    *
    * The Processing starts from current time and looks backwards in time.
    */
@@ -602,7 +602,7 @@ private void checkNodesHealth() {
           // Move the node to STALE if the last heartbeat time is less than
           // configured stale-node interval.
           updateNodeState(node, staleNodeCondition, state,
-            NodeLifeCycleEvent.TIMEOUT);
+              NodeLifeCycleEvent.TIMEOUT);
           break;
         case STALE:
           // Move the node to DEAD if the last heartbeat time is less than
@@ -620,8 +620,8 @@ private void checkNodesHealth() {
           updateNodeState(node, healthyNodeCondition, state,
               NodeLifeCycleEvent.RESURRECT);
           break;
-          // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in
-          // heartbeat processing.
+        // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in
+        // heartbeat processing.
         case DECOMMISSIONING:
         case DECOMMISSIONED:
         default:
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index ad7f65ab5853..a2953415cb38 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -203,13 +203,13 @@ public SCMVersionResponseProto getVersion(SCMVersionRequestProto
 
   @Override
   public SCMRegisteredResponseProto register(
-      HddsProtos.DatanodeDetailsProto datanodeDetailsProto,
+      HddsProtos.ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
       NodeReportProto nodeReport,
       ContainerReportsProto containerReportsProto,
       PipelineReportsProto pipelineReportsProto) throws IOException {
     DatanodeDetails datanodeDetails = DatanodeDetails
-        .getFromProtoBuf(datanodeDetailsProto);
+        .getFromProtoBuf(extendedDatanodeDetailsProto);
     boolean auditSuccess = true;
     Map<String, String> auditMap = Maps.newHashMap();
     auditMap.put("datanodeDetails", datanodeDetails.toString());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 663ac8c5b80a..8cad8b0d4561 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -269,7 +269,7 @@ public void testRegister() throws Exception {
     try (EndpointStateMachine rpcEndPoint = createEndpoint(
         SCMTestUtils.getConf(), serverAddress, 1000)) {
       SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .register(nodeToRegister.getProtoBufMessage(), TestUtils
+          .register(nodeToRegister.getExtendedProtoBufMessage(), TestUtils
               .createNodeReport(
                   getStorageReports(nodeToRegister.getUuid())),
               TestUtils.getRandomContainerReports(10),
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java
index c11ebbf63a63..96ae806c8737 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java
@@ -19,7 +19,8 @@
 package org.apache.hadoop.ozone.recon.codec;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto.PARSER;
+import static org.apache.hadoop.hdds.protocol.proto
+    .HddsProtos.ExtendedDatanodeDetailsProto.PARSER;
 
 import java.io.IOException;
 
@@ -33,7 +34,7 @@ public class DatanodeDetailsCodec implements Codec<DatanodeDetails> {
 
   @Override
   public byte[] toPersistedFormat(DatanodeDetails object) throws IOException {
-    return object.getProtoBufMessage().toByteArray();
+    return object.getExtendedProtoBufMessage().toByteArray();
   }
 
   @Override
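
Note that the hunk above only shows toPersistedFormat, but the static-import swap also redirects the read path: PARSER now resolves to ExtendedDatanodeDetailsProto.PARSER. The codec's fromPersistedFormat, which is not shown in this diff, presumably ends up reading as follows (inferred):

```java
@Override
public DatanodeDetails fromPersistedFormat(byte[] rawData) throws IOException {
  // PARSER is now the static import of ExtendedDatanodeDetailsProto.PARSER,
  // so this parses the wrapper message, and overload resolution selects the
  // new getFromProtoBuf(ExtendedDatanodeDetailsProto).
  return DatanodeDetails.getFromProtoBuf(PARSER.parseFrom(rawData));
}
```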
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index 79cf1bbf9f6e..b99f30cca948 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -22,6 +22,8 @@
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos
+    .ExtendedDatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID;
@@ -127,7 +129,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
   private DatanodeDetails datanodeDetails2;
   private long containerId = 1L;
   private ContainerReportsProto containerReportsProto;
-  private DatanodeDetailsProto datanodeDetailsProto;
+  private ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto;
   private Pipeline pipeline;
   private FileCountBySizeDao fileCountBySizeDao;
   private DSLContext dslContext;
@@ -263,12 +265,20 @@ public void setUp() throws Exception {
     PipelineReportsProto pipelineReportsProto =
         PipelineReportsProto.newBuilder()
             .addPipelineReport(pipelineReport).build();
-    datanodeDetailsProto =
+    DatanodeDetailsProto datanodeDetailsProto =
         DatanodeDetailsProto.newBuilder()
             .setHostName(host1)
             .setUuid(datanodeId)
             .setIpAddress(ip1)
             .build();
+    extendedDatanodeDetailsProto =
+        HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
+            .setDatanodeDetails(datanodeDetailsProto)
+            .setVersion("0.6.0")
+            .setSetupTime(1596347628802L)
+            .setBuildDate("2020-08-01T08:50Z")
+            .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
+            .build();
     StorageReportProto storageReportProto1 =
         StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
             .setStorageLocation("/disk1").setScmUsed(10000).setRemaining(5400)
@@ -288,10 +298,18 @@ public void setUp() throws Exception {
 
     DatanodeDetailsProto datanodeDetailsProto2 =
         DatanodeDetailsProto.newBuilder()
-        .setHostName(host2)
-        .setUuid(datanodeId2)
-        .setIpAddress(ip2)
-        .build();
+            .setHostName(host2)
+            .setUuid(datanodeId2)
+            .setIpAddress(ip2)
+            .build();
+    ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto2 =
+        ExtendedDatanodeDetailsProto.newBuilder()
+            .setDatanodeDetails(datanodeDetailsProto2)
+            .setVersion("0.6.0")
+            .setSetupTime(1596347636802L)
+            .setBuildDate("2020-08-01T08:50Z")
+            .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
+            .build();
     StorageReportProto storageReportProto3 =
         StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
             .setStorageLocation("/disk1").setScmUsed(20000).setRemaining(7800)
@@ -311,10 +329,10 @@ public void setUp() throws Exception {
 
     try {
       reconScm.getDatanodeProtocolServer()
-          .register(datanodeDetailsProto, nodeReportProto,
+          .register(extendedDatanodeDetailsProto, nodeReportProto,
               containerReportsProto, pipelineReportsProto);
       reconScm.getDatanodeProtocolServer()
-          .register(datanodeDetailsProto2, nodeReportProto2,
+          .register(extendedDatanodeDetailsProto2, nodeReportProto2,
               ContainerReportsProto.newBuilder().build(),
               PipelineReportsProto.newBuilder().build());
       // Process all events in the event queue
@@ -628,7 +646,8 @@ private void waitAndCheckConditionAfterHeartbeat(Callable<Boolean> check)
     SCMHeartbeatRequestProto heartbeatRequestProto =
         SCMHeartbeatRequestProto.newBuilder()
             .setContainerReport(containerReportsProto)
-            .setDatanodeDetails(datanodeDetailsProto)
+            .setDatanodeDetails(extendedDatanodeDetailsProto
+                .getDatanodeDetails())
             .build();
     reconScm.getDatanodeProtocolServer().sendHeartbeat(heartbeatRequestProto);
     LambdaTestUtils.await(30000, 1000, check);
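
A closing sketch of the resulting split (illustrative, not part of the patch): registration sends the extended message once, while heartbeats keep carrying only the nested basic proto, matching the heartbeat hunk above. This is the stated motivation for the wrapper type in hdds.proto.

```java
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;

public final class HeartbeatSketch {
  public static SCMHeartbeatRequestProto leanHeartbeat(
      HddsProtos.ExtendedDatanodeDetailsProto extended) {
    // Heartbeats are frequent, so they carry the small DatanodeDetailsProto;
    // the registration-time extras (version, setupTime, revision, buildDate)
    // are simply not resent.
    return SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(extended.getDatanodeDetails())
        .build();
  }
}
```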