@@ -53,15 +53,17 @@ public static NodeReport newInstance(NodeId nodeId, NodeState nodeState,
String httpAddress, String rackName, Resource used, Resource capability,
int numContainers, String healthReport, long lastHealthReportTime) {
return newInstance(nodeId, nodeState, httpAddress, rackName, used,
capability, numContainers, healthReport, lastHealthReportTime, null);
capability, numContainers, healthReport, lastHealthReportTime,
null, null, null);
}

@Private
@Unstable
public static NodeReport newInstance(NodeId nodeId, NodeState nodeState,
String httpAddress, String rackName, Resource used, Resource capability,
int numContainers, String healthReport, long lastHealthReportTime,
Set<String> nodeLabels) {
Set<String> nodeLabels, Integer decommissioningTimeout,
NodeUpdateType nodeUpdateType) {
NodeReport nodeReport = Records.newRecord(NodeReport.class);
nodeReport.setNodeId(nodeId);
nodeReport.setNodeState(nodeState);
@@ -73,6 +75,8 @@ public static NodeReport newInstance(NodeId nodeId, NodeState nodeState,
nodeReport.setHealthReport(healthReport);
nodeReport.setLastHealthReportTime(lastHealthReportTime);
nodeReport.setNodeLabels(nodeLabels);
nodeReport.setDecommissioningTimeout(decommissioningTimeout);
nodeReport.setNodeUpdateType(nodeUpdateType);
return nodeReport;
}
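
A minimal sketch of how a caller might use the widened factory method; the host, port, resource values, and 3600-second timeout are illustrative only and not part of this patch (imports from org.apache.hadoop.yarn.api.records are assumed):

NodeId nodeId = NodeId.newInstance("host1", 8041);
Resource used = Resource.newInstance(1024, 1);
Resource capability = Resource.newInstance(8192, 8);
// The two trailing arguments are new: a decommissioning timeout in seconds
// and the type of update being reported for the node.
NodeReport report = NodeReport.newInstance(nodeId, NodeState.DECOMMISSIONING,
    "host1:8042", "/default-rack", used, capability, 2, "healthy",
    System.currentTimeMillis(), null, 3600,
    NodeUpdateType.NODE_DECOMMISSIONING);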

@@ -186,8 +190,8 @@ public static NodeReport newInstance(NodeId nodeId, NodeState nodeState,
public abstract void setLastHealthReportTime(long lastHealthReport);

/**
* Get labels of this node
* @return labels of this node
* Get labels of this node.
* @return labels of this node.
*/
@Public
@Stable
@@ -198,8 +202,8 @@ public static NodeReport newInstance(NodeId nodeId, NodeState nodeState,
public abstract void setNodeLabels(Set<String> nodeLabels);

/**
* Get containers aggregated resource utilization in a node
* @return containers resource utilization
* Get containers aggregated resource utilization in a node.
* @return containers resource utilization.
*/
@Public
@Stable
@@ -217,8 +221,8 @@ public void setAggregatedContainersUtilization(ResourceUtilization
}

/**
* Get node resource utilization
* @return node resource utilization
* Get node resource utilization.
* @return node resource utilization.
*/
@Public
@Stable
@@ -227,4 +231,31 @@ public void setAggregatedContainersUtilization(ResourceUtilization
@Private
@Unstable
public abstract void setNodeUtilization(ResourceUtilization nodeUtilization);

/**
* Optional decommissioning timeout in seconds (null indicates absent
* timeout).
* @return the decommissioning timeout in seconds.
*/
public Integer getDecommissioningTimeout() {
return null;
}

/**
* Set the decommissioning timeout in seconds (null indicates absent timeout).
*/
public void setDecommissioningTimeout(Integer decommissioningTimeout) {}

/**
* Optional node update type (null indicates absent update type).
* @return the node update type.
*/
public NodeUpdateType getNodeUpdateType() {
return NodeUpdateType.NODE_UNUSABLE;
}

/**
* Set the node update type (null indicates absent node update type).
*/
public void setNodeUpdateType(NodeUpdateType nodeUpdateType) {}
}
@@ -0,0 +1,29 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.yarn.api.records;

/**
* <p>Taxonomy of node updates: the kind of <code>NodeState</code>
* transition that a <code>Node</code> is going through.</p>
*/
public enum NodeUpdateType {
NODE_USABLE,
NODE_UNUSABLE,
NODE_DECOMMISSIONING
}
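
A short, hypothetical sketch of how client-side code might interpret the new enum once it surfaces on a NodeReport; the method name and messages below are illustrative, not part of this patch:

static String describeUpdate(NodeUpdateType type) {
  if (type == null) {
    return "no pending node update";
  }
  switch (type) {
  case NODE_USABLE:
    return "node has become usable again";
  case NODE_UNUSABLE:
    return "node is unusable; containers on it may be lost";
  case NODE_DECOMMISSIONING:
    return "node is decommissioning; expect it to leave after its timeout";
  default:
    return "unrecognized node update type";
  }
}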
@@ -332,6 +332,12 @@ message NodeIdProto {
optional int32 port = 2;
}

enum NodeUpdateTypeProto {
NODE_USABLE = 0;
NODE_UNUSABLE = 1;
NODE_DECOMMISSIONING = 2;
}

message NodeReportProto {
optional NodeIdProto nodeId = 1;
optional string httpAddress = 2;
@@ -345,6 +351,8 @@ message NodeReportProto {
repeated string node_labels = 10;
optional ResourceUtilizationProto containers_utilization = 11;
optional ResourceUtilizationProto node_utilization = 12;
optional uint32 decommissioning_timeout = 13;
optional NodeUpdateTypeProto node_update_type = 14;
}
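
A minimal sketch against the generated protobuf classes (assuming the usual org.apache.hadoop.yarn.proto.YarnProtos nesting); both new fields are optional on the wire, so old readers simply ignore them and new readers should guard with the has*() accessors:

// Build a report proto carrying only the two new optional fields.
NodeReportProto proto = NodeReportProto.newBuilder()
    .setDecommissioningTimeout(3600)
    .setNodeUpdateType(NodeUpdateTypeProto.NODE_DECOMMISSIONING)
    .build();
if (proto.hasDecommissioningTimeout()) {
  // Only read the timeout when the sender actually set it.
  System.out.println("timeout=" + proto.getDecommissioningTimeout());
}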

message NodeIdToLabelsProto {
@@ -618,15 +618,15 @@ public CancelDelegationTokenResponse cancelDelegationToken(
}

public ApplicationReport createFakeAppReport() {
ApplicationId appId = ApplicationId.newInstance(1000l, 1);
ApplicationId appId = ApplicationId.newInstance(1000L, 1);
ApplicationAttemptId attemptId =
ApplicationAttemptId.newInstance(appId, 1);
// create a fake application report
ApplicationReport report =
ApplicationReport.newInstance(appId, attemptId, "fakeUser",
"fakeQueue", "fakeApplicationName", "localhost", 0, null,
YarnApplicationState.FINISHED, "fake an application report", "",
1000l, 1200l, FinalApplicationStatus.FAILED, null, "", 50f,
1000L, 1200L, FinalApplicationStatus.FAILED, null, "", 50f,
"fakeApplicationType", null);
return report;
}
@@ -638,7 +638,7 @@ public List<ApplicationReport> createFakeAppReports() {
}

public ApplicationId createFakeAppId() {
return ApplicationId.newInstance(1000l, 1);
return ApplicationId.newInstance(1000L, 1);
}

public ApplicationAttemptId createFakeApplicationAttemptId() {
@@ -657,7 +657,7 @@ public List<NodeReport> createFakeNodeReports() {
NodeId nodeId = NodeId.newInstance("localhost", 0);
NodeReport report =
NodeReport.newInstance(nodeId, NodeState.RUNNING, "localhost",
"rack1", null, null, 4, null, 1000l, null);
"rack1", null, null, 4, null, 1000L);
List<NodeReport> reports = new ArrayList<NodeReport>();
reports.add(report);
return reports;
@@ -680,8 +680,8 @@ public List<QueueUserACLInfo> createFakeQueueUserACLInfoList() {
public ApplicationAttemptReport createFakeApplicationAttemptReport() {
return ApplicationAttemptReport.newInstance(
createFakeApplicationAttemptId(), "localhost", 0, "", "", "",
YarnApplicationAttemptState.RUNNING, createFakeContainerId(), 1000l,
1200l);
YarnApplicationAttemptState.RUNNING, createFakeContainerId(), 1000L,
1200L);
}

public List<ApplicationAttemptReport>
@@ -694,7 +694,7 @@ YarnApplicationAttemptState.RUNNING, createFakeContainerId(), 1000l,

public ContainerReport createFakeContainerReport() {
return ContainerReport.newInstance(createFakeContainerId(), null,
NodeId.newInstance("localhost", 0), null, 1000l, 1200l, "", "", 0,
NodeId.newInstance("localhost", 0), null, 1000L, 1200L, "", "", 0,
ContainerState.COMPLETE,
"http://" + NodeId.newInstance("localhost", 0).toString());
}
@@ -1992,7 +1992,7 @@ private List<NodeReport> getNodeReports(int noOfNodes, NodeState state,
NodeReport nodeReport = NodeReport.newInstance(NodeId
.newInstance("host" + i, 0), state, "host" + 1 + ":8888",
"rack1", Records.newRecord(Resource.class), Records
.newRecord(Resource.class), 0, "", 0, nodeLabels);
.newRecord(Resource.class), 0, "", 0, nodeLabels, null, null);
if (!emptyResourceUtilization) {
ResourceUtilization containersUtilization = ResourceUtilization
.newInstance(1024, 2048, 4);
@@ -26,6 +26,7 @@
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.NodeUpdateType;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
@@ -150,8 +151,9 @@ public NodeId getNodeId() {
@Override
public void setNodeId(NodeId nodeId) {
maybeInitBuilder();
if (nodeId == null)
if (nodeId == null) {
builder.clearNodeId();
}
this.nodeId = nodeId;
}

@@ -177,8 +179,9 @@ public void setNodeState(NodeState nodeState) {
@Override
public void setCapability(Resource capability) {
maybeInitBuilder();
if (capability == null)
if (capability == null) {
builder.clearCapability();
}
this.capability = capability;
}

@@ -215,8 +218,9 @@ public void setRackName(String rackName) {
@Override
public void setUsed(Resource used) {
maybeInitBuilder();
if (used == null)
if (used == null) {
builder.clearUsed();
}
this.used = used;
}

@@ -234,8 +238,9 @@ public int hashCode() {

@Override
public boolean equals(Object other) {
if (other == null)
if (other == null) {
return false;
}
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
@@ -278,8 +283,9 @@ private void mergeLocalToBuilder() {
}

private void mergeLocalToProto() {
if (viaProto)
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
@@ -387,4 +393,38 @@ public void setNodeUtilization(ResourceUtilization nodeResourceUtilization) {
}
this.nodeUtilization = nodeResourceUtilization;
}

@Override
public Integer getDecommissioningTimeout() {
NodeReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.hasDecommissioningTimeout())
? p.getDecommissioningTimeout() : null;
}

@Override
public void setDecommissioningTimeout(Integer decommissioningTimeout) {
maybeInitBuilder();
if (decommissioningTimeout == null || decommissioningTimeout < 0) {
builder.clearDecommissioningTimeout();
return;
}
builder.setDecommissioningTimeout(decommissioningTimeout);
}

@Override
public NodeUpdateType getNodeUpdateType() {
NodeReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.hasNodeUpdateType()) ?
ProtoUtils.convertFromProtoFormat(p.getNodeUpdateType()) : null;
}

@Override
public void setNodeUpdateType(NodeUpdateType nodeUpdateType) {
maybeInitBuilder();
if (nodeUpdateType == null) {
builder.clearNodeUpdateType();
return;
}
builder.setNodeUpdateType(ProtoUtils.convertToProtoFormat(nodeUpdateType));
}
}
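
A hedged round-trip sketch of the new accessors on the PB-backed record (Records.newRecord is assumed to resolve to the PB implementation above, as it does in a standard YARN client setup):

NodeReport report = Records.newRecord(NodeReport.class);
report.setDecommissioningTimeout(3600);
report.setNodeUpdateType(NodeUpdateType.NODE_DECOMMISSIONING);
// Both values survive the trip through the protobuf builder.
assert Integer.valueOf(3600).equals(report.getDecommissioningTimeout());
assert report.getNodeUpdateType() == NodeUpdateType.NODE_DECOMMISSIONING;

// Null or negative timeouts clear the proto field, so the getter reports
// them back as null rather than echoing the invalid value.
report.setDecommissioningTimeout(-1);
assert report.getDecommissioningTimeout() == null;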
@@ -46,6 +46,7 @@
import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.NodeUpdateType;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
@@ -80,6 +81,7 @@
import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceTypesProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeUpdateTypeProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateTypeProto;
import org.apache.hadoop.yarn.server.api.ContainerType;
@@ -342,7 +344,17 @@ public static ContainerTypeProto convertToProtoFormat(ContainerType e) {
public static ContainerType convertFromProtoFormat(ContainerTypeProto e) {
return ContainerType.valueOf(e.name());
}


/*
* NodeUpdateType
*/
public static NodeUpdateTypeProto convertToProtoFormat(NodeUpdateType e) {
return NodeUpdateTypeProto.valueOf(e.name());
}
public static NodeUpdateType convertFromProtoFormat(NodeUpdateTypeProto e) {
return NodeUpdateType.valueOf(e.name());
}

/*
* ExecutionType
*/
@@ -57,6 +57,7 @@
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.NodeUpdateType;
import org.apache.hadoop.yarn.api.records.PreemptionMessage;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -187,23 +188,26 @@ public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
String httpAddress, String rackName, Resource used, Resource capability,
int numContainers, String healthReport, long lastHealthReportTime) {
return newNodeReport(nodeId, nodeState, httpAddress, rackName, used,
capability, numContainers, healthReport, lastHealthReportTime, null);
capability, numContainers, healthReport, lastHealthReportTime,
null, null, null);
}

public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
String httpAddress, String rackName, Resource used, Resource capability,
int numContainers, String healthReport, long lastHealthReportTime,
Set<String> nodeLabels) {
Set<String> nodeLabels, Integer decommissioningTimeout,
NodeUpdateType nodeUpdateType) {
return newNodeReport(nodeId, nodeState, httpAddress, rackName, used,
capability, numContainers, healthReport, lastHealthReportTime,
nodeLabels, null, null);
nodeLabels, null, null, decommissioningTimeout, nodeUpdateType);
}

public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
String httpAddress, String rackName, Resource used, Resource capability,
int numContainers, String healthReport, long lastHealthReportTime,
Set<String> nodeLabels, ResourceUtilization containersUtilization,
ResourceUtilization nodeUtilization) {
ResourceUtilization nodeUtilization, Integer decommissioningTimeout,
NodeUpdateType nodeUpdateType) {
NodeReport nodeReport = recordFactory.newRecordInstance(NodeReport.class);
nodeReport.setNodeId(nodeId);
nodeReport.setNodeState(nodeState);
@@ -217,6 +221,8 @@ public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
nodeReport.setNodeLabels(nodeLabels);
nodeReport.setAggregatedContainersUtilization(containersUtilization);
nodeReport.setNodeUtilization(nodeUtilization);
nodeReport.setDecommissioningTimeout(decommissioningTimeout);
nodeReport.setNodeUpdateType(nodeUpdateType);
return nodeReport;
}
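
A sketch of the server-side helper with its two new trailing parameters; all values are illustrative, and the nulls stand in for labels and utilization data that a real caller would usually supply:

NodeReport decommissioning = BuilderUtils.newNodeReport(
    NodeId.newInstance("host1", 8041), NodeState.DECOMMISSIONING,
    "host1:8042", "/default-rack",
    Resource.newInstance(1024, 1), Resource.newInstance(8192, 8),
    2, "healthy", System.currentTimeMillis(),
    null /* nodeLabels */, null /* containersUtilization */,
    null /* nodeUtilization */, 3600, NodeUpdateType.NODE_DECOMMISSIONING);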
