-
Notifications
You must be signed in to change notification settings - Fork 587
HDDS-6960. EC: Implement the Over-replication Handler #3572
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
15c8283
92623a4
602d4fa
07719a3
d7b3e96
10a1998
81bcd2c
d1b587f
318420a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,109 @@ | ||
| /* | ||
| * Licensed to the Apache Software Foundation (ASF) under one | ||
| * or more contributor license agreements. See the NOTICE file | ||
| * distributed with this work for additional information | ||
| * regarding copyright ownership. The ASF licenses this file | ||
| * to you under the Apache License, Version 2.0 (the | ||
| * "License"); you may not use this file except in compliance | ||
| * with the License. You may obtain a copy of the License at | ||
| * | ||
| * http://www.apache.org/licenses/LICENSE-2.0 | ||
| * | ||
| * Unless required by applicable law or agreed to in writing, software | ||
| * distributed under the License is distributed on an "AS IS" BASIS, | ||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| * See the License for the specific language governing permissions and | ||
| * limitations under the License. | ||
| */ | ||
|
|
||
| package org.apache.hadoop.hdds.scm.container.replication; | ||
|
|
||
| import org.apache.hadoop.hdds.protocol.DatanodeDetails; | ||
| import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; | ||
| import org.apache.hadoop.hdds.scm.PlacementPolicy; | ||
| import org.apache.hadoop.hdds.scm.container.ContainerReplica; | ||
| import org.apache.hadoop.ozone.protocol.commands.SCMCommand; | ||
|
|
||
| import java.util.List; | ||
| import java.util.Map; | ||
| import java.util.Set; | ||
| import java.util.stream.Collectors; | ||
|
|
||
| /** | ||
| * this class holds some common methods that will be shared among | ||
| * different kinds of Implementation of OverReplicationHandler. | ||
| * */ | ||
| public abstract class AbstractOverReplicationHandler | ||
| implements UnhealthyReplicationHandler { | ||
| private final PlacementPolicy placementPolicy; | ||
|
|
||
| protected AbstractOverReplicationHandler(PlacementPolicy placementPolicy) { | ||
| this.placementPolicy = placementPolicy; | ||
| } | ||
| /** | ||
| * Identify a new set of datanode(s) to replicate/reconstruct the container | ||
| * and form the SCM commands to send it to DN. | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Missed to update doc?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. sorry for missing , fixed now! |
||
| * | ||
| * @param replicas - Set of available container replicas. | ||
| * @param pendingOps - Inflight replications and deletion ops. | ||
| * @param result - Health check result. | ||
| * @param remainingMaintenanceRedundancy - represents that how many nodes go | ||
| * into maintenance. | ||
| * @return Returns the key value pair of destination dn where the command gets | ||
| * executed and the command itself. | ||
| */ | ||
| public abstract Map<DatanodeDetails, SCMCommand<?>> processAndCreateCommands( | ||
| Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps, | ||
| ContainerHealthResult result, int remainingMaintenanceRedundancy); | ||
|
|
||
| /** | ||
| * Identify whether the placement status is actually equal for a | ||
| * replica set after removing those filtered replicas. | ||
| * | ||
| * @param replicas the oringianl set of replicas | ||
| * @param replicationFactor the criteria which replicas should be removed. | ||
| * @param replica the replica to be removed | ||
| */ | ||
| public boolean isPlacementStatusActuallyEqualAfterRemove( | ||
| final Set<ContainerReplica> replicas, | ||
| final ContainerReplica replica, | ||
| final int replicationFactor) { | ||
| ContainerPlacementStatus currentCPS = | ||
| getPlacementStatus(replicas, replicationFactor); | ||
| replicas.remove(replica); | ||
| ContainerPlacementStatus newCPS = | ||
| getPlacementStatus(replicas, replicationFactor); | ||
| replicas.add(replica); | ||
| return isPlacementStatusActuallyEqual(currentCPS, newCPS); | ||
| } | ||
|
|
||
| /** | ||
| * Given a set of ContainerReplica, transform it to a list of DatanodeDetails | ||
| * and then check if the list meets the container placement policy. | ||
| * @param replicas List of containerReplica | ||
| * @param replicationFactor Expected Replication Factor of the containe | ||
| * @return ContainerPlacementStatus indicating if the policy is met or not | ||
| */ | ||
| private ContainerPlacementStatus getPlacementStatus( | ||
| Set<ContainerReplica> replicas, int replicationFactor) { | ||
| List<DatanodeDetails> replicaDns = replicas.stream() | ||
| .map(ContainerReplica::getDatanodeDetails) | ||
| .collect(Collectors.toList()); | ||
| return placementPolicy.validateContainerPlacement( | ||
| replicaDns, replicationFactor); | ||
| } | ||
|
|
||
| /** | ||
| * whether the given two ContainerPlacementStatus are actually equal. | ||
| * | ||
| * @param cps1 ContainerPlacementStatus | ||
| * @param cps2 ContainerPlacementStatus | ||
| */ | ||
| private boolean isPlacementStatusActuallyEqual( | ||
| ContainerPlacementStatus cps1, | ||
| ContainerPlacementStatus cps2) { | ||
| return (!cps1.isPolicySatisfied() && | ||
| cps1.actualPlacementCount() == cps2.actualPlacementCount()) || | ||
| cps1.isPolicySatisfied() && cps2.isPolicySatisfied(); | ||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,178 @@ | ||
| /** | ||
| * Licensed to the Apache Software Foundation (ASF) under one | ||
| * or more contributor license agreements. See the NOTICE file | ||
| * distributed with this work for additional information | ||
| * regarding copyright ownership. The ASF licenses this file | ||
| * to you under the Apache License, Version 2.0 (the | ||
| * "License"); you may not use this file except in compliance | ||
| * with the License. You may obtain a copy of the License at | ||
| * | ||
| * http://www.apache.org/licenses/LICENSE-2.0 | ||
| * | ||
| * Unless required by applicable law or agreed to in writing, software | ||
| * distributed under the License is distributed on an "AS IS" BASIS, | ||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| * See the License for the specific language governing permissions and | ||
| * limitations under the License. | ||
| */ | ||
| package org.apache.hadoop.hdds.scm.container.replication; | ||
|
|
||
| import org.apache.hadoop.hdds.protocol.DatanodeDetails; | ||
| import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; | ||
| import org.apache.hadoop.hdds.scm.PlacementPolicy; | ||
| import org.apache.hadoop.hdds.scm.container.ContainerInfo; | ||
| import org.apache.hadoop.hdds.scm.container.ContainerReplica; | ||
| import org.apache.hadoop.hdds.scm.node.NodeManager; | ||
| import org.apache.hadoop.hdds.scm.node.NodeStatus; | ||
| import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; | ||
| import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; | ||
| import org.apache.hadoop.ozone.protocol.commands.SCMCommand; | ||
| import org.slf4j.Logger; | ||
| import org.slf4j.LoggerFactory; | ||
|
|
||
| import java.util.ArrayList; | ||
| import java.util.HashMap; | ||
| import java.util.HashSet; | ||
| import java.util.Iterator; | ||
| import java.util.LinkedList; | ||
| import java.util.List; | ||
| import java.util.Map; | ||
| import java.util.Set; | ||
|
|
||
| import static java.util.Collections.emptyMap; | ||
|
|
||
| /** | ||
| * Handles the EC Over replication processing and forming the respective SCM | ||
| * commands. | ||
| */ | ||
| public class ECOverReplicationHandler extends AbstractOverReplicationHandler { | ||
| public static final Logger LOG = | ||
| LoggerFactory.getLogger(ECOverReplicationHandler.class); | ||
|
|
||
| private final ECContainerHealthCheck ecContainerHealthCheck = | ||
| new ECContainerHealthCheck(); | ||
| private final NodeManager nodeManager; | ||
|
|
||
| public ECOverReplicationHandler(PlacementPolicy placementPolicy, | ||
| NodeManager nodeManager) { | ||
| super(placementPolicy); | ||
| this.nodeManager = nodeManager; | ||
| } | ||
|
|
||
| /** | ||
| * Identify a new set of datanode(s) to replicate/reconstruct the container | ||
| * and form the SCM commands to send it to DN. | ||
JacksonYao287 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| * | ||
| * @param replicas - Set of available container replicas. | ||
| * @param pendingOps - Inflight replications and deletion ops. | ||
| * @param result - Health check result. | ||
| * @param remainingMaintenanceRedundancy - represents that how many nodes go | ||
| * into maintenance. | ||
| * @return Returns the key value pair of destination dn where the command gets | ||
| * executed and the command itself. | ||
| */ | ||
| @Override | ||
| public Map<DatanodeDetails, SCMCommand<?>> processAndCreateCommands( | ||
| Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps, | ||
| ContainerHealthResult result, int remainingMaintenanceRedundancy) { | ||
| ContainerInfo container = result.getContainerInfo(); | ||
| ContainerHealthResult currentUnderRepRes = ecContainerHealthCheck | ||
| .checkHealth(container, replicas, pendingOps, | ||
| remainingMaintenanceRedundancy); | ||
| LOG.debug("Handling over-replicated EC container: {}", container); | ||
|
|
||
| //sanity check | ||
| if (currentUnderRepRes.getHealthState() != | ||
| ContainerHealthResult.HealthState.OVER_REPLICATED) { | ||
| LOG.info("The container {} state changed and it's not in over" | ||
| + " replication any more. Current state is: {}", | ||
| container.getContainerID(), currentUnderRepRes); | ||
| return emptyMap(); | ||
| } | ||
|
|
||
| ContainerHealthResult.OverReplicatedHealthResult containerHealthResult = | ||
| ((ContainerHealthResult.OverReplicatedHealthResult) | ||
| currentUnderRepRes); | ||
| if (containerHealthResult.isSufficientlyReplicatedAfterPending()) { | ||
| LOG.info("The container {} with replicas {} will be corrected " + | ||
| "by the pending delete", container.getContainerID(), replicas); | ||
| return emptyMap(); | ||
| } | ||
|
|
||
| // we don`t support hybrid state(both under and over replicated) for | ||
JacksonYao287 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| // EC container , and we always handle under-replicated first now. it | ||
| // means when reaching here, we have all the replica indexes and some | ||
| // of them are more than 1. | ||
| // TODO: support hybrid state if needed. | ||
JacksonYao287 marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
|
||
| final ECContainerReplicaCount replicaCount = | ||
| new ECContainerReplicaCount(container, replicas, pendingOps, | ||
| remainingMaintenanceRedundancy); | ||
|
|
||
| List<Integer> overReplicatedIndexes = | ||
| replicaCount.overReplicatedIndexes(true); | ||
| //sanity check | ||
| if (overReplicatedIndexes.size() == 0) { | ||
| LOG.warn("The container {} with replicas {} is found over replicated " + | ||
| "by ContainerHealthCheck, but found not over replicated by " + | ||
| "ECContainerReplicaCount", | ||
| container.getContainerID(), replicas); | ||
| return emptyMap(); | ||
| } | ||
|
|
||
| final List<DatanodeDetails> deletionInFlight = new ArrayList<>(); | ||
| for (ContainerReplicaOp op : pendingOps) { | ||
| if (op.getOpType() == ContainerReplicaOp.PendingOpType.DELETE) { | ||
| deletionInFlight.add(op.getTarget()); | ||
| } | ||
| } | ||
| Map<Integer, List<ContainerReplica>> index2replicas = new HashMap<>(); | ||
| replicas.stream() | ||
| .filter(r -> overReplicatedIndexes.contains(r.getReplicaIndex())) | ||
| .filter(r -> r | ||
| .getState() == StorageContainerDatanodeProtocolProtos | ||
| .ContainerReplicaProto.State.CLOSED) | ||
| .filter(r -> { | ||
| DatanodeDetails dd = r.getDatanodeDetails(); | ||
| try { | ||
JacksonYao287 marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| //the command target node should be in-service and healthy | ||
| return nodeManager.getNodeStatus(dd) | ||
| .equals(NodeStatus.inServiceHealthy()); | ||
|
||
| } catch (NodeNotFoundException nnfe) { | ||
| //nothing to do, just skip; | ||
| LOG.warn("can not find node when getting NodeStatus, {}", dd); | ||
| return false; | ||
| } | ||
| }).filter(r -> !deletionInFlight.contains(r.getDatanodeDetails())) | ||
| .forEach(r -> { | ||
| int index = r.getReplicaIndex(); | ||
| index2replicas.computeIfAbsent(index, k -> new LinkedList<>()); | ||
| index2replicas.get(index).add(r); | ||
| }); | ||
|
|
||
| if (index2replicas.size() > 0) { | ||
| final Map<DatanodeDetails, SCMCommand<?>> commands = new HashMap<>(); | ||
| final int replicationFactor = | ||
| container.getReplicationConfig().getRequiredNodes(); | ||
| index2replicas.values().forEach(l -> { | ||
| Iterator<ContainerReplica> it = l.iterator(); | ||
| Set<ContainerReplica> tempReplicaSet = new HashSet<>(); | ||
JacksonYao287 marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| tempReplicaSet.addAll(replicas); | ||
| for (; it.hasNext() && l.size() > 1;) { | ||
JacksonYao287 marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| ContainerReplica r = it.next(); | ||
| if (isPlacementStatusActuallyEqualAfterRemove( | ||
| tempReplicaSet, r, replicationFactor)) { | ||
| DeleteContainerCommand deleteCommand = | ||
| new DeleteContainerCommand(container.getContainerID(), true); | ||
| commands.put(r.getDatanodeDetails(), deleteCommand); | ||
| it.remove(); | ||
| tempReplicaSet.remove(r); | ||
| } | ||
| } | ||
| }); | ||
| return commands; | ||
| } | ||
|
|
||
| return emptyMap(); | ||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -28,7 +28,6 @@ | |
| import org.apache.hadoop.hdds.scm.container.ContainerID; | ||
| import org.apache.hadoop.hdds.scm.container.ContainerInfo; | ||
| import org.apache.hadoop.hdds.scm.container.ContainerReplica; | ||
| import org.apache.hadoop.hdds.scm.container.ECContainerReplicaCount; | ||
| import org.apache.hadoop.hdds.scm.node.NodeManager; | ||
| import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand; | ||
| import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; | ||
|
|
@@ -51,7 +50,7 @@ | |
| * Handles the EC Under replication processing and forming the respective SCM | ||
| * commands. | ||
| */ | ||
| public class ECUnderReplicationHandler implements UnderReplicationHandler { | ||
| public class ECUnderReplicationHandler implements UnhealthyReplicationHandler { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Not sure we will call over-replication "unhealthy". I don't have a better name in mind, but how about simply ReplicationHandler? Since it is a common class now, the javadoc should carry a generic message. Currently it is assumed to be for the underReplication handler.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
|
||
|
|
||
| public static final Logger LOG = | ||
| LoggerFactory.getLogger(ECUnderReplicationHandler.class); | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Typos: this --> This
Implementation -> implementation