@@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.container;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount;

import java.util.Set;

@@ -0,0 +1,110 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdds.scm.container.replication;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

/**
* This class holds common methods that are shared among the different
* implementations of OverReplicationHandler.
*/
public abstract class AbstractOverReplicationHandler
implements UnhealthyReplicationHandler {
private final PlacementPolicy placementPolicy;

protected AbstractOverReplicationHandler(PlacementPolicy placementPolicy) {
this.placementPolicy = placementPolicy;
}

/**
* Identify the datanode(s) from which excess container replicas should be
* deleted, and form the SCM commands to send to those datanodes.
Contributor commented:
Missed to update doc?

Contributor Author replied:
Sorry for missing it, fixed now!

*
* @param replicas - Set of available container replicas.
* @param pendingOps - In-flight replication and deletion ops.
* @param result - Health check result.
* @param remainingMaintenanceRedundancy - the amount of redundancy that must
* be preserved while nodes are in maintenance.
* @return a map from the destination datanode on which each command is to be
* executed to the command itself.
*/
public abstract Map<DatanodeDetails, SCMCommand<?>> processAndCreateCommands(
Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps,
ContainerHealthResult result, int remainingMaintenanceRedundancy);

/**
* Check whether the placement status of a replica set is effectively
* unchanged after removing the given replica.
*
* @param replicas the original set of replicas
* @param replica the replica to be removed
* @param replicationFactor the expected replication factor of the container
* @return true if the placement status is effectively equal after the
* removal, false otherwise
*/
public boolean isPlacementStatusActuallyEqualAfterRemove(
final Set<ContainerReplica> replicas,
final ContainerReplica replica,
final int replicationFactor) {
ContainerPlacementStatus currentCPS =
getPlacementStatus(replicas, replicationFactor);
replicas.remove(replica);
ContainerPlacementStatus newCPS =
getPlacementStatus(replicas, replicationFactor);
replicas.add(replica);
return isPlacementStatusActuallyEqual(currentCPS, newCPS);
}

/**
* Given a set of ContainerReplica, transform it to a list of DatanodeDetails
* and then check if the list meets the container placement policy.
* @param replicas Set of ContainerReplica
* @param replicationFactor Expected replication factor of the container
* @return ContainerPlacementStatus indicating if the policy is met or not
*/
private ContainerPlacementStatus getPlacementStatus(
Set<ContainerReplica> replicas, int replicationFactor) {
List<DatanodeDetails> replicaDns = replicas.stream()
.map(ContainerReplica::getDatanodeDetails)
.collect(Collectors.toList());
return placementPolicy.validateContainerPlacement(
replicaDns, replicationFactor);
}

/**
* Check whether the two given ContainerPlacementStatus values are
* effectively equal.
*
* @param cps1 ContainerPlacementStatus
* @param cps2 ContainerPlacementStatus
*/
private boolean isPlacementStatusActuallyEqual(
ContainerPlacementStatus cps1,
ContainerPlacementStatus cps2) {
return (!cps1.isPolicySatisfied() &&
cps1.actualPlacementCount() == cps2.actualPlacementCount()) ||
cps1.isPolicySatisfied() && cps2.isPolicySatisfied();
}
}
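
For readers skimming the diff, here is a hedged sketch of how a concrete over-replication handler can use the placement-status helper above when deciding whether an excess replica may be deleted. The method and variable names in this snippet are illustrative only and imports are omitted; the real usage is in ECOverReplicationHandler further down in this diff.

// Illustrative sketch, not part of this patch: pick replicas whose removal
// does not degrade the container's placement (e.g. rack spread).
private Map<DatanodeDetails, SCMCommand<?>> pickDeletableReplicas(
    ContainerInfo container, Set<ContainerReplica> replicas,
    List<ContainerReplica> excessCandidates, int replicationFactor) {
  Map<DatanodeDetails, SCMCommand<?>> commands = new HashMap<>();
  Set<ContainerReplica> remaining = new HashSet<>(replicas);
  for (ContainerReplica candidate : excessCandidates) {
    // Delete a replica only if the placement status stays effectively the
    // same once the replica is taken out of the set.
    if (isPlacementStatusActuallyEqualAfterRemove(
        remaining, candidate, replicationFactor)) {
      commands.put(candidate.getDatanodeDetails(),
          new DeleteContainerCommand(container.getContainerID(), true));
      // Evaluate later candidates against the shrunken replica set.
      remaining.remove(candidate);
    }
  }
  return commands;
}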
@@ -15,10 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm.container;
package org.apache.hadoop.hdds.scm.container.replication;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;

import java.util.Set;

@@ -19,7 +19,6 @@
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ECContainerReplicaCount;

import java.util.List;
import java.util.Set;
@@ -15,11 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm.container;
package org.apache.hadoop.hdds.scm.container.replication;

import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaOp;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;

import java.util.ArrayList;
import java.util.Collections;
@@ -0,0 +1,165 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm.container.replication;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static java.util.Collections.emptyMap;

/**
* Handles the EC Over replication processing and forming the respective SCM
* commands.
*/
public class ECOverReplicationHandler extends AbstractOverReplicationHandler {
public static final Logger LOG =
LoggerFactory.getLogger(ECOverReplicationHandler.class);

private final ECContainerHealthCheck ecContainerHealthCheck =
new ECContainerHealthCheck();
private final NodeManager nodeManager;

public ECOverReplicationHandler(PlacementPolicy placementPolicy,
NodeManager nodeManager) {
super(placementPolicy);
this.nodeManager = nodeManager;
}

/**
* Identify the datanode(s) from which excess container replicas should be
* deleted, and form the SCM commands to send to those datanodes.
*
* @param replicas - Set of available container replicas.
* @param pendingOps - In-flight replication and deletion ops.
* @param result - Health check result.
* @param remainingMaintenanceRedundancy - the amount of redundancy that must
* be preserved while nodes are in maintenance.
* @return a map from the destination datanode on which each command is to be
* executed to the command itself.
*/
@Override
public Map<DatanodeDetails, SCMCommand<?>> processAndCreateCommands(
Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps,
ContainerHealthResult result, int remainingMaintenanceRedundancy) {
ContainerInfo container = result.getContainerInfo();
ContainerHealthResult currentUnderRepRes = ecContainerHealthCheck
.checkHealth(container, replicas, pendingOps,
remainingMaintenanceRedundancy);
LOG.debug("Handling over-replicated EC container: {}", container);

//sanity check
if (currentUnderRepRes.getHealthState() !=
ContainerHealthResult.HealthState.OVER_REPLICATED) {
LOG.info("The container {} state changed and it's not in over"
+ " replication any more. Current state is: {}",
container.getContainerID(), currentUnderRepRes);
return emptyMap();
}

ContainerHealthResult.OverReplicatedHealthResult containerHealthResult =
((ContainerHealthResult.OverReplicatedHealthResult)
currentUnderRepRes);
if (containerHealthResult.isSufficientlyReplicatedAfterPending()) {
LOG.info("The container {} with replicas {} will be corrected " +
"by the pending delete", container.getContainerID(), replicas);
return emptyMap();
}

// We don't support a hybrid state (both under- and over-replicated) for
// EC containers, and under-replication is always handled first. This
// means that when we reach here, all replica indexes are present and
// some of them have more than one replica.
// TODO: support hybrid state if needed.
final ECContainerReplicaCount replicaCount =
new ECContainerReplicaCount(container, replicas, pendingOps,
remainingMaintenanceRedundancy);

List<Integer> overReplicatedIndexes =
replicaCount.overReplicatedIndexes(true);
//sanity check
if (overReplicatedIndexes.size() == 0) {
LOG.warn("The container {} with replicas {} is found over replicated " +
"by ContainerHealthCheck, but found not over replicated by " +
"ECContainerReplicaCount",
container.getContainerID(), replicas);
return emptyMap();
}

final List<DatanodeDetails> deletionInFlight = new ArrayList<>();
for (ContainerReplicaOp op : pendingOps) {
if (op.getOpType() == ContainerReplicaOp.PendingOpType.DELETE) {
deletionInFlight.add(op.getTarget());
}
}
Map<Integer, List<ContainerReplica>> index2replicas = new HashMap<>();
replicas.stream()
.filter(r -> overReplicatedIndexes.contains(r.getReplicaIndex()))
.filter(r -> r
.getState() == StorageContainerDatanodeProtocolProtos
.ContainerReplicaProto.State.CLOSED)
.filter(r -> ReplicationManager
.getNodeStatus(r.getDatanodeDetails(), nodeManager).isHealthy())
.filter(r -> !deletionInFlight.contains(r.getDatanodeDetails()))
.forEach(r -> {
int index = r.getReplicaIndex();
index2replicas.computeIfAbsent(index, k -> new LinkedList<>());
index2replicas.get(index).add(r);
});

if (index2replicas.size() > 0) {
final Map<DatanodeDetails, SCMCommand<?>> commands = new HashMap<>();
final int replicationFactor =
container.getReplicationConfig().getRequiredNodes();
index2replicas.values().forEach(l -> {
Iterator<ContainerReplica> it = l.iterator();
Set<ContainerReplica> tempReplicaSet = new HashSet<>(replicas);
while (it.hasNext() && l.size() > 1) {
ContainerReplica r = it.next();
if (isPlacementStatusActuallyEqualAfterRemove(
tempReplicaSet, r, replicationFactor)) {
DeleteContainerCommand deleteCommand =
new DeleteContainerCommand(container.getContainerID(), true);
commands.put(r.getDatanodeDetails(), deleteCommand);
it.remove();
tempReplicaSet.remove(r);
}
}
});
return commands;
}

return emptyMap();
}
}
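
As a usage note, here is a hedged sketch of how a caller such as the replication manager might dispatch the commands this handler returns. The surrounding wiring, including the sendDatanodeCommand helper, is an assumption for illustration and not part of this diff.

// Hypothetical call site, for illustration only.
ECOverReplicationHandler handler =
    new ECOverReplicationHandler(placementPolicy, nodeManager);
Map<DatanodeDetails, SCMCommand<?>> commands =
    handler.processAndCreateCommands(
        replicas, pendingOps, healthResult, remainingMaintenanceRedundancy);
// One force-delete command per datanode holding an excess, CLOSED, healthy
// replica whose removal keeps the placement policy satisfied.
commands.forEach((datanode, command) ->
    sendDatanodeCommand(datanode, command)); // sendDatanodeCommand is assumed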
@@ -28,7 +28,6 @@
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ECContainerReplicaCount;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand;
import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
@@ -51,7 +50,7 @@
* Handles the EC Under replication processing and forming the respective SCM
* commands.
*/
public class ECUnderReplicationHandler implements UnderReplicationHandler {
public class ECUnderReplicationHandler implements UnhealthyReplicationHandler {
Contributor commented:
Not sure we should call over-replication "unhealthy". I don't have a better name in mind, but how about simply ReplicationHandler? Also, since this is a common interface now, the javadoc should carry a generic message; currently it is written for the under-replication handler.

Contributor Author replied:
ReplicationHandler seems a little confusing; it could be read as the handler for replication commands. I suggest we keep "unhealthy" for now and rename it once we find a proper name. What about "nonOptimalReplicationHandler"?

public static final Logger LOG =
LoggerFactory.getLogger(ECUnderReplicationHandler.class);
@@ -39,7 +39,6 @@
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState;
import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair;
@@ -31,8 +31,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.ECContainerReplicaCount;

import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
@@ -29,7 +29,7 @@
* This interface is used to create the respective commands after processing
* the replicas with pending ops and health check results.
*/
public interface UnderReplicationHandler {
public interface UnhealthyReplicationHandler {

/**
* Identify a new set of datanode(s) to replicate/reconstruct the container
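
To make the rename discussed above easier to follow, here is a minimal sketch of the shared contract that the under- and over-replication handlers now implement. The method signature is taken from the abstract handler earlier in this diff; the javadoc is paraphrased, and the exact contents of the real interface file may differ.

package org.apache.hadoop.hdds.scm.container.replication;

import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;

/**
 * Sketch: given the current replicas, the pending ops and a health check
 * result, produce the SCM commands needed to correct the container,
 * keyed by the datanode that should execute each command.
 */
public interface UnhealthyReplicationHandler {
  Map<DatanodeDetails, SCMCommand<?>> processAndCreateCommands(
      Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps,
      ContainerHealthResult result, int remainingMaintenanceRedundancy);
}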
@@ -24,7 +24,7 @@
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;