Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdds.scm.container.replication;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;

/**
* Class to count the replicas in a quasi-closed stuck container.
*/
/**
 * Counts the replicas of a quasi-closed stuck container, grouped by the
 * origin datanode of each replica.
 *
 * A container whose replicas all share a single origin is expected to have
 * 3 in-service copies. A container with replicas from multiple origins is
 * expected to have 2 in-service copies of each origin. Maintenance replicas
 * relax the in-service requirement as described on the individual methods.
 */
public class QuasiClosedStuckReplicaCount {

  // Expected in-service copies when every replica has the same origin.
  private static final int SINGLE_ORIGIN_EXPECTED_COPIES = 3;
  // Expected in-service copies of each origin when there are multiple origins.
  private static final int MULTI_ORIGIN_EXPECTED_COPIES = 2;

  private final Map<UUID, Set<ContainerReplica>> replicasByOrigin = new HashMap<>();
  private final Map<UUID, Set<ContainerReplica>> inServiceReplicasByOrigin = new HashMap<>();
  private final Map<UUID, Set<ContainerReplica>> maintenanceReplicasByOrigin = new HashMap<>();
  private boolean hasOutOfServiceReplicas = false;
  private final int minHealthyForMaintenance;
  private boolean hasHealthyReplicas = false;

  /**
   * @param replicas all known replicas of the container
   * @param minHealthyForMaintenance minimum number of in-service replicas
   *     required while maintenance replicas exist (only applied when the
   *     container has a single origin)
   */
  public QuasiClosedStuckReplicaCount(Set<ContainerReplica> replicas, int minHealthyForMaintenance) {
    this.minHealthyForMaintenance = minHealthyForMaintenance;
    for (ContainerReplica r : replicas) {
      if (r.getState() != StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.UNHEALTHY) {
        hasHealthyReplicas = true;
      }
      replicasByOrigin.computeIfAbsent(r.getOriginDatanodeId(), k -> new HashSet<>()).add(r);
      HddsProtos.NodeOperationalState opState = r.getDatanodeDetails().getPersistedOpState();
      if (opState == HddsProtos.NodeOperationalState.IN_SERVICE) {
        inServiceReplicasByOrigin.computeIfAbsent(r.getOriginDatanodeId(), k -> new HashSet<>()).add(r);
      } else if (opState == HddsProtos.NodeOperationalState.IN_MAINTENANCE
          || opState == HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE) {
        maintenanceReplicasByOrigin.computeIfAbsent(r.getOriginDatanodeId(), k -> new HashSet<>()).add(r);
        hasOutOfServiceReplicas = true;
      } else {
        // Any other op state (e.g. decommissioning) is out of service but is
        // not tracked as a maintenance replica.
        hasOutOfServiceReplicas = true;
      }
    }
  }

  /**
   * @return the number of distinct origin datanodes across all replicas
   */
  public int availableOrigins() {
    return replicasByOrigin.size();
  }

  public boolean hasOutOfServiceReplicas() {
    return hasOutOfServiceReplicas;
  }

  public boolean hasHealthyReplicas() {
    return hasHealthyReplicas;
  }

  public boolean isUnderReplicated() {
    return !getUnderReplicatedReplicas().isEmpty();
  }

  /**
   * Returns the origins which do not have enough in-service replicas, along
   * with the number of additional replicas each needs.
   *
   * Single origin: we need 3 in-service copies, or minHealthyForMaintenance
   * copies if any replica of that origin is in maintenance.
   * Multiple origins: we need 2 in-service copies of each origin, or at least
   * 1 in-service copy if the origin has maintenance replicas (the
   * minHealthyForMaintenance parameter is ignored in this case).
   *
   * @return the list of under replicated origins; empty if the container is
   *     not under replicated
   */
  public List<MisReplicatedOrigin> getUnderReplicatedReplicas() {
    List<MisReplicatedOrigin> misReplicatedOrigins = new ArrayList<>();

    if (replicasByOrigin.size() == 1) {
      Map.Entry<UUID, Set<ContainerReplica>> entry = replicasByOrigin.entrySet().iterator().next();
      Set<ContainerReplica> inService = inServiceFor(entry.getKey());
      int required = maintenanceCountFor(entry.getKey()) > 0
          ? minHealthyForMaintenance
          : SINGLE_ORIGIN_EXPECTED_COPIES;
      if (inService.size() < required) {
        misReplicatedOrigins.add(new MisReplicatedOrigin(entry.getValue(), required - inService.size()));
      }
      return misReplicatedOrigins;
    }

    // If there are multiple origins, we expect 2 copies of each origin.
    // For maintenance, we expect 1 copy of each origin and ignore the
    // minHealthyForMaintenance parameter.
    for (Map.Entry<UUID, Set<ContainerReplica>> entry : replicasByOrigin.entrySet()) {
      Set<ContainerReplica> inService = inServiceFor(entry.getKey());
      if (inService.size() < MULTI_ORIGIN_EXPECTED_COPIES) {
        if (maintenanceCountFor(entry.getKey()) > 0) {
          if (inService.isEmpty()) {
            // We need 1 copy online for maintenance
            misReplicatedOrigins.add(new MisReplicatedOrigin(entry.getValue(), 1));
          }
        } else {
          misReplicatedOrigins.add(new MisReplicatedOrigin(entry.getValue(),
              MULTI_ORIGIN_EXPECTED_COPIES - inService.size()));
        }
      }
    }
    return misReplicatedOrigins;
  }

  /**
   * Returns True is the container is over-replicated. This means that if we have a single origin, there are more than
   * 3 copies. If we have multiple origins, there are more than 2 copies of each origin.
   * The over replication check ignore maintenance replicas. The container may become over replicated when maintenance
   * ends.
   *
   * @return True if the container is over-replicated, otherwise false
   */
  public boolean isOverReplicated() {
    return !getOverReplicatedOrigins().isEmpty();
  }

  /**
   * Returns the origins which have more in-service replicas than expected,
   * along with the number of excess replicas for each. Maintenance replicas
   * are not counted.
   *
   * @return the list of over replicated origins; empty if the container is
   *     not over replicated
   */
  public List<MisReplicatedOrigin> getOverReplicatedOrigins() {
    // If there is only a single origin, we expect 3 copies, otherwise we expect 2 copies of each origin
    if (replicasByOrigin.size() == 1) {
      UUID origin = replicasByOrigin.keySet().iterator().next();
      Set<ContainerReplica> inService = inServiceFor(origin);
      if (inService.size() > SINGLE_ORIGIN_EXPECTED_COPIES) {
        return Collections.singletonList(
            new MisReplicatedOrigin(inService, inService.size() - SINGLE_ORIGIN_EXPECTED_COPIES));
      }
      return Collections.emptyList();
    }

    // If there are multiple origins, we expect 2 copies of each origin
    List<MisReplicatedOrigin> overReplicatedOrigins = new ArrayList<>();
    for (UUID origin : replicasByOrigin.keySet()) {
      Set<ContainerReplica> replicas = inServiceFor(origin);
      if (replicas.size() > MULTI_ORIGIN_EXPECTED_COPIES) {
        overReplicatedOrigins.add(
            new MisReplicatedOrigin(replicas, replicas.size() - MULTI_ORIGIN_EXPECTED_COPIES));
      }
    }
    // If we have 2 copies or less of each origin, we are not over-replicated
    return overReplicatedOrigins;
  }

  // In-service replicas for the given origin; never null.
  private Set<ContainerReplica> inServiceFor(UUID origin) {
    return inServiceReplicasByOrigin.getOrDefault(origin, Collections.emptySet());
  }

  // Number of maintenance replicas for the given origin.
  private int maintenanceCountFor(UUID origin) {
    Set<ContainerReplica> maintenance = maintenanceReplicasByOrigin.get(origin);
    return maintenance == null ? 0 : maintenance.size();
  }

  /**
   * Class to represent the origin of under replicated replicas and the number of additional replicas required.
   */
  public static class MisReplicatedOrigin {

    private final Set<ContainerReplica> sources;
    private final int replicaDelta;

    public MisReplicatedOrigin(Set<ContainerReplica> sources, int replicaDelta) {
      this.sources = sources;
      this.replicaDelta = replicaDelta;
    }

    /**
     * @return the replicas sharing this origin (usable as copy sources)
     */
    public Set<ContainerReplica> getSources() {
      return sources;
    }

    /**
     * @return how many additional replicas are needed (under replication) or
     *     how many are excess (over replication)
     */
    public int getReplicaDelta() {
      return replicaDelta;
    }
  }

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdds.scm.container.replication;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.pipeline.InsufficientDatanodesException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Class to correct under replicated QuasiClosed Stuck Ratis containers.
*/
/**
 * Class to correct under replicated QuasiClosed Stuck Ratis containers.
 *
 * For each under replicated origin reported by {@link QuasiClosedStuckReplicaCount},
 * this handler selects target datanodes via the placement policy and sends
 * throttled replication commands from the origin's existing replicas.
 */
public class QuasiClosedStuckUnderReplicationHandler implements UnhealthyReplicationHandler {
  public static final Logger LOG = LoggerFactory.getLogger(QuasiClosedStuckUnderReplicationHandler.class);

  private final PlacementPolicy placementPolicy;
  private final ReplicationManager replicationManager;
  // Configured max container size, used to ensure targets have enough space.
  private final long currentContainerSize;
  private final ReplicationManagerMetrics metrics;

  public QuasiClosedStuckUnderReplicationHandler(final PlacementPolicy placementPolicy,
      final ConfigurationSource conf, final ReplicationManager replicationManager) {
    this.placementPolicy = placementPolicy;
    this.currentContainerSize = (long) conf.getStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
    this.replicationManager = replicationManager;
    this.metrics = replicationManager.getMetrics();
  }

  /**
   * Schedules replication commands for all under replicated origins of a
   * quasi-closed stuck container. Does nothing while an ADD operation is
   * still pending, to avoid scheduling duplicate replicas.
   *
   * @param replicas current replicas of the container
   * @param pendingOps in-flight replica operations for the container
   * @param result the health check result carrying the container info
   * @param remainingMaintenanceRedundancy minimum healthy replicas required
   *     while replicas are in maintenance
   * @return the number of replication commands sent
   * @throws IOException if no targets could be found for some origin, or all
   *     sources were overloaded, or fewer commands than required were sent
   */
  @Override
  public int processAndSendCommands(Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps,
      ContainerHealthResult result, int remainingMaintenanceRedundancy) throws IOException {
    ContainerInfo containerInfo = result.getContainerInfo();
    LOG.debug("Handling under replicated QuasiClosed Stuck Ratis container {}", containerInfo);

    // Only the existence of a pending ADD matters, not how many there are.
    boolean hasPendingAdd = pendingOps.stream()
        .anyMatch(op -> op.getOpType() == ContainerReplicaOp.PendingOpType.ADD);
    if (hasPendingAdd) {
      LOG.debug("Container {} has pending add operations. No more replication will be scheduled until they complete",
          containerInfo);
      return 0;
    }

    QuasiClosedStuckReplicaCount replicaCount =
        new QuasiClosedStuckReplicaCount(replicas, remainingMaintenanceRedundancy);

    List<QuasiClosedStuckReplicaCount.MisReplicatedOrigin> misReplicatedOrigins
        = replicaCount.getUnderReplicatedReplicas();

    if (misReplicatedOrigins.isEmpty()) {
      LOG.debug("Container {} is not under replicated", containerInfo);
      return 0;
    }

    // Schedule Replicas for the under replicated origins. Failures for one
    // origin do not stop the others; the first exception is remembered and
    // rethrown at the end so the container is retried.
    int totalRequiredReplicas = 0;
    int totalCommandsSent = 0;
    IOException firstException = null;
    List<ContainerReplicaOp> mutablePendingOps = new ArrayList<>(pendingOps);
    for (QuasiClosedStuckReplicaCount.MisReplicatedOrigin origin : misReplicatedOrigins) {
      totalRequiredReplicas += origin.getReplicaDelta();
      List<DatanodeDetails> targets;
      try {
        targets = getTargets(containerInfo, replicas, origin.getReplicaDelta(), mutablePendingOps);
      } catch (SCMException e) {
        if (firstException == null) {
          firstException = e;
        }
        LOG.warn("Cannot replicate container {} because no suitable targets were found.", containerInfo, e);
        continue;
      }

      List<DatanodeDetails> sourceDatanodes = origin.getSources().stream()
          .map(ContainerReplica::getDatanodeDetails)
          .collect(Collectors.toList());
      for (DatanodeDetails target : targets) {
        try {
          replicationManager.sendThrottledReplicationCommand(containerInfo, sourceDatanodes, target, 0);
          // Add the pending op, so we exclude the node for subsequent origins
          mutablePendingOps.add(ContainerReplicaOp.create(ContainerReplicaOp.PendingOpType.ADD, target, 0));
          totalCommandsSent++;
        } catch (CommandTargetOverloadedException e) {
          LOG.warn("Cannot replicate container {} because all sources are overloaded.", containerInfo);
          if (firstException == null) {
            firstException = e;
          }
        }
      }
    }

    if (firstException != null || totalCommandsSent < totalRequiredReplicas) {
      // Some commands were not sent as expected (not enough nodes found or overloaded nodes), so we just rethrow
      // the first exception we encountered.
      LOG.info("A command was not sent for all required new replicas for container {}. Total sent {}, required {} ",
          containerInfo, totalCommandsSent, totalRequiredReplicas);
      metrics.incrPartialReplicationTotal();
      if (firstException != null) {
        throw firstException;
      } else {
        throw new InsufficientDatanodesException(totalRequiredReplicas, totalCommandsSent);
      }
    }
    return totalCommandsSent;
  }

  /**
   * Asks the placement policy for {@code additionalRequired} new target nodes,
   * excluding nodes already holding or receiving a replica.
   *
   * @throws IOException if the placement policy cannot find enough targets
   */
  private List<DatanodeDetails> getTargets(ContainerInfo containerInfo,
      Set<ContainerReplica> replicas, int additionalRequired, List<ContainerReplicaOp> pendingOps) throws IOException {
    LOG.debug("Need {} target datanodes for container {}. Current replicas: {}.",
        additionalRequired, containerInfo, replicas);

    ReplicationManagerUtil.ExcludedAndUsedNodes excludedAndUsedNodes =
        ReplicationManagerUtil.getExcludedAndUsedNodes(containerInfo, new ArrayList<>(replicas), Collections.emptySet(),
            pendingOps, replicationManager);

    List<DatanodeDetails> excluded = excludedAndUsedNodes.getExcludedNodes();
    List<DatanodeDetails> used = excludedAndUsedNodes.getUsedNodes();

    LOG.debug("UsedList: {}, size {}. ExcludeList: {}, size: {}. ",
        used, used.size(), excluded, excluded.size());

    return ReplicationManagerUtil.getTargetDatanodes(placementPolicy,
        additionalRequired, used, excluded, currentContainerSize, containerInfo);
  }

}
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@
import org.apache.hadoop.hdds.scm.container.replication.health.MismatchedReplicasHandler;
import org.apache.hadoop.hdds.scm.container.replication.health.OpenContainerHandler;
import org.apache.hadoop.hdds.scm.container.replication.health.QuasiClosedContainerHandler;
import org.apache.hadoop.hdds.scm.container.replication.health.QuasiClosedStuckReplicationCheck;
import org.apache.hadoop.hdds.scm.container.replication.health.RatisReplicationCheckHandler;
import org.apache.hadoop.hdds.scm.container.replication.health.RatisUnhealthyReplicationCheckHandler;
import org.apache.hadoop.hdds.scm.container.replication.health.VulnerableUnhealthyReplicasHandler;
Expand Down Expand Up @@ -182,6 +183,7 @@ public class ReplicationManager implements SCMService, ContainerReplicaPendingOp
private final RatisUnderReplicationHandler ratisUnderReplicationHandler;
private final RatisOverReplicationHandler ratisOverReplicationHandler;
private final RatisMisReplicationHandler ratisMisReplicationHandler;
private final QuasiClosedStuckUnderReplicationHandler quasiClosedStuckUnderReplicationHandler;
private Thread underReplicatedProcessorThread;
private Thread overReplicatedProcessorThread;
private final UnderReplicatedProcessor underReplicatedProcessor;
Expand Down Expand Up @@ -248,6 +250,8 @@ public ReplicationManager(final ConfigurationSource conf,
new RatisOverReplicationHandler(ratisContainerPlacement, this);
ratisMisReplicationHandler = new RatisMisReplicationHandler(
ratisContainerPlacement, conf, this);
quasiClosedStuckUnderReplicationHandler =
new QuasiClosedStuckUnderReplicationHandler(ratisContainerPlacement, conf, this);
underReplicatedProcessor =
new UnderReplicatedProcessor(this, rmConf::getUnderReplicatedInterval);
overReplicatedProcessor =
Expand All @@ -262,6 +266,7 @@ public ReplicationManager(final ConfigurationSource conf,
.addNext(new MismatchedReplicasHandler(this))
.addNext(new EmptyContainerHandler(this))
.addNext(new DeletingContainerHandler(this))
.addNext(new QuasiClosedStuckReplicationCheck())
.addNext(ecReplicationCheckHandler)
.addNext(ratisReplicationCheckHandler)
.addNext(new ClosedWithUnhealthyReplicasHandler(this))
Expand Down Expand Up @@ -746,8 +751,15 @@ int processUnderReplicatedContainer(

if (result.getHealthState()
== ContainerHealthResult.HealthState.UNDER_REPLICATED) {
handler = isEC ? ecUnderReplicationHandler
: ratisUnderReplicationHandler;
if (isEC) {
handler = ecUnderReplicationHandler;
} else {
if (QuasiClosedStuckReplicationCheck.shouldHandleAsQuasiClosedStuck(result.getContainerInfo(), replicas)) {
handler = quasiClosedStuckUnderReplicationHandler;
} else {
handler = ratisUnderReplicationHandler;
}
}
} else if (result.getHealthState()
== ContainerHealthResult.HealthState.MIS_REPLICATED) {
handler = isEC ? ecMisReplicationHandler : ratisMisReplicationHandler;
Expand Down
Loading