diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 7e6220209a22..c61e2f4bfd73 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -30,6 +30,8 @@ import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto; @@ -196,7 +198,10 @@ protected List getChunkInfos() throws IOException { // protocol. if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) { pipeline = Pipeline.newBuilder(pipeline) - .setType(HddsProtos.ReplicationType.STAND_ALONE).build(); + .setReplicationConfig(new StandaloneReplicationConfig( + ReplicationConfig + .getLegacyFactor(pipeline.getReplicationConfig()))) + .build(); } acquireClient(); boolean success = false; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java index 51ba2c6eddce..556d93867540 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java @@ -24,8 +24,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -59,8 +60,8 @@ final class DummyBlockInputStreamWithRetry return Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationConfig(new StandaloneReplicationConfig( + ReplicationFactor.ONE)) .setNodes(Collections.emptyList()) .build(); }, chunkList, chunkMap); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index b2be2ee58809..d0cf5fdcb21a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -33,10 +33,10 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.hadoop.hdds.client.ReplicationConfig; import 
org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,8 +50,7 @@ public final class Pipeline { private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class); private final PipelineID id; - private final ReplicationType type; - private final ReplicationFactor factor; + private final ReplicationConfig replicationConfig; private PipelineState state; private Map nodeStatus; @@ -69,12 +68,11 @@ public final class Pipeline { * ContainerStateManager#getMatchingContainerByPipeline to take a lock on * the container allocations for a particular pipeline. */ - private Pipeline(PipelineID id, ReplicationType type, - ReplicationFactor factor, PipelineState state, + private Pipeline(PipelineID id, + ReplicationConfig replicationConfig, PipelineState state, Map nodeStatus, UUID suggestedLeaderId) { this.id = id; - this.type = type; - this.factor = factor; + this.replicationConfig = replicationConfig; this.state = state; this.nodeStatus = nodeStatus; this.creationTimestamp = Instant.now(); @@ -96,16 +94,7 @@ public PipelineID getId() { * @return type - Simple or Ratis. */ public ReplicationType getType() { - return type; - } - - /** - * Returns the factor. - * - * @return type - Simple or Ratis. - */ - public ReplicationFactor getFactor() { - return factor; + return replicationConfig.getReplicationType(); } /** @@ -186,6 +175,7 @@ public boolean sameDatanodes(Pipeline pipeline) { return getNodeSet().equals(pipeline.getNodeSet()); } + /** * Returns the leader if found else defaults to closest node. * @@ -266,6 +256,10 @@ public boolean isEmpty() { return nodeStatus.isEmpty(); } + public ReplicationConfig getReplicationConfig() { + return replicationConfig; + } + public HddsProtos.Pipeline getProtobufMessage(int clientVersion) throws UnknownPipelineStateException { List members = new ArrayList<>(); @@ -275,8 +269,8 @@ public HddsProtos.Pipeline getProtobufMessage(int clientVersion) HddsProtos.Pipeline.Builder builder = HddsProtos.Pipeline.newBuilder() .setId(id.getProtobuf()) - .setType(type) - .setFactor(factor) + .setType(replicationConfig.getReplicationType()) + .setFactor(ReplicationConfig.getLegacyFactor(replicationConfig)) .setState(PipelineState.getProtobuf(state)) .setLeaderID(leaderId != null ? 
leaderId.toString() : "") .setCreationTimeStamp(creationTimestamp.toEpochMilli()) @@ -342,9 +336,10 @@ public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()); } + final ReplicationConfig config = ReplicationConfig + .fromProto(pipeline.getType(), pipeline.getFactor()); return new Builder().setId(PipelineID.getFromProtobuf(pipeline.getId())) - .setFactor(pipeline.getFactor()) - .setType(pipeline.getType()) + .setReplicationConfig(config) .setState(PipelineState.fromProtobuf(pipeline.getState())) .setNodes(nodes) .setLeaderId(leaderId) @@ -367,8 +362,7 @@ public boolean equals(Object o) { return new EqualsBuilder() .append(id, that.id) - .append(type, that.type) - .append(factor, that.factor) + .append(replicationConfig, that.replicationConfig) .append(getNodes(), that.getNodes()) .isEquals(); } @@ -377,8 +371,7 @@ public boolean equals(Object o) { public int hashCode() { return new HashCodeBuilder() .append(id) - .append(type) - .append(factor) + .append(replicationConfig.getReplicationType()) .append(nodeStatus) .toHashCode(); } @@ -390,8 +383,7 @@ public String toString() { b.append(" Id: ").append(id.getId()); b.append(", Nodes: "); nodeStatus.keySet().forEach(b::append); - b.append(", Type:").append(getType()); - b.append(", Factor:").append(getFactor()); + b.append(", ReplicationConfig: ").append(replicationConfig); b.append(", State:").append(getPipelineState()); b.append(", leaderId:").append(leaderId != null ? leaderId.toString() : ""); b.append(", CreationTimestamp").append(getCreationTimestamp()); @@ -412,8 +404,7 @@ public static Builder newBuilder(Pipeline pipeline) { */ public static class Builder { private PipelineID id = null; - private ReplicationType type = null; - private ReplicationFactor factor = null; + private ReplicationConfig replicationConfig = null; private PipelineState state = null; private Map nodeStatus = null; private List nodeOrder = null; @@ -426,8 +417,7 @@ public Builder() {} public Builder(Pipeline pipeline) { this.id = pipeline.id; - this.type = pipeline.type; - this.factor = pipeline.factor; + this.replicationConfig = pipeline.replicationConfig; this.state = pipeline.state; this.nodeStatus = pipeline.nodeStatus; this.nodesInOrder = pipeline.nodesInOrder.get(); @@ -441,13 +431,8 @@ public Builder setId(PipelineID id1) { return this; } - public Builder setType(ReplicationType type1) { - this.type = type1; - return this; - } - - public Builder setFactor(ReplicationFactor factor1) { - this.factor = factor1; + public Builder setReplicationConfig(ReplicationConfig replicationConf) { + this.replicationConfig = replicationConf; return this; } @@ -484,12 +469,12 @@ public Builder setSuggestedLeaderId(UUID uuid) { public Pipeline build() { Preconditions.checkNotNull(id); - Preconditions.checkNotNull(type); - Preconditions.checkNotNull(factor); + Preconditions.checkNotNull(replicationConfig); Preconditions.checkNotNull(state); Preconditions.checkNotNull(nodeStatus); Pipeline pipeline = - new Pipeline(id, type, factor, state, nodeStatus, suggestedLeaderId); + new Pipeline(id, replicationConfig, state, nodeStatus, + suggestedLeaderId); pipeline.setLeaderId(leaderId); // overwrite with original creationTimestamp if (creationTimestamp != null) { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java index 556c052b32d8..b7b3dc6340d9 100644 --- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java @@ -22,11 +22,11 @@ import java.util.List; import java.util.Objects; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import com.google.common.base.Preconditions; @@ -65,8 +65,8 @@ public static Pipeline createPipeline(Iterable ids) { return Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationConfig( + new StandaloneReplicationConfig(ReplicationFactor.ONE)) .setNodes(dns) .build(); } @@ -81,8 +81,8 @@ public static Pipeline createRatisPipeline() { return Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setType(ReplicationType.RATIS) - .setFactor(ReplicationFactor.THREE) + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) .setNodes(nodes) .build(); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java index 9137c6e339fb..175b65ab8dfa 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.protocol; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.ScmConfig; @@ -59,8 +60,30 @@ public interface ScmBlockLocationProtocol extends Closeable { * @return allocated block accessing info (key, pipeline). * @throws IOException */ - List allocateBlock(long size, int numBlocks, + @Deprecated + default List allocateBlock(long size, int numBlocks, ReplicationType type, ReplicationFactor factor, String owner, + ExcludeList excludeList) throws IOException { + return allocateBlock(size, numBlocks, ReplicationConfig + .fromTypeAndFactor(type, factor), owner, excludeList); + } + + /** + * Asks SCM where a block should be allocated. SCM responds with the + * set of datanodes that should be used creating this block. + * + * @param size - size of the block. + * @param numBlocks - number of blocks. + * @param replicationConfig - replicationConfiguration + * @param owner - service owner of the new block + * @param excludeList List of datanodes/containers to exclude during + * block + * allocation. + * @return allocated block accessing info (key, pipeline). 
+ * @throws IOException + */ + List allocateBlock(long size, int numBlocks, + ReplicationConfig replicationConfig, String owner, ExcludeList excludeList) throws IOException; /** diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index 0662a8172eb0..d93f7f63057b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -24,6 +24,9 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ContainerBlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; @@ -139,29 +142,47 @@ private SCMBlockLocationResponse handleError(SCMBlockLocationResponse resp) /** * Asks SCM where a block should be allocated. SCM responds with the * set of datanodes that should be used creating this block. - * @param size - size of the block. - * @param num - number of blocks. - * @param type - replication type of the blocks. - * @param factor - replication factor of the blocks. - * @param excludeList - exclude list while allocating blocks. + * + * @param size - size of the block. + * @param num - number of blocks. + * @param replicationConfig - replication configuration of the blocks. + * @param excludeList - exclude list while allocating blocks. * @return allocated block accessing info (key, pipeline). 
* @throws IOException */ @Override - public List allocateBlock(long size, int num, - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, - String owner, ExcludeList excludeList) throws IOException { + public List allocateBlock( + long size, int num, + ReplicationConfig replicationConfig, + String owner, ExcludeList excludeList + ) throws IOException { Preconditions.checkArgument(size > 0, "block size must be greater than 0"); - AllocateScmBlockRequestProto request = + final AllocateScmBlockRequestProto.Builder requestBuilder = AllocateScmBlockRequestProto.newBuilder() .setSize(size) .setNumBlocks(num) - .setType(type) - .setFactor(factor) + .setType(replicationConfig.getReplicationType()) .setOwner(owner) - .setExcludeList(excludeList.getProtoBuf()) - .build(); + .setExcludeList(excludeList.getProtoBuf()); + + switch (replicationConfig.getReplicationType()) { + case STAND_ALONE: + requestBuilder.setFactor( + ((StandaloneReplicationConfig) replicationConfig) + .getReplicationFactor()); + break; + case RATIS: + requestBuilder.setFactor( + ((RatisReplicationConfig) replicationConfig).getReplicationFactor()); + break; + default: + throw new IllegalArgumentException( + "Unsupported replication type " + replicationConfig + .getReplicationType()); + } + + AllocateScmBlockRequestProto request = requestBuilder.build(); SCMBlockLocationRequest wrapper = createSCMBlockRequest( Type.AllocateScmBlock) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 75d5864f0951..00cb49bee072 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -165,6 +165,7 @@ private ScmContainerLocationResponse submitRpcRequest( * * @param type - Replication Type * @param factor - Replication Count + * @param owner - Service owner of the container. */ @Override public ContainerWithPipeline allocateContainer( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java index bfc68c7341f9..8acbb61efad0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.List; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.common.BlockGroup; @@ -35,15 +35,14 @@ public interface BlockManager extends Closeable { /** * Allocates a new block for a given size. * @param size - Block Size - * @param type Replication Type - * @param factor - Replication Factor + * @param replicationConfig configuration of the replication method * @param excludeList List of datanodes/containers to exclude during block * allocation. 
* @return AllocatedBlock * @throws IOException */ - AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, String owner, + AllocatedBlock allocateBlock(long size, ReplicationConfig replicationConfig, + String owner, ExcludeList excludeList) throws IOException; /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index 8fdebf0fb1a3..94da8820d4ea 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -28,10 +28,9 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.PipelineRequestInformation; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; @@ -150,19 +149,20 @@ public void stop() throws IOException { * Allocates a block in a container and returns that info. * * @param size - Block Size - * @param type Replication Type - * @param factor - Replication Factor + * @param replicationConfig - Replication config + * @param owner - Owner (service) of the container. * @param excludeList List of datanodes/containers to exclude during block * allocation. * @return Allocated block * @throws IOException on failure. */ @Override - public AllocatedBlock allocateBlock(final long size, ReplicationType type, - ReplicationFactor factor, String owner, ExcludeList excludeList) + public AllocatedBlock allocateBlock(final long size, + ReplicationConfig replicationConfig, + String owner, ExcludeList excludeList) throws IOException { if (LOG.isTraceEnabled()) { - LOG.trace("Size : {} , type : {}, factor : {} ", size, type, factor); + LOG.trace("Size : {} , replicationConfig: {}", size, replicationConfig); } if (scm.getScmContext().isInSafeMode()) { throw new SCMException("SafeModePrecheck failed for allocateBlock", @@ -190,45 +190,50 @@ public AllocatedBlock allocateBlock(final long size, ReplicationType type, ContainerInfo containerInfo; + //TODO we need to continue the refactor to use ReplicationConfig everywhere + //in downstream managers. 
+ while (true) { List availablePipelines = pipelineManager - .getPipelines(type, factor, Pipeline.PipelineState.OPEN, + .getPipelines(replicationConfig, Pipeline.PipelineState.OPEN, excludeList.getDatanodes(), excludeList.getPipelineIds()); Pipeline pipeline = null; if (availablePipelines.size() == 0 && !excludeList.isEmpty()) { // if no pipelines can be found, try finding pipeline without // exclusion availablePipelines = pipelineManager - .getPipelines(type, factor, Pipeline.PipelineState.OPEN); + .getPipelines(replicationConfig, Pipeline.PipelineState.OPEN); } if (availablePipelines.size() == 0) { try { // TODO: #CLUTIL Remove creation logic when all replication types and // factors are handled by pipeline creator - pipeline = pipelineManager.createPipeline(type, factor); + pipeline = pipelineManager.createPipeline(replicationConfig); // wait until pipeline is ready pipelineManager.waitPipelineReady(pipeline.getId(), 0); } catch (SCMException se) { - LOG.warn("Pipeline creation failed for type:{} factor:{}. " + - "Datanodes may be used up.", type, factor, se); + LOG.warn("Pipeline creation failed for replicationConfig {} " + + "Datanodes may be used up.", replicationConfig, se); break; } catch (IOException e) { - LOG.warn("Pipeline creation failed for type:{} factor:{}. Retrying " + - "get pipelines call once.", type, factor, e); + LOG.warn("Pipeline creation failed for replicationConfig: {}. " + + "Retrying get pipelines call once.", replicationConfig, e); availablePipelines = pipelineManager - .getPipelines(type, factor, Pipeline.PipelineState.OPEN, + .getPipelines(replicationConfig, Pipeline.PipelineState.OPEN, excludeList.getDatanodes(), excludeList.getPipelineIds()); if (availablePipelines.size() == 0 && !excludeList.isEmpty()) { // if no pipelines can be found, try finding pipeline without // exclusion availablePipelines = pipelineManager - .getPipelines(type, factor, Pipeline.PipelineState.OPEN); + .getPipelines(replicationConfig, Pipeline.PipelineState.OPEN); } if (availablePipelines.size() == 0) { - LOG.info("Could not find available pipeline of type:{} and " + - "factor:{} even after retrying", type, factor); + LOG.info( + "Could not find available pipeline of replicationConfig: {} " + + "even after retrying", + replicationConfig); break; } } @@ -255,8 +260,8 @@ public AllocatedBlock allocateBlock(final long size, ReplicationType type, // we have tried all strategies we know and but somehow we are not able // to get a container for this block. Log that info and return a null. 
LOG.error( - "Unable to allocate a block for the size: {}, type: {}, factor: {}", - size, type, factor); + "Unable to allocate a block for the size: {}, replicationConfig: {}", + size, replicationConfig); return null; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 460987494054..2b035ad4ed27 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -35,12 +35,11 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics; import org.apache.hadoop.hdds.scm.ha.SCMHAManager; @@ -174,24 +173,24 @@ public List getContainers(final LifeCycleState state) { } @Override - public ContainerInfo allocateContainer(final ReplicationType type, - final ReplicationFactor replicationFactor, final String owner) + public ContainerInfo allocateContainer( + final ReplicationConfig replicationConfig, final String owner) throws IOException { lock.lock(); try { final List pipelines = pipelineManager - .getPipelines(type, replicationFactor, Pipeline.PipelineState.OPEN); + .getPipelines(replicationConfig, Pipeline.PipelineState.OPEN); final Pipeline pipeline; if (pipelines.isEmpty()) { try { - pipeline = pipelineManager.createPipeline(type, replicationFactor); + pipeline = pipelineManager.createPipeline(replicationConfig); pipelineManager.waitPipelineReady(pipeline.getId(), 0); } catch (IOException e) { scmContainerManagerMetrics.incNumFailureCreateContainers(); throw new IOException("Could not allocate container. 
Cannot get any" + - " matching pipeline for Type:" + type + ", Factor:" + - replicationFactor + ", State:PipelineState.OPEN", e); + " matching pipeline for replicationConfig: " + replicationConfig + + ", State:PipelineState.OPEN", e); } } else { pipeline = pipelines.get(random.nextInt(pipelines.size())); @@ -223,7 +222,8 @@ private ContainerInfo allocateContainer(final Pipeline pipeline, .setOwner(owner) .setContainerID(containerID.getId()) .setDeleteTransactionId(0) - .setReplicationFactor(pipeline.getFactor()) + .setReplicationFactor( + ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig())) .setReplicationType(pipeline.getType()) .build(); containerStateManager.addContainer(containerInfo); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java index 6f0233de2656..e349e7e4bedd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java @@ -23,10 +23,9 @@ import java.util.Map; import java.util.Set; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; @@ -94,13 +93,9 @@ default List getContainers() { /** * Allocates a new container for a given keyName and replication factor. * - * @param replicationFactor - replication factor of the container. - * @param owner - * @return - ContainerInfo. * @throws IOException */ - ContainerInfo allocateContainer(ReplicationType type, - ReplicationFactor replicationFactor, + ContainerInfo allocateContainer(ReplicationConfig replicationConfig, String owner) throws IOException; /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index 0c3772f44825..946363be0e63 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -26,6 +26,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -237,39 +239,33 @@ void loadContainer(final ContainerInfo containerInfo) throws SCMException { /** * Allocates a new container based on the type, replication etc. - * - * @param pipelineManager -- Pipeline Manager class. - * @param type -- Replication type. - * @param replicationFactor - Replication replicationFactor. - * @return ContainerWithPipeline - * @throws IOException on Failure. 
*/ ContainerInfo allocateContainer(final PipelineManager pipelineManager, - final HddsProtos.ReplicationType type, - final HddsProtos.ReplicationFactor replicationFactor, final String owner) + final ReplicationConfig replicationConfig, final String owner) throws IOException { final List pipelines = pipelineManager - .getPipelines(type, replicationFactor, Pipeline.PipelineState.OPEN); + .getPipelines(replicationConfig, Pipeline.PipelineState.OPEN); Pipeline pipeline; - boolean bgCreateOne = (type == ReplicationType.RATIS) && replicationFactor - == ReplicationFactor.ONE && autoCreateRatisOne; - boolean bgCreateThree = (type == ReplicationType.RATIS) && replicationFactor - == ReplicationFactor.THREE; + boolean bgCreateOne = RatisReplicationConfig + .hasFactor(replicationConfig, ReplicationFactor.ONE) + && autoCreateRatisOne; + boolean bgCreateThree = RatisReplicationConfig + .hasFactor(replicationConfig, ReplicationFactor.THREE); if (!pipelines.isEmpty() && (bgCreateOne || bgCreateThree)) { // let background create Ratis pipelines. pipeline = pipelines.get((int) containerCount.get() % pipelines.size()); } else { try { - pipeline = pipelineManager.createPipeline(type, replicationFactor); + pipeline = pipelineManager.createPipeline(replicationConfig); pipelineManager.waitPipelineReady(pipeline.getId(), 0); } catch (IOException e) { if (pipelines.isEmpty()) { throw new IOException("Could not allocate container. Cannot get any" + - " matching pipeline for Type:" + type + - ", Factor:" + replicationFactor + ", State:PipelineState.OPEN"); + " matching pipeline for replicationConfig:" + replicationConfig + + ", State:PipelineState.OPEN"); } pipeline = pipelines.get((int) containerCount.get() % pipelines.size()); } @@ -309,7 +305,8 @@ ContainerInfo allocateContainer( .setOwner(owner) .setContainerID(containerID) .setDeleteTransactionId(0) - .setReplicationFactor(pipeline.getFactor()) + .setReplicationFactor( + ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig())) .setReplicationType(pipeline.getType()) .build(); addContainerInfo(containerID, containerInfo, pipelineManager, pipeline); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index 20592bbfe0ce..2044dc607539 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -30,6 +30,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -290,8 +291,9 @@ public ContainerInfo allocateContainer(final ReplicationType type, ContainerInfo containerInfo = null; try { containerInfo = - containerStateManager.allocateContainer(pipelineManager, type, - replicationFactor, owner); + containerStateManager.allocateContainer(pipelineManager, + ReplicationConfig.fromTypeAndFactor(type, replicationFactor), + owner); } catch (IOException ex) { scmContainerManagerMetrics.incNumFailureCreateContainers(); throw ex; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java index 42b3a939522e..50c954cd663e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java @@ -18,8 +18,13 @@ package org.apache.hadoop.hdds.scm.pipeline; import org.apache.commons.collections.iterators.LoopingIterator; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.utils.Scheduler; @@ -91,16 +96,20 @@ void triggerPipelineCreation() { scheduler.schedule(this::createPipelines, 0, TimeUnit.MILLISECONDS); } - private boolean skipCreation(HddsProtos.ReplicationFactor factor, - HddsProtos.ReplicationType type, - boolean autoCreate) { - if (type == HddsProtos.ReplicationType.RATIS) { - return factor == HddsProtos.ReplicationFactor.ONE && (!autoCreate); - } else { + private boolean skipCreation(ReplicationConfig replicationConfig, + boolean autoCreate) { + if (replicationConfig.getReplicationType() + == HddsProtos.ReplicationType.RATIS) { + return RatisReplicationConfig + .hasFactor(replicationConfig, ReplicationFactor.ONE) && (!autoCreate); + } else if (replicationConfig.getReplicationType() + == ReplicationType.STAND_ALONE) { // For STAND_ALONE Replication Type, Replication Factor 3 should not be // used. 
- return factor == HddsProtos.ReplicationFactor.THREE; + return ((StandaloneReplicationConfig) replicationConfig) .getReplicationFactor() != ReplicationFactor.ONE; } + return true; } private void createPipelines() throws RuntimeException { @@ -112,18 +121,22 @@ private void createPipelines() throws RuntimeException { ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE_DEFAULT); - List list = + List list = new ArrayList<>(); for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor .values()) { - if (skipCreation(factor, type, autoCreateFactorOne)) { + + final ReplicationConfig replicationConfig = + ReplicationConfig.fromTypeAndFactor(type, factor); + + if (skipCreation(replicationConfig, autoCreateFactorOne)) { // Skip this iteration for creating pipeline continue; } - list.add(factor); + list.add(replicationConfig); if (!pipelineManager.getSafeModeStatus()) { try { - pipelineManager.scrubPipeline(type, factor); + pipelineManager.scrubPipeline(replicationConfig); } catch (IOException e) { LOG.error("Error while scrubbing pipelines.", e); } @@ -132,14 +145,14 @@ LoopingIterator it = new LoopingIterator(list); while (it.hasNext()) { - HddsProtos.ReplicationFactor factor = - (HddsProtos.ReplicationFactor) it.next(); + ReplicationConfig replicationConfig = + (ReplicationConfig) it.next(); try { if (scheduler.isClosed()) { break; } - pipelineManager.createPipeline(type, factor); + pipelineManager.createPipeline(replicationConfig); } catch (IOException ioe) { it.remove(); } catch (Throwable t) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreatorV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreatorV2.java index 41d3aa8dcf7b..4901a571f135 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreatorV2.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreatorV2.java @@ -20,8 +20,12 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.collections.iterators.LoopingIterator; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.ha.SCMService; @@ -40,6 +44,8 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; import static org.apache.hadoop.hdds.scm.ha.SCMService.Event.UNHEALTHY_TO_HEALTHY_NODE_HANDLER_TRIGGERED; import static org.apache.hadoop.hdds.scm.ha.SCMService.Event.NEW_NODE_HANDLER_TRIGGERED; import static org.apache.hadoop.hdds.scm.ha.SCMService.Event.PRE_CHECK_COMPLETED; @@ -181,16 +187,18 @@ private void run() { } } - private boolean skipCreation(HddsProtos.ReplicationFactor factor, -
HddsProtos.ReplicationType type, - boolean autoCreate) { - if (type == HddsProtos.ReplicationType.RATIS) { - return factor == HddsProtos.ReplicationFactor.ONE && (!autoCreate); - } else { + private boolean skipCreation(ReplicationConfig replicationConfig, + boolean autoCreate) { + if (replicationConfig.getReplicationType().equals(RATIS)) { + return RatisReplicationConfig + .hasFactor(replicationConfig, ReplicationFactor.ONE) && (!autoCreate); + } else if (replicationConfig.getReplicationType().equals(STAND_ALONE)) { // For STAND_ALONE Replication Type, Replication Factor 3 should not be // used. - return factor == HddsProtos.ReplicationFactor.THREE; + return ((StandaloneReplicationConfig) replicationConfig) + .getReplicationFactor() != ReplicationFactor.ONE; } + return true; } private void createPipelines() throws RuntimeException { @@ -202,18 +210,20 @@ private void createPipelines() throws RuntimeException { ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE_DEFAULT); - List list = + List list = new ArrayList<>(); for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor .values()) { - if (skipCreation(factor, type, autoCreateFactorOne)) { + final ReplicationConfig replicationConfig = + ReplicationConfig.fromTypeAndFactor(type, factor); + if (skipCreation(replicationConfig, autoCreateFactorOne)) { // Skip this iteration for creating pipeline continue; } - list.add(factor); + list.add(replicationConfig); if (!pipelineManager.getSafeModeStatus()) { try { - pipelineManager.scrubPipeline(type, factor); + pipelineManager.scrubPipeline(replicationConfig); } catch (IOException e) { LOG.error("Error while scrubbing pipelines.", e); } @@ -222,11 +232,11 @@ private void createPipelines() throws RuntimeException { LoopingIterator it = new LoopingIterator(list); while (it.hasNext()) { - HddsProtos.ReplicationFactor factor = - (HddsProtos.ReplicationFactor) it.next(); + ReplicationConfig replicationConfig = + (ReplicationConfig) it.next(); try { - pipelineManager.createPipeline(type, factor); + pipelineManager.createPipeline(replicationConfig); } catch (IOException ioe) { it.remove(); } catch (Throwable t) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java index ed73a64dd2ac..0fbabee8bb22 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java @@ -19,9 +19,9 @@ package org.apache.hadoop.hdds.scm.pipeline; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -56,19 +56,27 @@ protected PipelineFactory() { } @VisibleForTesting - void setProvider(ReplicationType replicationType, - PipelineProvider provider) { + void setProvider( + ReplicationType replicationType, + PipelineProvider provider + ) { providers.put(replicationType, provider); } - public Pipeline create(ReplicationType type, ReplicationFactor factor) + 
public Pipeline create( + ReplicationConfig replicationConfig + ) throws IOException { - return providers.get(type).create(factor); + return providers + .get(replicationConfig.getReplicationType()) + .create(replicationConfig); } - public Pipeline create(ReplicationType type, ReplicationFactor factor, - List nodes) { - return providers.get(type).create(factor, nodes); + public Pipeline create(ReplicationConfig replicationConfig, + List nodes + ) { + return providers.get(replicationConfig.getReplicationType()) + .create(replicationConfig, nodes); } public void close(ReplicationType type, Pipeline pipeline) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java index 04985d4e1fd2..ad8faca3e10d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java @@ -24,23 +24,25 @@ import java.util.List; import java.util.NavigableSet; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.ratis.protocol.exceptions.NotLeaderException; /** * Interface which exposes the api for pipeline management. */ public interface PipelineManager extends Closeable, PipelineManagerMXBean { - Pipeline createPipeline(ReplicationType type, ReplicationFactor factor) + Pipeline createPipeline( + ReplicationConfig replicationConfig + ) throws IOException; - Pipeline createPipeline(ReplicationType type, ReplicationFactor factor, - List nodes); + Pipeline createPipeline( + ReplicationConfig replicationConfig, + List nodes + ); Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException; @@ -48,20 +50,20 @@ Pipeline createPipeline(ReplicationType type, ReplicationFactor factor, List getPipelines(); - List getPipelines(ReplicationType type); + List getPipelines( + ReplicationConfig replicationConfig + ); - List getPipelines(ReplicationType type, - ReplicationFactor factor); + List getPipelines( + ReplicationConfig replicationConfig, Pipeline.PipelineState state + ); - List getPipelines(ReplicationType type, - Pipeline.PipelineState state) throws NotLeaderException; - - List getPipelines(ReplicationType type, - ReplicationFactor factor, Pipeline.PipelineState state); - - List getPipelines(ReplicationType type, ReplicationFactor factor, - Pipeline.PipelineState state, Collection excludeDns, - Collection excludePipelines); + List getPipelines( + ReplicationConfig replicationConfig, + Pipeline.PipelineState state, + Collection excludeDns, + Collection excludePipelines + ); void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID) throws IOException; @@ -78,7 +80,7 @@ NavigableSet getContainersInPipeline(PipelineID pipelineID) void closePipeline(Pipeline pipeline, boolean onTimeout) throws IOException; - void scrubPipeline(ReplicationType type, ReplicationFactor factor) + void scrubPipeline(ReplicationConfig replicationConfig) throws IOException; void startPipelineCreator(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 0d97b8760e10..b7d8864ef5af 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -20,11 +20,14 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.events.SCMEvents; @@ -139,9 +142,10 @@ public static PipelineManagerV2Impl newPipelineManager( } @Override - public Pipeline createPipeline(ReplicationType type, - ReplicationFactor factor) throws IOException { - if (!isPipelineCreationAllowed() && factor != ReplicationFactor.ONE) { + public Pipeline createPipeline( + ReplicationConfig replicationConfig + ) throws IOException { + if (!isPipelineCreationAllowed() && !factorOne(replicationConfig)) { LOG.debug("Pipeline creation is not allowed until safe mode prechecks " + "complete"); throw new IOException("Pipeline creation is not allowed as safe mode " + @@ -149,14 +153,14 @@ public Pipeline createPipeline(ReplicationType type, } lock.lock(); try { - Pipeline pipeline = pipelineFactory.create(type, factor); + Pipeline pipeline = pipelineFactory.create(replicationConfig); stateManager.addPipeline(pipeline.getProtobufMessage( ClientVersions.CURRENT_VERSION)); recordMetricsForPipeline(pipeline); return pipeline; } catch (IOException ex) { - LOG.debug("Failed to create pipeline of type {} and factor {}. " + - "Exception: {}", type, factor, ex.getMessage()); + LOG.debug("Failed to create pipeline with replicationConfig {}.", + replicationConfig, ex); metrics.incNumPipelineCreationFailed(); throw ex; } finally { @@ -164,12 +168,28 @@ public Pipeline createPipeline(ReplicationType type, } } + private boolean factorOne(ReplicationConfig replicationConfig) { + if (replicationConfig.getReplicationType() == ReplicationType.RATIS) { + return ((RatisReplicationConfig) replicationConfig).getReplicationFactor() + == ReplicationFactor.ONE; + + } else if (replicationConfig.getReplicationType() + == ReplicationType.STAND_ALONE) { + return ((StandaloneReplicationConfig) replicationConfig) + .getReplicationFactor() + == ReplicationFactor.ONE; + } + return false; + } + @Override - public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor, - List nodes) { + public Pipeline createPipeline( + ReplicationConfig replicationConfig, + List nodes + ) { // This will mostly be used to create dummy pipeline for SimplePipelines. // We don't update the metrics for SimplePipelines. 
- return pipelineFactory.create(type, factor, nodes); + return pipelineFactory.create(replicationConfig, nodes); } @Override @@ -194,36 +214,23 @@ public List getPipelines() { } @Override - public List getPipelines(ReplicationType type) { - return stateManager.getPipelines(type); + public List getPipelines(ReplicationConfig replicationConfig) { + return stateManager.getPipelines(replicationConfig); } @Override - public List getPipelines(ReplicationType type, - ReplicationFactor factor) { - return stateManager.getPipelines(type, factor); - } - - @Override - public List getPipelines(ReplicationType type, - Pipeline.PipelineState state) { - return stateManager.getPipelines(type, state); - } - - @Override - public List getPipelines(ReplicationType type, - ReplicationFactor factor, - Pipeline.PipelineState state) { - return stateManager.getPipelines(type, factor, state); + public List getPipelines(ReplicationConfig config, + Pipeline.PipelineState state) { + return stateManager.getPipelines(config, state); } @Override public List getPipelines( - ReplicationType type, ReplicationFactor factor, + ReplicationConfig replicationConfig, Pipeline.PipelineState state, Collection excludeDns, Collection excludePipelines) { return stateManager - .getPipelines(type, factor, state, excludeDns, excludePipelines); + .getPipelines(replicationConfig, state, excludeDns, excludePipelines); } @Override @@ -336,12 +343,9 @@ public void closePipeline(Pipeline pipeline, boolean onTimeout) /** * Scrub pipelines. - * @param type Pipeline type - * @param factor Pipeline factor - * @throws IOException */ @Override - public void scrubPipeline(ReplicationType type, ReplicationFactor factor) + public void scrubPipeline(ReplicationConfig config) throws IOException { Instant currentTime = Instant.now(); Long pipelineScrubTimeoutInMills = conf.getTimeDuration( @@ -349,7 +353,7 @@ public void scrubPipeline(ReplicationType type, ReplicationFactor factor) ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - List candidates = stateManager.getPipelines(type, factor); + List candidates = stateManager.getPipelines(config); for (Pipeline p : candidates) { // scrub pipelines who stay ALLOCATED for too long. 
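
[Reviewer note, not part of the patch: the PipelineManager changes above collapse the (type, factor) overloads into ReplicationConfig-based ones. Below is a minimal caller-side sketch of the migration, using only classes already present in this diff; the wrapper class and method names are invented for illustration.]

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;

final class ReplicationConfigMigrationSketch {

  // Before: pipelineManager.createPipeline(RATIS, THREE) passed two loose
  // arguments that every caller had to keep in sync.
  static Pipeline createRatisThree(PipelineManager pipelineManager)
      throws IOException {
    // After: type and factor travel together as one value object.
    ReplicationConfig replicationConfig =
        new RatisReplicationConfig(ReplicationFactor.THREE);
    Pipeline pipeline = pipelineManager.createPipeline(replicationConfig);
    // Queries take the same object, so a lookup can no longer pair the
    // type of one pipeline with the factor of another.
    List<Pipeline> open = pipelineManager.getPipelines(
        replicationConfig, Pipeline.PipelineState.OPEN);
    return open.isEmpty() ? pipeline : open.get(0);
  }
}

[The same pattern applies to BlockManager.allocateBlock and the allocateContainer methods, whose signatures change identically elsewhere in this patch.]
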
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java index 0482085df1f8..f1f325ce1fca 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java @@ -20,9 +20,11 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.exceptions.SCMException; @@ -98,12 +100,16 @@ int currentPipelineCount(DatanodeDetails datanodeDetails, int nodesRequired) { continue; } if (pipeline != null && - // single node pipeline are not accounted for while determining - // the pipeline limit for dn - pipeline.getType() == HddsProtos.ReplicationType.RATIS && - (pipeline.getFactor() == HddsProtos.ReplicationFactor.ONE || - pipeline.getFactor().getNumber() == nodesRequired && - pipeline.getPipelineState() == Pipeline.PipelineState.CLOSED)) { + // single node pipeline are not accounted for while determining + // the pipeline limit for dn + pipeline.getType() == HddsProtos.ReplicationType.RATIS && + (RatisReplicationConfig + .hasFactor(pipeline.getReplicationConfig(), ReplicationFactor.ONE) + || + pipeline.getReplicationConfig().getRequiredNodes() + == nodesRequired && + pipeline.getPipelineState() + == Pipeline.PipelineState.CLOSED)) { pipelineNumDeductable++; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java index e459fccd3bae..50bc0e1fb5c2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java @@ -24,9 +24,8 @@ import java.util.Set; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; @@ -34,7 +33,8 @@ /** * Interface for creating pipelines. 
*/ -public abstract class PipelineProvider { +public abstract class PipelineProvider<REPLICATION_CONFIG extends ReplicationConfig> { private final NodeManager nodeManager; private final StateManager stateManager; @@ -58,20 +58,22 @@ public StateManager getPipelineStateManager() { return stateManager; } - protected abstract Pipeline create(ReplicationFactor factor) + protected abstract Pipeline create(REPLICATION_CONFIG replicationConfig) throws IOException; - protected abstract Pipeline create(ReplicationFactor factor, - List nodes); + protected abstract Pipeline create( + REPLICATION_CONFIG replicationConfig, + List nodes + ); protected abstract void close(Pipeline pipeline) throws IOException; protected abstract void shutdown(); - List pickNodesNeverUsed(ReplicationType type, - ReplicationFactor factor) throws SCMException { + List pickNodesNeverUsed(REPLICATION_CONFIG replicationConfig) + throws SCMException { Set dnsUsed = new HashSet<>(); - stateManager.getPipelines(type, factor).stream().filter( + stateManager.getPipelines(replicationConfig).stream().filter( p -> p.getPipelineState().equals(Pipeline.PipelineState.OPEN) || p.getPipelineState().equals(Pipeline.PipelineState.DORMANT) || p.getPipelineState().equals(Pipeline.PipelineState.ALLOCATED)) @@ -82,12 +84,13 @@ List pickNodesNeverUsed(ReplicationType type, .getNodes(NodeStatus.inServiceHealthy()) .parallelStream() .filter(dn -> !dnsUsed.contains(dn)) - .limit(factor.getNumber()) + .limit(replicationConfig.getRequiredNodes()) .collect(Collectors.toList()); - if (dns.size() < factor.getNumber()) { + if (dns.size() < replicationConfig.getRequiredNodes()) { String e = String - .format("Cannot create pipeline of factor %d using %d nodes." + - " Used %d nodes. Healthy nodes %d", factor.getNumber(), + .format("Cannot create pipeline %s using %d nodes." + + " Used %d nodes.
Healthy nodes %d", + replicationConfig.toString(), dns.size(), dnsUsed.size(), nodeManager.getNodes(NodeStatus.inServiceHealthy()).size()); throw new SCMException(e, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java index 8fc7f3eccbd9..6676c6e9c898 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java @@ -20,10 +20,11 @@ import java.io.IOException; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.protocol.proto @@ -114,8 +115,10 @@ protected void processPipelineReport(PipelineReport report, if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Pipeline {} {} reported by {}", pipeline.getFactor(), - pipeline.getId(), dn); + LOGGER.debug("Pipeline {} {} reported by {}", + pipeline.getReplicationConfig(), + pipeline.getId(), + dn); } if (pipeline.isHealthy()) { pipelineManager.openPipeline(pipelineID); @@ -139,7 +142,8 @@ protected void setPipelineLeaderId(PipelineReport report, DatanodeDetails dn) { // ONE replica pipeline doesn't have leader flag if (report.getIsLeader() || - pipeline.getFactor() == HddsProtos.ReplicationFactor.ONE) { + RatisReplicationConfig.hasFactor(pipeline.getReplicationConfig(), + ReplicationFactor.ONE)) { pipeline.setLeaderId(dn.getUuid()); metrics.incNumPipelineBytesWritten(pipeline, report.getBytesWritten()); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java index 581477e09066..c3df3d2db9e1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java @@ -18,10 +18,9 @@ package org.apache.hadoop.hdds.scm.pipeline; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; import org.apache.hadoop.hdds.utils.db.Table; @@ -75,36 +74,27 @@ public List getPipelines() { } @Override - public List getPipelines(ReplicationType type) { - return pipelineStateMap.getPipelines(type); - } - - @Override - public List getPipelines(ReplicationType type, - ReplicationFactor factor) { - return pipelineStateMap.getPipelines(type, factor); + public List getPipelines( + ReplicationConfig replicationConfig + ) { + return pipelineStateMap.getPipelines(replicationConfig); } @Override 
- public List getPipelines(ReplicationType type, - ReplicationFactor factor, - PipelineState state) { - return pipelineStateMap.getPipelines(type, factor, state); + public List getPipelines( + ReplicationConfig replicationConfig, + PipelineState state + ) { + return pipelineStateMap.getPipelines(replicationConfig, state); } @Override public List getPipelines( - ReplicationType type, ReplicationFactor factor, + ReplicationConfig replicationConfig, PipelineState state, Collection excludeDns, Collection excludePipelines) { return pipelineStateMap - .getPipelines(type, factor, state, excludeDns, excludePipelines); - } - - @Override - public List getPipelines(ReplicationType type, - PipelineState... states) { - return pipelineStateMap.getPipelines(type, states); + .getPipelines(replicationConfig, state, excludeDns, excludePipelines); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java index c84a8525bba1..8473b5ab7feb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.pipeline; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol; @@ -137,22 +138,13 @@ public List getPipelines() { } } - @Override - public List getPipelines(HddsProtos.ReplicationType type) { - lock.readLock().lock(); - try { - return pipelineStateMap.getPipelines(type); - } finally { - lock.readLock().unlock(); - } - } @Override public List getPipelines( - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor) { + ReplicationConfig replicationConfig) { lock.readLock().lock(); try { - return pipelineStateMap.getPipelines(type, factor); + return pipelineStateMap.getPipelines(replicationConfig); } finally { lock.readLock().unlock(); } @@ -160,11 +152,11 @@ public List getPipelines( @Override public List getPipelines( - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, - Pipeline.PipelineState state) { + ReplicationConfig replicationConfig, + Pipeline.PipelineState state) { lock.readLock().lock(); try { - return pipelineStateMap.getPipelines(type, factor, state); + return pipelineStateMap.getPipelines(replicationConfig, state); } finally { lock.readLock().unlock(); } @@ -172,24 +164,13 @@ public List getPipelines( @Override public List getPipelines( - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, + ReplicationConfig replicationConfig, Pipeline.PipelineState state, Collection excludeDns, Collection excludePipelines) { lock.readLock().lock(); try { return pipelineStateMap - .getPipelines(type, factor, state, excludeDns, excludePipelines); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public List getPipelines(HddsProtos.ReplicationType type, - Pipeline.PipelineState... 
states) { - lock.readLock().lock(); - try { - return pipelineStateMap.getPipelines(type, states); + .getPipelines(replicationConfig, state, excludeDns, excludePipelines); } finally { lock.readLock().unlock(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java index 0af610125f50..ac95b0765466 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java @@ -17,12 +17,9 @@ */ package org.apache.hadoop.hdds.scm.pipeline; -import com.google.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.commons.lang3.builder.HashCodeBuilder; +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; import org.slf4j.Logger; @@ -46,7 +43,7 @@ class PipelineStateMap { private final Map pipelineMap; private final Map> pipeline2container; - private final Map> query2OpenPipelines; + private final Map> query2OpenPipelines; PipelineStateMap() { @@ -54,19 +51,9 @@ class PipelineStateMap { pipelineMap = new ConcurrentHashMap<>(); pipeline2container = new ConcurrentHashMap<>(); query2OpenPipelines = new HashMap<>(); - initializeQueryMap(); } - private void initializeQueryMap() { - for (ReplicationType type : ReplicationType.values()) { - for (ReplicationFactor factor : ReplicationFactor.values()) { - query2OpenPipelines - .put(new PipelineQuery(type, factor), new CopyOnWriteArrayList<>()); - } - } - } - /** * Adds provided pipeline in the data structures. * @@ -76,9 +63,11 @@ private void initializeQueryMap() { void addPipeline(Pipeline pipeline) throws IOException { Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); Preconditions.checkArgument( - pipeline.getNodes().size() == pipeline.getFactor().getNumber(), + pipeline.getNodes().size() == pipeline.getReplicationConfig() + .getRequiredNodes(), String.format("Nodes size=%d, replication factor=%d do not match ", - pipeline.getNodes().size(), pipeline.getFactor().getNumber())); + pipeline.getNodes().size(), pipeline.getReplicationConfig() + .getRequiredNodes())); if (pipelineMap.putIfAbsent(pipeline.getId(), pipeline) != null) { LOG.warn("Duplicate pipeline ID detected. {}", pipeline.getId()); @@ -87,7 +76,8 @@ void addPipeline(Pipeline pipeline) throws IOException { } pipeline2container.put(pipeline.getId(), new TreeSet<>()); if (pipeline.getPipelineState() == PipelineState.OPEN) { - query2OpenPipelines.get(new PipelineQuery(pipeline)).add(pipeline); + query2OpenPipelines.computeIfAbsent(pipeline.getReplicationConfig(), + any -> new CopyOnWriteArrayList<>()).add(pipeline); } } @@ -144,62 +134,16 @@ public List getPipelines() { /** * Get pipeline corresponding to specified replication type.
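The open-pipeline index above is now keyed directly by ReplicationConfig, with buckets created lazily via computeIfAbsent instead of the eager initializeQueryMap(). A minimal sketch of why this works — assuming, as the map usage here implies, that the ReplicationConfig implementations provide value-based equals/hashCode:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.CopyOnWriteArrayList;

    import org.apache.hadoop.hdds.client.RatisReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

    public final class LazyBucketSketch {
      public static void main(String[] args) {
        Map<ReplicationConfig, List<Pipeline>> openPipelines = new HashMap<>();

        // No eager initialization: the bucket appears on first use.
        openPipelines.computeIfAbsent(
            new RatisReplicationConfig(ReplicationFactor.THREE),
            any -> new CopyOnWriteArrayList<>());

        // A second, separately constructed config must find the same bucket,
        // which is the role PipelineQuery's equals/hashCode used to play.
        System.out.println(openPipelines.containsKey(
            new RatisReplicationConfig(ReplicationFactor.THREE))); // true
      }
    }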
* - * @param type - ReplicationType + * @param replicationConfig - ReplicationConfig * @return List of pipelines which have the specified replication type */ - List getPipelines(ReplicationType type) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - - List pipelines = new ArrayList<>(); - for (Pipeline pipeline : pipelineMap.values()) { - if (pipeline.getType() == type) { - pipelines.add(pipeline); - } - } - - return pipelines; - } - - /** - * Get pipeline corresponding to specified replication type and factor. - * - * @param type - ReplicationType - * @param factor - ReplicationFactor - * @return List of pipelines with specified replication type and factor - */ - List getPipelines(ReplicationType type, ReplicationFactor factor) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - Preconditions.checkNotNull(factor, "Replication factor cannot be null"); - - List pipelines = new ArrayList<>(); - for (Pipeline pipeline : pipelineMap.values()) { - if (pipeline.getType() == type && pipeline.getFactor() == factor) { - pipelines.add(pipeline); - } - } - - return pipelines; - } - - /** - * Get list of pipeline corresponding to specified replication type and - * pipeline states. - * - * @param type - ReplicationType - * @param states - Array of required PipelineState - * @return List of pipelines with specified replication type and states - */ - List getPipelines(ReplicationType type, PipelineState... states) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - Preconditions.checkNotNull(states, "Pipeline state cannot be null"); - - Set pipelineStates = new HashSet<>(); - pipelineStates.addAll(Arrays.asList(states)); + List getPipelines(ReplicationConfig replicationConfig) { + Preconditions + .checkNotNull(replicationConfig, "ReplicationConfig cannot be null"); List pipelines = new ArrayList<>(); for (Pipeline pipeline : pipelineMap.values()) { - if (pipeline.getType() == type - && pipelineStates.contains(pipeline.getPipelineState())) { + if (pipeline.getReplicationConfig().equals(replicationConfig)) { pipelines.add(pipeline); } } @@ -211,28 +155,27 @@ List getPipelines(ReplicationType type, PipelineState... states) { * Get list of pipeline corresponding to specified replication type, * replication factor and pipeline state. 
* - * @param type - ReplicationType - * @param state - Required PipelineState + * @param replicationConfig - ReplicationConfig + * @param state - Required PipelineState * @return List of pipelines with specified replication type, * replication factor and pipeline state */ - List getPipelines(ReplicationType type, ReplicationFactor factor, + List getPipelines(ReplicationConfig replicationConfig, PipelineState state) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - Preconditions.checkNotNull(factor, "Replication factor cannot be null"); + Preconditions + .checkNotNull(replicationConfig, "ReplicationConfig cannot be null"); Preconditions.checkNotNull(state, "Pipeline state cannot be null"); if (state == PipelineState.OPEN) { return new ArrayList<>( query2OpenPipelines.getOrDefault( - new PipelineQuery(type, factor), Collections.EMPTY_LIST)); + replicationConfig, Collections.EMPTY_LIST)); } List pipelines = new ArrayList<>(); for (Pipeline pipeline : pipelineMap.values()) { - if (pipeline.getType() == type - && pipeline.getPipelineState() == state - && pipeline.getFactor() == factor) { + if (pipeline.getReplicationConfig().equals(replicationConfig) + && pipeline.getPipelineState() == state) { pipelines.add(pipeline); } } @@ -244,18 +187,18 @@ List getPipelines(ReplicationType type, ReplicationFactor factor, * Get list of pipeline corresponding to specified replication type, * replication factor and pipeline state. * - * @param type - ReplicationType - * @param state - Required PipelineState - * @param excludeDns dns to exclude - * @param excludePipelines pipelines to exclude + * @param replicationConfig - ReplicationConfig + * @param state - Required PipelineState + * @param excludeDns dns to exclude + * @param excludePipelines pipelines to exclude * @return List of pipelines with specified replication type, * replication factor and pipeline state */ - List getPipelines(ReplicationType type, ReplicationFactor factor, + List getPipelines(ReplicationConfig replicationConfig, PipelineState state, Collection excludeDns, Collection excludePipelines) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - Preconditions.checkNotNull(factor, "Replication factor cannot be null"); + Preconditions + .checkNotNull(replicationConfig, "ReplicationConfig cannot be null"); Preconditions.checkNotNull(state, "Pipeline state cannot be null"); Preconditions .checkNotNull(excludeDns, "Datanode exclude list cannot be null"); @@ -265,7 +208,7 @@ List getPipelines(ReplicationType type, ReplicationFactor factor, List pipelines = null; if (state == PipelineState.OPEN) { pipelines = new ArrayList<>(query2OpenPipelines.getOrDefault( - new PipelineQuery(type, factor), Collections.EMPTY_LIST)); + replicationConfig, Collections.EMPTY_LIST)); } else { pipelines = new ArrayList<>(pipelineMap.values()); } @@ -273,9 +216,8 @@ List getPipelines(ReplicationType type, ReplicationFactor factor, Iterator iter = pipelines.iterator(); while (iter.hasNext()) { Pipeline pipeline = iter.next(); - if (pipeline.getType() != type || + if (!pipeline.getReplicationConfig().equals(replicationConfig) || pipeline.getPipelineState() != state || - pipeline.getFactor() != factor || excludePipelines.contains(pipeline.getId())) { iter.remove(); } else { @@ -397,13 +339,15 @@ Pipeline updatePipelineState(PipelineID pipelineID, PipelineState state) } Pipeline updatedPipeline = pipelineMap.compute(pipelineID, (id, p) -> Pipeline.newBuilder(pipeline).setState(state).build()); - PipelineQuery query = new
PipelineQuery(pipeline); - List pipelineList = query2OpenPipelines.get(query); + + List pipelineList = + query2OpenPipelines.get(pipeline.getReplicationConfig()); + if (updatedPipeline.getPipelineState() == PipelineState.OPEN) { // for transition to OPEN state add pipeline to query2OpenPipelines if (pipelineList == null) { pipelineList = new CopyOnWriteArrayList<>(); - query2OpenPipelines.put(query, pipelineList); + query2OpenPipelines.put(pipeline.getReplicationConfig(), pipelineList); } pipelineList.add(updatedPipeline); } else { @@ -416,39 +360,4 @@ Pipeline updatePipelineState(PipelineID pipelineID, PipelineState state) return updatedPipeline; } - private static class PipelineQuery { - private ReplicationType type; - private ReplicationFactor factor; - - PipelineQuery(ReplicationType type, ReplicationFactor factor) { - this.type = Preconditions.checkNotNull(type); - this.factor = Preconditions.checkNotNull(factor); - } - - PipelineQuery(Pipeline pipeline) { - type = pipeline.getType(); - factor = pipeline.getFactor(); - } - - @Override - @SuppressFBWarnings("NP_EQUALS_SHOULD_HANDLE_NULL_ARGUMENT") - public boolean equals(Object other) { - if (this == other) { - return true; - } - if (!this.getClass().equals(other.getClass())) { - return false; - } - PipelineQuery otherQuery = (PipelineQuery) other; - return type == otherQuery.type && factor == otherQuery.factor; - } - - @Override - public int hashCode() { - return new HashCodeBuilder() - .append(type) - .append(factor) - .toHashCode(); - } - } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 64101a584854..485ea8312a2f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -21,9 +21,9 @@ import java.io.IOException; import java.util.List; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.events.SCMEvents; @@ -38,7 +38,6 @@ import org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand; - import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; @@ -47,7 +46,8 @@ /** * Implements Api for creating ratis pipelines. 
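With PipelineQuery removed, updatePipelineState above keeps the open-pipeline index consistent by adding the updated pipeline when it transitions into OPEN and dropping the stale entry otherwise. A hypothetical, condensed rendering of that bookkeeping (the helper name and exact shape are illustrative only, not the patch's code):

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.CopyOnWriteArrayList;

    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;

    final class OpenIndexSketch {
      // Hypothetical helper mirroring the add-on-OPEN / remove-otherwise logic.
      static void updateOpenIndex(
          Map<ReplicationConfig, List<Pipeline>> openIndex,
          Pipeline before, Pipeline after) {
        List<Pipeline> bucket = openIndex.computeIfAbsent(
            before.getReplicationConfig(), any -> new CopyOnWriteArrayList<>());
        if (after.getPipelineState() == PipelineState.OPEN) {
          bucket.add(after);      // transition into OPEN: index the new state
        } else {
          bucket.remove(before);  // transition out of OPEN: drop stale entry
        }
      }
    }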
*/ -public class RatisPipelineProvider extends PipelineProvider { +public class RatisPipelineProvider + extends PipelineProvider { private static final Logger LOG = LoggerFactory.getLogger(RatisPipelineProvider.class); @@ -86,30 +86,29 @@ public RatisPipelineProvider(NodeManager nodeManager, } } - private boolean exceedPipelineNumberLimit(ReplicationFactor factor) { - if (factor != ReplicationFactor.THREE) { + private boolean exceedPipelineNumberLimit( + RatisReplicationConfig replicationConfig) { + if (replicationConfig.getReplicationFactor() != ReplicationFactor.THREE) { // Only put limits for Factor THREE pipelines. return false; } // Per datanode limit if (maxPipelinePerDatanode > 0) { - return (getPipelineStateManager().getPipelines( - ReplicationType.RATIS, factor).size() - - getPipelineStateManager().getPipelines(ReplicationType.RATIS, factor, + return (getPipelineStateManager().getPipelines(replicationConfig).size() - + getPipelineStateManager().getPipelines(replicationConfig, PipelineState.CLOSED).size()) > maxPipelinePerDatanode * getNodeManager().getNodeCount(NodeStatus.inServiceHealthy()) / - factor.getNumber(); + replicationConfig.getRequiredNodes(); } // Global limit if (pipelineNumberLimit > 0) { - return (getPipelineStateManager().getPipelines(ReplicationType.RATIS, - ReplicationFactor.THREE).size() - + return (getPipelineStateManager().getPipelines(replicationConfig).size() - getPipelineStateManager().getPipelines( - ReplicationType.RATIS, ReplicationFactor.THREE, - PipelineState.CLOSED).size()) > - (pipelineNumberLimit - getPipelineStateManager().getPipelines( - ReplicationType.RATIS, ReplicationFactor.ONE).size()); + replicationConfig, PipelineState.CLOSED).size()) > + (pipelineNumberLimit - getPipelineStateManager() + .getPipelines(new RatisReplicationConfig(ReplicationFactor.ONE)) + .size()); } return false; @@ -121,20 +120,22 @@ public LeaderChoosePolicy getLeaderChoosePolicy() { } @Override - public synchronized Pipeline create(ReplicationFactor factor) + public synchronized Pipeline create(RatisReplicationConfig replicationConfig) throws IOException { - if (exceedPipelineNumberLimit(factor)) { + if (exceedPipelineNumberLimit(replicationConfig)) { throw new SCMException("Ratis pipeline number meets the limit: " + - pipelineNumberLimit + " factor : " + - factor.getNumber(), + pipelineNumberLimit + " replicationConfig : " + + replicationConfig, SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); } List dns; - switch(factor) { + final ReplicationFactor factor = + replicationConfig.getReplicationFactor(); + switch (factor) { case ONE: - dns = pickNodesNeverUsed(ReplicationType.RATIS, ReplicationFactor.ONE); + dns = pickNodesNeverUsed(replicationConfig); break; case THREE: dns = placementPolicy.chooseDatanodes(null, @@ -149,8 +150,8 @@ public synchronized Pipeline create(ReplicationFactor factor) Pipeline pipeline = Pipeline.newBuilder() .setId(PipelineID.randomId()) .setState(PipelineState.ALLOCATED) - .setType(ReplicationType.RATIS) - .setFactor(factor) + .setReplicationConfig(new RatisReplicationConfig( + factor)) .setNodes(dns) .setSuggestedLeaderId( suggestedLeader != null ? 
suggestedLeader.getUuid() : null) @@ -176,13 +177,12 @@ public synchronized Pipeline create(ReplicationFactor factor) } @Override - public Pipeline create(ReplicationFactor factor, - List nodes) { + public Pipeline create(RatisReplicationConfig replicationConfig, + List nodes) { return Pipeline.newBuilder() .setId(PipelineID.randomId()) .setState(PipelineState.ALLOCATED) - .setType(ReplicationType.RATIS) - .setFactor(factor) + .setReplicationConfig(replicationConfig) .setNodes(nodes) .build(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index 7940f6d53cb1..0b9c14ac141e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -21,12 +21,12 @@ import java.util.List; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.ScmConfigKeys; - import org.apache.ratis.client.RaftClient; import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.protocol.RaftGroup; @@ -109,9 +109,8 @@ static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID, */ static List checkPipelineContainSameDatanodes( PipelineStateManager stateManager, Pipeline pipeline) { - return stateManager.getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE) + return stateManager.getPipelines(new RatisReplicationConfig( + ReplicationFactor.THREE)) .stream().filter(p -> !p.getId().equals(pipeline.getId()) && (p.getPipelineState() != Pipeline.PipelineState.CLOSED && p.sameDatanodes(pipeline))) @@ -128,9 +127,8 @@ static List checkPipelineContainSameDatanodes( */ static List checkPipelineContainSameDatanodes( StateManager stateManager, Pipeline pipeline) { - return stateManager.getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE) + return stateManager + .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE)) .stream().filter(p -> !p.getId().equals(pipeline.getId()) && (p.getPipelineState() != Pipeline.PipelineState.CLOSED && p.sameDatanodes(pipeline))) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index b22feab2a859..c96ce2d620fd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -33,7 +33,10 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @@ -53,8 +56,6 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -260,9 +261,10 @@ private void recordMetricsForPipeline(Pipeline pipeline) { } @Override - public Pipeline createPipeline(ReplicationType type, - ReplicationFactor factor) throws IOException { - if (!isPipelineCreationAllowed() && factor != ReplicationFactor.ONE) { + public Pipeline createPipeline(ReplicationConfig replicationConfig) + throws IOException { + if (!isPipelineCreationAllowed() + && replicationConfig.getRequiredNodes() != 1) { LOG.debug("Pipeline creation is not allowed until safe mode prechecks " + "complete"); throw new IOException("Pipeline creation is not allowed as safe mode " + @@ -270,7 +272,7 @@ public Pipeline createPipeline(ReplicationType type, } lock.writeLock().lock(); try { - Pipeline pipeline = pipelineFactory.create(type, factor); + Pipeline pipeline = pipelineFactory.create(replicationConfig); if (pipelineStore != null) { pipelineStore.put(pipeline.getId(), pipeline); } @@ -282,11 +284,11 @@ public Pipeline createPipeline(ReplicationType type, if (ex instanceof SCMException && ((SCMException) ex).getResult() == FAILED_TO_FIND_SUITABLE_NODE) { // Avoid spam SCM log with errors when SCM has enough open pipelines - LOG.debug("Can't create more pipelines of type {} and factor {}. " + - "Reason: {}", type, factor, ex.getMessage()); + LOG.debug("Can't create more pipelines of replicationConfig: {}. " + + "Reason: {}", replicationConfig, ex.getMessage()); } else { - LOG.error("Failed to create pipeline of type {} and factor {}. " + - "Exception: {}", type, factor, ex.getMessage()); + LOG.error("Failed to create pipeline of replicationConfig: {}. " + + "Exception: {}", replicationConfig, ex.getMessage()); } metrics.incNumPipelineCreationFailed(); throw ex; @@ -296,13 +298,13 @@ public Pipeline createPipeline(ReplicationType type, } @Override - public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor, + public Pipeline createPipeline(ReplicationConfig replicationConfig, List nodes) { // This will mostly be used to create dummy pipeline for SimplePipelines. // We don't update the metrics for SimplePipelines. 
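The safe-mode gate in createPipeline above now checks getRequiredNodes() != 1 rather than factor != ONE, so any single-node config remains creatable while the prechecks are pending. A usage sketch against the new single-argument API (pipelineManager is assumed to be an already initialized manager):

    import java.io.IOException;

    import org.apache.hadoop.hdds.client.RatisReplicationConfig;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;

    final class CreatePipelineSketch {
      static Pipeline createRatisThree(PipelineManager pipelineManager)
          throws IOException {
        // Requires three nodes, so this throws while safe-mode prechecks are
        // still pending; a ONE-node config would be allowed immediately.
        return pipelineManager.createPipeline(
            new RatisReplicationConfig(ReplicationFactor.THREE));
      }
    }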
lock.writeLock().lock(); try { - return pipelineFactory.create(type, factor, nodes); + return pipelineFactory.create(replicationConfig, nodes); } finally { lock.writeLock().unlock(); } @@ -343,57 +345,35 @@ public List getPipelines() { } @Override - public List getPipelines(ReplicationType type) { - lock.readLock().lock(); - try { - return stateManager.getPipelines(type); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public List getPipelines(ReplicationType type, - ReplicationFactor factor) { + public List getPipelines(ReplicationConfig replicationConfig) { lock.readLock().lock(); try { - return stateManager.getPipelines(type, factor); + return stateManager.getPipelines(replicationConfig); } finally { lock.readLock().unlock(); } } @Override - public List getPipelines(ReplicationType type, + public List getPipelines(ReplicationConfig replicationConfig, Pipeline.PipelineState state) { lock.readLock().lock(); try { - return stateManager.getPipelines(type, state); + return stateManager.getPipelines(replicationConfig, state); } finally { lock.readLock().unlock(); } } @Override - public List getPipelines(ReplicationType type, - ReplicationFactor factor, Pipeline.PipelineState state) { - lock.readLock().lock(); - try { - return stateManager.getPipelines(type, factor, state); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public List getPipelines(ReplicationType type, - ReplicationFactor factor, Pipeline.PipelineState state, + public List getPipelines(ReplicationConfig replicationConfig, + Pipeline.PipelineState state, Collection excludeDns, Collection excludePipelines) { lock.readLock().lock(); try { return stateManager - .getPipelines(type, factor, state, excludeDns, excludePipelines); + .getPipelines(replicationConfig, state, excludeDns, excludePipelines); } finally { lock.readLock().unlock(); } @@ -513,14 +493,12 @@ public void closePipeline(Pipeline pipeline, boolean onTimeout) /** * Scrub pipelines. - * @param type Pipeline type - * @param factor Pipeline factor - * @throws IOException */ @Override - public void scrubPipeline(ReplicationType type, ReplicationFactor factor) - throws IOException{ - if (type != ReplicationType.RATIS || factor != ReplicationFactor.THREE) { + public void scrubPipeline(ReplicationConfig replicationConfig) + throws IOException { + if (!RatisReplicationConfig.hasFactor(replicationConfig, + ReplicationFactor.THREE)) { // Only scrub pipeline for RATIS THREE pipeline return; } @@ -530,7 +508,7 @@ public void scrubPipeline(ReplicationType type, ReplicationFactor factor) ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - List candidates = stateManager.getPipelines(type, factor); + List candidates = stateManager.getPipelines(replicationConfig); for (Pipeline p : candidates) { // scrub pipelines that stay ALLOCATED for too long.
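scrubPipeline is now gated by RatisReplicationConfig.hasFactor, which matches only a Ratis config carrying the requested factor, so a STAND_ALONE config with three nodes does not qualify. A small sketch of the gate and the call, assuming an initialized pipelineManager and the hasFactor semantics used in this patch:

    import java.io.IOException;

    import org.apache.hadoop.hdds.client.RatisReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;

    final class ScrubSketch {
      static void scrub(PipelineManager pipelineManager) throws IOException {
        ReplicationConfig ratisThree =
            new RatisReplicationConfig(ReplicationFactor.THREE);
        ReplicationConfig standaloneThree =
            new StandaloneReplicationConfig(ReplicationFactor.THREE);

        // true: a Ratis config with the requested factor.
        System.out.println(RatisReplicationConfig.hasFactor(
            ratisThree, ReplicationFactor.THREE));
        // false: same factor, but not a Ratis config.
        System.out.println(RatisReplicationConfig.hasFactor(
            standaloneThree, ReplicationFactor.THREE));

        // Scrubs pipelines stuck in ALLOCATED; a no-op for any other config.
        pipelineManager.scrubPipeline(ratisThree);
      }
    }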
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java index c6424dc040b3..af3c7b2c4142 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java @@ -119,12 +119,12 @@ void createPerPipelineMetrics(Pipeline pipeline) { public static String getBlockAllocationMetricName(Pipeline pipeline) { return "NumBlocksAllocated-" + pipeline.getType() + "-" + pipeline - .getFactor() + "-" + pipeline.getId().getId(); + .getReplicationConfig().toString() + "-" + pipeline.getId().getId(); } public static String getBytesWrittenMetricName(Pipeline pipeline) { return "NumPipelineBytesWritten-" + pipeline.getType() + "-" + pipeline - .getFactor() + "-" + pipeline.getId().getId(); + .getReplicationConfig().toString() + "-" + pipeline.getId().getId(); } void removePipelineMetrics(PipelineID pipelineID) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java index 9973a0a336a2..98b700ecacdc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java @@ -18,9 +18,8 @@ package org.apache.hadoop.hdds.scm.pipeline; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; @@ -31,7 +30,8 @@ /** * Implements Api for creating stand alone pipelines. 
*/ -public class SimplePipelineProvider extends PipelineProvider { +public class SimplePipelineProvider + extends PipelineProvider { public SimplePipelineProvider(NodeManager nodeManager, StateManager stateManager) { @@ -39,13 +39,13 @@ public SimplePipelineProvider(NodeManager nodeManager, } @Override - public Pipeline create(ReplicationFactor factor) throws IOException { - List dns = pickNodesNeverUsed(ReplicationType.STAND_ALONE, - factor); - if (dns.size() < factor.getNumber()) { + public Pipeline create(StandaloneReplicationConfig replicationConfig) + throws IOException { + List dns = pickNodesNeverUsed(replicationConfig); + if (dns.size() < replicationConfig.getRequiredNodes()) { String e = String .format("Cannot create pipeline of factor %d using %d nodes.", - factor.getNumber(), dns.size()); + replicationConfig.getRequiredNodes(), dns.size()); throw new InsufficientDatanodesException(e); } @@ -53,20 +53,19 @@ public Pipeline create(ReplicationFactor factor) throws IOException { return Pipeline.newBuilder() .setId(PipelineID.randomId()) .setState(PipelineState.OPEN) - .setType(ReplicationType.STAND_ALONE) - .setFactor(factor) - .setNodes(dns.subList(0, factor.getNumber())) + .setReplicationConfig(replicationConfig) + .setNodes(dns.subList(0, + replicationConfig.getReplicationFactor().getNumber())) .build(); } @Override - public Pipeline create(ReplicationFactor factor, + public Pipeline create(StandaloneReplicationConfig replicationConfig, List nodes) { return Pipeline.newBuilder() .setId(PipelineID.randomId()) .setState(PipelineState.OPEN) - .setType(ReplicationType.STAND_ALONE) - .setFactor(factor) + .setReplicationConfig(replicationConfig) .setNodes(nodes) .build(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java index e8a39020bd84..c075dea250d6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hdds.scm.pipeline; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -59,41 +60,42 @@ void removePipeline(HddsProtos.PipelineID pipelineIDProto) * @throws IOException */ @Replicate - void updatePipelineState(HddsProtos.PipelineID pipelineIDProto, - HddsProtos.PipelineState newState) + void updatePipelineState( + HddsProtos.PipelineID pipelineIDProto, + HddsProtos.PipelineState newState + ) throws IOException; - void addContainerToPipeline(PipelineID pipelineID, - ContainerID containerID) throws IOException; + void addContainerToPipeline( + PipelineID pipelineID, + ContainerID containerID + ) throws IOException; Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException; List getPipelines(); - List getPipelines(HddsProtos.ReplicationType type); + List getPipelines( + ReplicationConfig replicationConfig + ); - List getPipelines(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor); + List getPipelines( + ReplicationConfig replicationConfig, + Pipeline.PipelineState state + ); - List getPipelines(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, - Pipeline.PipelineState state); - - List 
getPipelines(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, - Pipeline.PipelineState state, - Collection excludeDns, - Collection excludePipelines); - - List getPipelines(HddsProtos.ReplicationType type, - Pipeline.PipelineState... states); + List getPipelines( + ReplicationConfig replicationConfig, + Pipeline.PipelineState state, + Collection excludeDns, + Collection excludePipelines + ); NavigableSet getContainers(PipelineID pipelineID) throws IOException; int getNumberOfContainers(PipelineID pipelineID) throws IOException; - void removeContainerFromPipeline(PipelineID pipelineID, ContainerID containerID) throws IOException; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java index da40c10e6b47..950e6da925e4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -22,6 +22,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; @@ -182,8 +183,11 @@ public AllocateScmBlockResponseProto allocateScmBlock( throws IOException { List allocatedBlocks = impl.allocateBlock(request.getSize(), - request.getNumBlocks(), request.getType(), - request.getFactor(), request.getOwner(), + request.getNumBlocks(), + ReplicationConfig.fromProto( + request.getType(), + request.getFactor()), + request.getOwner(), ExcludeList.getFromProtoBuf(request.getExcludeList())); AllocateScmBlockResponseProto.Builder builder = @@ -204,7 +208,8 @@ public AllocateScmBlockResponseProto allocateScmBlock( } public DeleteScmKeyBlocksResponseProto deleteScmKeyBlocks( - DeleteScmKeyBlocksRequestProto req) + DeleteScmKeyBlocksRequestProto req + ) throws IOException { DeleteScmKeyBlocksResponseProto.Builder resp = DeleteScmKeyBlocksResponseProto.newBuilder(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java index d8c57785c8cf..14bc58bc32b5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java @@ -21,6 +21,7 @@ import java.util.Set; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.events.SCMEvents; @@ -77,7 +78,7 @@ public class HealthyPipelineSafeModeRule extends SafeModeExitRule { // We want to wait for RATIS THREE factor write pipelines int pipelineCount = pipelineManager.getPipelines( - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, + new RatisReplicationConfig(HddsProtos.ReplicationFactor.THREE), 
Pipeline.PipelineState.OPEN).size(); // This value will be zero when pipeline count is 0. @@ -117,7 +118,8 @@ protected void process(Pipeline pipeline) { // create new pipelines. Preconditions.checkNotNull(pipeline); if (pipeline.getType() == HddsProtos.ReplicationType.RATIS && - pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE && + ((RatisReplicationConfig) pipeline.getReplicationConfig()) + .getReplicationFactor() == HddsProtos.ReplicationFactor.THREE && !processedPipelineIDs.contains(pipeline.getId())) { getSafeModeMetrics().incCurrentHealthyPipelinesCount(); currentHealthyPipelineCount++; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java index 5268bc9045e2..3f5bcd68f350 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java @@ -22,8 +22,9 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -75,8 +76,8 @@ public OneReplicaPipelineSafeModeRule(String ruleName, EventQueue eventQueue, this.pipelineManager = pipelineManager; oldPipelineIDSet = pipelineManager.getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) + new RatisReplicationConfig(ReplicationFactor.THREE), + Pipeline.PipelineState.OPEN) .stream().map(p -> p.getId()).collect(Collectors.toSet()); int totalPipelineCount = oldPipelineIDSet.size(); @@ -107,17 +108,18 @@ protected void process(PipelineReportFromDatanode report) { Pipeline pipeline; try { pipeline = pipelineManager.getPipeline( - PipelineID.getFromProtobuf(report1.getPipelineID())); + PipelineID.getFromProtobuf(report1.getPipelineID())); } catch (PipelineNotFoundException pnfe) { continue; } - if (pipeline.getType() == HddsProtos.ReplicationType.RATIS && - pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE && - pipeline.isOpen() && - !reportedPipelineIDSet.contains(pipeline.getId())) { + + if (RatisReplicationConfig + .hasFactor(pipeline.getReplicationConfig(), ReplicationFactor.THREE) + && pipeline.isOpen() && + !reportedPipelineIDSet.contains(pipeline.getId())) { if (oldPipelineIDSet.contains(pipeline.getId())) { getSafeModeMetrics(). 
- incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount(); + incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount(); currentReportedPipelineCount++; reportedPipelineIDSet.add(pipeline.getId()); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index 3a1181e62572..e3aceebee828 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -29,9 +29,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.ScmInfo; @@ -169,14 +169,15 @@ public void join() throws InterruptedException { } @Override - public List allocateBlock(long size, int num, - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, - String owner, ExcludeList excludeList) throws IOException { + public List allocateBlock( + long size, int num, + ReplicationConfig replicationConfig, + String owner, ExcludeList excludeList + ) throws IOException { Map auditMap = Maps.newHashMap(); auditMap.put("size", String.valueOf(size)); auditMap.put("num", String.valueOf(num)); - auditMap.put("type", type.name()); - auditMap.put("factor", factor.name()); + auditMap.put("replication", replicationConfig.toString()); auditMap.put("owner", owner); List blocks = new ArrayList<>(num); boolean auditSuccess = true; @@ -188,7 +189,7 @@ public List allocateBlock(long size, int num, try { for (int i = 0; i < num; i++) { AllocatedBlock block = scm.getScmBlockManager() - .allocateBlock(size, type, factor, owner, excludeList); + .allocateBlock(size, replicationConfig, owner, excludeList); if (block != null) { blocks.add(block); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 4dda2966711a..1d5f358e3038 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -27,6 +27,8 @@ import com.google.protobuf.ProtocolMessageEnum; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -190,7 +192,9 @@ public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType getScm().checkAdminAccess(getRpcRemoteUsername()); final ContainerInfo container = scm.getContainerManager() - .allocateContainer(replicationType, factor, owner); + .allocateContainer( + ReplicationConfig.fromTypeAndFactor(replicationType, factor), + owner); 
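At the RPC boundary the legacy (type, factor) pair is folded into a ReplicationConfig on the way in (fromTypeAndFactor and fromProto above) and unfolded with getLegacyFactor wherever the wire format still carries a factor field. A round-trip sketch using the converters that appear in this patch:

    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;

    public final class LegacyBridgeSketch {
      public static void main(String[] args) {
        // Fold the legacy wire pair into a typed config object...
        ReplicationConfig config = ReplicationConfig.fromTypeAndFactor(
            ReplicationType.RATIS, ReplicationFactor.THREE);

        // ...then unfold it for messages that still expect a factor field.
        ReplicationFactor factor = ReplicationConfig.getLegacyFactor(config);
        System.out.println(config.getReplicationType() + "/" + factor);
      }
    }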
final Pipeline pipeline = scm.getPipelineManager() .getPipeline(container.getPipelineID()); return new ContainerWithPipeline(container, pipeline); @@ -249,8 +253,7 @@ private ContainerWithPipeline getContainerWithPipelineCommon( if (pipeline == null) { pipeline = scm.getPipelineManager().createPipeline( - HddsProtos.ReplicationType.STAND_ALONE, - container.getReplicationFactor(), + new StandaloneReplicationConfig(container.getReplicationFactor()), scm.getContainerManager() .getContainerReplicas(cid).stream() .map(ContainerReplica::getDatanodeDetails) @@ -506,7 +509,8 @@ public void closeContainer(long containerID) throws IOException { public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) throws IOException { - Pipeline result = scm.getPipelineManager().createPipeline(type, factor); + Pipeline result = scm.getPipelineManager() + .createPipeline(ReplicationConfig.fromTypeAndFactor(type, factor)); AUDIT.logWriteSuccess( buildAuditMessageForSuccess(SCMAction.CREATE_PIPELINE, null)); return result; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index 6aa4cfe5cec8..8d76b24328cf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -18,8 +18,10 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineAction; import org.apache.hadoop.hdds.protocol.proto @@ -315,11 +317,12 @@ public static PipelineReportFromDatanode getPipelineReportFromDatanode( public static void openAllRatisPipelines(PipelineManager pipelineManager) throws IOException { // Pipeline is created by background thread - List pipelines = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS); - // Trigger the processed pipeline report event - for (Pipeline pipeline : pipelines) { - pipelineManager.openPipeline(pipeline.getId()); + for (ReplicationFactor factor : ReplicationFactor.values()) { + // Trigger the processed pipeline report event + for (Pipeline pipeline : pipelineManager + .getPipelines(new RatisReplicationConfig(factor))) { + pipelineManager.openPipeline(pipeline.getId()); + } } } @@ -435,8 +438,8 @@ public static CommandStatusReportsProto createCommandStatusReport( allocateContainer(ContainerManagerV2 containerManager) throws IOException { return containerManager - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, "root"); + .allocateContainer(new RatisReplicationConfig(ReplicationFactor.THREE), + "root"); } @@ -584,11 +587,11 @@ public static Pipeline getRandomPipeline() { nodes.add(MockDatanodeDetails.randomDatanodeDetails()); nodes.add(MockDatanodeDetails.randomDatanodeDetails()); return Pipeline.newBuilder() - .setFactor(HddsProtos.ReplicationFactor.THREE) + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) .setId(PipelineID.randomId()) .setNodes(nodes) .setState(Pipeline.PipelineState.OPEN) - 
.setType(HddsProtos.ReplicationType.RATIS) .build(); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 8027cb6fac44..8a472cef488d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -29,8 +29,11 @@ import java.util.concurrent.ExecutorService; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.TestUtils; @@ -90,8 +93,6 @@ public class TestBlockManager { private SCMHAManager scmHAManager; private SequenceIdGenerator sequenceIdGen; private static final long DEFAULT_BLOCK_SIZE = 128 * MB; - private HddsProtos.ReplicationFactor factor; - private HddsProtos.ReplicationType type; private EventQueue eventQueue; private SCMContext scmContext; private SCMServiceManager serviceManager; @@ -104,6 +105,7 @@ public class TestBlockManager { @Rule public TemporaryFolder folder= new TemporaryFolder(); private SCMMetadataStore scmMetadataStore; + private ReplicationConfig replicationConfig; @Before public void setUp() throws Exception { @@ -180,8 +182,7 @@ public void emitSafeModeStatus() { CloseContainerEventHandler closeContainerHandler = new CloseContainerEventHandler(pipelineManager, mapping, scmContext); eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler); - factor = HddsProtos.ReplicationFactor.THREE; - type = HddsProtos.ReplicationType.RATIS; + replicationConfig = new RatisReplicationConfig(ReplicationFactor.THREE); scm.getScmContext().updateSafeModeStatus(new SafeModeStatus(false, true)); } @@ -196,10 +197,10 @@ public void cleanup() throws Exception { @Test public void testAllocateBlock() throws Exception { - pipelineManager.createPipeline(type, factor); + pipelineManager.createPipeline(replicationConfig); TestUtils.openAllRatisPipelines(pipelineManager); AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - type, factor, OzoneConsts.OZONE, new ExcludeList()); + replicationConfig, OzoneConsts.OZONE, new ExcludeList()); Assert.assertNotNull(block); } @@ -207,27 +208,28 @@ public void testAllocateBlock() throws Exception { public void testAllocateBlockWithExclusion() throws Exception { try { while (true) { - pipelineManager.createPipeline(type, factor); + pipelineManager.createPipeline(replicationConfig); } } catch (IOException e) { } TestUtils.openAllRatisPipelines(pipelineManager); ExcludeList excludeList = new ExcludeList(); excludeList - .addPipeline(pipelineManager.getPipelines(type, factor).get(0).getId()); + .addPipeline(pipelineManager.getPipelines(replicationConfig) + .get(0).getId()); AllocatedBlock block = blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE, + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, excludeList); Assert.assertNotNull(block); for (PipelineID id : excludeList.getPipelineIds()) { 
Assert.assertNotEquals(block.getPipeline().getId(), id); } - for (Pipeline pipeline : pipelineManager.getPipelines(type, factor)) { + for (Pipeline pipeline : pipelineManager.getPipelines(replicationConfig)) { excludeList.addPipeline(pipeline.getId()); } block = blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE, + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, excludeList); Assert.assertNotNull(block); Assert.assertTrue( @@ -249,7 +251,7 @@ public void testAllocateBlockInParallel() throws Exception { CompletableFuture.supplyAsync(() -> { try { future.complete(blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, new ExcludeList())); } catch (IOException e) { @@ -277,7 +279,7 @@ public void testBlockDistribution() throws Exception { for (int i = 0; i < threadCount; i++) { executors.add(Executors.newSingleThreadExecutor()); } - pipelineManager.createPipeline(type, factor); + pipelineManager.createPipeline(replicationConfig); TestUtils.openAllRatisPipelines(pipelineManager); Map> allocatedBlockMap = new ConcurrentHashMap<>(); @@ -290,9 +292,9 @@ public void testBlockDistribution() throws Exception { try { List blockList; AllocatedBlock block = blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, - OzoneConsts.OZONE, - new ExcludeList()); + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, + OzoneConsts.OZONE, + new ExcludeList()); long containerId = block.getBlockID().getContainerID(); if (!allocatedBlockMap.containsKey(containerId)) { blockList = new ArrayList<>(); @@ -313,7 +315,9 @@ public void testBlockDistribution() throws Exception { CompletableFuture .allOf(futureList.toArray( new CompletableFuture[futureList.size()])).get(); - Assert.assertTrue(pipelineManager.getPipelines(type).size() == 1); + + Assert.assertTrue( + pipelineManager.getPipelines(replicationConfig).size() == 1); Assert.assertTrue( allocatedBlockMap.size() == numContainerPerOwnerInPipeline); Assert.assertTrue(allocatedBlockMap. 
@@ -337,7 +341,7 @@ public void testBlockDistributionWithMultipleDisks() throws Exception { for (int i = 0; i < threadCount; i++) { executors.add(Executors.newSingleThreadExecutor()); } - pipelineManager.createPipeline(type, factor); + pipelineManager.createPipeline(replicationConfig); TestUtils.openAllRatisPipelines(pipelineManager); Map> allocatedBlockMap = new ConcurrentHashMap<>(); @@ -350,9 +354,9 @@ public void testBlockDistributionWithMultipleDisks() throws Exception { try { List blockList; AllocatedBlock block = blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, - OzoneConsts.OZONE, - new ExcludeList()); + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, + OzoneConsts.OZONE, + new ExcludeList()); long containerId = block.getBlockID().getContainerID(); if (!allocatedBlockMap.containsKey(containerId)) { blockList = new ArrayList<>(); @@ -374,8 +378,9 @@ public void testBlockDistributionWithMultipleDisks() throws Exception { .allOf(futureList.toArray( new CompletableFuture[futureList.size()])).get(); Assert.assertEquals(1, - pipelineManager.getPipelines(type).size()); - Pipeline pipeline = pipelineManager.getPipelines(type).get(0); + pipelineManager.getPipelines(replicationConfig).size()); + Pipeline pipeline = + pipelineManager.getPipelines(replicationConfig).get(0); // total no of containers to be created will be number of healthy // volumes * number of numContainerPerOwnerInPipeline which is equal to // the thread count @@ -404,7 +409,7 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { for (int i = 0; i < threadCount; i++) { executors.add(Executors.newSingleThreadExecutor()); } - pipelineManager.createPipeline(type, factor); + pipelineManager.createPipeline(replicationConfig); TestUtils.openAllRatisPipelines(pipelineManager); Map> allocatedBlockMap = new ConcurrentHashMap<>(); @@ -417,7 +422,7 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { try { List blockList; AllocatedBlock block = blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, new ExcludeList()); long containerId = block.getBlockID().getContainerID(); @@ -441,8 +446,9 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { .allOf(futureList.toArray( new CompletableFuture[futureList.size()])).get(); Assert.assertTrue( - pipelineManager.getPipelines(type).size() == 1); - Pipeline pipeline = pipelineManager.getPipelines(type).get(0); + pipelineManager.getPipelines(replicationConfig).size() == 1); + Pipeline pipeline = + pipelineManager.getPipelines(replicationConfig).get(0); // the pipeline per raft log disk config is set to 1 by default int numContainers = (int)Math.ceil((double) (numContainerPerOwnerInPipeline * @@ -463,7 +469,7 @@ public void testAllocateOversizedBlock() throws Exception { long size = 6 * GB; thrown.expectMessage("Unsupported block size"); blockManager.allocateBlock(size, - type, factor, OzoneConsts.OZONE, new ExcludeList()); + replicationConfig, OzoneConsts.OZONE, new ExcludeList()); } @@ -475,32 +481,33 @@ public void testAllocateBlockFailureInSafeMode() throws Exception { thrown.expectMessage("SafeModePrecheck failed for " + "allocateBlock"); blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - type, factor, OzoneConsts.OZONE, new ExcludeList()); + replicationConfig, OzoneConsts.OZONE, new ExcludeList()); } @Test public void testAllocateBlockSucInSafeMode() throws Exception { // Test2: Exit safe mode and then try 
allocateBlock again. Assert.assertNotNull(blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - type, factor, OzoneConsts.OZONE, new ExcludeList())); + replicationConfig, OzoneConsts.OZONE, new ExcludeList())); } @Test(timeout = 10000) public void testMultipleBlockAllocation() throws IOException, TimeoutException, InterruptedException { - pipelineManager.createPipeline(type, factor); - pipelineManager.createPipeline(type, factor); + pipelineManager.createPipeline(replicationConfig); + pipelineManager.createPipeline(replicationConfig); TestUtils.openAllRatisPipelines(pipelineManager); AllocatedBlock allocatedBlock = blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE, + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, new ExcludeList()); // block should be allocated in different pipelines GenericTestUtils.waitFor(() -> { try { AllocatedBlock block = blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE, + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, + OzoneConsts.OZONE, new ExcludeList()); return !block.getPipeline().getId() .equals(allocatedBlock.getPipeline().getId()); @@ -513,7 +520,8 @@ public void testMultipleBlockAllocation() private boolean verifyNumberOfContainersInPipelines( int numContainersPerPipeline) { try { - for (Pipeline pipeline : pipelineManager.getPipelines(type, factor)) { + for (Pipeline pipeline : pipelineManager + .getPipelines(replicationConfig)) { if (pipelineManager.getNumberOfContainers(pipeline.getId()) != numContainersPerPipeline) { return false; @@ -533,8 +541,8 @@ public void testMultipleBlockAllocationWithClosedContainer() // create pipelines for (int i = 0; i < nodeManager.getNodes(NodeStatus.inServiceHealthy()).size() - / factor.getNumber(); i++) { - pipelineManager.createPipeline(type, factor); + / replicationConfig.getRequiredNodes(); i++) { + pipelineManager.createPipeline(replicationConfig); } TestUtils.openAllRatisPipelines(pipelineManager); @@ -544,7 +552,8 @@ public void testMultipleBlockAllocationWithClosedContainer() GenericTestUtils.waitFor(() -> { try { blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE, + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, + OzoneConsts.OZONE, new ExcludeList()); } catch (IOException e) { } @@ -553,7 +562,7 @@ public void testMultipleBlockAllocationWithClosedContainer() // close all the containers in all the pipelines - for (Pipeline pipeline : pipelineManager.getPipelines(type, factor)) { + for (Pipeline pipeline : pipelineManager.getPipelines(replicationConfig)) { for (ContainerID cid : pipelineManager .getContainersInPipeline(pipeline.getId())) { eventQueue.fireEvent(SCMEvents.CLOSE_CONTAINER, cid); @@ -568,7 +577,8 @@ public void testMultipleBlockAllocationWithClosedContainer() GenericTestUtils.waitFor(() -> { try { blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, OzoneConsts.OZONE, + .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, + OzoneConsts.OZONE, new ExcludeList()); } catch (IOException e) { } @@ -583,9 +593,10 @@ public void testBlockAllocationWithNoAvailablePipelines() for (Pipeline pipeline : pipelineManager.getPipelines()) { pipelineManager.closePipeline(pipeline, false); } - Assert.assertEquals(0, pipelineManager.getPipelines(type, factor).size()); + Assert.assertEquals(0, + pipelineManager.getPipelines(replicationConfig).size()); Assert.assertNotNull(blockManager -
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index edf8e0c21e59..0d1da264e8cf 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -19,6 +19,8 @@
 
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
@@ -36,7 +38,6 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.protocol.proto
@@ -408,9 +409,9 @@ private void mockContainerInfo(long containerID, DatanodeDetails dd)
       throws IOException {
     List<DatanodeDetails> dns = Collections.singletonList(dd);
     Pipeline pipeline = Pipeline.newBuilder()
-        .setType(ReplicationType.STAND_ALONE)
-        .setFactor(ReplicationFactor.ONE)
-        .setState(Pipeline.PipelineState.OPEN)
+        .setReplicationConfig(
+            new StandaloneReplicationConfig(ReplicationFactor.ONE))
+        .setState(Pipeline.PipelineState.OPEN)
         .setId(PipelineID.randomId())
         .setNodes(dns)
         .build();
@@ -419,7 +420,8 @@ private void mockContainerInfo(long containerID, DatanodeDetails dd)
     builder.setContainerID(containerID)
         .setPipelineID(pipeline.getId())
         .setReplicationType(pipeline.getType())
-        .setReplicationFactor(pipeline.getFactor());
+        .setReplicationFactor(
+            ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig()));
 
     ContainerInfo containerInfo = builder.build();
     Mockito.doReturn(containerInfo).when(containerManager)
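mockContainerInfo() above shows the two bridge helpers this patch leans on while protobuf messages still carry a bare type/factor pair. A sketch of the round trip, using only names that appear in this diff; behaviour beyond these call sites is an assumption:

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;

public final class LegacyFactorRoundTrip {
  public static void main(String[] args) {
    // proto pair -> typed config (here: a StandaloneReplicationConfig)
    ReplicationConfig config = ReplicationConfig.fromTypeAndFactor(
        ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
    // typed config -> proto factor, for messages that still need the enum
    ReplicationFactor factor = ReplicationConfig.getLegacyFactor(config);
    System.out.println(config.getReplicationType() + "/" + factor);
  }
}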
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index a59468551353..954eb88c8c92 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -23,9 +23,11 @@
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
@@ -164,8 +166,8 @@ public void testCloseContainerEventWithInvalidContainer() {
   @Test
   public void testCloseContainerEventWithValidContainers()
       throws IOException {
     ContainerInfo container = containerManager
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
+        .allocateContainer(new RatisReplicationConfig(
+            ReplicationFactor.ONE), OzoneConsts.OZONE);
     ContainerID id = container.containerID();
     DatanodeDetails datanode = pipelineManager
         .getPipeline(container.getPipelineID()).getFirstNode();
@@ -183,8 +185,8 @@ public void testCloseContainerEventWithRatis() throws IOException {
     GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
     ContainerInfo container = containerManager
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
+        .allocateContainer(new RatisReplicationConfig(
+            ReplicationFactor.THREE), OzoneConsts.OZONE);
     ContainerID id = container.containerID();
     int[] closeCount = new int[3];
     eventQueue.fireEvent(CLOSE_CONTAINER, id);
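The close-container tests above and the container-manager tests below allocate through the same reshaped entry point. A fragment showing the new call shape, assuming the test class's containerManager and pipelineManager fixtures; every method used here appears elsewhere in this diff:

// Old: allocateContainer(ReplicationType.RATIS, ReplicationFactor.ONE, owner)
ContainerInfo container = containerManager.allocateContainer(
    new RatisReplicationConfig(ReplicationFactor.ONE), OzoneConsts.OZONE);
// The pipeline behind it still yields the proto factor via the bridge:
ReplicationFactor factor = ReplicationConfig.getLegacyFactor(
    pipelineManager.getPipeline(container.getPipelineID())
        .getReplicationConfig());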
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
index 2ce3fb00f934..a660e2d7ce7c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
@@ -22,8 +22,10 @@
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
 import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
 import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator;
@@ -64,8 +66,8 @@ public void setUp() throws Exception {
     sequenceIdGen = new SequenceIdGenerator(
         conf, scmhaManager, SCMDBDefinition.SEQUENCE_ID.getTable(dbStore));
     final PipelineManager pipelineManager = MockPipelineManager.getInstance();
-    pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.THREE);
+    pipelineManager.createPipeline(new RatisReplicationConfig(
+        ReplicationFactor.THREE));
     containerManager = new ContainerManagerImpl(conf,
         scmhaManager, sequenceIdGen, pipelineManager,
         SCMDBDefinition.CONTAINERS.getTable(dbStore));
@@ -89,8 +91,8 @@ public void testAllocateContainer() throws Exception {
     Assert.assertTrue(
         containerManager.getContainers().isEmpty());
     final ContainerInfo container = containerManager.allocateContainer(
-        HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.THREE, "admin");
+        new RatisReplicationConfig(
+            ReplicationFactor.THREE), "admin");
     Assert.assertEquals(1, containerManager.getContainers().size());
     Assert.assertNotNull(containerManager.getContainer(
         container.containerID()));
@@ -99,8 +101,8 @@
   @Test
   public void testUpdateContainerState() throws Exception {
     final ContainerInfo container = containerManager.allocateContainer(
-        HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.THREE, "admin");
+        new RatisReplicationConfig(
+            ReplicationFactor.THREE), "admin");
     final ContainerID cid = container.containerID();
     Assert.assertEquals(HddsProtos.LifeCycleState.OPEN,
         containerManager.getContainer(cid).getState());
@@ -126,8 +128,8 @@
   public void testGetContainers() throws Exception{
     ContainerID[] cidArray = new ContainerID[10];
    for(int i = 0; i < 10; i++){
      ContainerInfo container = containerManager.allocateContainer(
-        HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.THREE, "admin");
+        new RatisReplicationConfig(
+            ReplicationFactor.THREE), "admin");
      cidArray[i] = container.containerID();
    }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index b8bae2225257..2a5fc0844bf0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -21,10 +21,11 @@
 import java.util.ArrayList;
 import java.util.Set;
 
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
@@ -111,16 +112,16 @@ private ContainerInfo allocateContainer() throws IOException {
     Pipeline pipeline =
         Pipeline.newBuilder().setState(Pipeline.PipelineState.CLOSED)
             .setId(PipelineID.randomId())
-            .setType(HddsProtos.ReplicationType.STAND_ALONE)
-            .setFactor(HddsProtos.ReplicationFactor.THREE)
+            .setReplicationConfig(new StandaloneReplicationConfig(
+                ReplicationFactor.THREE))
             .setNodes(new ArrayList<>()).build();
 
-    when(pipelineManager.createPipeline(HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.THREE)).thenReturn(pipeline);
+    when(pipelineManager.createPipeline(new StandaloneReplicationConfig(
+        ReplicationFactor.THREE))).thenReturn(pipeline);
 
     return containerStateManager.allocateContainer(pipelineManager,
-        HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.THREE, "root");
+        new StandaloneReplicationConfig(
+            ReplicationFactor.THREE), "root");
   }
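One detail worth noting in TestContainerStateManager above: the Mockito stub is keyed on a freshly constructed StandaloneReplicationConfig, and when(...) matches arguments via equals(). That only works if the ReplicationConfig implementations define value equality — a property this sketch assumes rather than something the patch shows. Fragment, assuming Mockito on the classpath and a `pipeline` built as in the test:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

PipelineManager pipelineManager = mock(PipelineManager.class);
// The config instance passed here differs from the one production code will
// build, so the stub only matches if equals()/hashCode() compare by
// replication type and factor rather than by identity.
when(pipelineManager.createPipeline(
    new StandaloneReplicationConfig(ReplicationFactor.THREE)))
    .thenReturn(pipeline);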
-      System.out.println(pipelineManager.getPipelines(RATIS, THREE).size());
-      System.out.println(pipelineManager.getPipelines(RATIS, ONE).size());
-      return pipelineManager.getPipelines(RATIS, THREE).size() > 3;
+      return pipelineManager.getPipelines(new RatisReplicationConfig(THREE))
+          .size() > 3;
     });
 
     TestUtils.openAllRatisPipelines(pipelineManager);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
index d03aa55c0fdf..791572f1ca61 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
@@ -17,10 +17,9 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.utils.db.Table;
 
@@ -48,16 +47,15 @@ private MockPipelineManager() {
   }
 
   @Override
-  public Pipeline createPipeline(final ReplicationType type,
-      final ReplicationFactor factor)
+  public Pipeline createPipeline(ReplicationConfig replicationConfig)
       throws IOException {
     final List<DatanodeDetails> nodes = Stream.generate(
         MockDatanodeDetails::randomDatanodeDetails)
-        .limit(factor.getNumber()).collect(Collectors.toList());
+        .limit(replicationConfig.getRequiredNodes())
+        .collect(Collectors.toList());
     final Pipeline pipeline = Pipeline.newBuilder()
         .setId(PipelineID.randomId())
-        .setType(type)
-        .setFactor(factor)
+        .setReplicationConfig(replicationConfig)
        .setNodes(nodes)
         .setState(Pipeline.PipelineState.OPEN)
         .build();
@@ -66,13 +64,11 @@ public Pipeline createPipeline(final ReplicationType type,
 
   @Override
-  public Pipeline createPipeline(final ReplicationType type,
-      final ReplicationFactor factor,
-      final List<DatanodeDetails> nodes) {
+  public Pipeline createPipeline(final ReplicationConfig replicationConfig,
+      final List<DatanodeDetails> nodes) {
     return Pipeline.newBuilder()
         .setId(PipelineID.randomId())
-        .setType(type)
-        .setFactor(factor)
+        .setReplicationConfig(replicationConfig)
         .setNodes(nodes)
         .setState(Pipeline.PipelineState.OPEN)
         .build();
   }
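MockPipelineManager's createPipeline() above also shows the idiom that replaces factor.getNumber() throughout this patch: ask the config how many datanodes it needs. A self-contained sketch using the mock datanode factory from this diff:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;

public final class RequiredNodesSketch {
  public static void main(String[] args) {
    ReplicationConfig config =
        new RatisReplicationConfig(ReplicationFactor.THREE);
    // getRequiredNodes() answers 3 here, so three mock datanodes are built.
    List<DatanodeDetails> nodes = Stream
        .generate(MockDatanodeDetails::randomDatanodeDetails)
        .limit(config.getRequiredNodes())
        .collect(Collectors.toList());
    System.out.println(nodes.size());
  }
}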
@@ -100,35 +96,23 @@ public List<Pipeline> getPipelines() {
   }
 
   @Override
-  public List<Pipeline> getPipelines(final ReplicationType type) {
-    return stateManager.getPipelines(type);
+  public List<Pipeline> getPipelines(
+      final ReplicationConfig replicationConfig) {
+    return stateManager.getPipelines(replicationConfig);
   }
 
   @Override
-  public List<Pipeline> getPipelines(final ReplicationType type,
-      final ReplicationFactor factor) {
-    return stateManager.getPipelines(type, factor);
+  public List<Pipeline> getPipelines(ReplicationConfig replicationConfig,
+      final Pipeline.PipelineState state) {
+    return stateManager.getPipelines(replicationConfig, state);
   }
 
   @Override
-  public List<Pipeline> getPipelines(final ReplicationType type,
-      final Pipeline.PipelineState state) {
-    return stateManager.getPipelines(type, state);
-  }
-
-  @Override
-  public List<Pipeline> getPipelines(final ReplicationType type,
-      final ReplicationFactor factor,
-      final Pipeline.PipelineState state) {
-    return stateManager.getPipelines(type, factor, state);
-  }
-
-  @Override
-  public List<Pipeline> getPipelines(final ReplicationType type,
-      final ReplicationFactor factor, final Pipeline.PipelineState state,
+  public List<Pipeline> getPipelines(ReplicationConfig replicationConfig,
+      final Pipeline.PipelineState state,
       final Collection<DatanodeDetails> excludeDns,
       final Collection<PipelineID> excludePipelines) {
-    return stateManager.getPipelines(type, factor, state,
+    return stateManager.getPipelines(replicationConfig, state,
         excludeDns, excludePipelines);
   }
 
@@ -171,8 +155,7 @@ public void closePipeline(final Pipeline pipeline, final boolean onTimeout)
   }
 
   @Override
-  public void scrubPipeline(final ReplicationType type,
-      final ReplicationFactor factor)
+  public void scrubPipeline(ReplicationConfig replicationConfig)
       throws IOException {
 
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
index 0e34ae5d6026..663c2c745d17 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
@@ -21,9 +21,10 @@
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -65,18 +66,19 @@ protected void initializePipeline(Pipeline pipeline) throws IOException {
   }
 
   @Override
-  public Pipeline create(HddsProtos.ReplicationFactor factor)
+  public Pipeline create(RatisReplicationConfig replicationConfig)
       throws IOException {
     if (autoOpenPipeline) {
-      return super.create(factor);
+      return super.create(replicationConfig);
     } else {
-      Pipeline initialPipeline = super.create(factor);
+      Pipeline initialPipeline = super.create(replicationConfig);
       Pipeline pipeline = Pipeline.newBuilder()
           .setId(initialPipeline.getId())
          // overwrite pipeline state to maintain ALLOCATED
           .setState(Pipeline.PipelineState.ALLOCATED)
-          .setType(initialPipeline.getType())
-          .setFactor(factor)
+          .setReplicationConfig(ReplicationConfig
+              .fromTypeAndFactor(initialPipeline.getType(),
+                  replicationConfig.getReplicationFactor()))
           .setNodes(initialPipeline.getNodes())
           .build();
       return pipeline;
@@ -97,13 +99,12 @@ public void shutdown() {
   }
 
   @Override
-  public Pipeline create(HddsProtos.ReplicationFactor factor,
-      List<DatanodeDetails> nodes) {
+  public Pipeline create(RatisReplicationConfig replicationConfig,
+      List<DatanodeDetails> nodes) {
     return Pipeline.newBuilder()
         .setId(PipelineID.randomId())
         .setState(Pipeline.PipelineState.OPEN)
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(factor)
+        .setReplicationConfig(replicationConfig)
         .setNodes(nodes)
         .build();
   }
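Since MockRatisPipelineProvider only ever deals in RATIS pipelines, the fromTypeAndFactor() rebuild above is arguably more general than it needs to be; passing the incoming config straight through would read simpler. A sketch of that alternative — a judgment call, not what the patch does — using only builder methods shown in this diff:

Pipeline pipeline = Pipeline.newBuilder()
    .setId(initialPipeline.getId())
    // overwrite pipeline state to maintain ALLOCATED
    .setState(Pipeline.PipelineState.ALLOCATED)
    .setReplicationConfig(replicationConfig) // already a RatisReplicationConfig
    .setNodes(initialPipeline.getNodes())
    .build();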
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
index 7203d9d75fce..54692cdfb80c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
@@ -18,8 +18,9 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -91,13 +92,14 @@ public void testPipelineDatanodesIntersection() {
     int createdPipelineCount = 0;
     while (!end && createdPipelineCount <= healthyNodeCount * nodeHeaviness) {
       try {
-        Pipeline pipeline = provider.create(HddsProtos.ReplicationFactor.THREE);
+        Pipeline pipeline = provider.create(new RatisReplicationConfig(
+            ReplicationFactor.THREE));
         stateManager.addPipeline(pipeline);
         nodeManager.addPipeline(pipeline);
         List<Pipeline> overlapPipelines = RatisPipelineUtils
             .checkPipelineContainSameDatanodes(stateManager, pipeline);
 
-        if (!overlapPipelines.isEmpty()){
+        if (!overlapPipelines.isEmpty()) {
           intersectionCount++;
           for (Pipeline overlapPipeline : overlapPipelines) {
             LOG.info("This pipeline: " + pipeline.getId().toString() +
@@ -125,8 +127,9 @@ public void testPipelineDatanodesIntersection() {
     end = false;
 
     LOG.info("Among total " +
-        stateManager.getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE).size() + " created pipelines" +
+        stateManager
+            .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE))
+            .size() + " created pipelines" +
         " with " + healthyNodeCount + " healthy datanodes and " +
         nodeHeaviness + " as node heaviness, " +
        intersectionCount + " pipelines has same set of datanodes.");
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index 06dbc0b352cc..baa67bcbb725 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -20,9 +20,11 @@
 import com.google.common.base.Supplier;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
@@ -131,12 +133,12 @@ public void testCreatePipeline() throws Exception {
     createPipelineManager(true, buffer1);
     Assert.assertTrue(pipelineManager.getPipelines().isEmpty());
     Pipeline pipeline1 = pipelineManager.createPipeline(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+        new RatisReplicationConfig(ReplicationFactor.THREE));
     Assert.assertEquals(1, pipelineManager.getPipelines().size());
     Assert.assertTrue(pipelineManager.containsPipeline(pipeline1.getId()));
 
     Pipeline pipeline2 = pipelineManager.createPipeline(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
+        new RatisReplicationConfig(ReplicationFactor.ONE));
     Assert.assertEquals(2, pipelineManager.getPipelines().size());
     Assert.assertTrue(pipelineManager.containsPipeline(pipeline2.getId()));
     buffer1.close();
@@ -150,7 +152,7 @@ public void testCreatePipeline() throws Exception {
     Assert.assertFalse(pipelineManager2.getPipelines().isEmpty());
     Assert.assertEquals(2, pipelineManager.getPipelines().size());
     Pipeline pipeline3 = pipelineManager2.createPipeline(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+        new RatisReplicationConfig(ReplicationFactor.THREE));
     buffer2.close();
     Assert.assertEquals(3, pipelineManager2.getPipelines().size());
     Assert.assertTrue(pipelineManager2.containsPipeline(pipeline3.getId()));
@@ -163,8 +165,8 @@ public void testCreatePipelineShouldFailOnFollower() throws Exception {
     PipelineManagerV2Impl pipelineManager = createPipelineManager(false);
     Assert.assertTrue(pipelineManager.getPipelines().isEmpty());
     try {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
+      pipelineManager
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
     } catch (NotLeaderException ex) {
       pipelineManager.close();
       return;
@@ -181,7 +183,7 @@ public void testUpdatePipelineStates() throws Exception {
     Table<PipelineID, Pipeline> pipelineStore =
         SCMDBDefinition.PIPELINES.getTable(dbStore);
     Pipeline pipeline = pipelineManager.createPipeline(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+        new RatisReplicationConfig(ReplicationFactor.THREE));
     Assert.assertEquals(1, pipelineManager.getPipelines().size());
     Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
     Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
@@ -193,8 +195,7 @@ public void testUpdatePipelineStates() throws Exception {
     pipelineManager.openPipeline(pipelineID);
     pipelineManager.addContainerToPipeline(pipelineID, ContainerID.valueOf(1));
     Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
             Pipeline.PipelineState.OPEN).contains(pipeline));
     buffer.flush();
     Assert.assertTrue(pipelineStore.get(pipeline.getId()).isOpen());
@@ -206,14 +207,12 @@ public void testUpdatePipelineStates() throws Exception {
     Assert.assertEquals(Pipeline.PipelineState.DORMANT,
         pipelineStore.get(pipeline.getId()).getPipelineState());
     Assert.assertFalse(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
             Pipeline.PipelineState.OPEN).contains(pipeline));
 
     pipelineManager.activatePipeline(pipeline.getId());
     Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
             Pipeline.PipelineState.OPEN).contains(pipeline));
     buffer.flush();
     Assert.assertTrue(pipelineStore.get(pipeline.getId()).isOpen());
@@ -224,7 +223,7 @@ public void testOpenPipelineShouldFailOnFollower() throws Exception {
     PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
     Pipeline pipeline = pipelineManager.createPipeline(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+        new RatisReplicationConfig(ReplicationFactor.THREE));
     Assert.assertEquals(1, pipelineManager.getPipelines().size());
     Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
     Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
@@ -245,7 +244,7 @@ public void testOpenPipelineShouldFailOnFollower() throws Exception {
   public void testActivatePipelineShouldFailOnFollower() throws Exception {
     PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
     Pipeline pipeline = pipelineManager.createPipeline(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+        new RatisReplicationConfig(ReplicationFactor.THREE));
     Assert.assertEquals(1, pipelineManager.getPipelines().size());
     Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
     Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
@@ -266,7 +265,7 @@ public void testActivatePipelineShouldFailOnFollower() throws Exception {
   public void testDeactivatePipelineShouldFailOnFollower() throws Exception {
     PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
     Pipeline pipeline = pipelineManager.createPipeline(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+        new RatisReplicationConfig(ReplicationFactor.THREE));
     Assert.assertEquals(1, pipelineManager.getPipelines().size());
     Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
     Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
@@ -288,7 +287,7 @@ public void testRemovePipeline() throws Exception {
     PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
     // Create a pipeline
     Pipeline pipeline = pipelineManager.createPipeline(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+        new RatisReplicationConfig(ReplicationFactor.THREE));
     Assert.assertEquals(1, pipelineManager.getPipelines().size());
     Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
     Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
@@ -298,8 +297,7 @@ public void testRemovePipeline() throws Exception {
     pipelineManager
         .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
     Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
             Pipeline.PipelineState.OPEN).contains(pipeline));
 
     try {
@@ -329,7 +327,7 @@ public void testRemovePipeline() throws Exception {
   public void testClosePipelineShouldFailOnFollower() throws Exception {
     PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
     Pipeline pipeline = pipelineManager.createPipeline(
-        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+        new RatisReplicationConfig(ReplicationFactor.THREE));
     Assert.assertEquals(1, pipelineManager.getPipelines().size());
     Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
     Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
@@ -353,8 +351,7 @@ public void testPipelineReport() throws Exception {
         new SCMSafeModeManager(conf, new ArrayList<>(), pipelineManager,
             new EventQueue(), serviceManager, scmContext);
     Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
 
     // pipeline is not healthy until all dns report
     List<DatanodeDetails> nodes = pipeline.getNodes();
@@ -412,8 +409,7 @@ public void testPipelineCreationFailedMetric() throws Exception {
 
     for (int i = 0; i < maxPipelineCount; i++) {
       Pipeline pipeline = pipelineManager
-          .createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
       Assert.assertNotNull(pipeline);
     }
 
@@ -428,8 +424,8 @@ public void testPipelineCreationFailedMetric() throws Exception {
 
     //This should fail...
     try {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
+      pipelineManager
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
       fail();
     } catch (SCMException ioe) {
       // pipeline creation failed this time.
@@ -458,8 +454,7 @@ public void testPipelineOpenOnlyWhenLeaderReported() throws Exception {
         createPipelineManager(true, buffer1);
     Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
     // close manager
     buffer1.close();
     pipelineManager.close();
@@ -505,24 +500,21 @@ public void testScrubPipeline() throws Exception {
     PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
     Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
     // At this point, pipeline is not at OPEN stage.
     Assert.assertEquals(Pipeline.PipelineState.ALLOCATED,
         pipeline.getPipelineState());
 
     // pipeline should be seen in pipelineManager as ALLOCATED.
     Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
            Pipeline.PipelineState.ALLOCATED).contains(pipeline));
 
-    pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.THREE);
+    pipelineManager
+        .scrubPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
 
     // pipeline should be scrubbed.
     Assert.assertFalse(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
            Pipeline.PipelineState.ALLOCATED).contains(pipeline));
 
     pipelineManager.close();
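testScrubPipeline() above exercises the reshaped scrub entry point: like the rest of this patch, one config argument replaces the (type, factor) pair, and the state-filtered getPipelines() overload is how the result is observed. Fragment, assuming the test's pipelineManager fixture and a `pipeline` created as above:

pipelineManager.scrubPipeline(
    new RatisReplicationConfig(ReplicationFactor.THREE));
// After the scrub, stale ALLOCATED pipelines for that config should be gone:
boolean stillAllocated = pipelineManager
    .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
        Pipeline.PipelineState.ALLOCATED)
    .contains(pipeline);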
@@ -537,16 +529,14 @@ public void testScrubPipelineShouldFailOnFollower() throws Exception {
     PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
     Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
     // At this point, pipeline is not at OPEN stage.
     Assert.assertEquals(Pipeline.PipelineState.ALLOCATED,
         pipeline.getPipelineState());
 
     // pipeline should be seen in pipelineManager as ALLOCATED.
     Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
            Pipeline.PipelineState.ALLOCATED).contains(pipeline));
 
     // Change to follower
@@ -554,8 +544,8 @@ public void testScrubPipelineShouldFailOnFollower() throws Exception {
     ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false);
 
     try {
-      pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
+      pipelineManager
+          .scrubPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
     } catch (NotLeaderException ex) {
       pipelineManager.close();
       return;
@@ -576,8 +566,8 @@ public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception {
     PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
     try {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
+      pipelineManager
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
       fail("Pipelines should not have been created");
     } catch (IOException e) {
       // No pipeline is created.
@@ -587,11 +577,10 @@ public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception {
     // Ensure a pipeline of factor ONE can be created - no exceptions should be
     // raised.
     Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.ONE));
     Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.ONE).contains(pipeline));
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.ONE))
+        .contains(pipeline));
 
     // Simulate safemode check exiting.
     scmContext.updateSafeModeStatus(
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index 0f353f1e2428..a724ad3bdb20 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -25,10 +25,12 @@
 import java.util.UUID;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
@@ -188,8 +190,8 @@ public void testPickLowestLoadAnchor() throws IOException{
       Pipeline pipeline = Pipeline.newBuilder()
           .setId(PipelineID.randomId())
           .setState(Pipeline.PipelineState.ALLOCATED)
-          .setType(HddsProtos.ReplicationType.RATIS)
-          .setFactor(HddsProtos.ReplicationFactor.THREE)
+          .setReplicationConfig(new RatisReplicationConfig(
+              ReplicationFactor.THREE))
           .setNodes(nodes)
           .build();
       nodeManager.addPipeline(pipeline);
@@ -210,7 +212,9 @@ public void testPickLowestLoadAnchor() throws IOException{
     // Should max out pipeline usage.
     Assert.assertEquals(maxPipelineCount,
-        stateManager.getPipelines(HddsProtos.ReplicationType.RATIS).size());
+        stateManager
+            .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE))
+            .size());
   }
 
   @Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
index 43d5398a2513..49d969bf9810 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
@@ -18,9 +18,13 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.junit.Assert;
 import org.junit.Before;
@@ -56,8 +60,7 @@ private Pipeline createDummyPipeline(HddsProtos.ReplicationType type,
       nodes.add(MockDatanodeDetails.randomDatanodeDetails());
     }
     return Pipeline.newBuilder()
-        .setType(type)
-        .setFactor(factor)
+        .setReplicationConfig(ReplicationConfig.fromTypeAndFactor(type, factor))
         .setNodes(nodes)
         .setState(Pipeline.PipelineState.ALLOCATED)
         .setId(PipelineID.randomId())
@@ -110,8 +113,8 @@ public void testGetPipelines() throws IOException {
     stateManager.openPipeline(pipeline.getId());
     pipelines.add(pipeline);
 
-    Set pipelines1 = new HashSet<>(stateManager.getPipelines(
-        HddsProtos.ReplicationType.RATIS));
+    Set<Pipeline> pipelines1 = new HashSet<>(stateManager
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.ONE)));
     Assert.assertEquals(pipelines1.size(), pipelines.size());
 
     pipelines1 = new HashSet<>(stateManager.getPipelines());
@@ -126,8 +129,8 @@ public void testGetPipelines() throws IOException {
   @Test
   public void testGetPipelinesByTypeAndFactor() throws IOException {
     Set<Pipeline> pipelines = new HashSet<>();
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
+    for (HddsProtos.ReplicationType type : new ReplicationType[] {
+        ReplicationType.RATIS, ReplicationType.STAND_ALONE}) {
       for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
           .values()) {
         for (int i = 0; i < 5; i++) {
@@ -152,17 +155,17 @@ public void testGetPipelinesByTypeAndFactor() throws IOException {
       }
     }
 
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
+    for (HddsProtos.ReplicationType type : new ReplicationType[] {
+        ReplicationType.RATIS, ReplicationType.STAND_ALONE}) {
       for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
           .values()) {
         // verify pipelines received
         List<Pipeline> pipelines1 =
-            stateManager.getPipelines(type, factor);
+            stateManager.getPipelines(
+                ReplicationConfig.fromTypeAndFactor(type, factor));
         Assert.assertEquals(15, pipelines1.size());
         pipelines1.stream().forEach(p -> {
           Assert.assertEquals(type, p.getType());
-          Assert.assertEquals(factor, p.getFactor());
         });
       }
     }
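The loop above (and the two below) deliberately stopped iterating HddsProtos.ReplicationType.values(): this patch only introduces config classes for RATIS and STAND_ALONE, so presumably fromTypeAndFactor() cannot map the remaining proto values such as CHAINED. The enumeration the tests settle on looks like this:

for (ReplicationType type : new ReplicationType[] {
    ReplicationType.RATIS, ReplicationType.STAND_ALONE}) {
  for (ReplicationFactor factor : ReplicationFactor.values()) {
    // Yields a RatisReplicationConfig or a StandaloneReplicationConfig.
    ReplicationConfig config =
        ReplicationConfig.fromTypeAndFactor(type, factor);
  }
}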
@@ -173,61 +176,11 @@ public void testGetPipelinesByTypeAndFactor() throws IOException {
     }
   }
 
-  @Test
-  public void testGetPipelinesByTypeAndState() throws IOException {
-    Set<Pipeline> pipelines = new HashSet<>();
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
-      HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-      for (int i = 0; i < 5; i++) {
-        // 5 pipelines in allocated state for each type and factor
-        Pipeline pipeline =
-            createDummyPipeline(type, factor, factor.getNumber());
-        stateManager.addPipeline(pipeline);
-        pipelines.add(pipeline);
-
-        // 5 pipelines in open state for each type and factor
-        pipeline = createDummyPipeline(type, factor, factor.getNumber());
-        stateManager.addPipeline(pipeline);
-        stateManager.openPipeline(pipeline.getId());
-        pipelines.add(pipeline);
-
-        // 5 pipelines in closed state for each type and factor
-        pipeline = createDummyPipeline(type, factor, factor.getNumber());
-        stateManager.addPipeline(pipeline);
-        stateManager.finalizePipeline(pipeline.getId());
-        pipelines.add(pipeline);
-      }
-    }
-
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
-      // verify pipelines received
-      List<Pipeline> pipelines1 = stateManager
-          .getPipelines(type, Pipeline.PipelineState.OPEN);
-      Assert.assertEquals(5, pipelines1.size());
-      pipelines1.forEach(p -> {
-        Assert.assertEquals(type, p.getType());
-        Assert.assertEquals(Pipeline.PipelineState.OPEN, p.getPipelineState());
-      });
-
-      pipelines1 = stateManager
-          .getPipelines(type, Pipeline.PipelineState.OPEN,
-              Pipeline.PipelineState.CLOSED, Pipeline.PipelineState.ALLOCATED);
-      Assert.assertEquals(15, pipelines1.size());
-    }
-
-    //clean up
-    for (Pipeline pipeline : pipelines) {
-      removePipeline(pipeline);
-    }
-  }
-
   @Test
   public void testGetPipelinesByTypeFactorAndState() throws IOException {
     Set<Pipeline> pipelines = new HashSet<>();
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
+    for (HddsProtos.ReplicationType type : new ReplicationType[] {
+        ReplicationType.RATIS, ReplicationType.STAND_ALONE}) {
       for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
           .values()) {
         for (int i = 0; i < 5; i++) {
@@ -259,18 +212,18 @@ public void testGetPipelinesByTypeFactorAndState() throws IOException {
       }
     }
 
-    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
-        .values()) {
+    for (HddsProtos.ReplicationType type : new HddsProtos.ReplicationType[] {
+        ReplicationType.RATIS, ReplicationType.STAND_ALONE}) {
       for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor
           .values()) {
         for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) {
           // verify pipelines received
           List<Pipeline> pipelines1 =
-              stateManager.getPipelines(type, factor, state);
+              stateManager.getPipelines(
+                  ReplicationConfig.fromTypeAndFactor(type, factor), state);
           Assert.assertEquals(5, pipelines1.size());
           pipelines1.forEach(p -> {
             Assert.assertEquals(type, p.getType());
-            Assert.assertEquals(factor, p.getFactor());
             Assert.assertEquals(state, p.getPipelineState());
           });
         }
@@ -430,15 +383,15 @@ public void testQueryPipeline() throws IOException {
     // pipeline in allocated state should not be reported
     stateManager.addPipeline(pipeline);
     Assert.assertEquals(0, stateManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
+            Pipeline.PipelineState.OPEN)
         .size());
 
     // pipeline in open state should be reported
     stateManager.openPipeline(pipeline.getId());
     Assert.assertEquals(1, stateManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
+            Pipeline.PipelineState.OPEN)
        .size());
 
     Pipeline pipeline2 = createDummyPipeline(HddsProtos.ReplicationType.RATIS,
@@ -449,15 +402,15 @@ public void testQueryPipeline() throws IOException {
     // pipeline in open state should be reported
     stateManager.addPipeline(pipeline2);
     Assert.assertEquals(2, stateManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
+            Pipeline.PipelineState.OPEN)
        .size());
 
     // pipeline in closed state should not be reported
     stateManager.finalizePipeline(pipeline2.getId());
     Assert.assertEquals(1, stateManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN)
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
+            Pipeline.PipelineState.OPEN)
        .size());
 
     // clean up
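With Pipeline.getFactor() gone, the assertions in these tests pick one of two replacements: compare node counts structurally via getRequiredNodes(), or cast to the concrete config and read its factor, as the provider tests below do. A fragment showing both, with `pipeline` standing in for any RATIS/THREE pipeline built by these tests:

// Structural check: a RATIS/THREE pipeline needs three datanodes.
assertEquals(3, pipeline.getReplicationConfig().getRequiredNodes());
// Exact-factor check via the concrete config type.
RatisReplicationConfig ratisConfig =
    (RatisReplicationConfig) pipeline.getReplicationConfig();
assertEquals(ReplicationFactor.THREE, ratisConfig.getReplicationFactor());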
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 383944d71763..832f6a20037c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -51,11 +54,10 @@ public class TestRatisPipelineProvider {
       HddsProtos.ReplicationType.RATIS;
 
   private NodeManager nodeManager;
-  private PipelineProvider provider;
+  private RatisPipelineProvider provider;
   private PipelineStateManager stateManager;
   private OzoneConfiguration conf;
 
-
   public void init(int maxPipelinePerNode) throws Exception {
     nodeManager = new MockNodeManager(true, 10);
     conf = new OzoneConfiguration();
@@ -66,47 +68,39 @@ public void init(int maxPipelinePerNode) throws Exception {
         stateManager, conf);
   }
 
+  private static void assertPipelineProperties(
+      Pipeline pipeline, HddsProtos.ReplicationFactor expectedFactor,
+      HddsProtos.ReplicationType expectedReplicationType,
+      Pipeline.PipelineState expectedState) {
+    assertEquals(expectedState, pipeline.getPipelineState());
+    assertEquals(expectedReplicationType, pipeline.getType());
+    assertEquals(expectedFactor.getNumber(),
+        pipeline.getReplicationConfig().getRequiredNodes());
+    assertEquals(expectedFactor.getNumber(), pipeline.getNodes().size());
+  }
+
   private void createPipelineAndAssertions(
-          HddsProtos.ReplicationFactor factor) throws IOException {
-    Pipeline pipeline = provider.create(factor);
+      HddsProtos.ReplicationFactor factor) throws IOException {
+    Pipeline pipeline = provider.create(new RatisReplicationConfig(factor));
     assertPipelineProperties(pipeline, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.ALLOCATED);
     stateManager.addPipeline(pipeline);
     nodeManager.addPipeline(pipeline);
 
-    Pipeline pipeline1 = provider.create(factor);
+    Pipeline pipeline1 = provider.create(new RatisReplicationConfig(factor));
    assertPipelineProperties(pipeline1, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.ALLOCATED);
     // New pipeline should not overlap with the previous created pipeline
     assertTrue(
         intersection(pipeline.getNodes(), pipeline1.getNodes())
            .size() < factor.getNumber());
-    if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE) {
+    if (pipeline.getReplicationConfig().getRequiredNodes() == 3) {
       assertNotEquals(pipeline.getNodeSet(), pipeline1.getNodeSet());
     }
     stateManager.addPipeline(pipeline1);
     nodeManager.addPipeline(pipeline1);
   }
 
-  @Test
-  public void testCreatePipelineWithFactor() throws Exception {
-    init(1);
-    HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-    Pipeline pipeline = provider.create(factor);
-    assertPipelineProperties(pipeline, factor, REPLICATION_TYPE,
-        Pipeline.PipelineState.ALLOCATED);
-    stateManager.addPipeline(pipeline);
-
-    factor = HddsProtos.ReplicationFactor.ONE;
-    Pipeline pipeline1 = provider.create(factor);
-    assertPipelineProperties(pipeline1, factor, REPLICATION_TYPE,
-        Pipeline.PipelineState.ALLOCATED);
-    stateManager.addPipeline(pipeline1);
-    // With enough pipeline quote on datanodes, they should not share
-    // the same set of datanodes.
-    assertNotEquals(pipeline.getNodeSet(), pipeline1.getNodeSet());
-  }
-
   @Test
   public void testCreatePipelineWithFactorThree() throws Exception {
     init(1);
@@ -127,17 +121,38 @@ private List<DatanodeDetails> createListOfNodes(int nodeCount) {
     return nodes;
   }
 
+  @Test
+  public void testCreatePipelineWithFactor() throws Exception {
+    init(1);
+    HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
+    Pipeline pipeline = provider.create(new RatisReplicationConfig(factor));
+    assertPipelineProperties(pipeline, factor, REPLICATION_TYPE,
+        Pipeline.PipelineState.ALLOCATED);
+    stateManager.addPipeline(pipeline);
+
+    factor = HddsProtos.ReplicationFactor.ONE;
+    Pipeline pipeline1 = provider.create(new RatisReplicationConfig(factor));
+    assertPipelineProperties(pipeline1, factor, REPLICATION_TYPE,
+        Pipeline.PipelineState.ALLOCATED);
+    stateManager.addPipeline(pipeline1);
+    // With enough pipeline quota on datanodes, they should not share
+    // the same set of datanodes.
+    assertNotEquals(pipeline.getNodeSet(), pipeline1.getNodeSet());
+  }
+
   @Test
   public void testCreatePipelineWithNodes() throws Exception {
     init(1);
     HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
     Pipeline pipeline =
-        provider.create(factor, createListOfNodes(factor.getNumber()));
+        provider.create(new RatisReplicationConfig(factor),
+            createListOfNodes(factor.getNumber()));
     assertPipelineProperties(pipeline, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.OPEN);
 
     factor = HddsProtos.ReplicationFactor.ONE;
-    pipeline = provider.create(factor, createListOfNodes(factor.getNumber()));
+    pipeline = provider.create(new RatisReplicationConfig(factor),
+        createListOfNodes(factor.getNumber()));
     assertPipelineProperties(pipeline, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.OPEN);
   }
@@ -151,9 +166,9 @@ public void testCreateFactorTHREEPipelineWithSameDatanodes()
         .limit(3).collect(Collectors.toList());
 
     Pipeline pipeline1 = provider.create(
-        HddsProtos.ReplicationFactor.THREE, healthyNodes);
+        new RatisReplicationConfig(ReplicationFactor.THREE), healthyNodes);
     Pipeline pipeline2 = provider.create(
-        HddsProtos.ReplicationFactor.THREE, healthyNodes);
+        new RatisReplicationConfig(ReplicationFactor.THREE), healthyNodes);
 
     Assert.assertEquals(pipeline1.getNodeSet(), pipeline2.getNodeSet());
   }
@@ -174,23 +189,25 @@ public void testCreatePipelinesDnExclude() throws Exception {
     List<DatanodeDetails> dns = healthyNodes.subList(0, 3);
     for (int i = 0; i < maxPipelinePerNode; i++) {
       // Saturate pipeline counts on all the 1st 3 DNs.
-      addPipeline(dns, factor, Pipeline.PipelineState.OPEN, REPLICATION_TYPE);
+      addPipeline(dns, Pipeline.PipelineState.OPEN,
+          new RatisReplicationConfig(factor));
     }
     Set<DatanodeDetails> membersOfOpenPipelines = new HashSet<>(dns);
 
     // Use up next 3 DNs for a closed pipeline.
     dns = healthyNodes.subList(3, 6);
-    addPipeline(dns, factor, Pipeline.PipelineState.CLOSED, REPLICATION_TYPE);
+    addPipeline(dns, Pipeline.PipelineState.CLOSED,
+        new RatisReplicationConfig(factor));
     Set<DatanodeDetails> membersOfClosedPipelines = new HashSet<>(dns);
 
     // only 2 healthy DNs left that are not part of any pipeline
-    Pipeline pipeline = provider.create(factor);
+    Pipeline pipeline = provider.create(
+        new RatisReplicationConfig(factor));
     assertPipelineProperties(pipeline, factor, REPLICATION_TYPE,
         Pipeline.PipelineState.ALLOCATED);
     nodeManager.addPipeline(pipeline);
     stateManager.addPipeline(pipeline);
-
     List<DatanodeDetails> nodes = pipeline.getNodes();
 
     assertTrue(
@@ -202,23 +219,12 @@ public void testCreatePipelinesDnExclude() throws Exception {
         nodes.stream().anyMatch(membersOfClosedPipelines::contains));
   }
 
-  private static void assertPipelineProperties(
-      Pipeline pipeline, HddsProtos.ReplicationFactor expectedFactor,
-      HddsProtos.ReplicationType expectedReplicationType,
-      Pipeline.PipelineState expectedState) {
-    assertEquals(expectedState, pipeline.getPipelineState());
-    assertEquals(expectedReplicationType, pipeline.getType());
-    assertEquals(expectedFactor, pipeline.getFactor());
-    assertEquals(expectedFactor.getNumber(), pipeline.getNodes().size());
-  }
-
   private void addPipeline(
-      List<DatanodeDetails> dns, HddsProtos.ReplicationFactor factor,
-      Pipeline.PipelineState open, HddsProtos.ReplicationType replicationType)
+      List<DatanodeDetails> dns,
+      Pipeline.PipelineState open, ReplicationConfig replicationConfig)
       throws IOException {
     Pipeline openPipeline = Pipeline.newBuilder()
-        .setType(replicationType)
-        .setFactor(factor)
+        .setReplicationConfig(replicationConfig)
         .setNodes(dns)
         .setState(open)
         .setId(PipelineID.randomId())
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index 2be7ab99c341..c497710411e8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -35,9 +35,11 @@
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
@@ -132,8 +134,7 @@ public void testPipelineReload() throws IOException {
     Set<Pipeline> pipelines = new HashSet<>();
     for (int i = 0; i < pipelineNum; i++) {
       Pipeline pipeline = pipelineManager
-          .createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
       pipelineManager.openPipeline(pipeline.getId());
       pipelines.add(pipeline);
     }
@@ -154,7 +155,8 @@ public void testPipelineReload() throws IOException {
       Assert.assertTrue(pipelineManager.getPipeline(p.getId()).isOpen());
     }
     List<Pipeline> pipelineList =
-        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS);
+        pipelineManager.getPipelines(new RatisReplicationConfig(
+            ReplicationFactor.THREE));
     Assert.assertEquals(pipelines, new HashSet<>(pipelineList));
 
     Set<Set<DatanodeDetails>> originalPipelines = pipelineList.stream()
@@ -184,8 +186,7 @@ public void testRemovePipeline() throws IOException {
         mockRatisProvider);
 
     Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
     pipelineManager.openPipeline(pipeline.getId());
     pipelineManager
         .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
@@ -227,8 +228,7 @@ eventQueue, new SCMServiceManager(),
     // create a pipeline in allocated state with no dns yet reported
     Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
 
     Assert
         .assertFalse(pipelineManager.getPipeline(pipeline.getId()).isHealthy());
@@ -300,8 +300,7 @@ public void testPipelineCreationFailedMetric() throws Exception {
     // Create 5 pipelines (Use up 15 Datanodes)
     for (int i = 0; i < 5; i++) {
       Pipeline pipeline = pipelineManager
-          .createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
       Assert.assertNotNull(pipeline);
     }
 
@@ -318,8 +317,8 @@ public void testPipelineCreationFailedMetric() throws Exception {
     GenericTestUtils.setLogLevel(SCMPipelineManager.getLog(), INFO);
     //This should fail...
     try {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
+      pipelineManager
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
       fail();
     } catch (SCMException ioe) {
       // pipeline creation failed this time.
@@ -378,13 +377,12 @@ public void testPipelineLimit() throws Exception {
 
     // one node pipeline creation will not be accounted for
     // pipeline limit determination
-    pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE);
+    pipelineManager
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.ONE));
     // max limit on no of pipelines is 4
     for (int i = 0; i < pipelinePerDn; i++) {
       Pipeline pipeline = pipelineManager
-          .createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
       Assert.assertNotNull(pipeline);
     }
 
@@ -398,8 +396,8 @@ public void testPipelineLimit() throws Exception {
     Assert.assertEquals(0, numPipelineCreateFailed);
     //This should fail...
     try {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
+      pipelineManager
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
       fail();
     } catch (SCMException ioe) {
       // pipeline creation failed this time.
@@ -434,16 +432,14 @@ public void testActivateDeactivatePipeline() throws IOException {
         mockRatisProvider);
 
     final Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
     final PipelineID pid = pipeline.getId();
 
     pipelineManager.openPipeline(pid);
     pipelineManager.addContainerToPipeline(pid, ContainerID.valueOf(1));
 
     Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
             Pipeline.PipelineState.OPEN).contains(pipeline));
 
     Assert.assertEquals(Pipeline.PipelineState.OPEN,
@@ -454,15 +450,13 @@ public void testActivateDeactivatePipeline() throws IOException {
         pipelineManager.getPipeline(pid).getPipelineState());
 
     Assert.assertFalse(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
            Pipeline.PipelineState.OPEN).contains(pipeline));
 
     pipelineManager.activatePipeline(pid);
 
     Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
            Pipeline.PipelineState.OPEN).contains(pipeline));
 
     pipelineManager.close();
@@ -483,8 +477,7 @@ public void testPipelineOpenOnlyWhenLeaderReported() throws Exception {
     pipelineManager.onMessage(
         new SCMSafeModeManager.SafeModeStatus(true, true), null);
     Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
     // close manager
     pipelineManager.close();
     // new pipeline manager loads the pipelines from the db in ALLOCATED state
@@ -549,24 +542,21 @@ public void testScrubPipeline() throws IOException {
         ratisProvider);
 
     Pipeline pipeline = pipelineManager
-        .createPipeline(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
     // At this point, pipeline is not at OPEN stage.
     Assert.assertEquals(Pipeline.PipelineState.ALLOCATED,
         pipeline.getPipelineState());
 
     // pipeline should be seen in pipelineManager as ALLOCATED.
     Assert.assertTrue(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
            Pipeline.PipelineState.ALLOCATED).contains(pipeline));
 
-    pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.THREE);
+    pipelineManager
+        .scrubPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
 
     // pipeline should be scrubbed.
     Assert.assertFalse(pipelineManager
-        .getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE,
+        .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE),
            Pipeline.PipelineState.ALLOCATED).contains(pipeline));
 
     pipelineManager.close();
@@ -592,8 +582,8 @@ public void testPipelineNotCreatedUntilSafeModePrecheck()
         ratisProvider);
 
     try {
-      pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE);
+      pipelineManager
+          .createPipeline(new RatisReplicationConfig(ReplicationFactor.THREE));
       fail("Pipelines should not have been created");
     } catch (IOException e) {
       // expected
@@ -601,8 +591,8 @@ public void testPipelineNotCreatedUntilSafeModePrecheck()
 
     // Ensure a pipeline of factor ONE can be created - no exceptions should be
     // raised.
-    pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE);
+    pipelineManager
+        .createPipeline(new RatisReplicationConfig(ReplicationFactor.ONE));
 
     // Simulate safemode check exiting.
     pipelineManager.onMessage(
@@ -795,8 +785,7 @@ private static void waitForLog(LogCapturer logCapturer)
   private Pipeline pipelineStub() {
     return Pipeline.newBuilder()
         .setId(PipelineID.randomId())
-        .setType(HddsProtos.ReplicationType.RATIS)
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
+        .setReplicationConfig(new RatisReplicationConfig(ReplicationFactor.ONE))
         .setState(Pipeline.PipelineState.OPEN)
         .setNodes(
             Arrays.asList(
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
index fe3fb7925bf3..f21fc006eaae 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -50,21 +51,26 @@ public void init() throws Exception {
   @Test
   public void testCreatePipelineWithFactor() throws IOException {
     HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-    Pipeline pipeline = provider.create(factor);
+    Pipeline pipeline =
+        provider.create(new StandaloneReplicationConfig(factor));
     stateManager.addPipeline(pipeline);
     Assert.assertEquals(pipeline.getType(),
         HddsProtos.ReplicationType.STAND_ALONE);
-    Assert.assertEquals(pipeline.getFactor(), factor);
+    Assert.assertEquals(pipeline.getReplicationConfig().getRequiredNodes(),
+        factor.getNumber());
     Assert.assertEquals(pipeline.getPipelineState(),
         Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
 
     factor = HddsProtos.ReplicationFactor.ONE;
-    Pipeline pipeline1 = provider.create(factor);
+    Pipeline pipeline1 =
+        provider.create(new StandaloneReplicationConfig(factor));
     stateManager.addPipeline(pipeline1);
     Assert.assertEquals(pipeline1.getType(),
         HddsProtos.ReplicationType.STAND_ALONE);
-    Assert.assertEquals(pipeline1.getFactor(), factor);
+    Assert.assertEquals(
+        ((StandaloneReplicationConfig) pipeline1.getReplicationConfig())
+            .getReplicationFactor(), factor);
     Assert.assertEquals(pipeline1.getPipelineState(),
         Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline1.getNodes().size(), factor.getNumber());
@@ -82,19 +88,25 @@ private List<DatanodeDetails> createListOfNodes(int nodeCount) {
   public void testCreatePipelineWithNodes() throws IOException {
     HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
     Pipeline pipeline =
-        provider.create(factor, createListOfNodes(factor.getNumber()));
+        provider.create(new StandaloneReplicationConfig(factor),
+            createListOfNodes(factor.getNumber()));
     Assert.assertEquals(pipeline.getType(),
         HddsProtos.ReplicationType.STAND_ALONE);
-    Assert.assertEquals(pipeline.getFactor(), factor);
+    Assert.assertEquals(
+        ((StandaloneReplicationConfig) pipeline.getReplicationConfig())
+            .getReplicationFactor(), factor);
     Assert.assertEquals(pipeline.getPipelineState(),
         Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
 
     factor = HddsProtos.ReplicationFactor.ONE;
-    pipeline = provider.create(factor, createListOfNodes(factor.getNumber()));
+    pipeline = provider.create(new StandaloneReplicationConfig(factor),
+        createListOfNodes(factor.getNumber()));
     Assert.assertEquals(pipeline.getType(),
         HddsProtos.ReplicationType.STAND_ALONE);
-    Assert.assertEquals(pipeline.getFactor(), factor);
+    Assert.assertEquals(
+        ((StandaloneReplicationConfig) pipeline.getReplicationConfig())
+            .getReplicationFactor(), factor);
     Assert.assertEquals(pipeline.getPipelineState(),
         Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
index e7dbeebc4db0..2f4b6223f242 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
@@ -25,8 +25,10 @@
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
@@ -147,16 +149,16 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception {
 
       // Create 3 pipelines
       Pipeline pipeline1 =
-          pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
+          pipelineManager.createPipeline(new RatisReplicationConfig(
+              ReplicationFactor.THREE));
       pipelineManager.openPipeline(pipeline1.getId());
       Pipeline pipeline2 =
-          pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
+          pipelineManager.createPipeline(new RatisReplicationConfig(
+              ReplicationFactor.THREE));
       pipelineManager.openPipeline(pipeline2.getId());
       Pipeline pipeline3 =
-          pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
-              HddsProtos.ReplicationFactor.THREE);
+          pipelineManager.createPipeline(new RatisReplicationConfig(
+              ReplicationFactor.THREE));
       pipelineManager.openPipeline(pipeline3.getId());
 
       // Mark pipeline healthy
@@ -245,16 +247,16 @@ public void
testHealthyPipelineSafeModeRuleWithMixedPipelines() // Create 3 pipelines Pipeline pipeline1 = - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE); + pipelineManager.createPipeline(new RatisReplicationConfig( + ReplicationFactor.ONE)); pipelineManager.openPipeline(pipeline1.getId()); Pipeline pipeline2 = - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + pipelineManager.createPipeline(new RatisReplicationConfig( + ReplicationFactor.THREE)); pipelineManager.openPipeline(pipeline2.getId()); Pipeline pipeline3 = - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + pipelineManager.createPipeline(new RatisReplicationConfig( + ReplicationFactor.THREE)); pipelineManager.openPipeline(pipeline3.getId()); // Mark pipeline healthy diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java index 6de81dcf22e8..45e3e1eb53b1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java @@ -23,9 +23,11 @@ import java.util.Map; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.scm.HddsTestUtils; @@ -166,8 +168,8 @@ public void testOneReplicaPipelineRuleMixedPipelines() throws Exception { LoggerFactory.getLogger(SCMSafeModeManager.class)); List pipelines = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE); + pipelineManager.getPipelines(new RatisReplicationConfig( + ReplicationFactor.ONE)); firePipelineEvent(pipelines); GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains( "reported count is 0"), 1000, 5000); @@ -176,8 +178,9 @@ public void testOneReplicaPipelineRuleMixedPipelines() throws Exception { Assert.assertFalse(rule.validate()); pipelines = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + pipelineManager.getPipelines( + new RatisReplicationConfig(ReplicationFactor.THREE)); + firePipelineEvent(pipelines.subList(0, pipelineCountThree -1)); GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains( @@ -194,7 +197,7 @@ private void createPipelines(int count, HddsProtos.ReplicationFactor factor) throws Exception { for (int i = 0; i < count; i++) { Pipeline pipeline = pipelineManager.createPipeline( - HddsProtos.ReplicationType.RATIS, factor); + new RatisReplicationConfig(factor)); pipelineManager.openPipeline(pipeline.getId()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 9bc4ec2c98a6..33ecf351a12e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -28,9 +28,11 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -351,8 +353,8 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( for (int i = 0; i < pipelineCount; i++) { // Create pipeline Pipeline pipeline = pipelineManager.createPipeline( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + new RatisReplicationConfig( + ReplicationFactor.THREE)); pipelineManager.openPipeline(pipeline.getId()); // Mark pipeline healthy @@ -600,8 +602,8 @@ public void testSafeModePipelineExitRule() throws Exception { mockRatisProvider); Pipeline pipeline = pipelineManager.createPipeline( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + new RatisReplicationConfig( + ReplicationFactor.THREE)); pipeline = pipelineManager.getPipeline(pipeline.getId()); MockRatisPipelineProvider.markPipelineHealthy(pipeline); @@ -699,12 +701,12 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() Pipeline pipeline; try { pipeline = pipelineManager.createPipeline( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + new RatisReplicationConfig( + ReplicationFactor.THREE)); } catch (SCMException ex) { pipeline = pipelineManager.getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE).get(0); + new RatisReplicationConfig( + ReplicationFactor.THREE)).get(0); } // Mark pipeline healthy diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java index 5e9b3c3143c7..97371bde78eb 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java @@ -100,9 +100,9 @@ private void printDatanodeInfo(DatanodeWithAttributes dna) { relatedPipelineNum = relatedPipelines.size(); relatedPipelines.forEach( p -> pipelineListInfo.append(p.getId().getId().toString()) - .append("/").append(p.getFactor().toString()).append("/") - .append(p.getType().toString()).append("/") - .append(p.getPipelineState().toString()).append("/") + .append("/").append(p.getReplicationConfig().toString()) + .append("/").append(p.getType().toString()) + .append("/").append(p.getPipelineState().toString()).append("/") .append(datanode.getUuid().equals(p.getLeaderId()) ? 
"Leader" : "Follower") .append(System.getProperty("line.separator"))); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java index 90858def09a6..faea21e2c6af 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/CreatePipelineSubcommand.java @@ -66,8 +66,7 @@ public void execute(ScmClient scmClient) throws IOException { if (pipeline != null) { System.out.println(pipeline.getId().toString() + - " is created. Factor: " + pipeline.getFactor() + - ", Type: " + pipeline.getType()); + " is created. " + pipeline.toString()); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java index 58ae26e500e1..a91af50a2cf0 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java @@ -20,6 +20,7 @@ import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -53,7 +54,8 @@ public void execute(ScmClient scmClient) throws IOException { Stream stream = scmClient.listPipelines().stream(); if (!Strings.isNullOrEmpty(factor)) { stream = stream.filter( - p -> p.getFactor().toString().compareToIgnoreCase(factor) == 0); + p -> ReplicationConfig.getLegacyFactor(p.getReplicationConfig()) + .toString().compareToIgnoreCase(factor) == 0); } if (!Strings.isNullOrEmpty(state)) { stream = stream.filter(p -> p.getPipelineState().toString() diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java index fc63a7d45be4..9e74fe23ac03 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java @@ -18,14 +18,13 @@ package org.apache.hadoop.ozone.om.helpers; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo.Builder; - import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.util.Time; import org.junit.Assert; @@ -118,11 +117,11 @@ private OmKeyLocationInfoGroup createOmKeyLocationInfoGroup() { Pipeline getPipeline() { return Pipeline.newBuilder() - .setFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationConfig( + new StandaloneReplicationConfig(ReplicationFactor.ONE)) .setId(PipelineID.randomId()) 
         .setNodes(Collections.EMPTY_LIST)
         .setState(Pipeline.PipelineState.OPEN)
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
         .build();
   }
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot
index 00e09fe03224..914939bcf1ea 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot
@@ -26,17 +26,18 @@ ${SCM}       scm
 
 *** Test Cases ***
 Create pipeline
     ${output} =         Execute          ozone admin pipeline create
-    Should contain      ${output}        is created. Factor: ONE, Type: STAND_ALONE
+    Should contain      ${output}        is created.
+    Should contain      ${output}        STANDALONE/ONE
     ${pipeline} =       Execute          echo "${output}" | grep 'is created' | cut -f1 -d' ' | cut -f2 -d'='
     Set Suite Variable  ${PIPELINE}      ${pipeline}
 
 List pipelines
     ${output} =         Execute          ozone admin pipeline list
-    Should contain      ${output}        Factor:ONE
+    Should contain      ${output}        STANDALONE/ONE
 
 List pipelines with explicit host
     ${output} =         Execute          ozone admin pipeline list --scm ${SCM}
-    Should contain      ${output}        Factor:ONE
+    Should contain      ${output}        STANDALONE/ONE
 
 Deactivate pipeline
     Execute             ozone admin pipeline deactivate "${PIPELINE}"
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
index ecc16555b4ae..acd30ba605c4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
@@ -22,8 +22,9 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
 import org.apache.hadoop.ozone.HddsDatanodeService;
@@ -77,8 +78,8 @@ public static void shutdown() throws Exception {
 
   @Test(timeout = 120000)
   public void testLeaderIdUsedOnFirstCall() throws Exception {
     List<Pipeline> pipelines = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .getPipelineManager().getPipelines(new RatisReplicationConfig(
+            ReplicationFactor.THREE));
     Assert.assertFalse(pipelines.isEmpty());
     Pipeline ratisPipeline = pipelines.iterator().next();
     Assert.assertTrue(ratisPipeline.isHealthy());
@@ -109,8 +110,8 @@ public void testLeaderIdUsedOnFirstCall() throws Exception {
   @Test(timeout = 120000)
   public void testLeaderIdAfterLeaderChange() throws Exception {
     List<Pipeline> pipelines = cluster.getStorageContainerManager()
-        .getPipelineManager().getPipelines(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+        .getPipelineManager().getPipelines(new RatisReplicationConfig(
+            ReplicationFactor.THREE));
     Assert.assertFalse(pipelines.isEmpty());
     Pipeline ratisPipeline = pipelines.iterator().next();
     Assert.assertTrue(ratisPipeline.isHealthy());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java index f3682edca3a0..a7864b09b57d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManagerV2; @@ -49,7 +50,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; /** * Class to test install snapshot feature for SCM HA. @@ -93,11 +93,11 @@ private DBCheckpoint downloadSnapshot() throws Exception { PipelineManager pipelineManager = scm.getPipelineManager(); Pipeline ratisPipeline1 = pipelineManager.getPipeline( containerManager.allocateContainer( - RATIS, THREE, "Owner1").getPipelineID()); + new RatisReplicationConfig(THREE), "Owner1").getPipelineID()); pipelineManager.openPipeline(ratisPipeline1.getId()); Pipeline ratisPipeline2 = pipelineManager.getPipeline( containerManager.allocateContainer( - RATIS, ONE, "Owner2").getPipelineID()); + new RatisReplicationConfig(ONE), "Owner2").getPipelineID()); pipelineManager.openPipeline(ratisPipeline2.getId()); SCMNodeDetails scmNodeDetails = new SCMNodeDetails.Builder() .setRpcAddress(new InetSocketAddress("0.0.0.0", 0)).setSCMNodeId("scm1") diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java index 0c3fbccad7cb..d8ada5132ece 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.ContainerManagerV2; import org.apache.hadoop.hdds.scm.ha.SCMHAConfiguration; @@ -35,7 +36,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; public class TestSCMSnapshot { private static MiniOzoneCluster cluster; @@ -66,11 +66,11 @@ public void testSnapshot() throws Exception { PipelineManager pipelineManager = scm.getPipelineManager(); Pipeline ratisPipeline1 = pipelineManager.getPipeline( containerManager.allocateContainer( - RATIS, THREE, "Owner1").getPipelineID()); + new RatisReplicationConfig(THREE), "Owner1").getPipelineID()); pipelineManager.openPipeline(ratisPipeline1.getId()); Pipeline ratisPipeline2 = pipelineManager.getPipeline( containerManager.allocateContainer( - RATIS, ONE, "Owner2").getPipelineID()); + new RatisReplicationConfig(ONE), "Owner2").getPipelineID()); pipelineManager.openPipeline(ratisPipeline2.getId()); long snapshotInfo2 = 
        scm.getScmHAManager().asSCMHADBTransactionBuffer()
            .getLatestTrxInfo().getTransactionIndex();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
index 34844748d30e..4035fc809fa8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.container.metrics;
 
 import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -89,8 +90,8 @@ public void testContainerOpsMetrics() throws IOException {
         "NumSuccessfulCreateContainers", metrics);
     ContainerInfo containerInfo = containerManager.allocateContainer(
-        HddsProtos.ReplicationType.RATIS,
-        HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
+        new RatisReplicationConfig(
+            HddsProtos.ReplicationFactor.ONE), OzoneConsts.OZONE);
 
     metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
     Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers",
@@ -98,8 +99,8 @@
     try {
       containerManager.allocateContainer(
-          HddsProtos.ReplicationType.RATIS,
-          HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
+          new RatisReplicationConfig(
+              HddsProtos.ReplicationFactor.THREE), OzoneConsts.OZONE);
       fail("testContainerOpsMetrics failed");
     } catch (IOException ex) {
       // Here it should fail, so it should have the old metric value.
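The container-side API moves in step with the pipeline-side one: allocateContainer now takes the ReplicationConfig plus the owner string. A sketch of the new call, assuming a ContainerManagerV2 from the test fixture and a hypothetical owner name; whether the method declares IOException is taken from the surrounding try/catch in the hunk above:

import java.io.IOException;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;

class AllocateContainerSketch {
  static ContainerInfo allocate(ContainerManagerV2 containerManager)
      throws IOException {
    // Old API (removed): allocateContainer(RATIS, ONE, "owner").
    return containerManager.allocateContainer(
        new RatisReplicationConfig(ReplicationFactor.ONE), "owner");
  }
}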
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index 0ecad428f64f..69bfa9e065be 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -18,8 +18,9 @@ package org.apache.hadoop.hdds.scm.pipeline; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.test.GenericTestUtils; @@ -75,8 +76,8 @@ public void cleanup() { private void checkLeaderBalance(int dnNum, int leaderNumOfEachDn) throws Exception { List pipelines = pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN); + .getPipelines(new RatisReplicationConfig( + ReplicationFactor.THREE), Pipeline.PipelineState.OPEN); for (Pipeline pipeline : pipelines) { LambdaTestUtils.await(30000, 500, () -> @@ -114,9 +115,9 @@ public void testRestoreSuggestedLeader() throws Exception { // make sure two pipelines are created waitForPipelines(pipelineNum); // No Factor ONE pipeline is auto created. - Assert.assertEquals(0, pipelineManager.getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE).size()); + Assert.assertEquals(0, + pipelineManager.getPipelines(new RatisReplicationConfig( + ReplicationFactor.ONE)).size()); // pipelineNum pipelines in 3 datanodes, // each datanode has leaderNumOfEachDn leaders after balance @@ -164,8 +165,8 @@ public void testMinLeaderCountChoosePolicy() throws Exception { waitForPipelines(pipelineNum); // No Factor ONE pipeline is auto created. 
Assert.assertEquals(0, pipelineManager.getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE).size()); + new RatisReplicationConfig( + ReplicationFactor.ONE)).size()); // pipelineNum pipelines in 3 datanodes, // each datanode has leaderNumOfEachDn leaders after balance @@ -177,8 +178,8 @@ public void testMinLeaderCountChoosePolicy() throws Exception { // then check leader balance List pipelines = pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN); + .getPipelines(new RatisReplicationConfig( + ReplicationFactor.THREE), Pipeline.PipelineState.OPEN); int destroyNum = r.nextInt(pipelines.size()); for (int k = 0; k <= destroyNum; k++) { @@ -209,8 +210,8 @@ public void testDefaultLeaderChoosePolicy() throws Exception { private void waitForPipelines(int numPipelines) throws TimeoutException, InterruptedException { GenericTestUtils.waitFor(() -> pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) + .getPipelines(new RatisReplicationConfig( + ReplicationFactor.THREE), Pipeline.PipelineState.OPEN) .size() >= numPipelines, 100, 60000); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java index 1ed7a730a29c..ef45d030465e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hdds.scm.pipeline; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManagerV2; @@ -37,12 +39,8 @@ import java.util.List; import java.util.Set; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos - .ReplicationType.RATIS; import org.junit.Rule; import org.junit.rules.Timeout; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos - .ReplicationFactor.THREE; /** * Test for the Node2Pipeline map. 
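As the init() hunk just below shows, the tests pair a freshly allocated container with its pipeline through the PipelineManager. A condensed sketch of that setup step, with containerManager and pipelineManager assumed from the fixture and the ContainerWithPipeline import path taken from the usual Ozone package layout:

import java.io.IOException;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;

class ContainerWithPipelineSketch {
  static ContainerWithPipeline setUp(ContainerManagerV2 containerManager,
      PipelineManager pipelineManager) throws IOException {
    // Allocate a RATIS/THREE container, then resolve its pipeline by ID.
    ContainerInfo containerInfo = containerManager.allocateContainer(
        new RatisReplicationConfig(ReplicationFactor.THREE), "testOwner");
    return new ContainerWithPipeline(containerInfo,
        pipelineManager.getPipeline(containerInfo.getPipelineID()));
  }
}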
@@ -76,7 +74,8 @@ public void init() throws Exception { containerManager = scm.getContainerManager(); pipelineManager = scm.getPipelineManager(); ContainerInfo containerInfo = containerManager.allocateContainer( - RATIS, THREE, "testOwner"); + new RatisReplicationConfig( + ReplicationFactor.THREE), "testOwner"); ratisContainer = new ContainerWithPipeline(containerInfo, pipelineManager.getPipeline(containerInfo.getPipelineID())); pipelineManager = scm.getPipelineManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java index 083ad6beed2e..83dd5beb5658 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java @@ -19,8 +19,9 @@ package org.apache.hadoop.hdds.scm.pipeline; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -79,8 +80,8 @@ public static void init() throws Exception { final StorageContainerManager scm = cluster.getStorageContainerManager(); pipelineManager = scm.getPipelineManager(); ratisPipelines = pipelineManager.getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + new RatisReplicationConfig( + ReplicationFactor.THREE)); timeForFailure = (int) ratisServerConfig .getFollowerSlownessTimeout(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index aba5ab54f94a..2a6cc636ff36 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -18,9 +18,11 @@ package org.apache.hadoop.hdds.scm.pipeline; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -58,8 +60,6 @@ import org.junit.Rule; import org.junit.rules.Timeout; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; /** * Tests for Pipeline Closing. 
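The close path queries by the same config object. The teardown pattern in the TestRatisPipelineCreateAndDestroy hunks below fetches OPEN pipelines for a given ReplicationConfig and closes each one; a sketch under the same fixture assumptions:

import java.io.IOException;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;

class ClosePipelinesSketch {
  static void closeOpenRatisThree(PipelineManager pipelineManager)
      throws IOException {
    // Filter by ReplicationConfig plus state, then close without force,
    // mirroring closePipeline(pipeline, false) in the test hunks.
    for (Pipeline pipeline : pipelineManager.getPipelines(
        new RatisReplicationConfig(ReplicationFactor.THREE),
        Pipeline.PipelineState.OPEN)) {
      pipelineManager.closePipeline(pipeline, false);
    }
  }
}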
@@ -99,7 +99,8 @@ public void init() throws Exception { containerManager = scm.getContainerManager(); pipelineManager = scm.getPipelineManager(); ContainerInfo containerInfo = containerManager - .allocateContainer(RATIS, THREE, "testOwner"); + .allocateContainer(new RatisReplicationConfig( + ReplicationFactor.THREE), "testOwner"); ratisContainer = new ContainerWithPipeline(containerInfo, pipelineManager.getPipeline(containerInfo.getPipelineID())); pipelineManager = scm.getPipelineManager(); @@ -213,7 +214,8 @@ public void testPipelineCloseWithLogFailure() throws IOException { ArgumentCaptor.forClass(PipelineActionsFromDatanode.class); ContainerInfo containerInfo = containerManager - .allocateContainer(RATIS, THREE, "testOwner"); + .allocateContainer(new RatisReplicationConfig( + ReplicationFactor.THREE), "testOwner"); ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(containerInfo, pipelineManager.getPipeline(containerInfo.getPipelineID())); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index 97d25881836c..db9024a53a7f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -18,8 +18,10 @@ package org.apache.hadoop.hdds.scm.pipeline; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.SCMService.Event; import org.apache.hadoop.hdds.scm.node.NodeStatus; @@ -81,12 +83,12 @@ public void testAutomaticPipelineCreationOnPipelineDestroy() // make sure two pipelines are created waitForPipelines(2); Assert.assertEquals(numOfDatanodes, pipelineManager.getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE).size()); + new RatisReplicationConfig( + ReplicationFactor.ONE)).size()); List pipelines = pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN); + .getPipelines(new RatisReplicationConfig( + ReplicationFactor.THREE), Pipeline.PipelineState.OPEN); for (Pipeline pipeline : pipelines) { pipelineManager.closePipeline(pipeline, false); } @@ -103,12 +105,12 @@ public void testAutomaticPipelineCreationDisablingFactorONE() waitForPipelines(2); // No Factor ONE pipeline is auto created. 
Assert.assertEquals(0, pipelineManager.getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE).size()); + new RatisReplicationConfig( + ReplicationFactor.ONE)).size()); List pipelines = pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN); + .getPipelines(new RatisReplicationConfig( + ReplicationFactor.THREE), Pipeline.PipelineState.OPEN); for (Pipeline pipeline : pipelines) { pipelineManager.closePipeline(pipeline, false); } @@ -127,16 +129,16 @@ public void testPipelineCreationOnNodeRestart() throws Exception { List dns = new ArrayList<>(cluster.getHddsDatanodes()); List pipelines = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + pipelineManager.getPipelines(new RatisReplicationConfig( + ReplicationFactor.THREE)); for (HddsDatanodeService dn : dns) { cluster.shutdownHddsDatanode(dn.getDatanodeDetails()); } // try creating another pipeline now try { - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + pipelineManager.createPipeline(new RatisReplicationConfig( + ReplicationFactor.THREE)); Assert.fail("pipeline creation should fail after shutting down pipeline"); } catch (IOException ioe) { // As now all datanodes are shutdown, they move to stale state, there @@ -171,8 +173,8 @@ public void testPipelineCreationOnNodeRestart() throws Exception { private void waitForPipelines(int numPipelines) throws TimeoutException, InterruptedException { GenericTestUtils.waitFor(() -> pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) + .getPipelines(new RatisReplicationConfig( + ReplicationFactor.THREE), Pipeline.PipelineState.OPEN) .size() >= numPipelines, 100, 60000); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java index 7466975c8000..3d34310f78f4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java @@ -18,7 +18,9 @@ package org.apache.hadoop.hdds.scm.pipeline; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManagerV2; @@ -33,13 +35,8 @@ import java.util.concurrent.TimeUnit; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.protocol.proto - .HddsProtos.ReplicationFactor.THREE; import org.junit.Rule; import org.junit.rules.Timeout; -import static org.apache.hadoop.hdds.protocol.proto - .HddsProtos.ReplicationType.RATIS; /** * Test SCM restart and recovery wrt pipelines. 
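The Pipeline builder changes the same way, as the TestMiniOzoneCluster and TestKeyManagerImpl hunks further down illustrate: the setType/setFactor pair collapses into a single setReplicationConfig call. A sketch of constructing a stub pipeline under the new builder API (wrapper names hypothetical):

import java.util.ArrayList;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

class PipelineStubSketch {
  static Pipeline openRatisStub() {
    return Pipeline.newBuilder()
        .setState(Pipeline.PipelineState.OPEN)
        .setId(PipelineID.randomId())
        // One call replaces the old setType(RATIS) + setFactor(THREE) pair.
        .setReplicationConfig(
            new RatisReplicationConfig(ReplicationFactor.THREE))
        .setNodes(new ArrayList<>())
        .build();
  }
}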
@@ -85,11 +82,13 @@ public static void init() throws Exception { pipelineManager = scm.getPipelineManager(); ratisPipeline1 = pipelineManager.getPipeline( containerManager.allocateContainer( - RATIS, THREE, "Owner1").getPipelineID()); + new RatisReplicationConfig( + ReplicationFactor.THREE), "Owner1").getPipelineID()); pipelineManager.openPipeline(ratisPipeline1.getId()); ratisPipeline2 = pipelineManager.getPipeline( containerManager.allocateContainer( - RATIS, ONE, "Owner2").getPipelineID()); + new RatisReplicationConfig( + ReplicationFactor.ONE), "Owner2").getPipelineID()); pipelineManager.openPipeline(ratisPipeline2.getId()); // At this stage, there should be 2 pipeline one with 1 open container // each. Try restarting the SCM and then discover that pipeline are in @@ -125,7 +124,8 @@ public void testPipelineWithScmRestart() throws IOException { // Try creating a new container, it should be from the same pipeline // as was before restart ContainerInfo containerInfo = newContainerManager - .allocateContainer(RATIS, THREE, "Owner1"); + .allocateContainer(new RatisReplicationConfig( + ReplicationFactor.THREE), "Owner1"); Assert.assertEquals(containerInfo.getPipelineID(), ratisPipeline1.getId()); } } \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java index 47cb135415f4..ca80373c6644 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java @@ -19,9 +19,10 @@ package org.apache.hadoop.hdds.scm.safemode; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ReplicationManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -98,8 +99,8 @@ public void testScmSafeMode() throws Exception { pipelineManager = cluster.getStorageContainerManager().getPipelineManager(); List pipelineList = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + pipelineManager.getPipelines(new RatisReplicationConfig( + ReplicationFactor.THREE)); pipelineList.get(0).getNodes().forEach(datanodeDetails -> { @@ -192,16 +193,16 @@ public void tearDown() { private void waitForRatis3NodePipelines(int numPipelines) throws TimeoutException, InterruptedException { GenericTestUtils.waitFor(() -> pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) + .getPipelines(new RatisReplicationConfig(ReplicationFactor.THREE), + Pipeline.PipelineState.OPEN) .size() == numPipelines, 100, 60000); } private void waitForRatis1NodePipelines(int numPipelines) throws TimeoutException, InterruptedException { GenericTestUtils.waitFor(() -> pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, Pipeline.PipelineState.OPEN) + .getPipelines(new 
RatisReplicationConfig(ReplicationFactor.ONE), + Pipeline.PipelineState.OPEN) .size() == numPipelines, 100, 60000); } } \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index b57d49e97b1a..40d99a798f3c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -242,8 +243,8 @@ public void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, TimeoutException, InterruptedException { GenericTestUtils.waitFor(() -> { int openPipelineCount = scm.getPipelineManager(). - getPipelines(HddsProtos.ReplicationType.RATIS, - factor, Pipeline.PipelineState.OPEN).size(); + getPipelines(new RatisReplicationConfig(factor), + Pipeline.PipelineState.OPEN).size(); return openPipelineCount >= 1; }, 1000, timeoutInMs); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 4c028bc00496..d49710d5ffbe 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -30,9 +30,11 @@ import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientGrpc; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -122,8 +124,8 @@ public void testStartMultipleDatanodes() throws Exception { Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationConfig(new StandaloneReplicationConfig( + ReplicationFactor.ONE)) .setNodes(dns) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index fe5b11e27644..6240426010dc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -25,6 +25,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.FileUtil; +import 
org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; @@ -139,9 +140,9 @@ public void teardown() throws Exception { public void testReadStateMachineFailureClosesPipeline() throws Exception { // Stop one follower datanode List pipelines = - cluster.getStorageContainerManager().getPipelineManager().getPipelines( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + cluster.getStorageContainerManager().getPipelineManager() + .getPipelines(new RatisReplicationConfig( + HddsProtos.ReplicationFactor.THREE)); Assert.assertEquals(1, pipelines.size()); Pipeline ratisPipeline = pipelines.iterator().next(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java index dfd2692a986f..fc72e8d09d29 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java @@ -25,6 +25,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; @@ -221,8 +222,8 @@ public void testDeleteKeyWithSlowFollower() throws Exception { List pipelineList = cluster.getStorageContainerManager().getPipelineManager() - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + .getPipelines(new RatisReplicationConfig( + HddsProtos.ReplicationFactor.THREE)); Assert.assertTrue(pipelineList.size() >= FACTOR_THREE_PIPELINE_COUNT); Pipeline pipeline = pipelineList.get(0); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java index ddf7ccdc54a7..8229e3adc46c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java @@ -152,7 +152,7 @@ public void testHybridPipelineOnDatanode() throws IOException { Pipeline pipeline2 = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(pipelineID2); - Assert.assertFalse(pipeline1.getFactor().equals(pipeline2.getFactor())); + Assert.assertNotEquals(pipeline1, pipeline2); Assert.assertTrue(pipeline1.getType() == HddsProtos.ReplicationType.RATIS); Assert.assertTrue(pipeline1.getType() == pipeline2.getType()); // assert that the pipeline Id1 and pipelineId2 are on the same node diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 96a0f7c137e4..e6e895582d06 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -37,6 +37,7 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -758,7 +759,7 @@ public void testLookupKeyWithLocation() throws IOException { Assume.assumeFalse(nodeList.get(0).equals(nodeList.get(2))); // create a pipeline using 3 datanodes Pipeline pipeline = scm.getPipelineManager().createPipeline( - ReplicationType.RATIS, ReplicationFactor.THREE, nodeList); + new RatisReplicationConfig(ReplicationFactor.THREE), nodeList); List locationInfoList = new ArrayList<>(); locationInfoList.add( new OmKeyLocationInfo.Builder().setPipeline(pipeline) @@ -1249,8 +1250,8 @@ private Pipeline getRandomPipeline() { return Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setType(ReplicationType.RATIS) - .setFactor(ReplicationFactor.THREE) + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) .setNodes(new ArrayList<>()) .build(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java index d42606e7b098..25f93b46b2c1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java @@ -19,6 +19,7 @@ import java.util.Optional; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.XceiverClientGrpc; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -47,7 +48,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER; import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode; import static org.junit.Assert.assertEquals; @@ -113,7 +113,8 @@ public void testDatanodeRegistrationAndReports() throws Exception { // Verify we can never create a pipeline in Recon. 
LambdaTestUtils.intercept(UnsupportedOperationException.class, "Trying to create pipeline in Recon, which is prohibited!", - () -> reconPipelineManager.createPipeline(RATIS, ONE)); + () -> reconPipelineManager + .createPipeline(new RatisReplicationConfig(ONE))); ContainerManagerV2 scmContainerManager = scm.getContainerManager(); assertTrue(scmContainerManager.getContainers().isEmpty()); @@ -127,7 +128,8 @@ public void testDatanodeRegistrationAndReports() throws Exception { // Create container ContainerManagerV2 reconContainerManager = reconScm.getContainerManager(); ContainerInfo containerInfo = - scmContainerManager.allocateContainer(RATIS, ONE, "test"); + scmContainerManager + .allocateContainer(new RatisReplicationConfig(ONE), "test"); long containerID = containerInfo.getContainerID(); Pipeline pipeline = scmPipelineManager.getPipeline(containerInfo.getPipelineID()); @@ -167,7 +169,8 @@ public void testReconRestart() throws Exception { // Create container in SCM. ContainerInfo containerInfo = - scmContainerManager.allocateContainer(RATIS, ONE, "test"); + scmContainerManager + .allocateContainer(new RatisReplicationConfig(ONE), "test"); long containerID = containerInfo.getContainerID(); PipelineManager scmPipelineManager = scm.getPipelineManager(); Pipeline pipeline = @@ -178,7 +181,7 @@ public void testReconRestart() throws Exception { // Close a pipeline Optional pipelineToClose = scmPipelineManager - .getPipelines(RATIS, ONE) + .getPipelines(new RatisReplicationConfig(ONE)) .stream() .filter(p -> !p.getId().equals(containerInfo.getPipelineID())) .findFirst(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java index 3e54a963ebaf..aaeaa239e323 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java @@ -20,6 +20,7 @@ import java.time.Duration; import java.util.List; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.XceiverClientGrpc; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -45,7 +46,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer.runTestOzoneContainerViaDataNode; /** @@ -106,7 +106,8 @@ public void testMissingContainerDownNode() throws Exception { ReconContainerManager reconContainerManager = (ReconContainerManager) reconScm.getContainerManager(); ContainerInfo containerInfo = - scmContainerManager.allocateContainer(RATIS, ONE, "test"); + scmContainerManager + .allocateContainer(new RatisReplicationConfig(ONE), "test"); long containerID = containerInfo.getContainerID(); Pipeline pipeline = scmPipelineManager.getPipeline(containerInfo.getPipelineID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java index 
ecffb9e78a92..8b55e1be8cba 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java @@ -19,10 +19,11 @@ package org.apache.hadoop.ozone.scm; import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.placement.algorithms .SCMContainerPlacementMetrics; @@ -120,8 +121,8 @@ public void test() throws IOException { PipelineManager manager = cluster.getStorageContainerManager().getPipelineManager(); List pipelines = manager.getPipelines().stream().filter(p -> - p.getType() == HddsProtos.ReplicationType.RATIS && - p.getFactor() == HddsProtos.ReplicationFactor.THREE) + RatisReplicationConfig + .hasFactor(p.getReplicationConfig(), ReplicationFactor.THREE)) .collect(Collectors.toList()); Pipeline targetPipeline = pipelines.get(0); List nodes = targetPipeline.getNodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java index e2513244571b..1d8646f1aae8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java @@ -24,8 +24,9 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.ha.SCMHAConfiguration; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl; @@ -319,8 +320,8 @@ private List writeToIncreaseLogIndex( .getLastAppliedTermIndex().getIndex(); while (logIndex < targetLogIndex) { containers.add(scm.getContainerManager() - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, + .allocateContainer( + new RatisReplicationConfig(ReplicationFactor.THREE), TestSCMInstallSnapshotWithHA.class.getName())); Thread.sleep(100); logIndex = stateMachine.getLastAppliedTermIndex().getIndex(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java index e4d9797e7058..0874dca8efe5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java @@ -20,7 +20,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManagerV2; @@ -150,9 +152,10 @@ public void testSCMContainerStateCount() throws Exception { List<ContainerInfo> containerInfoList = new ArrayList<>(); for (int i=0; i < 10; i++) { - containerInfoList.add(scmContainerManager.allocateContainer(HddsProtos - .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE, - UUID.randomUUID().toString())); + containerInfoList.add( + scmContainerManager.allocateContainer(new StandaloneReplicationConfig( + ReplicationFactor.ONE), + UUID.randomUUID().toString())); } long containerID; for (int i=0; i < 10; i++) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java index 74a7537ab171..a3bd2953f3b1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java @@ -18,11 +18,12 @@ package org.apache.hadoop.ozone.scm; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.XceiverClientGrpc; import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.XceiverClientSpi; @@ -66,8 +67,8 @@ public void setup() { pipeline = Pipeline.newBuilder() .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE) + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) .setState(Pipeline.PipelineState.CLOSED) .setNodes(dns) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java index 6f94c33ba462..0222512e3fa9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java @@ -19,8 +19,9 @@ package org.apache.hadoop.ozone.scm.pipeline; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -74,7 +75,8 @@ public void setup() throws Exception { public void testPipelineCreation() { MetricsRecordBuilder metrics = getMetrics( SCMPipelineMetrics.class.getSimpleName()); - long
numPipelineCreated = getLongCounter("NumPipelineCreated", metrics); + long numPipelineCreated = + getLongCounter("NumPipelineCreated", metrics); // Pipelines are created in background when the cluster starts. Assert.assertTrue(numPipelineCreated > 0); } @@ -106,8 +108,9 @@ public void testPipelineDestroy() { public void testNumBlocksAllocated() throws IOException { AllocatedBlock block = cluster.getStorageContainerManager().getScmBlockManager() - .allocateBlock(5, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, "Test", new ExcludeList()); + .allocateBlock(5, + new RatisReplicationConfig(ReplicationFactor.ONE), + "Test", new ExcludeList()); MetricsRecordBuilder metrics = getMetrics(SCMPipelineMetrics.class.getSimpleName()); Pipeline pipeline = block.getPipeline(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java index 32b2ac944197..e051158aa948 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java @@ -22,8 +22,10 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; @@ -108,8 +110,6 @@ public ScmBlockLocationTestingClient(String clusterID, String scmId, /** * Returns Fake blocks to the BlockManager so we get blocks in the Database. * @param size - size of the block. - * @param type Replication Type - * @param factor - Replication factor * @param owner - String owner. 
* @param excludeList list of dns/pipelines to exclude * @return @@ -117,7 +117,7 @@ public ScmBlockLocationTestingClient(String clusterID, String scmId, */ @Override public List allocateBlock(long size, int num, - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, + ReplicationConfig config, String owner, ExcludeList excludeList) throws IOException { DatanodeDetails datanodeDetails = randomDatanodeDetails(); Pipeline pipeline = createPipeline(datanodeDetails); @@ -136,8 +136,8 @@ private Pipeline createPipeline(DatanodeDetails datanode) { Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationConfig( + new StandaloneReplicationConfig(ReplicationFactor.ONE)) .setNodes(dns) .build(); return pipeline; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 9612c3ccf0a4..04ee77cf95c0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -35,6 +35,7 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -344,8 +345,8 @@ public void testLookupFileWithDnFailure() throws IOException { final Pipeline pipelineOne = Pipeline.newBuilder() .setId(PipelineID.randomId()) - .setType(ReplicationType.RATIS) - .setFactor(ReplicationFactor.THREE) + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) .setState(Pipeline.PipelineState.OPEN) .setLeaderId(dnOne.getUuid()) .setNodes(Arrays.asList(dnOne, dnTwo, dnThree)) @@ -353,8 +354,8 @@ public void testLookupFileWithDnFailure() throws IOException { final Pipeline pipelineTwo = Pipeline.newBuilder() .setId(PipelineID.randomId()) - .setType(ReplicationType.RATIS) - .setFactor(ReplicationFactor.THREE) + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) .setState(Pipeline.PipelineState.OPEN) .setLeaderId(dnFour.getUuid()) .setNodes(Arrays.asList(dnFour, dnFive, dnSix)) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java index ff1f9c3af196..af6f4a98af9d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java @@ -27,6 +27,7 @@ import com.google.common.base.Optional; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -216,8 +217,8 @@ public static void addKeyLocationInfo( Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - 
.setType(keyInfo.getType()) - .setFactor(keyInfo.getFactor()) + .setReplicationConfig(ReplicationConfig + .fromTypeAndFactor(keyInfo.getType(), keyInfo.getFactor())) .setNodes(new ArrayList<>()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 7bf43a7226f6..861098ac8f49 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -37,8 +37,10 @@ import org.mockito.Mockito; import org.apache.hadoop.hdds.client.ContainerBlockID; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -134,8 +136,8 @@ public void setup() throws Exception { Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationConfig( + new StandaloneReplicationConfig(ReplicationFactor.ONE)) .setNodes(new ArrayList<>()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index 3c228325ad7e..97ade261d38b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -93,8 +94,7 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setType(replicationType) - .setFactor(replicationFactor) + .setReplicationConfig(new RatisReplicationConfig(replicationFactor)) .setNodes(new ArrayList<>()) .build(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 49e3fd311b2d..9d3734f4c0bb 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.api; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; @@ -98,8 +99,9 @@ public Response getDatanodes() { String leaderNode = pipeline.getLeaderNode().getHostName(); DatanodePipeline datanodePipeline = new DatanodePipeline( pipelineID.getId(), - pipeline.getType().toString(), - pipeline.getFactor().getNumber(), + pipeline.getReplicationConfig().getReplicationType().toString(), + ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig()) + .getNumber(), leaderNode ); pipelines.add(datanodePipeline); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java index 045cdc0b3baa..c7e5cc71a119 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.api; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.recon.MetricsServiceProviderFactory; @@ -101,12 +102,12 @@ public Response getPipelines() { PipelineMetadata.Builder pipelineBuilder = builder.setPipelineId(pipelineId) - .setDatanodes(datanodes) - .setDuration(duration) - .setStatus(pipeline.getPipelineState()) - .setReplicationFactor(pipeline.getFactor().getNumber()) + .setDatanodes(datanodes) + .setDuration(duration) + .setStatus(pipeline.getPipelineState()) + .setReplicationFactor(ReplicationConfig + .getLegacyFactor(pipeline.getReplicationConfig()).getNumber()) .setReplicationType(pipeline.getType().toString()); - // If any metrics service providers like Prometheus // is configured, then query it for metrics and populate // leader election count and last leader election time diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java index 281330136b35..f390ed72f9cf 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java @@ -21,8 +21,8 @@ import java.util.List; import org.apache.commons.collections.map.DefaultedMap; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineFactory; import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; @@ -39,10 +39,11 @@ public class ReconPipelineFactory extends PipelineFactory { setProviders(new DefaultedMap(reconMockPipelineProvider)); } - static class ReconPipelineProvider extends PipelineProvider { + static class ReconPipelineProvider extends + PipelineProvider { @Override - public Pipeline create(HddsProtos.ReplicationFactor factor){ + public Pipeline create(ReplicationConfig config){ // We don't expect this to be called at all. But adding this as a red // flag for troubleshooting. 
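// For orientation, a sketch of the legacy bridge used by NodeEndpoint and
// PipelineEndpoint above; pipeline is assumed to carry a Ratis THREE config:
//
//   ReplicationConfig config = pipeline.getReplicationConfig();
//   config.getReplicationType();                           // RATIS
//   ReplicationConfig.getLegacyFactor(config).getNumber(); // 3
//
// getLegacyFactor() recovers the old HddsProtos.ReplicationFactor enum, so
// callers that still expose a numeric factor keep working during the migration.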
throw new UnsupportedOperationException( @@ -50,7 +51,7 @@ public Pipeline create(HddsProtos.ReplicationFactor factor){ } @Override - public Pipeline create(HddsProtos.ReplicationFactor factor, + public Pipeline create(ReplicationConfig config, List nodes) { throw new UnsupportedOperationException( "Trying to create pipeline in Recon, which is prohibited!"); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java index 589de00269f7..b10f4c8c8c88 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java @@ -82,7 +82,7 @@ protected void processPipelineReport(PipelineReport report, setPipelineLeaderId(report, pipeline, dn); if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) { - LOG.info("Pipeline {} {} reported by {}", pipeline.getFactor(), + LOG.info("Pipeline {} {} reported by {}", pipeline.getReplicationConfig(), pipeline.getId(), dn); if (pipeline.isHealthy()) { reconPipelineManager.openPipeline(pipelineID); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index 848a2af4bcb7..691698725e74 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.recon; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.junit.Assert.assertNotNull; @@ -29,6 +30,7 @@ import java.util.List; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -143,7 +145,7 @@ public static void writeDataToOm(OMMetadataManager omMetadataManager, .setBucketName("bucketOne") .setVolumeName("sampleVol") .setKeyName(key) - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationFactor(ONE) .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) .build()); } @@ -168,7 +170,7 @@ public static void writeDataToOm(OMMetadataManager omMetadataManager, .setBucketName(bucket) .setVolumeName(volume) .setKeyName(key) - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationFactor(ONE) .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) .setOmKeyLocationInfos(omKeyLocationInfoGroupList) .build()); @@ -188,11 +190,10 @@ public static Pipeline getRandomPipeline() { */ public static Pipeline getRandomPipeline(DatanodeDetails datanodeDetails) { return Pipeline.newBuilder() - .setFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationConfig(new StandaloneReplicationConfig(ONE)) .setId(PipelineID.randomId()) .setNodes(Collections.singletonList(datanodeDetails)) 
.setState(Pipeline.PipelineState.OPEN) - .setType(HddsProtos.ReplicationType.STAND_ALONE) .build(); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 4a453b041eb6..57ce504f6834 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -399,7 +399,7 @@ private void testDatanodeResponse(DatanodeMetadata datanodeMetadata) Assert.assertEquals(1, datanodeMetadata.getPipelines().size()); Assert.assertEquals(pipelineId, datanodeMetadata.getPipelines().get(0).getPipelineID().toString()); - Assert.assertEquals(pipeline.getFactor().getNumber(), + Assert.assertEquals(pipeline.getReplicationConfig().getRequiredNodes(), datanodeMetadata.getPipelines().get(0).getReplicationFactor()); Assert.assertEquals(pipeline.getType().toString(), datanodeMetadata.getPipelines().get(0).getReplicationType()); @@ -504,7 +504,7 @@ public void testGetPipelines() throws Exception { Assert.assertEquals(1, pipelineMetadata.getDatanodes().size()); Assert.assertEquals(pipeline.getType().toString(), pipelineMetadata.getReplicationType()); - Assert.assertEquals(pipeline.getFactor().getNumber(), + Assert.assertEquals(pipeline.getReplicationConfig().getRequiredNodes(), pipelineMetadata.getReplicationFactor()); Assert.assertEquals(datanodeDetails.getHostName(), pipelineMetadata.getLeaderNode()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java index 9cbf2d2ee7be..39b4e1134ede 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java @@ -23,8 +23,9 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.ha.MockSCMHADBTransactionBuffer; import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; @@ -100,20 +101,21 @@ public void testInitialize() throws IOException { // Valid pipeline in Allocated state. Pipeline validPipeline = Pipeline.newBuilder() - .setFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationConfig( + new StandaloneReplicationConfig(ReplicationFactor.ONE)) .setId(pipelinesFromScm.get(0).getId()) .setNodes(pipelinesFromScm.get(0).getNodes()) .setState(Pipeline.PipelineState.ALLOCATED) - .setType(ReplicationType.STAND_ALONE) + .build(); // Invalid pipeline. 
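// This diff shows three idioms for checking a pipeline's replication under the
// new model (a sketch; p is assumed to be a Ratis THREE pipeline):
//
//   RatisReplicationConfig.hasFactor(p.getReplicationConfig(), ReplicationFactor.THREE)
//   p.getReplicationConfig().getRequiredNodes() == 3
//   ReplicationConfig.getLegacyFactor(p.getReplicationConfig())
//       == HddsProtos.ReplicationFactor.THREE
//
// hasFactor replaces the old "type == RATIS && factor == THREE" test, covering
// both the replication type and the factor in one call.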
Pipeline invalidPipeline = Pipeline.newBuilder() - .setFactor(HddsProtos.ReplicationFactor.ONE) + .setReplicationConfig( + new StandaloneReplicationConfig(ReplicationFactor.ONE)) .setId(PipelineID.randomId()) .setNodes(Collections.singletonList(randomDatanodeDetails())) .setState(Pipeline.PipelineState.CLOSED) - .setType(HddsProtos.ReplicationType.STAND_ALONE) .build(); NetworkTopology clusterMap = new NetworkTopologyImpl(conf); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index a7ccbac95894..2cf3c19bfa7d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -31,6 +31,7 @@ import com.google.gson.JsonArray; import com.google.gson.JsonElement; import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -53,6 +54,8 @@ import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + /** * Class that gives chunk location given a specific key. */ @@ -117,7 +120,7 @@ protected void execute(OzoneClient client, OzoneAddress address) Pipeline pipeline = keyLocation.getPipeline(); if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) { pipeline = Pipeline.newBuilder(pipeline) - .setType(HddsProtos.ReplicationType.STAND_ALONE).build(); + .setReplicationConfig(new StandaloneReplicationConfig(ONE)).build(); } xceiverClient = xceiverClientManager .acquireClientForReadData(pipeline); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index 6d249687fa06..9c67f323162f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -29,7 +29,6 @@ import java.util.regex.Pattern; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.utils.HAUtils; @@ -357,7 +356,7 @@ public static Pipeline findPipelineForTest(String pipelineId, + pipelineId)); } else { pipeline = pipelines.stream() - .filter(p -> p.getFactor() == HddsProtos.ReplicationFactor.THREE) + .filter(p -> p.getReplicationConfig().getRequiredNodes() == 3) .findFirst() .orElseThrow(() -> new IllegalArgumentException( "Pipeline ID is NOT defined, and no pipeline " + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java index c0c58d03595f..7181a44cb52c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java @@ -36,7 +36,6 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.XceiverClientSpi; @@ -126,7 +125,7 @@ public Void call() throws Exception { if (!arePipelinesOrDatanodesProvided()) { //default behaviour if no arguments provided firstPipeline = pipelinesFromSCM.stream() - .filter(p -> p.getFactor() == ReplicationFactor.THREE) + .filter(p -> p.getReplicationConfig().getRequiredNodes() == 3) .findFirst() .orElseThrow(() -> new IllegalArgumentException( "Pipeline ID is NOT defined, and no pipeline " + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java index c06914a4b4a5..a554c3d560c2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java @@ -21,6 +21,7 @@ import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -99,7 +100,9 @@ public Void call() throws Exception { } else { pipeline = pipelines.stream() - .filter(p -> p.getFactor() == HddsProtos.ReplicationFactor.THREE) + .filter( + p -> ReplicationConfig.getLegacyFactor(p.getReplicationConfig()) + == HddsProtos.ReplicationFactor.THREE) .findFirst() .orElseThrow(() -> new IllegalArgumentException( "Pipeline ID is NOT defined, and no pipeline " + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java index bf2cc044d99d..5cbd831b72d0 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java @@ -26,6 +26,7 @@ import java.util.concurrent.LinkedBlockingQueue; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; @@ -39,7 +40,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -191,8 +191,8 @@ private XceiverClientRatis createXceiverClient(OzoneConfiguration conf) { Pipeline pipeline = Pipeline.newBuilder() 
.setId(PipelineID.valueOf(UUID.fromString(pipelineId))) .setState(PipelineState.OPEN) - .setType(ReplicationType.RATIS) - .setFactor(ReplicationFactor.THREE) + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) .setLeaderId(UUID.fromString(serverId)) .setNodes(datanodes) .build(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java index c8872de521e4..dd8082aa341a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java @@ -19,7 +19,8 @@ package org.apache.hadoop.ozone.genesis; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -57,6 +58,43 @@ public class BenchMarkContainerStateMap { private AtomicInteger runCount; private static int errorFrequency = 100; + public static Pipeline createPipeline(String containerName, + Iterable ids) throws IOException { + Objects.requireNonNull(ids, "ids == null"); + Preconditions.checkArgument(ids.iterator().hasNext()); + List dns = new ArrayList<>(); + ids.forEach(dns::add); + final Pipeline pipeline = Pipeline.newBuilder() + .setState(Pipeline.PipelineState.OPEN) + .setId(PipelineID.randomId()) + .setReplicationConfig( + new StandaloneReplicationConfig(ReplicationFactor.ONE)) + .setNodes(dns) + .build(); + return pipeline; + } + + public static Pipeline createSingleNodePipeline(String containerName) + throws IOException { + return createPipeline(containerName, 1); + } + + /** + * Create a pipeline with single node replica. + * + * @return Pipeline with single node in it. 
+ * @throws IOException + */ + public static Pipeline createPipeline(String containerName, int numNodes) + throws IOException { + Preconditions.checkArgument(numNodes >= 1); + final List ids = new ArrayList<>(numNodes); + for (int i = 0; i < numNodes; i++) { + ids.add(GenesisUtil.createDatanodeDetails(UUID.randomUUID())); + } + return createPipeline(containerName, ids); + } + @Setup(Level.Trial) public void initialize() throws IOException { stateMap = new ContainerStateMap(); @@ -70,7 +108,8 @@ public void initialize() throws IOException { .setState(CLOSED) .setPipelineID(pipeline.getId()) .setReplicationType(pipeline.getType()) - .setReplicationFactor(pipeline.getFactor()) + .setReplicationFactor(ReplicationConfig + .getLegacyFactor(pipeline.getReplicationConfig())) .setUsedBytes(0) .setNumberOfKeys(0) .setStateEnterTime(Time.now()) @@ -90,7 +129,8 @@ public void initialize() throws IOException { .setState(OPEN) .setPipelineID(pipeline.getId()) .setReplicationType(pipeline.getType()) - .setReplicationFactor(pipeline.getFactor()) + .setReplicationFactor(ReplicationConfig + .getLegacyFactor(pipeline.getReplicationConfig())) .setUsedBytes(0) .setNumberOfKeys(0) .setStateEnterTime(Time.now()) @@ -109,7 +149,8 @@ public void initialize() throws IOException { .setState(OPEN) .setPipelineID(pipeline.getId()) .setReplicationType(pipeline.getType()) - .setReplicationFactor(pipeline.getFactor()) + .setReplicationFactor(ReplicationConfig + .getLegacyFactor(pipeline.getReplicationConfig())) .setUsedBytes(0) .setNumberOfKeys(0) .setStateEnterTime(Time.now()) @@ -126,43 +167,6 @@ public void initialize() throws IOException { } - public static Pipeline createSingleNodePipeline(String containerName) - throws IOException { - return createPipeline(containerName, 1); - } - - /** - * Create a pipeline with single node replica. - * - * @return Pipeline with single node in it. 
- * @throws IOException - */ - public static Pipeline createPipeline(String containerName, int numNodes) - throws IOException { - Preconditions.checkArgument(numNodes >= 1); - final List ids = new ArrayList<>(numNodes); - for (int i = 0; i < numNodes; i++) { - ids.add(GenesisUtil.createDatanodeDetails(UUID.randomUUID())); - } - return createPipeline(containerName, ids); - } - - public static Pipeline createPipeline(String containerName, - Iterable ids) throws IOException { - Objects.requireNonNull(ids, "ids == null"); - Preconditions.checkArgument(ids.iterator().hasNext()); - List dns = new ArrayList<>(); - ids.forEach(dns::add); - final Pipeline pipeline = Pipeline.newBuilder() - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) - .setNodes(dns) - .build(); - return pipeline; - } - @Benchmark public void createContainerBenchMark(BenchMarkContainerStateMap state, Blackhole bh) throws IOException { @@ -177,8 +181,10 @@ private ContainerInfo getContainerInfo(BenchMarkContainerStateMap state) return new ContainerInfo.Builder() .setState(CLOSED) .setPipelineID(pipeline.getId()) - .setReplicationType(pipeline.getType()) - .setReplicationFactor(pipeline.getFactor()) + .setReplicationType( + pipeline.getReplicationConfig().getReplicationType()) + .setReplicationFactor( + ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig())) .setUsedBytes(0) .setNumberOfKeys(0) .setStateEnterTime(Time.now()) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java index 19b5e8e642bd..b1dd1ab6af4b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java @@ -27,10 +27,10 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -99,7 +99,8 @@ public static void initialize() // prepare SCM PipelineManager pipelineManager = scm.getPipelineManager(); for (Pipeline pipeline : pipelineManager - .getPipelines(ReplicationType.RATIS, ReplicationFactor.THREE)) { + .getPipelines( + new RatisReplicationConfig(ReplicationFactor.THREE))) { pipelineManager.openPipeline(pipeline.getId()); } scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java index 7bdb28c57a5f..a7e8f82f92c4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java @@ -24,9 +24,9 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; +import 
org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.block.BlockManager; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.events.SCMEvents; @@ -87,7 +87,8 @@ public static void initialize() // prepare SCM PipelineManager pipelineManager = scm.getPipelineManager(); for (Pipeline pipeline : pipelineManager - .getPipelines(ReplicationType.RATIS, ReplicationFactor.THREE)) { + .getPipelines( + new RatisReplicationConfig(ReplicationFactor.THREE))) { pipelineManager.openPipeline(pipeline.getId()); } scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS, @@ -119,7 +120,7 @@ public static void tearDown() { public void allocateBlockBenchMark(BenchMarkSCM state, Blackhole bh) throws IOException { BenchMarkSCM.blockManager - .allocateBlock(50, ReplicationType.RATIS, ReplicationFactor.THREE, + .allocateBlock(50, new RatisReplicationConfig(ReplicationFactor.THREE), "Genesis", new ExcludeList()); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java index 93e1d2db068c..b511beea2287 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java @@ -26,6 +26,7 @@ import java.util.UUID; import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -164,8 +165,7 @@ static void addPipelines(HddsProtos.ReplicationFactor factor, Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(factor) + .setReplicationConfig(new RatisReplicationConfig(factor)) .setNodes(nodes) .build(); pipelineTable.put(pipeline.getId(),