ContainerWithPipeline.java

@@ -24,7 +24,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;
 
 /**
  * Class wraps ozone container info.
@@ -48,22 +47,17 @@ public Pipeline getPipeline() {
     return pipeline;
   }
 
-  public static ContainerWithPipeline fromProtobuf(
-      HddsProtos.ContainerWithPipeline allocatedContainer)
-      throws UnknownPipelineStateException {
+  public static ContainerWithPipeline fromProtobuf(HddsProtos.ContainerWithPipeline allocatedContainer) {
     return new ContainerWithPipeline(
         ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()),
         Pipeline.getFromProtobuf(allocatedContainer.getPipeline()));
   }
 
-  public HddsProtos.ContainerWithPipeline getProtobuf(int clientVersion)
-      throws UnknownPipelineStateException {
-    HddsProtos.ContainerWithPipeline.Builder builder =
-        HddsProtos.ContainerWithPipeline.newBuilder();
-    builder.setContainerInfo(getContainerInfo().getProtobuf())
-        .setPipeline(getPipeline().getProtobufMessage(clientVersion, Name.IO_PORTS));
-
-    return builder.build();
+  public HddsProtos.ContainerWithPipeline getProtobuf(int clientVersion) {
+    return HddsProtos.ContainerWithPipeline.newBuilder()
+        .setContainerInfo(getContainerInfo().getProtobuf())
+        .setPipeline(getPipeline().getProtobufMessage(clientVersion, Name.IO_PORTS))
+        .build();
   }
 
   @Override
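
Caller impact, in brief: fromProtobuf and getProtobuf no longer declare the
checked UnknownPipelineStateException. A minimal round-trip sketch follows,
not part of the diff; it assumes ContainerWithPipeline's usual package under
org.apache.hadoop.hdds.scm.container.common.helpers, and clientVersion is a
caller-supplied placeholder.

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

final class RoundTripSketch {
  static ContainerWithPipeline roundTrip(ContainerWithPipeline cwp, int clientVersion) {
    // No throws clause or try/catch is needed any more in either direction.
    HddsProtos.ContainerWithPipeline proto = cwp.getProtobuf(clientVersion);
    return ContainerWithPipeline.fromProtobuf(proto);
  }
}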

Pipeline.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdds.scm.pipeline;
 
 import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import java.io.IOException;
@@ -31,6 +30,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
@@ -360,14 +360,11 @@ public ReplicationConfig getReplicationConfig() {
     return replicationConfig;
   }
 
-  public HddsProtos.Pipeline getProtobufMessage(int clientVersion)
-      throws UnknownPipelineStateException {
+  public HddsProtos.Pipeline getProtobufMessage(int clientVersion) {
     return getProtobufMessage(clientVersion, Collections.emptySet());
   }
 
-  public HddsProtos.Pipeline getProtobufMessage(int clientVersion, Set<DatanodeDetails.Port.Name> filterPorts)
-      throws UnknownPipelineStateException {
-
+  public HddsProtos.Pipeline getProtobufMessage(int clientVersion, Set<DatanodeDetails.Port.Name> filterPorts) {
     List<HddsProtos.DatanodeDetailsProto> members = new ArrayList<>();
     List<Integer> memberReplicaIndexes = new ArrayList<>();
 
@@ -426,8 +423,7 @@ public HddsProtos.Pipeline getProtobufMessage(int clientVersion, Set<DatanodeDetails.Port.Name> filterPorts)
     return builder.build();
   }
 
-  private static Pipeline getFromProtobufSetCreationTimestamp(
-      HddsProtos.Pipeline proto) throws UnknownPipelineStateException {
+  private static Pipeline getFromProtobufSetCreationTimestamp(HddsProtos.Pipeline proto) {
     return toBuilder(proto)
         .setCreateTimestamp(Instant.now())
         .build();
@@ -441,9 +437,8 @@ public Builder toBuilder() {
     return newBuilder(this);
   }
 
-  public static Builder toBuilder(HddsProtos.Pipeline pipeline)
-      throws UnknownPipelineStateException {
-    Preconditions.checkNotNull(pipeline, "Pipeline is null");
+  public static Builder toBuilder(HddsProtos.Pipeline pipeline) {
+    Objects.requireNonNull(pipeline, "pipeline == null");
 
     Map<DatanodeDetails, Integer> nodes = new LinkedHashMap<>();
     int index = 0;
@@ -486,8 +481,7 @@ public static Builder toBuilder(HddsProtos.Pipeline pipeline)
         .setCreateTimestamp(pipeline.getCreationTimeStamp());
   }
 
-  public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline)
-      throws UnknownPipelineStateException {
+  public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) {
     return toBuilder(pipeline).build();
   }
 
@@ -648,10 +642,10 @@ public Builder setReplicaIndexes(Map<DatanodeDetails, Integer> indexes) {
     }
 
     public Pipeline build() {
-      Preconditions.checkNotNull(id);
-      Preconditions.checkNotNull(replicationConfig);
-      Preconditions.checkNotNull(state);
-      Preconditions.checkNotNull(nodeStatus);
+      Objects.requireNonNull(id, "id == null");
+      Objects.requireNonNull(replicationConfig, "replicationConfig == null");
+      Objects.requireNonNull(state, "state == null");
+      Objects.requireNonNull(nodeStatus, "nodeStatus == null");
 
       if (nodeOrder != null && !nodeOrder.isEmpty()) {
         List<DatanodeDetails> nodesWithOrder = new ArrayList<>();
@@ -683,31 +677,29 @@ public Pipeline build() {
   public enum PipelineState {
     ALLOCATED, OPEN, DORMANT, CLOSED;
 
-    public static PipelineState fromProtobuf(HddsProtos.PipelineState state)
-        throws UnknownPipelineStateException {
-      Preconditions.checkNotNull(state, "Pipeline state is null");
+    public static PipelineState fromProtobuf(HddsProtos.PipelineState state) {
+      Objects.requireNonNull(state, "state == null");
       switch (state) {
       case PIPELINE_ALLOCATED: return ALLOCATED;
       case PIPELINE_OPEN: return OPEN;
       case PIPELINE_DORMANT: return DORMANT;
       case PIPELINE_CLOSED: return CLOSED;
       default:
-        throw new UnknownPipelineStateException(
-            "Pipeline state: " + state + " is not recognized.");
+        throw new IllegalArgumentException("Unexpected value " + state
+            + " from " + state.getClass());
       }
     }
 
-    public static HddsProtos.PipelineState getProtobuf(PipelineState state)
-        throws UnknownPipelineStateException {
-      Preconditions.checkNotNull(state, "Pipeline state is null");
+    public static HddsProtos.PipelineState getProtobuf(PipelineState state) {
+      Objects.requireNonNull(state, "state == null");
       switch (state) {
       case ALLOCATED: return HddsProtos.PipelineState.PIPELINE_ALLOCATED;
       case OPEN: return HddsProtos.PipelineState.PIPELINE_OPEN;
       case DORMANT: return HddsProtos.PipelineState.PIPELINE_DORMANT;
       case CLOSED: return HddsProtos.PipelineState.PIPELINE_CLOSED;
       default:
-        throw new UnknownPipelineStateException(
-            "Pipeline state: " + state + " is not recognized.");
+        throw new IllegalArgumentException("Unexpected value " + state
+            + " from " + state.getClass());
       }
     }
   }
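
Two notes on the Pipeline changes. The null checks are behavior-preserving:
Objects.requireNonNull throws NullPointerException just as Guava's
Preconditions.checkNotNull did, only without the Guava dependency and with an
explicit "name == null" message. The state mapping, however, changes failure
mode: an unrecognized HddsProtos.PipelineState now surfaces as an unchecked
IllegalArgumentException rather than the checked (and now deleted)
UnknownPipelineStateException. A hedged caller sketch, a hypothetical wrapper
that is not part of the diff:

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

final class StateMappingSketch {
  static Pipeline.PipelineState toLocal(HddsProtos.PipelineState protoState) {
    try {
      return Pipeline.PipelineState.fromProtobuf(protoState);
    } catch (IllegalArgumentException e) {
      // Opt-in handling, e.g. for a state added by a newer proto definition;
      // callers that do nothing now get a fast unchecked failure instead of
      // a checked exception they were forced to declare.
      throw new IllegalStateException("Unsupported pipeline state: " + protoState, e);
    }
  }
}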

This file was deleted. (From the imports removed above, it is evidently org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException.)

Pipeline serialization test (file name not captured in this view)

@@ -71,7 +71,7 @@ public void getProtobufMessageEC() throws IOException {
   }
 
   @Test
-  public void testReplicaIndexesSerialisedCorrectly() throws IOException {
+  public void testReplicaIndexesSerialisedCorrectly() {
     Pipeline pipeline = MockPipeline.createEcPipeline();
     HddsProtos.Pipeline protobufMessage = pipeline.getProtobufMessage(1);
     Pipeline reloadedPipeline = Pipeline.getFromProtobuf(protobufMessage);

StorageContainerLocationProtocolClientSideTranslatorPB.java

@@ -27,7 +27,6 @@
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -159,12 +158,6 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
   private final StorageContainerLocationProtocolPB rpcProxy;
   private final SCMContainerLocationFailoverProxyProvider fpp;
 
-  /**
-   * This is used to check if 'leader' or 'follower' exists,
-   * in order to confirm whether we have enabled Ratis.
-   */
-  private final List<String> scmRatisRolesToCheck = Arrays.asList("leader", "follower");
-
   /**
    * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB.
    *
@@ -376,12 +369,7 @@ public List<ContainerWithPipeline> getExistContainerWithPipelinesInBatch(
         .getContainerWithPipelinesList();
 
     for (HddsProtos.ContainerWithPipeline cp : protoCps) {
-      try {
-        cps.add(ContainerWithPipeline.fromProtobuf(cp));
-      } catch (IOException uex) {
-        // "fromProtobuf" may throw an exception
-        // do nothing , just go ahead
-      }
+      cps.add(ContainerWithPipeline.fromProtobuf(cp));
     }
     return cps;
   }
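
One semantic shift worth flagging in getExistContainerWithPipelinesInBatch:
the old loop swallowed per-entry conversion failures, so a malformed entry
was silently dropped; the new loop lets the (now unchecked) failure propagate
and fail the whole batch call. If the old lenient behavior were ever wanted
back, a caller would have to opt in explicitly, roughly like this
hypothetical variant that is not in the diff:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

final class LenientBatchSketch {
  static List<ContainerWithPipeline> convertAll(List<HddsProtos.ContainerWithPipeline> protoCps) {
    List<ContainerWithPipeline> cps = new ArrayList<>();
    for (HddsProtos.ContainerWithPipeline cp : protoCps) {
      try {
        cps.add(ContainerWithPipeline.fromProtobuf(cp));
      } catch (IllegalArgumentException e) {
        // Entry carries an unrecognized pipeline state: drop it, keep going.
      }
    }
    return cps;
  }
}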

OmKeyLocationInfo.java

@@ -19,7 +19,6 @@
 
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;
 import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
@@ -101,42 +100,33 @@ public KeyLocation getProtobuf(boolean ignorePipeline, int clientVersion) {
         .setCreateVersion(getCreateVersion())
         .setPartNumber(getPartNumber());
     if (!ignorePipeline) {
-      try {
-        Token<OzoneBlockTokenIdentifier> token = getToken();
-        if (token != null) {
-          builder.setToken(OMPBHelper.protoFromToken(token));
-        }
-
-        // Pipeline can be null when a key is created with override on a
-        // versioning-enabled bucket: for older versions of blocks we do not
-        // need to return the pipeline as part of createKey, and we do not
-        // refresh the pipeline there, so for older block versions the
-        // pipeline can be null. Key create also never needs to return
-        // pipeline info for older versions of blocks, irrespective of
-        // versioning.
-
-        // Currently, we do not completely support bucket versioning.
-        // TODO: this needs to be revisited when bucket versioning
-        // implementation is handled.
-
-        Pipeline pipeline = getPipeline();
-        if (pipeline != null) {
-          builder.setPipeline(pipeline.getProtobufMessage(clientVersion));
-        }
-      } catch (UnknownPipelineStateException e) {
-        //TODO: fix me: we should not return KeyLocation without pipeline.
-      }
+      Token<OzoneBlockTokenIdentifier> token = getToken();
+      if (token != null) {
+        builder.setToken(OMPBHelper.protoFromToken(token));
+      }
+
+      // Pipeline can be null when a key is created with override on a
+      // versioning-enabled bucket: for older versions of blocks we do not
+      // need to return the pipeline as part of createKey, and we do not
+      // refresh the pipeline there, so for older block versions the
+      // pipeline can be null. Key create also never needs to return
+      // pipeline info for older versions of blocks, irrespective of
+      // versioning.
+
+      // Currently, we do not completely support bucket versioning.
+      // TODO: this needs to be revisited when bucket versioning
+      // implementation is handled.
+
+      Pipeline pipeline = getPipeline();
+      if (pipeline != null) {
+        builder.setPipeline(pipeline.getProtobufMessage(clientVersion));
+      }
     }
     return builder.build();
   }
 
   private static Pipeline getPipeline(KeyLocation keyLocation) {
-    try {
-      return keyLocation.hasPipeline() ?
-          Pipeline.getFromProtobuf(keyLocation.getPipeline()) : null;
-    } catch (UnknownPipelineStateException e) {
-      return null;
-    }
+    return keyLocation.hasPipeline() ? Pipeline.getFromProtobuf(keyLocation.getPipeline()) : null;
   }
 
   public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
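
Finally, the simplified getPipeline helper keeps the null contract for an
absent pipeline while letting a malformed one fail fast. A consumer-side
sketch with names taken from the diff: readers must keep guarding for null,
since older block versions may legitimately carry no pipeline, but they will
no longer see null for a pipeline that merely failed to parse.

import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;

final class KeyLocationPipelineSketch {
  static Pipeline pipelineOrNull(KeyLocation keyLocation) {
    // Absent pipeline -> null (old behavior kept); unparseable pipeline ->
    // unchecked exception (new behavior, previously swallowed into null).
    return keyLocation.hasPipeline()
        ? Pipeline.getFromProtobuf(keyLocation.getPipeline())
        : null;
  }
}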