MoveDataNodePair.java (new file)
@@ -0,0 +1,71 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/


package org.apache.hadoop.hdds.scm.container.common.helpers;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.ratis.util.Preconditions;

import java.io.IOException;

/**
 * MoveDataNodePair encapsulates the source and target
 * datanodes of a move operation.
 */
public class MoveDataNodePair {
  /**
   * Source datanode of the current move operation.
   */
  private final DatanodeDetails src;

  /**
   * Target datanode of the current move operation.
   */
  private final DatanodeDetails tgt;

public MoveDataNodePair(DatanodeDetails src, DatanodeDetails tgt) {
this.src = src;
this.tgt = tgt;
}

public DatanodeDetails getTgt() {
return tgt;
}

public DatanodeDetails getSrc() {
return src;
}

public HddsProtos.MoveDataNodePairProto getProtobufMessage(int clientVersion)
throws IOException {
HddsProtos.MoveDataNodePairProto.Builder builder =
HddsProtos.MoveDataNodePairProto.newBuilder()
.setSrc(src.toProto(clientVersion))
.setTgt(tgt.toProto(clientVersion));
return builder.build();
}

public static MoveDataNodePair getFromProtobuf(
HddsProtos.MoveDataNodePairProto mdnpp) {
Preconditions.assertNotNull(mdnpp, "MoveDataNodePairProto is null");
DatanodeDetails src = DatanodeDetails.getFromProtoBuf(mdnpp.getSrc());
DatanodeDetails tgt = DatanodeDetails.getFromProtoBuf(mdnpp.getTgt());
return new MoveDataNodePair(src, tgt);
}
}
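
For reference, a minimal sketch (not part of this change) of how the new helper round-trips through the protobuf message it defines; the clientVersion value is just an assumed placeholder, and only methods shown above are used:

  // Sketch only: serialize a MoveDataNodePair and rebuild it from the wire form.
  static MoveDataNodePair roundTrip(DatanodeDetails src, DatanodeDetails tgt,
      int clientVersion) throws IOException {
    MoveDataNodePair pair = new MoveDataNodePair(src, tgt);
    HddsProtos.MoveDataNodePairProto proto = pair.getProtobufMessage(clientVersion);
    return MoveDataNodePair.getFromProtobuf(proto);
  }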
SCMMetadataStore.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair;
import org.apache.hadoop.hdds.security.x509.certificate.CertInfo;
import org.apache.hadoop.hdds.utils.DBStoreHAManager;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -147,4 +148,9 @@ public interface SCMMetadataStore extends DBStoreHAManager {
* Table that maintains sequence id information.
*/
Table<String, Long> getSequenceIdTable();

  /**
   * Table that maintains container move information, keyed by container id.
   */
Table<ContainerID, MoveDataNodePair> getMoveTable();
}
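
For context, a minimal sketch (again not part of the change) of how a caller might persist an in-flight move through the new table so it survives an SCM restart or failover; recordMove and store are illustrative assumptions, while getMoveTable, Table#put and MoveDataNodePair come from this change and the existing hdds Table API:

  // Sketch only: record a pending container move under its container id.
  void recordMove(SCMMetadataStore store, ContainerID id,
      DatanodeDetails src, DatanodeDetails tgt) throws IOException {
    store.getMoveTable().put(id, new MoveDataNodePair(src, tgt));
  }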
5 changes: 5 additions & 0 deletions hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -62,6 +62,11 @@ message ExtendedDatanodeDetailsProto {
optional string buildDate = 5;
}

message MoveDataNodePairProto {
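  // source and target datanodes of a container move; wire form of MoveDataNodePair.java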
required DatanodeDetailsProto src = 1;
required DatanodeDetailsProto tgt = 2;
}

/**
Proto message encapsulating information required to uniquely identify an
OzoneManager.
@@ -26,6 +26,7 @@ enum RequestType {
BLOCK = 3;
SEQUENCE_ID = 4;
CERT_STORE = 5;
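// presumably routes container-move bookkeeping through the SCM HA state machine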
MOVE = 6;
}

message Method {
BlockManagerImpl.java
@@ -32,10 +32,8 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.ScmConfig;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -69,7 +67,6 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {

private final StorageContainerManager scm;
private final PipelineManager pipelineManager;
private final ContainerManagerV2 containerManager;
private final WritableContainerFactory writableContainerFactory;

private final long containerSize;
@@ -78,7 +75,6 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
private final SCMBlockDeletingService blockDeletingService;

private ObjectName mxBean;
private final PipelineChoosePolicy pipelineChoosePolicy;
private final SequenceIdGenerator sequenceIdGen;
private ScmBlockDeletingServiceMetrics metrics;
/**
@@ -94,8 +90,6 @@ public BlockManagerImpl(final ConfigurationSource conf,
Objects.requireNonNull(scm, "SCM cannot be null");
this.scm = scm;
this.pipelineManager = scm.getPipelineManager();
this.containerManager = scm.getContainerManager();
this.pipelineChoosePolicy = scm.getPipelineChoosePolicy();
this.sequenceIdGen = scm.getSequenceIdGen();
this.containerSize = (long)conf.getStorageSize(
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
@@ -123,7 +117,7 @@ public BlockManagerImpl(final ConfigurationSource conf,
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
blockDeletingService =
new SCMBlockDeletingService(deletedBlockLog, containerManager,
new SCMBlockDeletingService(deletedBlockLog,
scm.getScmNodeManager(), scm.getEventQueue(), scm.getScmContext(),
scm.getSCMServiceManager(), svcInterval, serviceTimeout, conf,
metrics);
SCMBlockDeletingService.java
@@ -29,7 +29,6 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.hdds.scm.ScmConfig;
import org.apache.hadoop.hdds.scm.container.ContainerManagerV2;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.ha.SCMService;
@@ -67,7 +66,6 @@ public class SCMBlockDeletingService extends BackgroundService

private static final int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 1;
private final DeletedBlockLog deletedBlockLog;
private final ContainerManagerV2 containerManager;
private final NodeManager nodeManager;
private final EventPublisher eventPublisher;
private final SCMContext scmContext;
@@ -83,14 +81,13 @@ public class SCMBlockDeletingService extends BackgroundService

@SuppressWarnings("parameternumber")
public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog,
ContainerManagerV2 containerManager, NodeManager nodeManager,
NodeManager nodeManager,
EventPublisher eventPublisher, SCMContext scmContext,
SCMServiceManager serviceManager, Duration interval, long serviceTimeout,
ConfigurationSource conf, ScmBlockDeletingServiceMetrics metrics) {
super("SCMBlockDeletingService", interval.toMillis(), TimeUnit.MILLISECONDS,
BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
this.deletedBlockLog = deletedBlockLog;
this.containerManager = containerManager;
this.nodeManager = nodeManager;
this.eventPublisher = eventPublisher;
this.scmContext = scmContext;
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.container;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

import java.util.Set;

import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;