diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 4cd769f4d245..f39755ffe8fc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -23,6 +23,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -31,6 +32,7 @@
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -80,6 +82,18 @@ public class StateContext {
   private boolean shutdownGracefully = false;
   private final AtomicLong threadPoolNotAvailableCount;
 
+  /**
+   * Term of the latest leader SCM, extracted from SCMCommand.
+   *
+   * Only a leader SCM (whether latest or stale) can send out SCMCommands,
+   * and it saves its term in each SCMCommand. Since the latest leader SCM
+   * always has the highest term, the term can be used to detect SCMCommands
+   * sent by a stale leader SCM.
+   *
+   * In non-HA mode, the term of an SCMCommand is 0.
+   */
+  private Optional<Long> termOfLeaderSCM = Optional.empty();
+
   /**
    * Starting with a 2 sec heartbeat frequency which will be updated to the
    * real HB frequency after scm registration. With this method the
@@ -470,6 +484,65 @@ public void execute(ExecutorService service, long time, TimeUnit unit)
     }
   }
 
+  /**
+   * After startup, the datanode must detect the latest leader SCM before
+   * handling any SCMCommand, so that a stale leader SCM cannot disturb it.
+   *
+   * The rule is: once a majority of SCMs are in HEARTBEAT state and the
+   * datanode has heard from the leader SCM (commandQueue is not empty), it
+   * initializes termOfLeaderSCM with the max term found in commandQueue.
+   *
+   * The init process also works in non-HA mode. In that case, the term of
+   * every SCMCommand is 0.
+   */
+  private void initTermOfLeaderSCM() {
+    // Only init once.
+    if (termOfLeaderSCM.isPresent()) {
+      return;
+    }
+
+    AtomicInteger scmNum = new AtomicInteger(0);
+    AtomicInteger activeScmNum = new AtomicInteger(0);
+
+    getParent().getConnectionManager().getValues()
+        .forEach(endpoint -> {
+          if (endpoint.isPassive()) {
+            return;
+          }
+          scmNum.incrementAndGet();
+          if (endpoint.getState()
+              == EndpointStateMachine.EndPointStates.HEARTBEAT) {
+            activeScmNum.incrementAndGet();
+          }
+        });
+
+    // A majority of SCMs should be in HEARTBEAT state.
+    if (activeScmNum.get() < scmNum.get() / 2 + 1) {
+      return;
+    }
+
+    // If commandQueue is not empty, init termOfLeaderSCM
+    // with the largest term found in commandQueue.
+    commandQueue.stream()
+        .mapToLong(SCMCommand::getTerm)
+        .max()
+        .ifPresent(term -> termOfLeaderSCM = Optional.of(term));
+  }
+
+  /**
+   * Monotonically increase termOfLeaderSCM.
+   * Always record the latest term it has seen.
+   */
+  private void updateTermOfLeaderSCM(SCMCommand command) {
+    if (!termOfLeaderSCM.isPresent()) {
+      LOG.error("termOfLeaderSCM should be initialized before it is updated.");
+      return;
+    }
+
+    termOfLeaderSCM = Optional.of(
+        Long.max(termOfLeaderSCM.get(), command.getTerm()));
+  }
+
   /**
    * Returns the next command or null if it is empty.
    *
@@ -478,7 +551,26 @@ public void execute(ExecutorService service, long time, TimeUnit unit)
   public SCMCommand getNextCommand() {
     lock.lock();
     try {
-      return commandQueue.poll();
+      initTermOfLeaderSCM();
+      if (!termOfLeaderSCM.isPresent()) {
+        return null; // not ready yet
+      }
+
+      while (true) {
+        SCMCommand command = commandQueue.poll();
+        if (command == null) {
+          return null;
+        }
+
+        updateTermOfLeaderSCM(command);
+        if (command.getTerm() == termOfLeaderSCM.get()) {
+          return command;
+        }
+
+        LOG.warn("Detected and dropped a SCMCommand {} from a stale leader"
+            + " SCM, stale term {}, latest term {}.",
+            command, command.getTerm(), termOfLeaderSCM.get());
+      }
     } finally {
       lock.unlock();
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index da2034d93c2d..eac7b37e3383 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -272,6 +272,9 @@ private void processResponse(SCMHeartbeatResponseProto response,
       DeleteBlocksCommand db = DeleteBlocksCommand
           .getFromProtobuf(
               commandResponseProto.getDeleteBlocksCommandProto());
+      if (commandResponseProto.hasTerm()) {
+        db.setTerm(commandResponseProto.getTerm());
+      }
       if (!db.blocksTobeDeleted().isEmpty()) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(DeletedContainerBlocksSummary
@@ -285,6 +288,9 @@ private void processResponse(SCMHeartbeatResponseProto response,
       CloseContainerCommand closeContainer =
           CloseContainerCommand.getFromProtobuf(
               commandResponseProto.getCloseContainerCommandProto());
+      if (commandResponseProto.hasTerm()) {
+        closeContainer.setTerm(commandResponseProto.getTerm());
+      }
       if (LOG.isDebugEnabled()) {
         LOG.debug("Received SCM container close request for container {}",
             closeContainer.getContainerID());
@@ -295,6 +301,9 @@ private void processResponse(SCMHeartbeatResponseProto response,
       ReplicateContainerCommand replicateContainerCommand =
           ReplicateContainerCommand.getFromProtobuf(
               commandResponseProto.getReplicateContainerCommandProto());
+      if (commandResponseProto.hasTerm()) {
+        replicateContainerCommand.setTerm(commandResponseProto.getTerm());
+      }
       if (LOG.isDebugEnabled()) {
         LOG.debug("Received SCM container replicate request for container {}",
             replicateContainerCommand.getContainerID());
@@ -305,6 +314,9 @@ private void processResponse(SCMHeartbeatResponseProto response,
       DeleteContainerCommand deleteContainerCommand =
           DeleteContainerCommand.getFromProtobuf(
               commandResponseProto.getDeleteContainerCommandProto());
+      if (commandResponseProto.hasTerm()) {
+        deleteContainerCommand.setTerm(commandResponseProto.getTerm());
+      }
       if (LOG.isDebugEnabled()) {
         LOG.debug("Received SCM delete container request for container {}",
             deleteContainerCommand.getContainerID());
@@ -315,6 +327,9 @@ private void processResponse(SCMHeartbeatResponseProto response,
       CreatePipelineCommand createPipelineCommand =
           CreatePipelineCommand.getFromProtobuf(
               commandResponseProto.getCreatePipelineCommandProto());
+      if (commandResponseProto.hasTerm()) {
+        createPipelineCommand.setTerm(commandResponseProto.getTerm());
+      }
       if (LOG.isDebugEnabled()) {
         LOG.debug("Received SCM create pipeline request {}",
             createPipelineCommand.getPipelineID());
@@ -325,6 +340,9 @@ private void processResponse(SCMHeartbeatResponseProto response,
       ClosePipelineCommand closePipelineCommand =
           ClosePipelineCommand.getFromProtobuf(
               commandResponseProto.getClosePipelineCommandProto());
+      if (commandResponseProto.hasTerm()) {
+        closePipelineCommand.setTerm(commandResponseProto.getTerm());
+      }
       if (LOG.isDebugEnabled()) {
         LOG.debug("Received SCM close pipeline request {}",
             closePipelineCommand.getPipelineID());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index a44ef384362b..5fd1690c1f72 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Consumer;
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
@@ -85,6 +86,7 @@ public class OzoneContainer {
   private List dataScanners;
   private final BlockDeletingService blockDeletingService;
   private final GrpcTlsConfig tlsClientConfig;
+  private final AtomicBoolean isStarted;
 
   /**
    * Construct OzoneContainer object.
@@ -152,6 +154,8 @@ public OzoneContainer(DatanodeDetails datanodeDetails, ConfigurationSource
         TimeUnit.MILLISECONDS, config);
     tlsClientConfig = RatisHelper.createTlsClientConfig(
         secConf, certClient != null ? certClient.getCACertificate() : null);
+
+    isStarted = new AtomicBoolean(false);
   }
 
   public GrpcTlsConfig getTlsClientConfig() {
@@ -240,6 +244,10 @@ private void stopContainerScrub() {
    * @throws IOException
    */
   public void start(String scmId) throws IOException {
+    if (!isStarted.compareAndSet(false, true)) {
+      LOG.info("Ignoring start call; OzoneContainer is already started.");
+      return;
+    }
     LOG.info("Attempting to start container services.");
     startContainerScrub();
     writeChannel.start();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
index 3c4e05b424af..4d87bb096cb6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
@@ -30,7 +30,13 @@
  */
 public abstract class SCMCommand implements IdentifiableEventPayload {
-  private long id;
+  private final long id;
+
+  // In HA mode, holds the term of the underlying RaftServer iff the
+  // current SCM is a leader; otherwise holds term 0.
+  // Note that the first elected leader starts from term 1; term 0,
+  // the initial value of currentTerm, is never used in HA mode.
+  private long term = 0;
 
   SCMCommand() {
     this.id = HddsIdFactory.getLongId();
@@ -59,4 +65,18 @@ public long getId() {
     return id;
   }
 
+  /**
+   * Get the term of this command.
+   * @return term
+   */
+  public long getTerm() {
+    return term;
+  }
+
+  /**
+   * Set the term of this command.
+   */
+  public void setTerm(long term) {
+    this.term = term;
+  }
 }
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 4f610ff24b1a..973789a35369 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -303,6 +303,12 @@ message SCMCommandProto {
   optional ReplicateContainerCommandProto replicateContainerCommandProto = 6;
   optional CreatePipelineCommandProto createPipelineCommandProto = 7;
   optional ClosePipelineCommandProto closePipelineCommandProto = 8;
+
+  // In HA mode, holds the term of the underlying RaftServer iff the
+  // current SCM is a leader; otherwise holds term 0.
+  // Note that the first elected leader starts from term 1; term 0,
+  // the initial value of currentTerm, is never used in HA mode.
+  optional uint64 term = 15;
 }
 
 /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
index 8ee26a25df03..0fd5e8276045 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
@@ -22,6 +22,7 @@
 import org.apache.ratis.protocol.exceptions.NotLeaderException;
 
 import java.io.IOException;
+import java.util.Optional;
 
 /**
  * SCMHAManager provides HA service for SCM.
@@ -34,9 +35,13 @@ public interface SCMHAManager {
   void start() throws IOException;
 
   /**
-   * Returns true if the current SCM is the leader.
+   * In HA mode, returns an Optional holding the term of the underlying
+   * RaftServer iff the current SCM is in the leader role; otherwise
+   * returns an empty Optional.
+   *
+   * In non-HA mode, returns an Optional holding term 0.
    */
-  boolean isLeader();
+  Optional<Long> isLeader();
 
   /**
    * Returns RatisServer instance associated with the SCM instance.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
index 33f408d6b752..5271ac6f8e90 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
@@ -21,6 +21,7 @@
 import java.util.List;
 import java.util.stream.Collectors;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.ratis.proto.RaftProtos;
 import org.apache.ratis.protocol.RaftGroupMemberId;
 import org.apache.ratis.protocol.RaftPeer;
 import org.apache.ratis.protocol.RaftPeerId;
@@ -32,6 +33,7 @@
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.Optional;
 
 /**
  * SCMHAManagerImpl uses Apache Ratis for HA implementation. We will have 2N+1
@@ -70,29 +72,28 @@ public void start() throws IOException {
    * {@inheritDoc}
    */
   @Override
-  public boolean isLeader() {
+  public Optional<Long> isLeader() {
     if (!SCMHAUtils.isSCMHAEnabled(conf)) {
       // When SCM HA is not enabled, the current SCM is always the leader.
-      return true;
+      return Optional.of((long)0);
     }
 
     RaftServer server = ratisServer.getServer();
     Preconditions.checkState(server instanceof RaftServerProxy);
-    RaftServerImpl serverImpl = null;
     try {
       // SCM only has one raft group.
-      serverImpl = ((RaftServerProxy) server)
+      RaftServerImpl serverImpl = ((RaftServerProxy) server)
          .getImpl(ratisServer.getRaftGroupId());
       if (serverImpl != null) {
-        // Only when it's sure the current SCM is the leader, otherwise
-        // it should all return false.
-        return serverImpl.isLeader();
+        RaftProtos.RoleInfoProto roleInfoProto = serverImpl.getRoleInfoProto();
+        return roleInfoProto.hasLeaderInfo()
+            ? Optional.of(roleInfoProto.getLeaderInfo().getTerm())
+            : Optional.empty();
       }
     } catch (IOException ioe) {
       LOG.error("Fail to get RaftServer impl and therefore it's not clear "
          + "whether it's leader. ", ioe);
     }
-
-    return false;
+    return Optional.empty();
   }
 
   /**
@@ -104,11 +105,6 @@ public SCMRatisServer getRatisServer() {
   }
 
   private RaftPeerId getPeerIdFromRoleInfo(RaftServerImpl serverImpl) {
-    /*
-     TODO: Fix Me
-     Ratis API has changed.
-     RaftServerImpl#getRoleInfoProto is no more public.
-
     if (serverImpl.isLeader()) {
       return RaftPeerId.getRaftPeerId(
           serverImpl.getRoleInfoProto().getLeaderInfo().toString());
@@ -119,8 +115,6 @@ private RaftPeerId getPeerIdFromRoleInfo(RaftServerImpl serverImpl) {
     } else {
       return null;
     }
-    */
-    return null;
   }
 
   @Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 328f2712b5fe..89fd99ecd49f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -25,6 +25,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
 import java.util.Collections;
@@ -47,6 +48,7 @@
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -106,13 +108,16 @@ public class SCMNodeManager implements NodeManager {
       new ConcurrentHashMap<>();
   private final int numPipelinesPerMetadataVolume;
   private final int heavyNodeCriteria;
+  private final SCMHAManager scmhaManager;
 
   /**
    * Constructs SCM machine Manager.
    */
   public SCMNodeManager(OzoneConfiguration conf,
-      SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher,
-      NetworkTopology networkTopology) {
+      SCMStorageConfig scmStorageConfig,
+      EventPublisher eventPublisher,
+      NetworkTopology networkTopology,
+      SCMHAManager scmhaManager) {
     this.nodeStateManager = new NodeStateManager(conf, eventPublisher);
     this.version = VersionInfo.getLatestVersion();
     this.commandQueue = new CommandQueue();
@@ -138,6 +143,14 @@ public SCMNodeManager(OzoneConfiguration conf,
         ScmConfigKeys.OZONE_SCM_PIPELINE_PER_METADATA_VOLUME_DEFAULT);
     String dnLimit = conf.get(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT);
     this.heavyNodeCriteria = dnLimit == null ?
        0 : Integer.parseInt(dnLimit);
+    this.scmhaManager = scmhaManager;
+  }
+
+  public SCMNodeManager(OzoneConfiguration conf,
+      SCMStorageConfig scmStorageConfig,
+      EventPublisher eventPublisher,
+      NetworkTopology networkTopology) {
+    this(conf, scmStorageConfig, eventPublisher, networkTopology, null);
   }
 
   private void registerMXBean() {
@@ -658,6 +671,18 @@ public Set getContainers(DatanodeDetails datanodeDetails)
   // Refactor and remove all the usage of this method and delete this method.
   @Override
   public void addDatanodeCommand(UUID dnId, SCMCommand command) {
+    if (scmhaManager != null && command.getTerm() == 0) {
+      Optional<Long> termOpt = scmhaManager.isLeader();
+
+      if (!termOpt.isPresent()) {
+        LOG.warn("Not the leader, dropping SCMCommand {}.", command);
+        return;
+      }
+
+      LOG.warn("Fill in term {} for SCMCommand {}. This is not an accurate "
+          + "way to set the term of an SCMCommand.", termOpt.get(), command);
+      command.setTerm(termOpt.get());
+    }
     this.commandQueue.addCommand(dnId, command);
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
index 041c94179112..48fbdbff440f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
@@ -49,6 +49,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -637,13 +638,15 @@ public void setScmhaManager(SCMHAManager scmhaManager) {
   }
 
   /**
-   * Check if scm is current leader.
-   * @throws NotLeaderException when it's not the current leader.
+   * Return the term of the underlying RaftServer if this SCM is the leader.
+   * @throws NotLeaderException when the current SCM is not the leader.
    */
-  private void checkLeader() throws NotLeaderException {
-    if (!scmhaManager.isLeader()) {
+  private long checkLeader() throws NotLeaderException {
+    Optional<Long> termOpt = scmhaManager.isLeader();
+    if (!termOpt.isPresent()) {
       throw scmhaManager.triggerNotLeaderException();
     }
+    return termOpt.get();
   }
 
   private void setBackgroundPipelineCreator(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index a2953415cb38..b71f906dfa0f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -292,6 +292,11 @@ public SCMCommandProto getCommandResponse(SCMCommand cmd)
       throws IOException {
     SCMCommandProto.Builder builder =
         SCMCommandProto.newBuilder();
+
+    // In HA mode, this is the term of the current leader SCM.
+    // In non-HA mode, it is the default value 0.
+    builder.setTerm(cmd.getTerm());
+
     switch (cmd.getType()) {
     case reregisterCommand:
       return builder
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 5843d5afd847..501472d3bbe0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -425,7 +425,7 @@ private void initializeSystemManagers(OzoneConfiguration conf,
       scmNodeManager = configurator.getScmNodeManager();
     } else {
       scmNodeManager = new SCMNodeManager(
-          conf, scmStorageConfig, eventQueue, clusterMap);
+          conf, scmStorageConfig, eventQueue, clusterMap, scmHAManager);
     }
 
     placementMetrics = SCMContainerPlacementMetrics.create();
@@ -1027,7 +1027,7 @@ public ReplicationManager getReplicationManager() {
    * @return - if the current scm is the leader.
    */
   public boolean checkLeader() {
-    return scmHAManager.isLeader();
+    return scmHAManager.isLeader().isPresent();
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
index ac58438d9477..ab329a567039 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
@@ -24,6 +24,7 @@
 import java.util.EnumMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 
 import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
@@ -79,8 +80,8 @@ public void start() throws IOException {
    * {@inheritDoc}
    */
   @Override
-  public boolean isLeader() {
-    return isLeader;
+  public Optional<Long> isLeader() {
+    return isLeader ? Optional.of((long)0) : Optional.empty();
   }
 
   public void setIsLeader(boolean isLeader) {
diff --git a/pom.xml b/pom.xml
index 05c34d55bd41..d7f9a060ce5d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -79,7 +79,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     ${ozone.version}
-    1.1.0-913f5a4-SNAPSHOT
+    1.1.0-4573fb7-SNAPSHOT
     0.6.0-SNAPSHOT
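
Note (illustration only, not part of the patch): the core rule the datanode applies in StateContext.getNextCommand() above is "remember the highest leader term seen so far and drop any command carrying a smaller term." The minimal, self-contained Java sketch below shows just that rule; the class name StaleCommandFilter and its accept() method are hypothetical names invented for this example, while the real code operates on SCMCommand objects taken from the datanode's command queue.

import java.util.Optional;

/**
 * Hypothetical sketch of the term-based filtering rule: keep only
 * commands whose term matches the highest term observed so far.
 */
public final class StaleCommandFilter {
  // Highest leader term observed so far; empty until the first command.
  private Optional<Long> termOfLeaderSCM = Optional.empty();

  /** Returns true if a command with the given term should be handled. */
  public boolean accept(long commandTerm) {
    // Monotonically track the highest term seen so far.
    long latest = Math.max(termOfLeaderSCM.orElse(commandTerm), commandTerm);
    termOfLeaderSCM = Optional.of(latest);
    // A command from a stale leader carries a smaller term and is dropped.
    return commandTerm == latest;
  }

  public static void main(String[] args) {
    StaleCommandFilter filter = new StaleCommandFilter();
    System.out.println(filter.accept(1)); // true: first command, term 1
    System.out.println(filter.accept(2)); // true: newer leader, term 2
    System.out.println(filter.accept(1)); // false: stale leader, term 1 < 2
  }
}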