[new file]
@@ -22,7 +22,6 @@
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RATIS_SNAPSHOT_DIR;

import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import java.io.File;
@@ -83,24 +82,12 @@ private SCMHAUtils() {
// not used
}

// This will be removed in follow-up Jira. Ref. HDDS-11754
private static boolean isRatisEnabled = true;
public static boolean isSCMHAEnabled(ConfigurationSource conf) {
return isRatisEnabled;
}

public static void setRatisEnabled(boolean value) {
isRatisEnabled = value;
}

public static String getPrimordialSCM(ConfigurationSource conf) {
return conf.get(ScmConfigKeys.OZONE_SCM_PRIMORDIAL_NODE_ID_KEY);
}

public static boolean isPrimordialSCM(ConfigurationSource conf,
String selfNodeId, String hostName) {
// This should only be called if SCM HA is enabled.
Preconditions.checkArgument(isSCMHAEnabled(conf));
String primordialNode = getPrimordialSCM(conf);
return primordialNode != null && (primordialNode
.equals(selfNodeId) || primordialNode.equals(hostName));
[new file]
@@ -221,10 +221,6 @@ public static boolean checkVolume(StorageVolume volume, String scmId,
} else if (rootFiles.length == 1) {
// The one file is the version file.
// DN started for first time or this is a newly added volume.
// Either the SCM ID or cluster ID will be used in naming the
// volume's working directory, depending on the datanode's layout version.
workingDirName = VersionedDatanodeFeatures.ScmHA
.chooseContainerPathID(conf, scmId, clusterId);
try {
volume.createWorkingDir(workingDirName, dbVolumeSet);
} catch (IOException e) {
[new file]
@@ -20,7 +20,6 @@
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -110,20 +109,6 @@ public static String chooseContainerPathID(StorageVolume volume,
}
}

/**
* Choose whether to use SCM ID or cluster ID based on SCM HA
* finalization status and SCM HA configuration.
*/
public static String chooseContainerPathID(ConfigurationSource conf,
String scmID, String clusterID) {
boolean scmHAEnabled = SCMHAUtils.isSCMHAEnabled(conf);
if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled) {
return clusterID;
} else {
return scmID;
}
}

public static boolean upgradeVolumeIfNeeded(StorageVolume volume,
String clusterID) {
File clusterIDDir = new File(volume.getStorageDir(), clusterID);
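For context, a minimal self-contained sketch (not Ozone code; every name below is a stand-in) of the decision the deleted chooseContainerPathID(ConfigurationSource, ...) overload used to make. With SCM HA now unconditionally enabled, the branch always resolves to the cluster ID, which is why the overload can be dropped.

// Illustration only: stand-in for the removed overload's logic.
public class ContainerPathIdSketch {

  static String choose(boolean scmHaFinalized, boolean scmHaEnabled,
      String scmId, String clusterId) {
    // Old behavior: use the cluster ID once SCM HA is finalized or enabled,
    // otherwise fall back to the SCM ID.
    if (scmHaFinalized || scmHaEnabled) {
      return clusterId;
    }
    return scmId;
  }

  public static void main(String[] args) {
    // With SCM HA always on, the condition is always true,
    // so the method collapses to "return clusterId".
    System.out.println(choose(false, true, "scm-1", "CID-0001")); // CID-0001
  }
}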
[new file]
@@ -45,7 +45,6 @@
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.AddSCMRequest;
import org.apache.hadoop.hdds.scm.ScmInfo;
@@ -422,26 +421,13 @@ public static List<X509Certificate> buildCAX509List(ConfigurationSource conf) th
Collection<String> scmNodes = SCMHAUtils.getSCMNodeIds(conf);
SCMSecurityProtocolClientSideTranslatorPB scmSecurityProtocolClient =
HddsServerUtil.getScmSecurityClient(conf);
if (!SCMHAUtils.isSCMHAEnabled(conf)) {
List<String> caCertPemList = new ArrayList<>();
SCMGetCertResponseProto scmGetCertResponseProto =
scmSecurityProtocolClient.getCACert();
if (scmGetCertResponseProto.hasX509Certificate()) {
caCertPemList.add(scmGetCertResponseProto.getX509Certificate());
}
if (scmGetCertResponseProto.hasX509RootCACertificate()) {
caCertPemList.add(scmGetCertResponseProto.getX509RootCACertificate());
}
return OzoneSecurityUtil.convertToX509(caCertPemList);
int expectedCount = scmNodes.size() + 1;
if (scmNodes.size() > 1) {
return OzoneSecurityUtil.convertToX509(getCAListWithRetry(() -> waitForCACerts(
scmSecurityProtocolClient::listCACertificate,
expectedCount), waitDuration));
} else {
int expectedCount = scmNodes.size() + 1;
if (scmNodes.size() > 1) {
return OzoneSecurityUtil.convertToX509(getCAListWithRetry(() -> waitForCACerts(
scmSecurityProtocolClient::listCACertificate,
expectedCount), waitDuration));
} else {
return OzoneSecurityUtil.convertToX509(scmSecurityProtocolClient.listCACertificate());
}
return OzoneSecurityUtil.convertToX509(scmSecurityProtocolClient.listCACertificate());
}
}
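A hypothetical, self-contained sketch of the control flow that remains in buildCAX509List once the non-HA branch above is gone: the only decision left is single-SCM versus multi-SCM. The helper names and return values below are stand-ins, not the project's API; the expected count assumes one sub-CA certificate per SCM plus the root CA certificate.

import java.util.Arrays;
import java.util.List;

// Illustration only: simplified shape of the remaining logic.
public class CaListFlowSketch {

  static List<String> fetchCaCertPems(List<String> scmNodeIds) {
    // One sub-CA cert per SCM plus the root CA cert.
    int expectedCount = scmNodeIds.size() + 1;
    if (scmNodeIds.size() > 1) {
      // Multi-SCM: poll until all expected certificates are available
      // (stand-in for getCAListWithRetry(() -> waitForCACerts(...), waitDuration)).
      return waitForCerts(expectedCount);
    }
    // Single SCM: one listing call is enough.
    return Arrays.asList("subCA-1", "rootCA");
  }

  static List<String> waitForCerts(int expectedCount) {
    // Pretend the expected certificates eventually show up.
    return Arrays.asList("subCA-1", "subCA-2", "subCA-3", "rootCA");
  }

  public static void main(String[] args) {
    System.out.println(fetchCaCertPems(Arrays.asList("scm1", "scm2", "scm3")));
  }
}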

[new file]
@@ -33,7 +33,6 @@
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler;
import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -64,10 +63,8 @@ public DeletedBlockLogStateManagerImpl(ConfigurationSource conf,
this.deletedTable = deletedTable;
this.containerManager = containerManager;
this.transactionBuffer = txBuffer;
final boolean isRatisEnabled = SCMHAUtils.isSCMHAEnabled(conf);
this.deletingTxIDs = isRatisEnabled ? ConcurrentHashMap.newKeySet() : null;
this.skippingRetryTxIDs =
isRatisEnabled ? ConcurrentHashMap.newKeySet() : null;
this.deletingTxIDs = ConcurrentHashMap.newKeySet();
this.skippingRetryTxIDs = ConcurrentHashMap.newKeySet();
}

public TableIterator<Long, TypedTable.KeyValue<Long,
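For reference, ConcurrentHashMap.newKeySet() (JDK 8+) returns a thread-safe Set backed by a ConcurrentHashMap, which is why the now-unconditional initialization above needs no extra locking. A standalone sketch:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class ConcurrentKeySetSketch {
  public static void main(String[] args) throws InterruptedException {
    // Thread-safe Set<Long>, analogous to deletingTxIDs / skippingRetryTxIDs above.
    Set<Long> txIds = ConcurrentHashMap.newKeySet();

    // Safe to mutate concurrently without external synchronization.
    Thread t1 = new Thread(() -> { for (long i = 0; i < 1000; i++) txIds.add(i); });
    Thread t2 = new Thread(() -> { for (long i = 500; i < 1500; i++) txIds.add(i); });
    t1.start(); t2.start();
    t1.join(); t2.join();

    System.out.println(txIds.size()); // 1500
  }
}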
[new file]
@@ -36,7 +36,6 @@
import org.apache.hadoop.hdds.scm.RemoveSCMRequest;
import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer;
import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
import org.apache.hadoop.hdds.scm.metadata.SCMDBTransactionBufferImpl;
import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
import org.apache.hadoop.hdds.scm.security.SecretKeyManagerService;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -68,7 +67,7 @@ public class SCMHAManagerImpl implements SCMHAManager {
private final ConfigurationSource conf;
private final OzoneConfiguration ozoneConf;
private final SecurityConfig securityConfig;
private final DBTransactionBuffer transactionBuffer;
private final SCMHADBTransactionBufferImpl transactionBuffer;
private final SCMSnapshotProvider scmSnapshotProvider;
private final StorageContainerManager scm;
private ExitManager exitManager;
@@ -88,18 +87,10 @@ public SCMHAManagerImpl(final ConfigurationSource conf,
this.securityConfig = securityConfig;
this.scm = scm;
this.exitManager = new ExitManager();
if (SCMHAUtils.isSCMHAEnabled(conf)) {
this.transactionBuffer = new SCMHADBTransactionBufferImpl(scm);
this.ratisServer = new SCMRatisServerImpl(conf, scm,
(SCMHADBTransactionBuffer) transactionBuffer);
this.scmSnapshotProvider = newScmSnapshotProvider(scm);
grpcServer = new InterSCMGrpcProtocolService(conf, scm);
} else {
this.transactionBuffer = new SCMDBTransactionBufferImpl();
this.scmSnapshotProvider = null;
this.grpcServer = null;
this.ratisServer = null;
}
this.transactionBuffer = new SCMHADBTransactionBufferImpl(scm);
this.ratisServer = new SCMRatisServerImpl(conf, scm, transactionBuffer);
this.scmSnapshotProvider = newScmSnapshotProvider(scm);
this.grpcServer = new InterSCMGrpcProtocolService(conf, scm);

}

@@ -152,7 +143,7 @@ private void createStartTransactionBufferMonitor() {
TimeUnit.MILLISECONDS);
SCMHATransactionBufferMonitorTask monitorTask
= new SCMHATransactionBufferMonitorTask(
(SCMHADBTransactionBuffer) transactionBuffer, ratisServer, interval);
transactionBuffer, ratisServer, interval);
trxBufferMonitorService =
new BackgroundSCMService.Builder().setClock(scm.getSystemClock())
.setScmContext(scm.getScmContext())
@@ -180,9 +171,7 @@ public SCMSnapshotProvider getSCMSnapshotProvider() {

@Override
public SCMHADBTransactionBuffer asSCMHADBTransactionBuffer() {
Preconditions
.checkArgument(transactionBuffer instanceof SCMHADBTransactionBuffer);
return (SCMHADBTransactionBuffer)transactionBuffer;
return transactionBuffer;

}
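An illustration of the field-narrowing applied above, using hypothetical types only: once the transactionBuffer field is declared as the concrete HA buffer type, the instanceof precondition and the downcast in asSCMHADBTransactionBuffer become unnecessary.

// Hypothetical types; only the pattern mirrors the change above.
public class FieldNarrowingSketch {
  interface Buffer { }
  interface HaBuffer extends Buffer { void flush(); }
  static class HaBufferImpl implements HaBuffer {
    @Override public void flush() { System.out.println("flushed"); }
  }

  // Before: field declared with the broad type, so callers had to check and cast.
  static class Before {
    private final Buffer buffer = new HaBufferImpl();
    HaBuffer asHaBuffer() {
      if (!(buffer instanceof HaBuffer)) {       // old Preconditions.checkArgument(...)
        throw new IllegalArgumentException("not an HA buffer");
      }
      return (HaBuffer) buffer;
    }
  }

  // After: field declared with the concrete type; no check, no cast.
  static class After {
    private final HaBufferImpl buffer = new HaBufferImpl();
    HaBuffer asHaBuffer() {
      return buffer;
    }
  }

  public static void main(String[] args) {
    new Before().asHaBuffer().flush();
    new After().asHaBuffer().flush();
  }
}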

[new file]
@@ -58,7 +58,6 @@
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.common.Storage;
import org.apache.hadoop.ozone.ha.ConfUtils;
import org.apache.hadoop.ozone.util.OzoneNetUtils;
import org.slf4j.Logger;
@@ -145,23 +144,6 @@ public static SCMHANodeDetails loadDefaultConfig(
return new SCMHANodeDetails(scmNodeDetails, Collections.emptyList());
}

/** Validates SCM HA Config.
For Non Initialized SCM the value is true.
For Previously Initialized SCM the values are taken from the version file
<br>
Ratis SCM -> Non Ratis SCM is not supported.
This value is validated with the config provided.
**/
private static void validateSCMHAConfig(SCMStorageConfig scmStorageConfig,
OzoneConfiguration conf) {
Storage.StorageState state = scmStorageConfig.getState();
boolean scmHAEnableDefault = state == Storage.StorageState.INITIALIZED
? scmStorageConfig.isSCMHAEnabled()
: SCMHAUtils.isSCMHAEnabled(conf);
// If we have an initialized cluster, use the value from VERSION file.
SCMHAUtils.setRatisEnabled(scmHAEnableDefault);
}

public static SCMHANodeDetails loadSCMHAConfig(OzoneConfiguration conf,
SCMStorageConfig storageConfig)
throws IOException {
@@ -177,7 +159,6 @@ public static SCMHANodeDetails loadSCMHAConfig(OzoneConfiguration conf,
ScmConfigKeys.OZONE_SCM_DEFAULT_SERVICE_ID);

LOG.info("ServiceID for StorageContainerManager is {}", localScmServiceId);
validateSCMHAConfig(storageConfig, conf);
if (localScmServiceId == null) {
// There is no internal scm service id is being set, fall back to ozone
// .scm.service.ids.
[new file]
@@ -86,7 +86,6 @@
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
@@ -847,10 +846,6 @@ public ScmInfo getScmInfo() {
public void transferLeadership(String newLeaderId)
throws IOException {
getScm().checkAdminAccess(getRemoteUser(), false);
if (!SCMHAUtils.isSCMHAEnabled(getScm().getConfiguration())) {
throw new SCMException("SCM HA not enabled.", ResultCodes.INTERNAL_ERROR);
}

checkIfCertSignRequestAllowed(scm.getRootCARotationManager(),
false, config, "transferLeadership");
