From 48b9d222277f0e5e7b24a3fc29fb429ba8862f66 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Wed, 10 May 2023 15:01:36 +0200 Subject: [PATCH 1/5] HDDS-8581. Avoid random ports in integration tests --- .../hadoop/fs/ozone/TestOzoneFsHAURLs.java | 1 - .../apache/hadoop/ozone/MiniOzoneCluster.java | 46 ++++++++---- .../hadoop/ozone/MiniOzoneClusterImpl.java | 75 +++++++++---------- .../ozone/MiniOzoneClusterProvider.java | 22 +----- .../om/TestOmContainerLocationCache.java | 1 - .../om/TestOzoneManagerConfiguration.java | 2 - 6 files changed, 68 insertions(+), 79 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 1fd55a29d761..ab98e03d1aea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -109,7 +109,6 @@ public static void initClass() throws Exception { final String path = GenericTestUtils.getTempPath(omId); java.nio.file.Path metaDirPath = java.nio.file.Paths.get(path, "om-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 4d648ff3bd1a..bf51bd7012f7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -23,6 +23,7 @@ import java.util.OptionalInt; import java.util.UUID; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -310,8 +311,6 @@ abstract class Builder { protected Optional scmId = Optional.empty(); protected Optional omId = Optional.empty(); - protected Boolean randomContainerPort = true; - protected Boolean randomContainerStreamPort = true; protected Boolean enableContainerDatastream = true; protected Optional datanodeReservedSpace = Optional.empty(); protected Optional chunkSize = Optional.empty(); @@ -423,18 +422,6 @@ public Builder setOmId(String id) { return this; } - /** - * If set to true container service will be started in a random port. - * - * @param randomPort enable random port - * - * @return MiniOzoneCluster.Builder - */ - public Builder setRandomContainerPort(boolean randomPort) { - randomContainerPort = randomPort; - return this; - } - /** * Sets the number of HddsDatanodes to be started as part of * MiniOzoneCluster. @@ -650,4 +637,35 @@ public Builder setDnLayoutVersion(int layoutVersion) { */ public abstract MiniOzoneCluster build() throws IOException; } + + /** + * Helper class to get free port avoiding randomness. 
+ */ + class PortAllocator { + + private static final int MIN_PORT = 15000; + private static final int MAX_PORT = 64000; + private static final AtomicInteger NEXT_PORT = new AtomicInteger(MIN_PORT); + + private PortAllocator() { + // no instances + } + + static synchronized int getFreePort() { + int port = NEXT_PORT.getAndIncrement(); + if (port > MAX_PORT) { + NEXT_PORT.set(MIN_PORT); + port = NEXT_PORT.getAndIncrement(); + } + return port; + } + + static String localhostWithFreePort() { + return "127.0.0.1:" + getFreePort(); + } + + static String anyHostWithFreePort() { + return "0.0.0.0:" + getFreePort(); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 6e69ff91ddb3..27af65c313c7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.ConfigurationTarget; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -83,23 +84,19 @@ import org.apache.commons.io.FileUtils; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.RATIS; -import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.RATIS_ADMIN; -import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.RATIS_SERVER; -import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.REPLICATION; -import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.STANDALONE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_TASK_SAFEMODE_WAIT_THRESHOLD; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; +import static org.apache.hadoop.ozone.MiniOzoneCluster.PortAllocator.anyHostWithFreePort; +import static org.apache.hadoop.ozone.MiniOzoneCluster.PortAllocator.getFreePort; +import static org.apache.hadoop.ozone.MiniOzoneCluster.PortAllocator.localhostWithFreePort; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; import static 
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT; import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; @@ -399,24 +396,10 @@ private void waitForHddsDatanodeToStop(DatanodeDetails dn) @Override public void restartHddsDatanode(int i, boolean waitForDatanode) throws InterruptedException, TimeoutException { - HddsDatanodeService datanodeService = hddsDatanodes.get(i); + HddsDatanodeService datanodeService = hddsDatanodes.remove(i); stopDatanode(datanodeService); // ensure same ports are used across restarts. OzoneConfiguration config = datanodeService.getConf(); - DatanodeDetails dn = datanodeService.getDatanodeDetails(); - config.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false); - config.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false); - config.setInt(DFS_CONTAINER_IPC_PORT, - dn.getPort(STANDALONE).getValue()); - config.setInt(DFS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(RATIS).getValue()); - config.setInt(DFS_CONTAINER_RATIS_ADMIN_PORT, - dn.getPort(RATIS_ADMIN).getValue()); - config.setInt(DFS_CONTAINER_RATIS_SERVER_PORT, - dn.getPort(RATIS_SERVER).getValue()); - config.setFromObject(conf.getObject(ReplicationConfig.class) - .setPort(dn.getPort(REPLICATION).getValue())); - hddsDatanodes.remove(i); if (waitForDatanode) { // wait for node to be removed from SCM healthy node list. waitForHddsDatanodeToStop(datanodeService.getDatanodeDetails()); @@ -847,6 +830,7 @@ protected List createHddsDatanodes( List hddsDatanodes = new ArrayList<>(); for (int i = 0; i < numOfDatanodes; i++) { OzoneConfiguration dnConf = new OzoneConfiguration(conf); + configureDatanodePorts(dnConf); String datanodeBaseDir = path + "/datanode-" + Integer.toString(i); Path metaDir = Paths.get(datanodeBaseDir, "meta"); List dataDirs = new ArrayList<>(); @@ -903,10 +887,14 @@ private void configureLayoutVersionInDatanodes( } protected void configureSCM() { - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, + localhostWithFreePort()); + conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, + localhostWithFreePort()); + conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, + localhostWithFreePort()); + conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, + localhostWithFreePort()); conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers); conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "3s"); @@ -937,24 +925,30 @@ private void configureSCMheartbeat() { } private void configureOM() { - conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, localhostWithFreePort()); + conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, localhostWithFreePort()); + conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, + localhostWithFreePort()); + conf.setInt(OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, getFreePort()); conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers); } private void configureHddsDatanodes() { - conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, 
"0.0.0.0:0"); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - randomContainerPort); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - randomContainerPort); conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, enableContainerDatastream); - conf.setBoolean(DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, - randomContainerStreamPort); + } - conf.setFromObject(new ReplicationConfig().setPort(0)); + protected void configureDatanodePorts(ConfigurationTarget conf) { + conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, + anyHostWithFreePort()); + conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, + anyHostWithFreePort()); + conf.setInt(DFS_CONTAINER_IPC_PORT, getFreePort()); + conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, getFreePort()); + conf.setInt(DFS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); + conf.setInt(DFS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); + conf.setInt(DFS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); + conf.setFromObject(new ReplicationConfig().setPort(getFreePort())); } private void configureTrace() { @@ -984,11 +978,12 @@ protected void configureRecon() throws IOException { + "/ozone_recon_derby.db"); conf.setFromObject(dbConfig); - conf.set(OZONE_RECON_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - conf.set(OZONE_RECON_DATANODE_ADDRESS_KEY, "0.0.0.0:0"); + conf.set(OZONE_RECON_HTTP_ADDRESS_KEY, anyHostWithFreePort()); + conf.set(OZONE_RECON_DATANODE_ADDRESS_KEY, anyHostWithFreePort()); conf.set(OZONE_RECON_TASK_SAFEMODE_WAIT_THRESHOLD, "10s"); ConfigurationProvider.setConfiguration(conf); } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java index ab2405ab8cd0..cdd12ac841e3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java @@ -18,20 +18,17 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.HashSet; -import java.util.List; import java.util.Set; import java.util.UUID; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; import static java.util.concurrent.TimeUnit.SECONDS; @@ -218,18 +215,7 @@ private Thread createClusters() { MiniOzoneCluster cluster = null; try { builder.setClusterId(UUID.randomUUID().toString()); - - OzoneConfiguration newConf = new OzoneConfiguration(conf); - List portList = getFreePortList(4); - newConf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, - "127.0.0.1:" + portList.get(0)); - newConf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, - "127.0.0.1:" + portList.get(1)); - newConf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, - "127.0.0.1:" + portList.get(2)); - newConf.setInt(OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, - portList.get(3)); - builder.setConf(newConf); + builder.setConf(new OzoneConfiguration(conf)); cluster = builder.build(); cluster.waitForClusterToBeReady(); @@ -277,10 +263,4 @@ private void destroyRemainingClusters() { createdClusters.clear(); } - private List getFreePortList(int size) { - return 
org.apache.ratis.util.NetUtils.createLocalServerAddress(size) - .stream() - .map(inetSocketAddress -> inetSocketAddress.getPort()) - .collect(Collectors.toList()); - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index df7b3a78b239..5b91c1b1c055 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -153,7 +153,6 @@ public static void setUp() throws Exception { ExitUtils.disableSystemExit(); conf = new OzoneConfiguration(); - conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "127.0.0.1:0"); dir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java index 389217a3e2f6..e475c294550d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; @@ -77,7 +76,6 @@ public void init() throws IOException { final String path = GenericTestUtils.getTempPath(omId); Path metaDirPath = Paths.get(path, "om-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); conf.setTimeDuration(OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, RATIS_RPC_TIMEOUT, TimeUnit.MILLISECONDS); From ed0cb3ef5c0c5d753dbabb6c71acccaf97ba0645 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Thu, 11 May 2023 10:01:38 +0200 Subject: [PATCH 2/5] Replace port reservation in MiniOzoneHAClusterImpl with incremental ports --- .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 188 +++--------------- .../apache/hadoop/ozone/OzoneTestUtils.java | 23 --- 2 files changed, 27 insertions(+), 184 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 4cb535d62f0a..a0c0c22e91a2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -42,22 +42,15 @@ import org.apache.hadoop.ozone.recon.ReconServer; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; -import org.apache.ratis.util.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Closeable; import java.io.IOException; import java.net.BindException; -import 
java.net.ServerSocket; import java.util.ArrayList; -import java.util.HashMap; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.PrimitiveIterator; -import java.util.Queue; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Function; @@ -65,7 +58,8 @@ import static java.util.Collections.singletonList; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; -import static org.apache.hadoop.ozone.OzoneTestUtils.reservePorts; +import static org.apache.hadoop.ozone.MiniOzoneCluster.PortAllocator.getFreePort; +import static org.apache.hadoop.ozone.MiniOzoneCluster.PortAllocator.localhostWithFreePort; import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; /** @@ -299,13 +293,6 @@ public void waitForSCMToBeReady() }, 1000, waitForClusterToBeReadyTimeout); } - @Override - public void shutdown() { - super.shutdown(); - omhaService.releasePorts(); - scmhaService.releasePorts(); - } - @Override public void stop() { for (OzoneManager ozoneManager : this.omhaService.getServices()) { @@ -341,8 +328,7 @@ public void stopOzoneManager(String omNodeId) { } private static void configureOMPorts(ConfigurationTarget conf, - String omServiceId, String omNodeId, - ReservedPorts omPorts, ReservedPorts omRpcPorts) { + String omServiceId, String omNodeId) { String omAddrKey = ConfUtils.addKeySuffixes( OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId); @@ -353,14 +339,10 @@ private static void configureOMPorts(ConfigurationTarget conf, String omRatisPortKey = ConfUtils.addKeySuffixes( OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNodeId); - PrimitiveIterator.OfInt nodePorts = omPorts.assign(omNodeId); - PrimitiveIterator.OfInt rpcPorts = omRpcPorts.assign(omNodeId); - conf.set(omAddrKey, "127.0.0.1:" + rpcPorts.nextInt()); - conf.set(omHttpAddrKey, "127.0.0.1:" + nodePorts.nextInt()); - conf.set(omHttpsAddrKey, "127.0.0.1:" + nodePorts.nextInt()); - conf.setInt(omRatisPortKey, nodePorts.nextInt()); - - omRpcPorts.release(omNodeId); + conf.set(omAddrKey, localhostWithFreePort()); + conf.set(omHttpAddrKey, localhostWithFreePort()); + conf.set(omHttpsAddrKey, localhostWithFreePort()); + conf.setInt(omRatisPortKey, getFreePort()); } /** @@ -376,15 +358,6 @@ public static class Builder extends MiniOzoneClusterImpl.Builder { private List activeSCMs = new ArrayList<>(); private List inactiveSCMs = new ArrayList<>(); - // These port reservations are for servers started when the component - // (OM or SCM) is started. These are Ratis, HTTP and HTTPS. We also have - // another set of ports for RPC endpoints, which are started as soon as - // the component is created (in methods called by OzoneManager and - // StorageContainerManager constructors respectively). So we need to manage - // them separately, see initOMHAConfig() and initSCMHAConfig(). - private final ReservedPorts omPorts = new ReservedPorts(3); - private final ReservedPorts scmPorts = new ReservedPorts(3); - /** * Creates a new Builder. 
* @@ -486,7 +459,7 @@ protected OMHAService createOMService() throws IOException, if (omServiceId == null) { OzoneManager om = createOM(); om.start(); - return new OMHAService(singletonList(om), null, null, null); + return new OMHAService(singletonList(om), null, null); } List omList = Lists.newArrayList(); @@ -549,7 +522,7 @@ protected OMHAService createOMService() throws IOException, retryCount, e); } } - return new OMHAService(activeOMs, inactiveOMs, omServiceId, omPorts); + return new OMHAService(activeOMs, inactiveOMs, omServiceId); } /** @@ -560,7 +533,7 @@ protected SCMHAService createSCMService() if (scmServiceId == null) { StorageContainerManager scm = createSCM(); scm.start(); - return new SCMHAService(singletonList(scm), null, null, null); + return new SCMHAService(singletonList(scm), null, null); } List scmList = Lists.newArrayList(); @@ -628,7 +601,7 @@ protected SCMHAService createSCMService() } } - return new SCMHAService(activeSCMs, inactiveSCMs, scmServiceId, scmPorts); + return new SCMHAService(activeSCMs, inactiveSCMs, scmServiceId); } /** @@ -646,10 +619,6 @@ private void initSCMHAConfig() { StringBuilder scmNodesKeyValue = new StringBuilder(); StringBuilder scmNames = new StringBuilder(); - scmPorts.reserve(numOfSCMs); - ReservedPorts scmRpcPorts = new ReservedPorts(4); - scmRpcPorts.reserve(numOfSCMs); - for (int i = 1; i <= numOfSCMs; i++) { String scmNodeId = SCM_NODE_ID_PREFIX + i; scmNodesKeyValue.append(",").append(scmNodeId); @@ -674,32 +643,25 @@ private void initSCMHAConfig() { String scmGrpcPortKey = ConfUtils.addKeySuffixes( ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, scmServiceId, scmNodeId); - PrimitiveIterator.OfInt nodePorts = scmPorts.assign(scmNodeId); - PrimitiveIterator.OfInt rpcPorts = scmRpcPorts.assign(scmNodeId); conf.set(scmAddrKey, "127.0.0.1"); - conf.set(scmHttpAddrKey, "127.0.0.1:" + nodePorts.nextInt()); - conf.set(scmHttpsAddrKey, "127.0.0.1:" + nodePorts.nextInt()); + conf.set(scmHttpAddrKey, localhostWithFreePort()); + conf.set(scmHttpsAddrKey, localhostWithFreePort()); - int ratisPort = nodePorts.nextInt(); + int ratisPort = getFreePort(); conf.setInt(scmRatisPortKey, ratisPort); //conf.setInt("ozone.scm.ha.ratis.bind.port", ratisPort); - int dnPort = rpcPorts.nextInt(); + int dnPort = getFreePort(); conf.set(dnPortKey, "127.0.0.1:" + dnPort); scmNames.append(",localhost:").append(dnPort); - conf.set(ssClientKey, "127.0.0.1:" + rpcPorts.nextInt()); - conf.setInt(scmGrpcPortKey, rpcPorts.nextInt()); + conf.set(ssClientKey, localhostWithFreePort()); + conf.setInt(scmGrpcPortKey, getFreePort()); - int blockPort = rpcPorts.nextInt(); - conf.set(blockClientKey, "127.0.0.1:" + blockPort); + String blockAddress = localhostWithFreePort(); + conf.set(blockClientKey, blockAddress); conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, - "127.0.0.1:" + blockPort); - - if (i <= numOfActiveSCMs) { - scmPorts.release(scmNodeId); - } - scmRpcPorts.release(scmNodeId); + blockAddress); } conf.set(scmNodesKey, scmNodesKeyValue.substring(1)); @@ -720,19 +682,11 @@ private void initOMHAConfig() { OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId); List omNodeIds = new ArrayList<>(); - omPorts.reserve(numOfOMs); - ReservedPorts omRpcPorts = new ReservedPorts(1); - omRpcPorts.reserve(numOfOMs); - for (int i = 1; i <= numOfOMs; i++) { String omNodeId = OM_NODE_ID_PREFIX + i; omNodeIds.add(omNodeId); - configureOMPorts(conf, omServiceId, omNodeId, omPorts, omRpcPorts); - - if (i <= numOfActiveOMs) { - omPorts.release(omNodeId); - } + configureOMPorts(conf, 
omServiceId, omNodeId); } conf.set(omNodesKey, String.join(",", omNodeIds)); @@ -814,13 +768,8 @@ public void bootstrapOzoneManager(String omNodeId, private OzoneConfiguration addNewOMToConfig(String omServiceId, String omNodeId) { - ReservedPorts omPorts = omhaService.getPorts(); - omPorts.reserve(1); - ReservedPorts omRpcPorts = new ReservedPorts(1); - omRpcPorts.reserve(1); - OzoneConfiguration newConf = new OzoneConfiguration(getConf()); - configureOMPorts(newConf, omServiceId, omNodeId, omPorts, omRpcPorts); + configureOMPorts(newConf, omServiceId, omNodeId); String omNodesKey = ConfUtils.addKeySuffixes( OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId); @@ -929,7 +878,6 @@ static class MiniOzoneHAService { private List services; private String serviceId; private String serviceName; - private final ReservedPorts ports; // Active services s denote OM/SCM services which are up and running private List activeServices; @@ -940,9 +888,8 @@ static class MiniOzoneHAService { MiniOzoneHAService(String name, List activeList, List inactiveList, String serviceId, - ReservedPorts ports, Function idProvider) { + Function idProvider) { this.serviceName = name; - this.ports = ports != null ? ports : new ReservedPorts(0); this.serviceMap = Maps.newHashMap(); this.serviceIdProvider = idProvider; if (activeList != null) { @@ -974,10 +921,6 @@ public List getServices() { return services; } - public void releasePorts() { - ports.releaseAll(); - } - public List getActiveServices() { return activeServices; } @@ -1032,22 +975,17 @@ public void startInactiveService(String id, if (!inactiveServices.contains(service)) { throw new IOException(serviceName + " is already active."); } else { - ports.release(id); serviceStarter.execute(service); activeServices.add(service); inactiveServices.remove(service); } } - - public ReservedPorts getPorts() { - return ports; - } } static class OMHAService extends MiniOzoneHAService { OMHAService(List activeList, List inactiveList, - String serviceId, ReservedPorts omPorts) { - super("OM", activeList, inactiveList, serviceId, omPorts, + String serviceId) { + super("OM", activeList, inactiveList, serviceId, OzoneManager::getOMNodeId); } } @@ -1056,9 +994,9 @@ static class SCMHAService extends MiniOzoneHAService { SCMHAService(List activeList, List inactiveList, - String serviceId, ReservedPorts scmPorts) { + String serviceId) { super("SCM", activeList, inactiveList, serviceId, - scmPorts, StorageContainerManager::getSCMNodeId); + StorageContainerManager::getSCMNodeId); } } @@ -1097,76 +1035,4 @@ public void exitSystem(int status, String message, Logger log) } } - /** - * Reserves a number of ports for services. - */ - private static class ReservedPorts { - - private final Queue allPorts = new LinkedList<>(); - private final Map> assignedPorts = - new HashMap<>(); - private final int portsPerNode; - - ReservedPorts(int portsPerNode) { - this.portsPerNode = portsPerNode; - } - - /** - * Reserve {@code portsPerNode * nodes} ports by binding server sockets - * to random free ports. The sockets are kept open until - * {@link #release(String)} or {@link #releaseAll} is called. - */ - public void reserve(int nodes) { - Preconditions.checkState(allPorts.isEmpty()); - allPorts.addAll(reservePorts(portsPerNode * nodes)); - } - - /** - * Assign {@code portsPerNode} ports to a service identified by {@code id}. - * This set of ports should be released right before starting the service - * by calling {@link #release(String)}. 
- * - * @return iterator of the ports assigned - */ - public PrimitiveIterator.OfInt assign(String id) { - Preconditions.checkState(allPorts.size() >= portsPerNode); - List nodePorts = new LinkedList<>(); - for (int i = 0; i < portsPerNode; i++) { - nodePorts.add(allPorts.remove()); - } - assignedPorts.put(id, nodePorts); - LOG.debug("assign ports for {}: {}", id, nodePorts); - - return nodePorts.stream().mapToInt(ServerSocket::getLocalPort).iterator(); - } - - /** - * Release the ports assigned to the service identified by {@code id}. - * - * This closes the server sockets, making the same ports available for - * the service. Note: there is a race condition with other processes - * running on the host, but that's OK since this is for tests. - * - * If no ports are assigned to the service, this is a no-op. - */ - public void release(String id) { - List ports = assignedPorts.remove(id); - LOG.debug("release ports for {}: {}", id, ports); - if (ports != null) { - IOUtils.cleanup(LOG, ports.toArray(new Closeable[0])); - } - } - - /** - * Release all reserved ports, assigned or not. - */ - public void releaseAll() { - IOUtils.cleanup(LOG, allPorts.toArray(new Closeable[0])); - allPorts.clear(); - - for (String id : new ArrayList<>(assignedPorts.keySet())) { - release(id); - } - } - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index 0e3101be6fcc..59e95e7c2134 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -17,13 +17,7 @@ */ package org.apache.hadoop.ozone; -import java.io.Closeable; import java.io.IOException; -import java.io.UncheckedIOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeoutException; @@ -41,7 +35,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils.VoidCallable; -import org.apache.ratis.util.IOUtils; import org.apache.ratis.util.function.CheckedConsumer; import org.junit.Assert; @@ -157,22 +150,6 @@ public static void expectOmException( } } - public static List reservePorts(int count) { - List sockets = new ArrayList<>(count); - try { - for (int i = 0; i < count; i++) { - ServerSocket s = new ServerSocket(); - sockets.add(s); - s.setReuseAddress(true); - s.bind(new InetSocketAddress(InetAddress.getByName(null), 0), 1); - } - } catch (IOException e) { - IOUtils.cleanup(null, sockets.toArray(new Closeable[0])); - throw new UncheckedIOException(e); - } - return sockets; - } - /** * Close container & Wait till container state becomes CLOSED. */ From 4679f81e80b928879305427d7afc01781a11c871 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Fri, 12 May 2023 07:27:45 +0200 Subject: [PATCH 3/5] Limit max. 
port to 32000 --- .../src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index bf51bd7012f7..6fa18adbccaf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -644,7 +644,7 @@ public Builder setDnLayoutVersion(int layoutVersion) { class PortAllocator { private static final int MIN_PORT = 15000; - private static final int MAX_PORT = 64000; + private static final int MAX_PORT = 32000; private static final AtomicInteger NEXT_PORT = new AtomicInteger(MIN_PORT); private PortAllocator() { From 1a00032cb0f00cac1f5f727067bb2b278b0086ca Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Fri, 12 May 2023 08:15:23 +0200 Subject: [PATCH 4/5] Remove flaky tag from TestDecommissionAndMaintenance --- .../hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java index fb23046ac79e..f8e0403041f5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java @@ -43,7 +43,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.ozone.test.GenericTestUtils; -import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; @@ -88,7 +87,6 @@ /** * Test from the scmclient for decommission and maintenance. */ -@Flaky({"HDDS-6028", "HDDS-6049"}) public class TestDecommissionAndMaintenance { private static final Logger LOG = LoggerFactory.getLogger(TestDecommissionAndMaintenance.class); From d992f07d434df760bd3b6c0f96a1295e037196b5 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Fri, 12 May 2023 09:55:10 +0200 Subject: [PATCH 5/5] Revert "Remove flaky tag from TestDecommissionAndMaintenance" This reverts commit 1a00032cb0f00cac1f5f727067bb2b278b0086ca. 
--- .../hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java index f8e0403041f5..fb23046ac79e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java @@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; @@ -87,6 +88,7 @@ /** * Test from the scmclient for decommission and maintenance. */ +@Flaky({"HDDS-6028", "HDDS-6049"}) public class TestDecommissionAndMaintenance { private static final Logger LOG = LoggerFactory.getLogger(TestDecommissionAndMaintenance.class);
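
Note: for readers reviewing the series, the allocation scheme it introduces can be summarized in a self-contained sketch. The class below mirrors the counter-in-a-fixed-range approach of the PortAllocator helper added in patch 1 (with the 32000 upper bound from patch 3), but it is only an illustration of the technique: the name SequentialPortAllocator, its main method, and the usage shown are hypothetical and are not part of the MiniOzoneCluster API.

import java.util.concurrent.atomic.AtomicInteger;

/**
 * Sketch of the sequential port-allocation technique: hand out ports from a
 * fixed range via an atomic counter, wrapping around at the upper bound,
 * instead of asking the OS for random ports.
 */
public final class SequentialPortAllocator {

  private static final int MIN_PORT = 15000;
  private static final int MAX_PORT = 32000;
  private static final AtomicInteger NEXT_PORT = new AtomicInteger(MIN_PORT);

  private SequentialPortAllocator() {
    // no instances
  }

  /** Returns the next port in the range, wrapping back to MIN_PORT. */
  public static synchronized int getFreePort() {
    int port = NEXT_PORT.getAndIncrement();
    if (port > MAX_PORT) {
      NEXT_PORT.set(MIN_PORT);
      port = NEXT_PORT.getAndIncrement();
    }
    return port;
  }

  /** Loopback address string with the next port, e.g. "127.0.0.1:15000". */
  public static String localhostWithFreePort() {
    return "127.0.0.1:" + getFreePort();
  }

  public static void main(String[] args) {
    // Hypothetical usage: assign deterministic, distinct addresses to a few
    // test services without binding random OS-assigned ports.
    for (int i = 0; i < 3; i++) {
      System.out.println("service-" + i + " -> " + localhostWithFreePort());
    }
  }
}

The trade-off is the same as in the patch: ports are not verified to be free, so collisions with other processes on the host remain possible, but allocation is deterministic within a test JVM and avoids the restart problems caused by random OS-assigned ports.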