From b8218b216301fc062ee1145545ddbf663bb993ff Mon Sep 17 00:00:00 2001 From: tejaskriya Date: Thu, 18 Jan 2024 18:42:57 +0530 Subject: [PATCH 01/43] HDDS-9738. Display pipeline and container counts for decommissioning DN --- .../scm/cli/datanode/DatanodeCommands.java | 7 + .../DecommissionStatusSubCommand.java | 131 ++++++++++++++++++ .../scm/cli/datanode/StatusSubCommand.java | 6 + 3 files changed, 144 insertions(+) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java index 8cb2114f57db..acfacbe38e13 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java @@ -46,6 +46,9 @@ @MetaInfServices(SubcommandWithParent.class) public class DatanodeCommands implements Callable, SubcommandWithParent { + @CommandLine.ParentCommand + private OzoneAdmin parent; + @Spec private CommandSpec spec; @@ -55,6 +58,10 @@ public Void call() throws Exception { return null; } + public OzoneAdmin getParent() { + return parent; + } + @Override public Class getParentType() { return OzoneAdmin.class; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index bbf1d8407605..270761587d95 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -17,20 +17,48 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; +import com.google.gson.Gson; +import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.server.http.HttpConfig; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import picocli.CommandLine; +import javax.net.ssl.HttpsURLConnection; import java.io.IOException; +import java.io.InputStream; +import java.net.HttpURLConnection; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.net.HttpURLConnection.HTTP_CREATED; +import static java.net.HttpURLConnection.HTTP_OK; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTP_BIND_PORT_DEFAULT; +import 
static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HTTPS_BIND_PORT_DEFAULT; +import static org.apache.hadoop.hdds.server.http.HttpConfig.getHttpPolicy; +import static org.apache.hadoop.http.HttpServer2.HTTPS_SCHEME; +import static org.apache.hadoop.http.HttpServer2.HTTP_SCHEME; /** * Handler to print decommissioning nodes status. @@ -43,6 +71,9 @@ public class DecommissionStatusSubCommand extends ScmSubcommand { + @CommandLine.ParentCommand + private StatusSubCommand parent; + @CommandLine.Option(names = { "--id" }, description = "Show info by datanode UUID", defaultValue = "") @@ -79,10 +110,19 @@ public void execute(ScmClient scmClient) throws IOException { decommissioningNodes.size() + " node(s)"); } + Map counts = getCounts(); + int numDecomNodes; + Double num = (Double) counts.get("DecommissioningMaintenanceNodesTotal"); + if (num == null) { + numDecomNodes = -1; + } else { + numDecomNodes = num.intValue(); + } for (HddsProtos.Node node : decommissioningNodes) { DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( node.getNodeID()); printDetails(datanode); + printCounts(datanode, counts, numDecomNodes); } } private void printDetails(DatanodeDetails datanode) { @@ -90,4 +130,95 @@ private void printDetails(DatanodeDetails datanode) { " (" + datanode.getNetworkLocation() + "/" + datanode.getIpAddress() + "/" + datanode.getHostName() + ")"); } + + private void printCounts(DatanodeDetails datanode, Map counts, int numDecomNodes) { + try { + for (int i = 1; i <= numDecomNodes; i++) { + if (datanode.getHostName().equals(counts.get("tag.datanode." + i))) { + int pipelines = ((Double)counts.get("PipelinesWaitingToCloseDN." + i)).intValue(); + int underReplicated = ((Double)counts.get("UnderReplicatedDN." + i)).intValue(); + int unclosed = ((Double)counts.get("UnclosedContainersDN." + i)).intValue(); + long startTime = ((Double)counts.get("StartTimeDN." + i)).longValue(); + System.out.print("Decommission started at : "); + Date date = new Date(startTime); + DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z"); + System.out.println(formatter.format(date)); + System.out.println("No. of Pipelines: " + pipelines); + System.out.println("No. of UnderReplicated containers: " + underReplicated); + System.out.println("No. 
of Unclosed Containers: " + unclosed); + return; + } + } + System.err.println("Error getting pipeline and container counts for " + datanode.getHostName()); + } catch (NullPointerException ex) { + System.err.println("Error getting pipeline and container counts for " + datanode.getHostName()); + } + } + + private Map getCounts() { + Map finalResult = new HashMap<>(); + try { + StringBuffer url = new StringBuffer(); + final OzoneConfiguration ozoneConf = parent + .getParent() + .getParent() + .getOzoneConf(); + final String protocol; + final URLConnectionFactory connectionFactory = URLConnectionFactory.newDefaultURLConnectionFactory(ozoneConf); + final HttpConfig.Policy webPolicy = getHttpPolicy(ozoneConf); + String host; + InputStream inputStream; + int errorCode; + + if (webPolicy.isHttpsEnabled()) { + protocol = HTTPS_SCHEME; + host = ozoneConf.get(OZONE_SCM_HTTPS_ADDRESS_KEY, + OZONE_SCM_HTTP_BIND_HOST_DEFAULT + OZONE_SCM_HTTPS_BIND_PORT_DEFAULT); + url.append(protocol).append("://").append(host).append("/jmx") + .append("?qry=Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); + + HttpsURLConnection httpsURLConnection = (HttpsURLConnection) connectionFactory + .openConnection(new URL(url.toString())); + httpsURLConnection.connect(); + errorCode = httpsURLConnection.getResponseCode(); + inputStream = httpsURLConnection.getInputStream(); + } else { + protocol = HTTP_SCHEME; + host = ozoneConf.get(OZONE_SCM_HTTP_ADDRESS_KEY, + OZONE_SCM_HTTP_BIND_HOST_DEFAULT + OZONE_SCM_HTTP_BIND_PORT_DEFAULT); + url.append(protocol + "://" + host).append("/jmx") + .append("?qry=Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); + + HttpURLConnection httpURLConnection = (HttpURLConnection) connectionFactory + .openConnection(new URL(url.toString())); + httpURLConnection.connect(); + errorCode = httpURLConnection.getResponseCode(); + inputStream = httpURLConnection.getInputStream(); + } + + if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) { + String response = IOUtils.toString(inputStream, StandardCharsets.UTF_8); + HashMap> result = new Gson().fromJson(response, HashMap.class); + finalResult = result.get("beans").get(0); + return finalResult; + } else { + throw new IOException("Unable to retrieve pipeline and container counts."); + } + } catch (MalformedURLException ex) { + System.err.println("Unable to retrieve pipeline and container counts."); + return finalResult; + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + + public StatusSubCommand getParent() { + return parent; + } + + @VisibleForTesting + public void setParent() { + parent = new StatusSubCommand(); + } + } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java index 9edcd3425a0d..48489041e7b5 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java @@ -40,6 +40,8 @@ @MetaInfServices(SubcommandWithParent.class) public class StatusSubCommand implements Callable, SubcommandWithParent { + @CommandLine.ParentCommand + private DatanodeCommands parent; @CommandLine.Spec private CommandLine.Model.CommandSpec spec; @@ -49,6 +51,10 @@ public Void call() throws Exception { return null; } + public DatanodeCommands getParent() { + return parent; + } + @Override public Class getParentType() { 
return DatanodeCommands.class; From 6bb7333e10d4dab5858594ed247476bba902ea9c Mon Sep 17 00:00:00 2001 From: tejaskriya Date: Fri, 19 Jan 2024 12:04:41 +0530 Subject: [PATCH 02/43] Test class NPE fix --- .../hadoop/hdds/scm/cli/datanode/DatanodeCommands.java | 5 +++++ .../scm/cli/datanode/DecommissionStatusSubCommand.java | 1 + .../hadoop/hdds/scm/cli/datanode/StatusSubCommand.java | 7 +++++++ .../scm/cli/datanode/TestDecommissionStatusSubCommand.java | 1 + 4 files changed, 14 insertions(+) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java index acfacbe38e13..9c36228c86d8 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; @@ -62,6 +63,10 @@ public OzoneAdmin getParent() { return parent; } + @VisibleForTesting + public void setParent() { + parent = new OzoneAdmin(); + } @Override public Class getParentType() { return OzoneAdmin.class; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index 270761587d95..f7e284f102b5 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -219,6 +219,7 @@ public StatusSubCommand getParent() { @VisibleForTesting public void setParent() { parent = new StatusSubCommand(); + parent.setParent(); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java index 48489041e7b5..bf2f8b6f99fa 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java @@ -17,6 +17,7 @@ * limitations under the License. 
*/ +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.SubcommandWithParent; @@ -55,6 +56,12 @@ public DatanodeCommands getParent() { return parent; } + @VisibleForTesting + public void setParent() { + parent = new DatanodeCommands(); + parent.setParent(); + } + @Override public Class getParentType() { return DatanodeCommands.class; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index 902ee5e7a8d1..9647c40d788b 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -59,6 +59,7 @@ public class TestDecommissionStatusSubCommand { @BeforeEach public void setup() throws UnsupportedEncodingException { cmd = new DecommissionStatusSubCommand(); + cmd.setParent(); System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } From 5ecc245e44b0cb40c990c648fae77e35e6fa2c7c Mon Sep 17 00:00:00 2001 From: tejaskriya Date: Wed, 24 Jan 2024 11:33:25 +0530 Subject: [PATCH 03/43] Fix TestDecommissionStatusSubCommand test cases --- .../scm/cli/datanode/DatanodeCommands.java | 5 +- .../DecommissionStatusSubCommand.java | 4 +- .../scm/cli/datanode/StatusSubCommand.java | 5 +- .../TestDecommissionStatusSubCommand.java | 47 +++++++++++++++++-- 4 files changed, 52 insertions(+), 9 deletions(-) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java index 9c36228c86d8..dde4ef505477 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Model.CommandSpec; @@ -64,8 +65,8 @@ public OzoneAdmin getParent() { } @VisibleForTesting - public void setParent() { - parent = new OzoneAdmin(); + public void setParent(OzoneConfiguration conf) { + parent = new OzoneAdmin(conf); } @Override public Class getParentType() { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index f7e284f102b5..eaf112430984 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -217,9 +217,9 @@ public StatusSubCommand getParent() { } @VisibleForTesting - public void setParent() { + public void setParent(OzoneConfiguration conf) { parent = new StatusSubCommand(); - parent.setParent(); + parent.setParent(conf); } } 
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java index bf2f8b6f99fa..740a136ff04a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Command; @@ -57,9 +58,9 @@ public DatanodeCommands getParent() { } @VisibleForTesting - public void setParent() { + public void setParent(OzoneConfiguration conf) { parent = new DatanodeCommands(); - parent.setParent(); + parent.setParent(conf); } @Override diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index 9647c40d788b..acd2ee52b0d3 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -17,8 +17,17 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -27,6 +36,7 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; @@ -35,6 +45,7 @@ import java.util.regex.Pattern; import picocli.CommandLine; +import sun.net.www.protocol.http.HttpURLConnection; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -55,15 +66,46 @@ public class TestDecommissionStatusSubCommand { private final PrintStream originalErr = System.err; private DecommissionStatusSubCommand cmd; private List nodes = getNodeDetails(2); + private static HttpServer httpServer; + private static OzoneConfiguration conf; + + @BeforeAll + public static void setupScmHttp() throws Exception { + httpServer = HttpServer.create(new InetSocketAddress(15000), 0); + httpServer.createContext("/jmx", new HttpHandler() { + public void handle(HttpExchange exchange) throws IOException { + byte[] response = ("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 0, " + + "\"RecommissionNodesTotal\" : 0, 
\"PipelinesWaitingToCloseTotal\" : 0, " + + "\"ContainersUnderReplicatedTotal\" : 0, \"ContainersUnClosedTotal\" : 0, " + + "\"ContainersSufficientlyReplicatedTotal\" : 0 } ]}").getBytes(); + exchange.sendResponseHeaders(HttpURLConnection.HTTP_OK, response.length); + exchange.getResponseBody().write(response); + exchange.close(); + } + }); + httpServer.start(); + } + @AfterAll + public static void shutdownScmHttp() { + if (httpServer != null) { + httpServer.stop(0); + } + } @BeforeEach public void setup() throws UnsupportedEncodingException { cmd = new DecommissionStatusSubCommand(); - cmd.setParent(); System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); + conf = new OzoneConfiguration(); + HttpConfig.Policy policy = HttpConfig.Policy.HTTP_ONLY; + conf.set(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY, policy.name()); + conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "localhost:15000"); + conf.set(ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY, "localhost"); + cmd.setParent(conf); } - @AfterEach public void tearDown() { System.setOut(originalOut); @@ -201,7 +243,6 @@ public void testIpOptionDecommissionStatusFail() throws IOException { assertFalse(m.find()); } - private List getNodeDetails(int n) { List nodesList = new ArrayList<>(); From b699f179287140fb80a897ab2773500e8414aebd Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 18 Jan 2024 17:28:51 +0100 Subject: [PATCH 04/43] HDDS-10157. Download zlib fails with 403 Forbidden in CI (#6029) --- hadoop-hdds/rocks-native/pom.xml | 3 ++- hadoop-ozone/dev-support/checks/native.sh | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index d3a015a18f9b..cfd361baeb86 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -56,6 +56,7 @@ 8 8 + https://zlib.net/fossils/zlib-${zlib.version}.tar.gz @@ -134,7 +135,7 @@ wget - https://zlib.net/fossils/zlib-${zlib.version}.tar.gz + ${zlib.url} zlib-${zlib.version}.tar.gz ${project.build.directory}/zlib diff --git a/hadoop-ozone/dev-support/checks/native.sh b/hadoop-ozone/dev-support/checks/native.sh index dc66f923a649..2bfa7733fcbd 100755 --- a/hadoop-ozone/dev-support/checks/native.sh +++ b/hadoop-ozone/dev-support/checks/native.sh @@ -19,6 +19,13 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" CHECK=native +zlib_version=$(mvn -N help:evaluate -Dexpression=zlib.version -q -DforceStdout) +if [[ -z "${zlib_version}" ]]; then + echo "ERROR zlib.version not defined in pom.xml" + exit 1 +fi + source "${DIR}/junit.sh" -Pnative -Drocks_tools_native \ + -Dzlib.url="https://github.com/madler/zlib/releases/download/v${zlib_version}/zlib-${zlib_version}.tar.gz" \ -DexcludedGroups="unhealthy" \ "$@" From ad9af7278ce035f1b0763895556b6b81e888e11c Mon Sep 17 00:00:00 2001 From: Sadanand Shenoy Date: Fri, 19 Jan 2024 00:32:39 +0530 Subject: [PATCH 05/43] HDDS-3849. 
Add tests for show rule status of scm safemode (#6027) --- .../scm/safemode/TestSCMSafeModeManager.java | 30 ++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 79adf009f003..4f0846234225 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -25,10 +25,13 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -135,6 +138,7 @@ private void testSafeMode(int numContainers) throws Exception { serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); + validateRuleStatus("DatanodeSafeModeRule", "registered datanodes 0"); queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, HddsTestUtils.createNodeRegistrationContainerReport(containers)); @@ -176,7 +180,8 @@ public void testSafeModeExitRule() throws Exception { .getNumContainerWithOneReplicaReportedThreshold().value()); assertTrue(scmSafeModeManager.getInSafeMode()); - + validateRuleStatus("ContainerSafeModeRule", + "% of containers with at least one reported"); testContainerThreshold(containers.subList(0, 25), 0.25); assertEquals(25, scmSafeModeManager.getSafeModeMetrics() .getCurrentContainersWithOneReplicaReportedCount().value()); @@ -316,6 +321,13 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); + if (healthyPipelinePercent > 0) { + validateRuleStatus("HealthyPipelineSafeModeRule", + "healthy Ratis/THREE pipelines"); + } + validateRuleStatus("OneReplicaPipelineSafeModeRule", + "reported Ratis/THREE pipelines with at least one datanode"); + testContainerThreshold(containers, 1.0); List pipelines = pipelineManager.getPipelines(); @@ -374,6 +386,22 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( 100, 1000 * 5); } + /** + * @param safeModeRule verify that this rule is not satisfied + * @param stringToMatch string to match in the rule status. + */ + private void validateRuleStatus(String safeModeRule, String stringToMatch) { + Set>> ruleStatuses = + scmSafeModeManager.getRuleStatus().entrySet(); + for (Map.Entry> entry : ruleStatuses) { + if (entry.getKey().equals(safeModeRule)) { + Pair value = entry.getValue(); + assertEquals(false, value.getLeft()); + assertTrue(value.getRight().contains(stringToMatch)); + } + } + } + private void checkHealthy(int expectedCount) throws Exception { GenericTestUtils.waitFor(() -> scmSafeModeManager .getHealthyPipelineSafeModeRule() From ea387fc2f6b25db0c20e4d6288943ddde6910e09 Mon Sep 17 00:00:00 2001 From: Ritesh H Shukla Date: Thu, 18 Jan 2024 12:34:27 -0800 Subject: [PATCH 06/43] HDDS-10155. 
Use getOzoneKey when calculating the paths for keys (#6023) --- .../main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index da2d7728bafd..6e97ca2e7561 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -1680,9 +1680,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, TableIterator> iterator; Iterator, CacheValue>> cacheIter = keyTable.cacheIterator(); - String startCacheKey = OZONE_URI_DELIMITER + volumeName + - OZONE_URI_DELIMITER + bucketName + OZONE_URI_DELIMITER + - ((startKey.equals(OZONE_URI_DELIMITER)) ? "" : startKey); + String startCacheKey = metadataManager.getOzoneKey(volumeName, bucketName, startKey); // First, find key in TableCache listStatusFindKeyInTableCache(cacheIter, keyArgs, startCacheKey, From 6d7817f5d15e6f710ceacf7384f41f849c2d8dae Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 19 Jan 2024 08:48:23 +0100 Subject: [PATCH 07/43] HDDS-10134. Avoid false positive ManagedObject leak report (#6013) --- .../hadoop/hdds/utils/db/managed/ManagedBloomFilter.java | 7 +++++-- .../hdds/utils/db/managed/ManagedColumnFamilyOptions.java | 7 +++++-- .../hdds/utils/db/managed/ManagedCompactRangeOptions.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedDBOptions.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedEnvOptions.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedFlushOptions.java | 7 +++++-- .../utils/db/managed/ManagedIngestExternalFileOptions.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedLRUCache.java | 7 +++++-- .../apache/hadoop/hdds/utils/db/managed/ManagedObject.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedOptions.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedReadOptions.java | 7 +++++-- .../apache/hadoop/hdds/utils/db/managed/ManagedSlice.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedStatistics.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedWriteBatch.java | 7 +++++-- .../hadoop/hdds/utils/db/managed/ManagedWriteOptions.java | 7 +++++-- 16 files changed, 80 insertions(+), 32 deletions(-) diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java index 32d08f46f229..8246d10820ba 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedBloomFilter.java @@ -31,7 +31,10 @@ public class ManagedBloomFilter extends BloomFilter { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java index dc6a8409260c..055d4be9d9a3 100644 --- 
a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedColumnFamilyOptions.java @@ -79,8 +79,11 @@ public boolean isReused() { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } /** diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java index 44f4dba8f8ff..6ac4a2fa5b67 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedCompactRangeOptions.java @@ -31,7 +31,10 @@ public class ManagedCompactRangeOptions extends CompactRangeOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java index dd8e20cd9557..638739ff557e 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java @@ -31,7 +31,10 @@ public class ManagedDBOptions extends DBOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java index d19ffbda4f1a..388f5abea397 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedEnvOptions.java @@ -31,7 +31,10 @@ public class ManagedEnvOptions extends EnvOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java index 7a2049efda84..b151f836f962 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedFlushOptions.java @@ -31,7 +31,10 @@ public class ManagedFlushOptions extends FlushOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java index 
36e8e36ef081..ec68f42e748a 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedIngestExternalFileOptions.java @@ -31,7 +31,10 @@ public class ManagedIngestExternalFileOptions extends IngestExternalFileOptions @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java index db8ff7ddbdd2..8130361d79de 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLRUCache.java @@ -35,7 +35,10 @@ public ManagedLRUCache(long capacity) { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java index cae72ab73078..522ca1ac3252 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedObject.java @@ -41,7 +41,10 @@ public T get() { @Override public void close() { - original.close(); - leakTracker.close(); + try { + original.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java index 73ee224a1ad3..9cf0a46fd8b6 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedOptions.java @@ -31,7 +31,10 @@ public class ManagedOptions extends Options { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java index af5d3879e7b0..39d41482751a 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedReadOptions.java @@ -31,7 +31,10 @@ public class ManagedReadOptions extends ReadOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java index b69dc5d70447..cff320fec5e2 100644 --- 
a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSlice.java @@ -40,9 +40,12 @@ public synchronized long getNativeHandle() { @Override protected void disposeInternal() { - super.disposeInternal(); // RocksMutableObject.close is final thus can't be decorated. // So, we decorate disposeInternal instead to track closure. - leakTracker.close(); + try { + super.disposeInternal(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java index a80b7b69a1c8..0c9f27dd5eb5 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSstFileWriter.java @@ -38,7 +38,10 @@ public ManagedSstFileWriter(EnvOptions envOptions, @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java index ecd731dd6fae..8fc166bb6122 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedStatistics.java @@ -31,7 +31,10 @@ public class ManagedStatistics extends Statistics { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java index 28aadf95f38e..bda1af7d59bb 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteBatch.java @@ -38,7 +38,10 @@ public ManagedWriteBatch(byte[] data) { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java index d226b3e03eae..4ce8bc037bb6 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedWriteOptions.java @@ -31,7 +31,10 @@ public class ManagedWriteOptions extends WriteOptions { @Override public void close() { - super.close(); - leakTracker.close(); + try { + super.close(); + } finally { + leakTracker.close(); + } } } From c0ecebf47c9da53e20c84a78e4dae058402f5323 Mon Sep 17 00:00:00 2001 From: david1859168 <71422636+david1859168@users.noreply.github.com> Date: Fri, 19 Jan 2024 19:54:03 
+1100 Subject: [PATCH 08/43] HDDS-7558. Translate "Topology awareness" doc into Mandarin Chinese (#6025) --- .../docs/content/feature/Topology.zh.md | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 hadoop-hdds/docs/content/feature/Topology.zh.md diff --git a/hadoop-hdds/docs/content/feature/Topology.zh.md b/hadoop-hdds/docs/content/feature/Topology.zh.md new file mode 100644 index 000000000000..a366e3a2473c --- /dev/null +++ b/hadoop-hdds/docs/content/feature/Topology.zh.md @@ -0,0 +1,108 @@ +--- +title: "拓扑感知能力" +weight: 2 +menu: + main: + parent: 特性 +summary: 机架感知配置可以提高读/写性能 +--- + + +Ozone可以使用拓扑相关信息(例如机架位置)来优化读写管道。要获得完全的机架感知集群,Ozone需要三种不同的配置。 + + 1. 拓扑信息应由 Ozone 配置。 + 2. 当Ozone为特定管道/容器选择3个不同的数据节点时,拓扑相关信息就会被使用.(写入) + 3. 当Ozone读取一个Key时,它应该优先从最近的节点读取。 + + + +## 拓扑层次结构 + +拓扑层次结构可使用 net.topology.node.switch.mapping.impl 配置键进行配置。此配置应定义 org.apache.hadoop.net.CachedDNSToSwitchMapping 的实现。由于这是一个 Hadoop 类,因此该配置与 Hadoop 配置完全相同。 + +### 静态列表 + +静态列表可借助 ```TableMapping``` 进行配置:: + +```XML + + net.topology.node.switch.mapping.impl + org.apache.hadoop.net.TableMapping + + + net.topology.table.file.name + /opt/hadoop/compose/ozone-topology/network-config + +``` + +第二个配置选项应指向一个文本文件。文件格式为两列文本文件,各列之间用空格隔开。第一列是 IP 地址,第二列指定地址映射的机架。如果找不到与集群中主机相对应的条目,则会使用 /default-rack。 + +### 动态列表 + +机架信息可借助外部脚本识别: + + +```XML + + net.topology.node.switch.mapping.impl + org.apache.hadoop.net.ScriptBasedMapping + + + net.topology.script.file.name + /usr/local/bin/rack.sh + +``` + +如果使用外部脚本,则需要在配置文件中使用 net.topology.script.file.name 参数来指定。与 java 类不同,外部拓扑脚本不包含在 Ozone 发行版中,而是由管理员提供。Fork 拓扑脚本时,Ozone 会向 ARGV 发送多个 IP 地址。发送给拓扑脚本的 IP 地址数量由 net.topology.script.number.args 控制,默认为 100。如果将 net.topology.script.number.args 改为 1,则每个提交的 IP 地址都会Fork一个拓扑脚本。 + +## 写入路径 + +CLOSED容器放置可以通过 `ozone.scm.container.placement.impl` 配置键进行配置。 可用的容器放置策略可在 `org.apache.hdds.scm.container.placement` 包中找到。[包](https://github.com/apache/ozone/tree/master/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms). + +默认情况下, CLOSED容器使用 `SCMContainerPlacementRandom` 放置策略,该策略不支持拓扑感知。为了启用拓扑感知,可配置 `SCMContainerPlacementRackAware` 作为CLOSED容器放置策略: + +```XML + + ozone.scm.container.placement.impl + org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware + +``` + +这种放置策略符合 HDFS 中使用的算法。在默认的 3 个副本中,两个副本位于同一个机架上,第三个副本位于不同的机架上。 + +这种实现方式适用于"/机架/节点 "这样的网络拓扑结构。如果网络拓扑结构的层数较多,则不建议使用此方法。 + +## 读取路径 + +最后,读取路径也应配置为从最近的 pipeline 读取数据。 + +```XML + + ozone.network.topology.aware.read + true + +``` + +## 参考文献 + + * 关于 `net.topology.node.switch.mapping.impl` 的 Hadoop 文档: https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/RackAwareness.html + * [设计文档]({{< ref path="design/topology.md" lang="en">}}) \ No newline at end of file From 2a4d89e727656bfa4de8a555ecfdf5e848b3c50b Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 19 Jan 2024 10:18:01 +0100 Subject: [PATCH 09/43] HDDS-9506. 
Remove JUnit4 dependency (#6028) --- hadoop-hdds/client/pom.xml | 1 - hadoop-hdds/common/pom.xml | 1 - hadoop-hdds/config/pom.xml | 1 - hadoop-hdds/container-service/pom.xml | 1 - .../common/volume/TestVolumeSet.java | 52 +++++++-------- .../dev-support/checkstyle/checkstyle.xml | 6 -- hadoop-hdds/erasurecode/pom.xml | 1 - hadoop-hdds/framework/pom.xml | 1 - hadoop-hdds/rocks-native/pom.xml | 1 - hadoop-hdds/rocksdb-checkpoint-differ/pom.xml | 1 - hadoop-hdds/server-scm/pom.xml | 1 - hadoop-hdds/test-utils/pom.xml | 4 -- .../apache/ozone/test/DisableOnProperty.java | 44 ------------- .../apache/ozone/test/JUnit5AwareTimeout.java | 32 ---------- .../org/apache/ozone/test/MetricsAsserts.java | 2 + hadoop-hdds/tools/pom.xml | 1 - hadoop-ozone/client/pom.xml | 1 - hadoop-ozone/common/pom.xml | 1 - .../mini-chaos-tests/pom.xml | 10 --- hadoop-ozone/insight/pom.xml | 1 - hadoop-ozone/integration-test/pom.xml | 10 --- hadoop-ozone/interface-storage/pom.xml | 1 - hadoop-ozone/ozone-manager/pom.xml | 1 - hadoop-ozone/ozonefs-common/pom.xml | 1 - hadoop-ozone/recon/pom.xml | 1 - hadoop-ozone/s3-secret-store/pom.xml | 1 - hadoop-ozone/s3gateway/pom.xml | 1 - pom.xml | 64 +++++++++++-------- 28 files changed, 62 insertions(+), 181 deletions(-) delete mode 100644 hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java delete mode 100644 hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index f500faa7a756..5c85fda966d8 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index b3cf683ec80b..3d05f60e1018 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index e372c4b3558c..fb72f93570b9 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index 058274e56687..13973c871e6c 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone HDDS Container Service jar - false diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 9d90659552e2..1159d4277c78 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -20,22 +20,18 @@ import java.io.IOException; import org.apache.commons.io.FileUtils; -import org.junit.jupiter.api.Timeout; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import 
org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.GenericTestUtils.LogCapturer; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.ozone.container.common.volume.HddsVolume .HDDS_VOLUME_DIR; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -44,9 +40,13 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; +import org.slf4j.LoggerFactory; import java.io.File; import java.lang.reflect.Method; +import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -59,12 +59,13 @@ public class TestVolumeSet { private OzoneConfiguration conf; private MutableVolumeSet volumeSet; - private final String baseDir = MiniDFSCluster.getBaseDirectory(); - private final String volume1 = baseDir + "disk1"; - private final String volume2 = baseDir + "disk2"; - private final List volumes = new ArrayList<>(); - private static final String DUMMY_IP_ADDR = "0.0.0.0"; + @TempDir + private Path baseDir; + + private String volume1; + private String volume2; + private final List volumes = new ArrayList<>(); private void initializeVolumeSet() throws Exception { volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf, @@ -74,6 +75,9 @@ private void initializeVolumeSet() throws Exception { @BeforeEach public void setup() throws Exception { conf = new OzoneConfiguration(); + volume1 = baseDir.resolve("disk1").toString(); + volume2 = baseDir.resolve("disk2").toString(); + String dataDirKey = volume1 + "," + volume2; volumes.add(volume1); volumes.add(volume2); @@ -94,8 +98,6 @@ public void shutdown() throws IOException { FileUtils.deleteDirectory(volume.getStorageDir()); } volumeSet.shutdown(); - - FileUtil.fullyDelete(new File(baseDir)); } private boolean checkVolumeExistsInVolumeSet(String volumeRoot) { @@ -115,11 +117,11 @@ public void testVolumeSetInitialization() throws Exception { // VolumeSet initialization should add volume1 and volume2 to VolumeSet assertEquals(volumesList.size(), volumes.size(), - "VolumeSet intialization is incorrect"); + "VolumeSet initialization is incorrect"); assertTrue(checkVolumeExistsInVolumeSet(volume1), - "VolumeSet not initailized correctly"); + "VolumeSet not initialized correctly"); assertTrue(checkVolumeExistsInVolumeSet(volume2), - "VolumeSet not initailized correctly"); + "VolumeSet not initialized correctly"); } @Test @@ -128,7 +130,7 @@ public void testAddVolume() { assertEquals(2, volumeSet.getVolumesList().size()); // Add a volume to VolumeSet - String volume3 = baseDir + "disk3"; + String volume3 = baseDir.resolve("disk3").toString(); boolean success = volumeSet.addVolume(volume3); assertTrue(success); @@ -223,31 +225,21 @@ public void testShutdown() throws Exception { } @Test - public void testFailVolumes() throws Exception { - MutableVolumeSet volSet = null; - File readOnlyVolumePath = new File(baseDir); + void testFailVolumes(@TempDir File readOnlyVolumePath, @TempDir File volumePath) throws Exception { //Set to readonly, so that this volume will be failed - 
readOnlyVolumePath.setReadOnly(); - File volumePath = GenericTestUtils.getRandomizedTestDir(); + assumeThat(readOnlyVolumePath.setReadOnly()).isTrue(); OzoneConfiguration ozoneConfig = new OzoneConfiguration(); ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath() + "," + volumePath.getAbsolutePath()); ozoneConfig.set(HddsConfigKeys.OZONE_METADATA_DIRS, volumePath.getAbsolutePath()); - volSet = new MutableVolumeSet(UUID.randomUUID().toString(), ozoneConfig, + MutableVolumeSet volSet = new MutableVolumeSet(UUID.randomUUID().toString(), ozoneConfig, null, StorageVolume.VolumeType.DATA_VOLUME, null); assertEquals(1, volSet.getFailedVolumesList().size()); assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0) .getStorageDir()); - //Set back to writable - try { - readOnlyVolumePath.setWritable(true); - volSet.shutdown(); - } finally { - FileUtil.fullyDelete(volumePath); - } - + volSet.shutdown(); } @Test diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml index baa9fa4e67d2..3a69c793c26f 100644 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml @@ -209,12 +209,6 @@ - - - - - - diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index a632c65254ca..14511a160cea 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 8ad0d11d0217..8f7f32b58df9 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index cfd361baeb86..217021113161 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -52,7 +52,6 @@ - false 8 8 diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index 3d5967e9c0c0..829c0d6ac362 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index b42262fbabdd..39de9bbf19a4 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index 4f78bd1f14d7..17871c196a0b 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -49,10 +49,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-logging commons-logging - - junit - junit - org.junit.jupiter junit-jupiter-api diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java deleted file mode 100644 index cddbbd18080c..000000000000 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/DisableOnProperty.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ozone.test; - -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; - -import java.util.Objects; - -/** - * Disables the delegate rule if the given system property matches a specific - * value. - */ -public class DisableOnProperty implements TestRule { - - private final TestRule delegate; - private final boolean enabled; - - public DisableOnProperty(TestRule delegate, String key, String value) { - this.delegate = delegate; - enabled = !Objects.equals(value, System.getProperty(key, "")); - } - - @Override - public Statement apply(Statement base, Description description) { - return enabled ? delegate.apply(base, description) : base; - } -} diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java deleted file mode 100644 index 22840bd7a304..000000000000 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/JUnit5AwareTimeout.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ozone.test; - -import org.junit.rules.Timeout; - -/** - * Disables the given JUnit4 timeout rule if JUnit5-specific timeout-mode is set - * to "disabled". - */ -public class JUnit5AwareTimeout extends DisableOnProperty { - - public JUnit5AwareTimeout(Timeout delegate) { - super(delegate, "junit.jupiter.execution.timeout.mode", "disabled"); - } - -} diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java index 28d3b936ecab..fb74a22c6266 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/MetricsAsserts.java @@ -47,6 +47,8 @@ /** * Helpers for metrics source tests. + *

+ * Copied from Hadoop and migrated to AssertJ. */ public final class MetricsAsserts { diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 665b56d3ab08..5122f1d4a450 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -30,7 +30,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index a5598311c4c5..a5a436436183 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone Client jar - false diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 813edcb7d714..4af3fb18523d 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index fa6e0ae57566..604608a07fb8 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -34,21 +34,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> - - junit - junit - test - org.junit.jupiter junit-jupiter-engine test - - org.junit.vintage - junit-vintage-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index dcd03c04fa83..f5e044ddac2f 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -28,7 +28,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone Insight Tool jar - false diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 3eef8fa58c05..913cd639bf7c 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -119,21 +119,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-jar test - - junit - junit - test - org.junit.jupiter junit-jupiter-engine test - - org.junit.vintage - junit-vintage-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml index 3ab535852ce4..8ec4e0d99409 100644 --- a/hadoop-ozone/interface-storage/pom.xml +++ b/hadoop-ozone/interface-storage/pom.xml @@ -28,7 +28,6 @@ Apache Ozone Storage Interface jar - false diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 8438dbbf2c48..817d0ab5f755 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -29,7 +29,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - false diff --git a/hadoop-ozone/ozonefs-common/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml index df52a3d29d0c..0543d461ea78 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -28,7 +28,6 @@ UTF-8 true - false diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index f6226a44f33d..2895840aaf3f 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -25,7 +25,6 @@ ozone-recon 7.33.6 - false diff --git a/hadoop-ozone/s3-secret-store/pom.xml b/hadoop-ozone/s3-secret-store/pom.xml index 150937f2207b..99f99c2d01d3 100644 --- a/hadoop-ozone/s3-secret-store/pom.xml +++ b/hadoop-ozone/s3-secret-store/pom.xml @@ -28,7 +28,6 @@ UTF-8 true - false diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 
9ad6fbab4714..f875047d04a2 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -28,7 +28,6 @@ UTF-8 true - false diff --git a/pom.xml b/pom.xml index 1c59f8863786..2d6e825e5181 100644 --- a/pom.xml +++ b/pom.xml @@ -220,7 +220,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 4.11.0 2.2 1.24 - 4.13.1 5.10.1 3.7.2 @@ -248,7 +247,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs --> [${javac.version},) [3.3.0,) - true -Xmx4096m -XX:+HeapDumpOnOutOfMemoryError @@ -1156,17 +1154,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs aws-java-sdk-s3 ${aws-java-sdk.version} - - junit - junit - ${junit4.version} - - - org.hamcrest - hamcrest-core - - - org.hamcrest hamcrest @@ -1806,6 +1793,17 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.hdds.utils.db.managed.* + + + + + ban-imports + process-sources + + enforce + + + true Use directly from Guava @@ -1818,22 +1816,35 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs true Disable with @Unhealthy or @Slow instead (see HDDS-9276) - org.junit.Ignore org.junit.jupiter.api.Disabled - - - - - ban-junit4-imports - process-sources - - enforce - - - ${allow.junit4} - + + true + Use Ozone's version of the same class + + org.apache.hadoop.test.GenericTestUtils + org.apache.hadoop.test.LambdaTestUtils + + + org.apache.hadoop.fs.contract.* + org.apache.hadoop.tools.contract.* + + + + true + Use Ozone's version of the same class + + org.apache.hadoop.test.MetricsAssert + + + + true + Use Ozone's similar class + + org.apache.hadoop.hdfs.MiniDFSCluster + + true true @@ -1843,6 +1854,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.junit.jupiter.** + org.junit.platform.** From 07354825a49689a90d0a9e706c0e3e426d8e9fcc Mon Sep 17 00:00:00 2001 From: WangYuanben <48795318+YuanbenWang@users.noreply.github.com> Date: Fri, 19 Jan 2024 18:33:31 +0800 Subject: [PATCH 10/43] HDDS-10139. Support to get hosts from stdin when DN is decommissioning, recommissioning or entering maintenance. (#6019) --- .../cli/datanode/DecommissionSubCommand.java | 21 ++++++++++-- .../cli/datanode/MaintenanceSubCommand.java | 21 ++++++++++-- .../cli/datanode/RecommissionSubCommand.java | 21 ++++++++++-- .../datanode/TestDecommissionSubCommand.java | 34 +++++++++++++++++-- .../datanode/TestMaintenanceSubCommand.java | 34 +++++++++++++++++-- .../datanode/TestRecommissionSubCommand.java | 34 +++++++++++++++++-- 6 files changed, 150 insertions(+), 15 deletions(-) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java index 23ff9176df9f..e7d3a4443831 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Scanner; /** * Decommission one or more datanodes. 
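A brief usage sketch of the stdin mode this patch introduces (the "ozone admin datanode decommission" entry point and the host names below are illustrative assumptions, not part of the change itself):

    # existing behaviour: host names passed as arguments
    ozone admin datanode decommission dn1.example.com dn2.example.com

    # behaviour added here: pass "-" and supply host names on stdin, one per line
    printf 'dn1.example.com\ndn2.example.com\n' | ozone admin datanode decommission -

The same "-" convention is wired into the maintenance and recommission subcommands changed later in this patch.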
@@ -41,12 +42,26 @@ public class DecommissionSubCommand extends ScmSubcommand { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; - @CommandLine.Parameters(description = "List of fully qualified host names") - private List hosts = new ArrayList<>(); + @CommandLine.Parameters(description = "One or more host names separated by spaces. " + + "To read from stdin, specify '-' and supply the host names " + + "separated by newlines.", + paramLabel = "") + private List parameters = new ArrayList<>(); @Override public void execute(ScmClient scmClient) throws IOException { - if (hosts.size() > 0) { + if (parameters.size() > 0) { + List hosts; + // Whether to read from stdin + if (parameters.get(0).equals("-")) { + hosts = new ArrayList<>(); + Scanner scanner = new Scanner(System.in, "UTF-8"); + while (scanner.hasNextLine()) { + hosts.add(scanner.nextLine().trim()); + } + } else { + hosts = parameters; + } List errors = scmClient.decommissionNodes(hosts); System.out.println("Started decommissioning datanode(s):\n" + String.join("\n", hosts)); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java index a64c400f66f1..82d263b416fb 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Scanner; /** * Place one or more datanodes into Maintenance Mode. @@ -41,8 +42,11 @@ public class MaintenanceSubCommand extends ScmSubcommand { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; - @CommandLine.Parameters(description = "List of fully qualified host names") - private List hosts = new ArrayList<>(); + @CommandLine.Parameters(description = "One or more host names separated by spaces. " + + "To read from stdin, specify '-' and supply the host names " + + "separated by newlines.", + paramLabel = "") + private List parameters = new ArrayList<>(); @CommandLine.Option(names = {"--end"}, description = "Automatically end maintenance after the given hours. 
" + @@ -51,7 +55,18 @@ public class MaintenanceSubCommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { - if (hosts.size() > 0) { + if (parameters.size() > 0) { + List hosts; + // Whether to read from stdin + if (parameters.get(0).equals("-")) { + hosts = new ArrayList<>(); + Scanner scanner = new Scanner(System.in, "UTF-8"); + while (scanner.hasNextLine()) { + hosts.add(scanner.nextLine().trim()); + } + } else { + hosts = parameters; + } List errors = scmClient.startMaintenanceNodes(hosts, endInHours); System.out.println("Entering maintenance mode on datanode(s):\n" + diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java index 61f7826cf647..e21d61ed3d7f 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Scanner; /** * Recommission one or more datanodes. @@ -42,12 +43,26 @@ public class RecommissionSubCommand extends ScmSubcommand { @CommandLine.Spec private CommandLine.Model.CommandSpec spec; - @CommandLine.Parameters(description = "List of fully qualified host names") - private List hosts = new ArrayList<>(); + @CommandLine.Parameters(description = "One or more host names separated by spaces. " + + "To read from stdin, specify '-' and supply the host names " + + "separated by newlines.", + paramLabel = "") + private List parameters = new ArrayList<>(); @Override public void execute(ScmClient scmClient) throws IOException { - if (hosts.size() > 0) { + if (parameters.size() > 0) { + List hosts; + // Whether to read from stdin + if (parameters.get(0).equals("-")) { + hosts = new ArrayList<>(); + Scanner scanner = new Scanner(System.in, "UTF-8"); + while (scanner.hasNextLine()) { + hosts.add(scanner.nextLine().trim()); + } + } else { + hosts = parameters; + } List errors = scmClient.recommissionNodes(hosts); System.out.println("Started recommissioning datanode(s):\n" + String.join("\n", hosts)); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java index 7e5b857d179c..afce23b5fd54 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; @@ -47,6 +48,7 @@ public class TestDecommissionSubCommand { private DecommissionSubCommand cmd; + private ScmClient scmClient; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; @@ -56,6 +58,7 @@ public class TestDecommissionSubCommand { @BeforeEach public void setup() throws UnsupportedEncodingException { cmd = new DecommissionSubCommand(); + scmClient = 
mock(ScmClient.class); System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @@ -66,9 +69,37 @@ public void tearDown() { System.setErr(originalErr); } + @Test + public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { + when(scmClient.decommissionNodes(anyList())) + .thenAnswer(invocation -> new ArrayList()); + + String input = "host1\nhost2\nhost3\n"; + System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING))); + CommandLine c = new CommandLine(cmd); + c.parseArgs("-"); + cmd.execute(scmClient); + + Pattern p = Pattern.compile( + "^Started\\sdecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host1$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host2$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host3$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + @Test public void testNoErrorsWhenDecommissioning() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.decommissionNodes(anyList())) .thenAnswer(invocation -> new ArrayList()); @@ -92,7 +123,6 @@ public void testNoErrorsWhenDecommissioning() throws IOException { @Test public void testErrorsReportedWhenDecommissioning() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.decommissionNodes(anyList())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java index d3f7f026ddb9..694ba0e282c6 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; @@ -48,6 +49,7 @@ public class TestMaintenanceSubCommand { private MaintenanceSubCommand cmd; + private ScmClient scmClient; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; @@ -57,6 +59,7 @@ public class TestMaintenanceSubCommand { @BeforeEach public void setup() throws UnsupportedEncodingException { cmd = new MaintenanceSubCommand(); + scmClient = mock(ScmClient.class); System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @@ -67,9 +70,37 @@ public void tearDown() { System.setErr(originalErr); } + @Test + public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { + when(scmClient.decommissionNodes(anyList())) + .thenAnswer(invocation -> new ArrayList()); + + String input = "host1\nhost2\nhost3\n"; + System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING))); + CommandLine c = new CommandLine(cmd); + 
c.parseArgs("-"); + cmd.execute(scmClient); + + Pattern p = Pattern.compile( + "^Entering\\smaintenance\\smode\\son\\sdatanode\\(s\\)", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host1$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host2$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host3$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + @Test public void testNoErrorsWhenEnteringMaintenance() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.startMaintenanceNodes(anyList(), anyInt())) .thenAnswer(invocation -> new ArrayList()); @@ -94,7 +125,6 @@ public void testNoErrorsWhenEnteringMaintenance() throws IOException { @Test public void testErrorsReportedWhenEnteringMaintenance() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.startMaintenanceNodes(anyList(), anyInt())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java index 41ce0d90cb78..7f4dbec77344 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; @@ -47,6 +48,7 @@ public class TestRecommissionSubCommand { private RecommissionSubCommand cmd; + private ScmClient scmClient; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; @@ -56,6 +58,7 @@ public class TestRecommissionSubCommand { @BeforeEach public void setup() throws UnsupportedEncodingException { cmd = new RecommissionSubCommand(); + scmClient = mock(ScmClient.class); System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @@ -66,9 +69,37 @@ public void tearDown() { System.setErr(originalErr); } + @Test + public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { + when(scmClient.decommissionNodes(anyList())) + .thenAnswer(invocation -> new ArrayList()); + + String input = "host1\nhost2\nhost3\n"; + System.setIn(new ByteArrayInputStream(input.getBytes(DEFAULT_ENCODING))); + CommandLine c = new CommandLine(cmd); + c.parseArgs("-"); + cmd.execute(scmClient); + + Pattern p = Pattern.compile( + "^Started\\srecommissioning\\sdatanode\\(s\\)", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host1$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile("^host2$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = 
Pattern.compile("^host3$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + @Test public void testNoErrorsWhenRecommissioning() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.recommissionNodes(anyList())) .thenAnswer(invocation -> new ArrayList()); @@ -92,7 +123,6 @@ public void testNoErrorsWhenRecommissioning() throws IOException { @Test public void testErrorsReportedWhenRecommissioning() throws IOException { - ScmClient scmClient = mock(ScmClient.class); when(scmClient.recommissionNodes(anyList())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); From 245e4dd14c305bbea529655a7ecd70d47ba44381 Mon Sep 17 00:00:00 2001 From: Ivan Andika <36403683+ivandika3@users.noreply.github.com> Date: Fri, 19 Jan 2024 20:05:24 +0800 Subject: [PATCH 11/43] HDDS-10138. NPE for SstFilteringService in OMDBCheckpointServlet.Lock (#6015) --- .../apache/hadoop/hdds/utils/db/RDBStore.java | 1 + .../hadoop/ozone/om/KeyManagerImpl.java | 2 + .../ozone/om/OMDBCheckpointServlet.java | 38 ++++++++++--------- 3 files changed, 24 insertions(+), 17 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java index cfd5e4e3947a..47000f8cbc41 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java @@ -197,6 +197,7 @@ public String getSnapshotsParentDir() { return snapshotsParentDir; } + @Override public RocksDBCheckpointDiffer getRocksDBCheckpointDiffer() { return rocksDBCheckpointDiffer; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 6e97ca2e7561..9cfd4043146a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -697,10 +697,12 @@ public BackgroundService getMultipartUploadCleanupService() { return multipartUploadCleanupService; } + @Override public SstFilteringService getSnapshotSstFilteringService() { return snapshotSstFilteringService; } + @Override public SnapshotDeletingService getSnapshotDeletingService() { return snapshotDeletingService; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index 2a7771fe60a3..3e6d70626728 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java @@ -36,6 +36,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import com.google.common.base.Preconditions; import org.jetbrains.annotations.NotNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -644,29 +645,35 @@ public BootstrapStateHandler.Lock getBootstrapStateLock() { } static class Lock extends BootstrapStateHandler.Lock { - private final BootstrapStateHandler keyDeletingService; - private final BootstrapStateHandler sstFilteringService; - private final BootstrapStateHandler rocksDbCheckpointDiffer; - 
private final BootstrapStateHandler snapshotDeletingService; + private final List locks; private final OzoneManager om; Lock(OzoneManager om) { + Preconditions.checkNotNull(om); + Preconditions.checkNotNull(om.getKeyManager()); + Preconditions.checkNotNull(om.getMetadataManager()); + Preconditions.checkNotNull(om.getMetadataManager().getStore()); + this.om = om; - keyDeletingService = om.getKeyManager().getDeletingService(); - sstFilteringService = om.getKeyManager().getSnapshotSstFilteringService(); - rocksDbCheckpointDiffer = om.getMetadataManager().getStore() - .getRocksDBCheckpointDiffer(); - snapshotDeletingService = om.getKeyManager().getSnapshotDeletingService(); + + locks = Stream.of( + om.getKeyManager().getDeletingService(), + om.getKeyManager().getSnapshotSstFilteringService(), + om.getMetadataManager().getStore().getRocksDBCheckpointDiffer(), + om.getKeyManager().getSnapshotDeletingService() + ) + .filter(Objects::nonNull) + .map(BootstrapStateHandler::getBootstrapStateLock) + .collect(Collectors.toList()); } @Override public BootstrapStateHandler.Lock lock() throws InterruptedException { // First lock all the handlers. - keyDeletingService.getBootstrapStateLock().lock(); - sstFilteringService.getBootstrapStateLock().lock(); - rocksDbCheckpointDiffer.getBootstrapStateLock().lock(); - snapshotDeletingService.getBootstrapStateLock().lock(); + for (BootstrapStateHandler.Lock lock : locks) { + lock.lock(); + } // Then wait for the double buffer to be flushed. om.awaitDoubleBufferFlush(); @@ -675,10 +682,7 @@ public BootstrapStateHandler.Lock lock() @Override public void unlock() { - snapshotDeletingService.getBootstrapStateLock().unlock(); - rocksDbCheckpointDiffer.getBootstrapStateLock().unlock(); - sstFilteringService.getBootstrapStateLock().unlock(); - keyDeletingService.getBootstrapStateLock().unlock(); + locks.forEach(BootstrapStateHandler.Lock::unlock); } } } From 4e923af1502010fe05ddb174b656eba43cd6e814 Mon Sep 17 00:00:00 2001 From: WangYuanben <48795318+YuanbenWang@users.noreply.github.com> Date: Fri, 19 Jan 2024 22:41:52 +0800 Subject: [PATCH 12/43] HDDS-10159. Add test for putting key with ECReplicationConfig by shell. 
(#6030) --- .../hadoop/ozone/shell/TestOzoneShellHA.java | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 92381829f0ba..dd84489b68f4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -1249,6 +1249,29 @@ public void testPutKeyOnBucketWithECReplicationConfig() throws Exception { key.getReplicationConfig().getReplicationType()); } + @Test + public void testPutKeyWithECReplicationConfig() throws Exception { + final String volumeName = UUID.randomUUID().toString(); + final String bucketName = UUID.randomUUID().toString(); + final String keyName = UUID.randomUUID().toString(); + getVolume(volumeName); + String bucketPath = + Path.SEPARATOR + volumeName + Path.SEPARATOR + bucketName; + String[] args = + new String[] {"bucket", "create", bucketPath}; + execute(ozoneShell, args); + + args = new String[] {"key", "put", "-r", "rs-3-2-1024k", "-t", "EC", + bucketPath + Path.SEPARATOR + keyName, testFilePathString}; + execute(ozoneShell, args); + + OzoneKeyDetails key = + client.getObjectStore().getVolume(volumeName) + .getBucket(bucketName).getKey(keyName); + assertEquals(HddsProtos.ReplicationType.EC, + key.getReplicationConfig().getReplicationType()); + } + @Test public void testCreateBucketWithRatisReplicationConfig() throws Exception { final String volumeName = "volume101"; From d5c1bc4615de6bff5924e357bd5fd01044d36d1b Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Fri, 19 Jan 2024 21:36:27 +0530 Subject: [PATCH 13/43] HDDS-9648. Create API to fetch info about a single datanode (#5856) --- .../hadoop/hdds/scm/client/ScmClient.java | 9 +++++++ .../StorageContainerLocationProtocol.java | 3 +++ ...ocationProtocolClientSideTranslatorPB.java | 16 +++++++++++ .../src/main/proto/ScmAdminProtocol.proto | 15 +++++++++-- ...ocationProtocolServerSideTranslatorPB.java | 20 ++++++++++++++ .../scm/server/SCMClientProtocolServer.java | 22 +++++++++++++++ .../scm/cli/ContainerOperationClient.java | 6 +++++ .../scm/cli/datanode/ListInfoSubcommand.java | 14 +++++++--- .../cli/datanode/TestListInfoSubcommand.java | 27 +++++++++++++++++++ 9 files changed, 126 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index b03cead27e79..120535405ecd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -39,6 +39,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.UUID; /** * The interface to call into underlying container layer. @@ -194,6 +195,14 @@ List queryNode(HddsProtos.NodeOperationalState opState, HddsProtos.NodeState nodeState, HddsProtos.QueryScope queryScope, String poolName) throws IOException; + /** + * Returns a node with the given UUID. + * @param uuid - datanode uuid string + * @return A nodes that matches the requested UUID. 
+ * @throws IOException + */ + HddsProtos.Node queryNode(UUID uuid) throws IOException; + /** * Allows a list of hosts to be decommissioned. The hosts are identified * by their hostname and optionally port in the format foo.com:port. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index b587cc924b06..be0f41b62295 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -44,6 +44,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.UUID; /** * ContainerLocationProtocol is used by an HDFS node to find the set of nodes @@ -232,6 +233,8 @@ List queryNode(HddsProtos.NodeOperationalState opState, HddsProtos.NodeState state, HddsProtos.QueryScope queryScope, String poolName, int clientVersion) throws IOException; + HddsProtos.Node queryNode(UUID uuid) throws IOException; + List decommissionNodes(List nodes) throws IOException; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 330cfae30b2f..eb3f419e48d1 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -89,6 +89,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMCloseContainerResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerRequestProto; @@ -114,6 +116,7 @@ import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.ProtobufUtils; import java.io.Closeable; import java.io.IOException; @@ -123,6 +126,7 @@ import java.util.Map; import java.util.Optional; import java.util.function.Consumer; +import java.util.UUID; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMCloseContainerResponseProto.Status.CONTAINER_ALREADY_CLOSED; @@ -486,6 +490,18 @@ public List queryNode( return response.getDatanodesList(); } + 
@Override + public HddsProtos.Node queryNode(UUID uuid) throws IOException { + SingleNodeQueryRequestProto request = SingleNodeQueryRequestProto.newBuilder() + .setUuid(ProtobufUtils.toProtobuf(uuid)) + .build(); + SingleNodeQueryResponseProto response = + submitRequest(Type.SingleNodeQuery, + builder -> builder.setSingleNodeQueryRequest(request)) + .getSingleNodeQueryResponse(); + return response.getDatanode(); + } + /** * Attempts to decommission the list of nodes. * @param nodes The list of hostnames or hostname:ports to decommission diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index 49e71d2fe69a..6cfddcc2f6c4 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -78,9 +78,10 @@ message ScmContainerLocationRequest { optional GetContainerReplicasRequestProto getContainerReplicasRequest = 39; optional ReplicationManagerReportRequestProto replicationManagerReportRequest = 40; optional ResetDeletedBlockRetryCountRequestProto resetDeletedBlockRetryCountRequest = 41; - optional TransferLeadershipRequestProto transferScmLeadershipRequest = 42; + optional TransferLeadershipRequestProto transferScmLeadershipRequest = 42; optional GetFailedDeletedBlocksTxnRequestProto getFailedDeletedBlocksTxnRequest = 43; optional DecommissionScmRequestProto decommissionScmRequest = 44; + optional SingleNodeQueryRequestProto singleNodeQueryRequest = 45; } message ScmContainerLocationResponse { @@ -130,9 +131,10 @@ message ScmContainerLocationResponse { optional GetContainerReplicasResponseProto getContainerReplicasResponse = 39; optional ReplicationManagerReportResponseProto getReplicationManagerReportResponse = 40; optional ResetDeletedBlockRetryCountResponseProto resetDeletedBlockRetryCountResponse = 41; - optional TransferLeadershipResponseProto transferScmLeadershipResponse = 42; + optional TransferLeadershipResponseProto transferScmLeadershipResponse = 42; optional GetFailedDeletedBlocksTxnResponseProto getFailedDeletedBlocksTxnResponse = 43; optional DecommissionScmResponseProto decommissionScmResponse = 44; + optional SingleNodeQueryResponseProto singleNodeQueryResponse = 45; enum Status { OK = 1; @@ -184,6 +186,7 @@ enum Type { TransferLeadership = 38; GetFailedDeletedBlocksTransaction = 39; DecommissionScm = 40; + SingleNodeQuery = 41; } /** @@ -326,6 +329,14 @@ message NodeQueryResponseProto { repeated Node datanodes = 1; } +message SingleNodeQueryRequestProto { + required UUID uuid = 1; +} + +message SingleNodeQueryResponseProto { + optional Node datanode = 1; +} + /* Datanode usage info request message. 
*/ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 7738d0e3907e..6d47a78a7d77 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -92,6 +92,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse.Status; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SingleNodeQueryRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto; @@ -120,6 +122,7 @@ import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; +import org.apache.hadoop.util.ProtobufUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -460,6 +463,13 @@ public ScmContainerLocationResponse processRequest( .setNodeQueryResponse(queryNode(request.getNodeQueryRequest(), request.getVersion())) .build(); + case SingleNodeQuery: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setSingleNodeQueryResponse(querySingleNode(request + .getSingleNodeQueryRequest())) + .build(); case CloseContainer: return ScmContainerLocationResponse.newBuilder() .setCmdType(request.getCmdType()) @@ -866,6 +876,16 @@ public NodeQueryResponseProto queryNode( .build(); } + public SingleNodeQueryResponseProto querySingleNode( + SingleNodeQueryRequestProto request) + throws IOException { + + HddsProtos.Node datanode = impl.queryNode(ProtobufUtils.fromProtobuf(request.getUuid())); + return SingleNodeQueryResponseProto.newBuilder() + .setDatanode(datanode) + .build(); + } + public SCMCloseContainerResponseProto closeContainer( SCMCloseContainerRequestProto request) throws IOException { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 3d38fdbe8199..ac92ea893dbb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -109,6 +109,7 @@ import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.Stream; +import java.util.UUID; import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService.newReflectiveBlockingService; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT; @@ -613,6 +614,27 @@ public List queryNode( return result; } + @Override + public HddsProtos.Node queryNode(UUID uuid) + throws IOException { + HddsProtos.Node result = null; + try { + DatanodeDetails node = scm.getScmNodeManager().getNodeByUuid(uuid); + if (node != null) { + NodeStatus ns = scm.getScmNodeManager().getNodeStatus(node); + result = HddsProtos.Node.newBuilder() + .setNodeID(node.getProtoBufMessage()) + .addNodeStates(ns.getHealth()) + .addNodeOperationalStates(ns.getOperationalState()) + .build(); + } + } catch (NodeNotFoundException e) { + throw new IOException( + "An unexpected error occurred querying the NodeStatus", e); + } + return result; + } + @Override public List decommissionNodes(List nodes) throws IOException { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 7aa91cec73c8..1daffbb9b940 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -59,6 +59,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.UUID; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT; @@ -225,6 +226,11 @@ public List queryNode( queryScope, poolName, ClientVersion.CURRENT_VERSION); } + @Override + public HddsProtos.Node queryNode(UUID uuid) throws IOException { + return storageContainerLocationClient.queryNode(uuid); + } + @Override public List decommissionNodes(List hosts) throws IOException { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java index db12ee2aacb1..325e362d4f4e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java @@ -29,6 +29,7 @@ import java.io.IOException; import java.util.List; +import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -82,6 +83,15 @@ public class ListInfoSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { pipelines = scmClient.listPipelines(); + if (!Strings.isNullOrEmpty(uuid)) { + HddsProtos.Node node = scmClient.queryNode(UUID.fromString(uuid)); + DatanodeWithAttributes dwa = new DatanodeWithAttributes(DatanodeDetails + .getFromProtoBuf(node.getNodeID()), + node.getNodeOperationalStates(0), + node.getNodeStates(0)); + printDatanodeInfo(dwa); + return; + } Stream allNodes = getAllNodes(scmClient).stream(); if (!Strings.isNullOrEmpty(ipaddress)) { allNodes = allNodes.filter(p -> p.getDatanodeDetails().getIpAddress() @@ -91,10 +101,6 @@ public void execute(ScmClient scmClient) throws IOException { allNodes = allNodes.filter(p -> p.getDatanodeDetails().getHostName() .compareToIgnoreCase(hostname) == 0); } - if (!Strings.isNullOrEmpty(uuid)) { - allNodes = 
allNodes.filter(p -> - p.getDatanodeDetails().getUuidString().equals(uuid)); - } if (!Strings.isNullOrEmpty(nodeOperationalState)) { allNodes = allNodes.filter(p -> p.getOpState().toString() .compareToIgnoreCase(nodeOperationalState) == 0); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java index b6ae0a8ff4fa..1247b783b5cd 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java @@ -32,6 +32,7 @@ import java.util.UUID; import java.util.regex.Matcher; import java.util.regex.Pattern; +import picocli.CommandLine; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; @@ -101,6 +102,32 @@ public void testDataNodeOperationalStateAndHealthIncludedInOutput() assertTrue(m.find()); } + @Test + public void testDataNodeByUuidOutput() + throws Exception { + List nodes = getNodeDetails(); + + ScmClient scmClient = mock(ScmClient.class); + when(scmClient.queryNode(any())) + .thenAnswer(invocation -> nodes.get(0)); + when(scmClient.listPipelines()) + .thenReturn(new ArrayList<>()); + + CommandLine c = new CommandLine(cmd); + c.parseArgs("--id", nodes.get(0).getNodeID().getUuid()); + cmd.execute(scmClient); + + Pattern p = Pattern.compile( + "^Operational State:\\s+IN_SERVICE$", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + p = Pattern.compile(nodes.get(0).getNodeID().getUuid().toString(), + Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + private List getNodeDetails() { List nodes = new ArrayList<>(); From e77a047910a7503f1e320c0fb7acd33303cc6498 Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Sat, 20 Jan 2024 00:39:28 +0530 Subject: [PATCH 14/43] HDDS-9944. NSSummary commands should close OzoneClient. 
(#6018) --- .../admin/nssummary/DiskUsageSubCommand.java | 3 +- .../nssummary/FileSizeDistSubCommand.java | 3 +- .../ozone/admin/nssummary/NSSummaryAdmin.java | 63 ++++--------------- .../admin/nssummary/QuotaUsageSubCommand.java | 3 +- .../admin/nssummary/SummarySubCommand.java | 3 +- 5 files changed, 15 insertions(+), 60 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java index 59348894326a..0585fea000c9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java @@ -106,8 +106,7 @@ public Void call() throws Exception { if (duResponse.get("status").equals("PATH_NOT_FOUND")) { printPathNotFound(); } else { - if (parent.isObjectStoreBucket(path) || - !parent.bucketIsPresentInThePath(path)) { + if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java index 5a2a2d11c025..f74ee109504c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java @@ -80,8 +80,7 @@ public Void call() throws Exception { } else if (distResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { printTypeNA("File Size Distribution"); } else { - if (parent.isObjectStoreBucket(path) || - !parent.bucketIsPresentInThePath(path)) { + if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java index 0cd77626d157..0521e8fd7442 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java @@ -37,7 +37,6 @@ import java.io.IOException; import java.util.HashSet; -import java.util.Objects; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_DEFAULT; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; @@ -86,86 +85,46 @@ public Class getParentType() { return OzoneAdmin.class; } - public boolean isFileSystemOptimizedBucket(String path) throws IOException { - OFSPath ofsPath = new OFSPath(path, - OzoneConfiguration.of(getOzoneConfig())); - - OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(getOzoneConfig()); - ObjectStore objectStore = ozoneClient.getObjectStore(); - - try { - OzoneBucket bucket = objectStore.getVolume(ofsPath.getVolumeName()) - .getBucket(ofsPath.getBucketName()); - - // Resolve the bucket layout in case this is a Link Bucket. - BucketLayout resolvedBucketLayout = - OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, - new HashSet<>()); - - return resolvedBucketLayout.isFileSystemOptimized(); - } catch (IOException e) { - System.out.println( - "Bucket layout couldn't be verified for path: " + ofsPath + - ". 
Exception: " + e); - return false; - } - } - - public boolean isObjectStoreBucket(String path) throws IOException { - OFSPath ofsPath = new OFSPath(path, - OzoneConfiguration.of(getOzoneConfig())); - + private boolean isObjectStoreBucket(OzoneBucket bucket, ObjectStore objectStore) { boolean enableFileSystemPaths = getOzoneConfig() .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); - - OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(getOzoneConfig()); - ObjectStore objectStore = ozoneClient.getObjectStore(); - try { - OzoneBucket bucket = objectStore.getVolume(ofsPath.getVolumeName()) - .getBucket(ofsPath.getBucketName()); - // Resolve the bucket layout in case this is a Link Bucket. BucketLayout resolvedBucketLayout = OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, new HashSet<>()); - return resolvedBucketLayout.isObjectStore(enableFileSystemPaths); } catch (IOException e) { System.out.println( - "Bucket layout couldn't be verified for path: " + ofsPath + - ". Exception: " + e); + "Bucket layout couldn't be resolved. Exception thrown: " + e); return false; } } /** - * Checking if the bucket is part of the path. + * Checks if bucket is OBS bucket or if bucket is part of the path. * Return false if path is root, just a volume or invalid. + * Returns false if bucket is part of path but not a OBS bucket. * @param path - * @return true if the bucket - * is not part of the given path. + * @return true if bucket is OBS bucket or not part of provided path. * @throws IOException */ - public boolean bucketIsPresentInThePath(String path) throws IOException { + public boolean isNotValidBucketOrOBSBucket(String path) { OFSPath ofsPath = new OFSPath(path, OzoneConfiguration.of(getOzoneConfig())); - - OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(getOzoneConfig()); - ObjectStore objectStore = ozoneClient.getObjectStore(); - - try { + try (OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(getOzoneConfig())) { + ObjectStore objectStore = ozoneClient.getObjectStore(); + // Checks if the bucket is part of the path. OzoneBucket bucket = objectStore.getVolume(ofsPath.getVolumeName()) .getBucket(ofsPath.getBucketName()); - - return Objects.nonNull(bucket); + return isObjectStoreBucket(bucket, objectStore); } catch (IOException e) { System.out.println( "Bucket layout couldn't be verified for path: " + ofsPath + ". 
Exception: " + e); - return false; } + return true; } /** diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java index c3494cf4ffba..113193c929b4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java @@ -80,8 +80,7 @@ public Void call() throws Exception { } else if (quotaResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { printTypeNA("Quota"); } else { - if (parent.isObjectStoreBucket(path) || - !parent.bucketIsPresentInThePath(path)) { + if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java index 4a4946bb8092..9180274b9c70 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java @@ -76,8 +76,7 @@ public Void call() throws Exception { if (summaryResponse.get("status").equals("PATH_NOT_FOUND")) { printPathNotFound(); } else { - if (parent.isObjectStoreBucket(path) || - !parent.bucketIsPresentInThePath(path)) { + if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } From 740a9a1416b397c38b5b3f687a0b197320285756 Mon Sep 17 00:00:00 2001 From: Tsz-Wo Nicholas Sze Date: Fri, 19 Jan 2024 11:41:29 -0800 Subject: [PATCH 15/43] HDDS-10126. Remove maxFlushedTransactionsInOneIteration from OzoneManagerDoubleBuffer (#6007) --- .../om/ratis/OzoneManagerDoubleBuffer.java | 83 ++++++------------ .../om/ratis/OzoneManagerStateMachine.java | 69 +++------------ .../OzoneManagerDoubleBufferMetrics.java | 11 +++ .../OzoneManagerStateMachineMetrics.java | 87 ------------------- .../ratis/TestOzoneManagerDoubleBuffer.java | 52 +++++------ ...eManagerDoubleBufferWithDummyResponse.java | 7 +- ...zoneManagerDoubleBufferWithOMResponse.java | 17 ++-- 7 files changed, 86 insertions(+), 240 deletions(-) delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerStateMachineMetrics.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index d1d971f3f4b6..fd7b18bb6810 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -106,11 +106,7 @@ OMClientResponse getResponse() { private final Daemon daemon; private final OMMetadataManager omMetadataManager; - private final AtomicLong flushedTransactionCount = new AtomicLong(0); - private final AtomicLong flushIterations = new AtomicLong(0); private final AtomicBoolean isRunning = new AtomicBoolean(false); - private final OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics; - private long maxFlushedTransactionsInOneIteration; private final Consumer updateLastAppliedIndex; private final boolean isRatisEnabled; @@ -189,6 +185,13 @@ public OzoneManagerDoubleBuffer 
build() { } } + private final OzoneManagerDoubleBufferMetrics metrics = OzoneManagerDoubleBufferMetrics.create(); + + /** Accumulative count (for testing and debug only). */ + private final AtomicLong flushedTransactionCount = new AtomicLong(); + /** The number of flush iterations (for testing and debug only). */ + private final AtomicLong flushIterations = new AtomicLong(); + @SuppressWarnings("checkstyle:parameternumber") private OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, Consumer updateLastAppliedIndex, @@ -203,8 +206,6 @@ private OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, this.unFlushedTransactions = new Semaphore(maxUnFlushedTransactions); this.omMetadataManager = omMetadataManager; this.updateLastAppliedIndex = updateLastAppliedIndex; - this.ozoneManagerDoubleBufferMetrics = - OzoneManagerDoubleBufferMetrics.create(); this.flushNotifier = flushNotifier; isRunning.set(true); // Daemon thread which runs in background and flushes transactions to DB. @@ -354,8 +355,7 @@ private void flushBatch(Queue buffer) throws IOException { () -> omMetadataManager.getStore() .commitBatchOperation(batchOperation)); - ozoneManagerDoubleBufferMetrics.updateFlushTime( - Time.monotonicNow() - startTime); + metrics.updateFlushTime(Time.monotonicNow() - startTime); } // Complete futures first and then do other things. @@ -367,14 +367,10 @@ private void flushBatch(Queue buffer) throws IOException { .forEach(f -> f.complete(null)); } - flushedTransactionCount.addAndGet(flushedTransactionsSize); - flushIterations.incrementAndGet(); - - if (LOG.isDebugEnabled()) { - LOG.debug("Sync iteration {} flushed transactions in this iteration {}", - flushIterations.get(), - flushedTransactionsSize); - } + final long accumulativeCount = flushedTransactionCount.addAndGet(flushedTransactionsSize); + final long flushedIterations = flushIterations.incrementAndGet(); + LOG.debug("Sync iteration: {}, size in this iteration: {}, accumulative count: {}", + flushedIterations, flushedTransactionsSize, accumulativeCount); // Clean up committed transactions. cleanupCache(cleanupEpochs); @@ -386,7 +382,7 @@ private void flushBatch(Queue buffer) throws IOException { updateLastAppliedIndex.accept(lastTransaction); // set metrics. - updateMetrics(flushedTransactionsSize); + metrics.updateFlush(flushedTransactionsSize); } private String addToBatch(Queue buffer, BatchOperation batchOperation) { @@ -492,25 +488,6 @@ private void cleanupCache(Map> cleanupEpochs) { private synchronized void clearReadyBuffer() { readyBuffer.clear(); } - /** - * Update OzoneManagerDoubleBuffer metrics values. - */ - private void updateMetrics(int flushedTransactionsSize) { - ozoneManagerDoubleBufferMetrics.incrTotalNumOfFlushOperations(); - ozoneManagerDoubleBufferMetrics.incrTotalSizeOfFlushedTransactions( - flushedTransactionsSize); - ozoneManagerDoubleBufferMetrics.setAvgFlushTransactionsInOneIteration( - (float) ozoneManagerDoubleBufferMetrics - .getTotalNumOfFlushedTransactions() / - ozoneManagerDoubleBufferMetrics.getTotalNumOfFlushOperations()); - if (maxFlushedTransactionsInOneIteration < flushedTransactionsSize) { - maxFlushedTransactionsInOneIteration = flushedTransactionsSize; - ozoneManagerDoubleBufferMetrics - .setMaxNumberOfTransactionsFlushedInOneIteration( - flushedTransactionsSize); - } - ozoneManagerDoubleBufferMetrics.updateQueueSize(flushedTransactionsSize); - } /** * Stop OM DoubleBuffer flush thread. 
@@ -520,7 +497,7 @@ private void updateMetrics(int flushedTransactionsSize) { @SuppressWarnings("squid:S2142") public void stop() { stopDaemon(); - ozoneManagerDoubleBufferMetrics.unRegister(); + metrics.unRegister(); } @VisibleForTesting @@ -553,22 +530,6 @@ private void terminate(Throwable t, int status, OMResponse omResponse) { ExitUtils.terminate(status, message.toString(), t, LOG); } - /** - * Returns the flushed transaction count to OM DB. - * @return flushedTransactionCount - */ - public long getFlushedTransactionCount() { - return flushedTransactionCount.get(); - } - - /** - * Returns total number of flush iterations run by sync thread. - * @return flushIterations - */ - public long getFlushIterations() { - return flushIterations.get(); - } - /** * Add OmResponseBufferEntry to buffer. */ @@ -623,8 +584,20 @@ private synchronized void swapCurrentAndReadyBuffer() { } @VisibleForTesting - public OzoneManagerDoubleBufferMetrics getOzoneManagerDoubleBufferMetrics() { - return ozoneManagerDoubleBufferMetrics; + OzoneManagerDoubleBufferMetrics getMetrics() { + return metrics; + } + + /** @return the flushed transaction count to OM DB. */ + @VisibleForTesting + long getFlushedTransactionCountForTesting() { + return flushedTransactionCount.get(); + } + + /** @return total number of flush iterations run by sync thread. */ + @VisibleForTesting + long getFlushIterationsForTesting() { + return flushIterations.get(); } @VisibleForTesting diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 45767ec7d074..62e320e1e069 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -20,10 +20,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.ServiceException; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; @@ -37,16 +35,13 @@ import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerStateMachineMetrics; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.om.response.DummyOMClientResponse; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocolPB.OzoneManagerRequestHandler; import org.apache.hadoop.ozone.protocolPB.RequestHandler; import org.apache.hadoop.security.UserGroupInformation; @@ -90,7 +85,6 @@ public class 
OzoneManagerStateMachine extends BaseStateMachine { LoggerFactory.getLogger(OzoneManagerStateMachine.class); private final SimpleStateMachineStorage storage = new SimpleStateMachineStorage(); - private final OzoneManagerRatisServer omRatisServer; private final OzoneManager ozoneManager; private RequestHandler handler; private RaftGroupId raftGroupId; @@ -106,14 +100,10 @@ public class OzoneManagerStateMachine extends BaseStateMachine { /** The last index skipped by {@link #notifyTermIndexUpdated(long, long)}. */ private volatile long lastSkippedIndex = RaftLog.INVALID_LOG_INDEX; - private OzoneManagerStateMachineMetrics metrics; - - public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer, boolean isTracingEnabled) throws IOException { - this.omRatisServer = ratisServer; this.isTracingEnabled = isTracingEnabled; - this.ozoneManager = omRatisServer.getOzoneManager(); + this.ozoneManager = ratisServer.getOzoneManager(); loadSnapshotInfoFromDB(); this.threadPrefix = ozoneManager.getThreadNamePrefix(); @@ -132,7 +122,6 @@ public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer, .setNameFormat(threadPrefix + "InstallSnapshotThread").build(); this.installSnapshotExecutor = HadoopExecutors.newSingleThreadExecutor(installSnapshotThreadFactory); - this.metrics = OzoneManagerStateMachineMetrics.create(); } /** @@ -270,7 +259,14 @@ public TransactionContext startTransaction( ctxt.setException(ioe); return ctxt; } - return handleStartTransactionRequests(raftClientRequest, omRequest); + + return TransactionContext.newBuilder() + .setClientRequest(raftClientRequest) + .setStateMachine(this) + .setServerRole(RaftProtos.RaftPeerRole.LEADER) + .setLogData(raftClientRequest.getMessage().getContent()) + .setStateMachineContext(omRequest) + .build(); } @Override @@ -499,18 +495,9 @@ public CompletableFuture notifyInstallSnapshotFromLeader( LOG.info("Received install snapshot notification from OM leader: {} with " + "term index: {}", leaderNodeId, firstTermIndexInLog); - CompletableFuture future = CompletableFuture.supplyAsync( + return CompletableFuture.supplyAsync( () -> ozoneManager.installSnapshotFromLeader(leaderNodeId), installSnapshotExecutor); - return future; - } - - /** - * Notifies the state machine that the raft peer is no longer leader. - */ - @Override - public void notifyNotLeader(Collection pendingEntries) - throws IOException { } @Override @@ -530,29 +517,10 @@ public void close() { } } - /** - * Handle the RaftClientRequest and return TransactionContext object. - * @param raftClientRequest - * @param omRequest - * @return TransactionContext - */ - private TransactionContext handleStartTransactionRequests( - RaftClientRequest raftClientRequest, OMRequest omRequest) { - - return TransactionContext.newBuilder() - .setClientRequest(raftClientRequest) - .setStateMachine(this) - .setServerRole(RaftProtos.RaftPeerRole.LEADER) - .setLogData(raftClientRequest.getMessage().getContent()) - .setStateMachineContext(omRequest) - .build(); - } - /** * Submits write request to OM and returns the response Message. 
* @param request OMRequest * @return response from OM - * @throws ServiceException */ private OMResponse runCommand(OMRequest request, TermIndex termIndex) { try { @@ -635,23 +603,10 @@ public OzoneManagerRequestHandler getHandler() { return (OzoneManagerRequestHandler) this.handler; } - @VisibleForTesting - public void setRaftGroupId(RaftGroupId raftGroupId) { - this.raftGroupId = raftGroupId; - } - - @VisibleForTesting - public OzoneManagerStateMachineMetrics getMetrics() { - return this.metrics; - } - public void stop() { ozoneManagerDoubleBuffer.stop(); HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS); HadoopExecutors.shutdown(installSnapshotExecutor, LOG, 5, TimeUnit.SECONDS); - if (metrics != null) { - metrics.unRegister(); - } } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java index f77eda081a7d..351f18528931 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java @@ -131,6 +131,17 @@ public void updateQueueSize(long size) { queueSize.add(size); } + public void updateFlush(int flushedTransactionsInOneIteration) { + incrTotalNumOfFlushOperations(); + incrTotalSizeOfFlushedTransactions(flushedTransactionsInOneIteration); + setAvgFlushTransactionsInOneIteration(getTotalNumOfFlushedTransactions() / (float)getTotalNumOfFlushOperations()); + final long max = getMaxNumberOfTransactionsFlushedInOneIteration(); + if (flushedTransactionsInOneIteration > max) { + maxNumberOfTransactionsFlushedInOneIteration.incr(flushedTransactionsInOneIteration - max); + } + updateQueueSize(flushedTransactionsInOneIteration); + } + @VisibleForTesting public MutableStat getQueueSize() { return queueSize; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerStateMachineMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerStateMachineMetrics.java deleted file mode 100644 index 51d26ef7ac0b..000000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerStateMachineMetrics.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis.metrics; - -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.ozone.OzoneConsts; - -/** - * Class which maintains metrics related to OzoneManager state machine. - */ -@Metrics(about = "OzoneManagerStateMachine Metrics", context = OzoneConsts.OZONE) -public final class OzoneManagerStateMachineMetrics implements MetricsSource { - - private static final String SOURCE_NAME = - OzoneManagerStateMachineMetrics.class.getSimpleName(); - private MetricsRegistry registry; - private static OzoneManagerStateMachineMetrics instance; - - @Metric(about = "Number of apply transactions in applyTransactionMap.") - private MutableCounterLong applyTransactionMapSize; - - @Metric(about = "Number of ratis transactions in ratisTransactionMap.") - private MutableCounterLong ratisTransactionMapSize; - - private OzoneManagerStateMachineMetrics() { - registry = new MetricsRegistry(SOURCE_NAME); - } - - public static synchronized OzoneManagerStateMachineMetrics create() { - if (instance != null) { - return instance; - } else { - MetricsSystem ms = DefaultMetricsSystem.instance(); - OzoneManagerStateMachineMetrics metrics = new OzoneManagerStateMachineMetrics(); - instance = ms.register(SOURCE_NAME, "OzoneManager StateMachine Metrics", - metrics); - return instance; - } - } - - public void updateApplyTransactionMapSize(long size) { - this.applyTransactionMapSize.incr( - Math.negateExact(applyTransactionMapSize.value()) + size); - } - - public void updateRatisTransactionMapSize(long size) { - this.ratisTransactionMapSize.incr( - Math.negateExact(ratisTransactionMapSize.value()) + size); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } - - public void getMetrics(MetricsCollector collector, boolean all) { - MetricsRecordBuilder rb = collector.addRecord(SOURCE_NAME); - - applyTransactionMapSize.snapshot(rb, all); - ratisTransactionMapSize.snapshot(rb, all); - rb.endRecord(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 202234a0d436..21205c4dc334 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -81,16 +81,13 @@ class TestOzoneManagerDoubleBuffer { private OzoneManagerDoubleBuffer doubleBuffer; private OzoneManager ozoneManager; - private OmMetadataManagerImpl omMetadataManager; private S3SecretLockedManager secretManager; - private 
CreateSnapshotResponse snapshotResponse1 = - mock(CreateSnapshotResponse.class); - private CreateSnapshotResponse snapshotResponse2 = - mock(CreateSnapshotResponse.class); - private OMResponse omKeyResponse = mock(OMResponse.class); - private OMResponse omBucketResponse = mock(OMResponse.class); - private OMResponse omSnapshotResponse1 = mock(OMResponse.class); - private OMResponse omSnapshotResponse2 = mock(OMResponse.class); + private final CreateSnapshotResponse snapshotResponse1 = mock(CreateSnapshotResponse.class); + private final CreateSnapshotResponse snapshotResponse2 = mock(CreateSnapshotResponse.class); + private final OMResponse omKeyResponse = mock(OMResponse.class); + private final OMResponse omBucketResponse = mock(OMResponse.class); + private final OMResponse omSnapshotResponse1 = mock(OMResponse.class); + private final OMResponse omSnapshotResponse2 = mock(OMResponse.class); private static OMClientResponse omKeyCreateResponse = mock(OMKeyCreateResponse.class); private static OMClientResponse omBucketCreateResponse = @@ -104,10 +101,6 @@ class TestOzoneManagerDoubleBuffer { private OzoneManagerDoubleBuffer.FlushNotifier flushNotifier; private OzoneManagerDoubleBuffer.FlushNotifier spyFlushNotifier; - private static String userPrincipalId1 = "alice@EXAMPLE.COM"; - private static String userPrincipalId2 = "messi@EXAMPLE.COM"; - private static String userPrincipalId3 = "ronaldo@EXAMPLE.COM"; - @BeforeEach public void setup() throws IOException { OMMetrics omMetrics = OMMetrics.create(); @@ -117,8 +110,8 @@ public void setup() throws IOException { ozoneManager = mock(OzoneManager.class); when(ozoneManager.getMetrics()).thenReturn(omMetrics); - omMetadataManager = - new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); + + final OmMetadataManagerImpl omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L); AuditLogger auditLogger = mock(AuditLogger.class); @@ -244,12 +237,10 @@ public void testOzoneManagerDoubleBuffer( // Flush the current buffer. doubleBuffer.flushCurrentBuffer(); - assertEquals(expectedFlushCounts, doubleBuffer.getFlushIterations()); - assertEquals(expectedFlushedTransactionCount, - doubleBuffer.getFlushedTransactionCount()); + assertEquals(expectedFlushCounts, doubleBuffer.getFlushIterationsForTesting()); + assertEquals(expectedFlushedTransactionCount, doubleBuffer.getFlushedTransactionCountForTesting()); - OzoneManagerDoubleBufferMetrics bufferMetrics = - doubleBuffer.getOzoneManagerDoubleBufferMetrics(); + final OzoneManagerDoubleBufferMetrics bufferMetrics = doubleBuffer.getMetrics(); assertEquals(expectedFlushCountsInMetric, bufferMetrics.getTotalNumOfFlushOperations()); @@ -259,6 +250,9 @@ public void testOzoneManagerDoubleBuffer( bufferMetrics.getMaxNumberOfTransactionsFlushedInOneIteration()); assertEquals(expectedAvgFlushTransactionsInMetric, bufferMetrics.getAvgFlushTransactionsInOneIteration(), 0.001); + + // reset max + bufferMetrics.setMaxNumberOfTransactionsFlushedInOneIteration(0); } @Test @@ -318,6 +312,10 @@ public void testAwaitFlush() throws Exception { @Test public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { + final String userPrincipalId1 = "alice@EXAMPLE.COM"; + final String userPrincipalId2 = "messi@EXAMPLE.COM"; + final String userPrincipalId3 = "ronaldo@EXAMPLE.COM"; + // Create a secret for "alice". // This effectively makes alice an S3 admin. 
KerberosName.setRuleMechanism(DEFAULT_MECHANISM); @@ -338,9 +336,9 @@ public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { doubleBuffer.stopDaemon(); // Create 3 secrets and store them in the cache and double buffer. - processSuccessSecretRequest(userPrincipalId1, 1, true); - processSuccessSecretRequest(userPrincipalId2, 2, true); - processSuccessSecretRequest(userPrincipalId3, 3, true); + processSuccessSecretRequest(userPrincipalId1, 1); + processSuccessSecretRequest(userPrincipalId2, 2); + processSuccessSecretRequest(userPrincipalId3, 3); S3SecretCache cache = secretManager.cache(); // Check if all the three secrets are cached. @@ -357,8 +355,7 @@ public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { assertNull(cache.get(userPrincipalId1)); } finally { // cleanup metrics - OzoneManagerDoubleBufferMetrics metrics = - doubleBuffer.getOzoneManagerDoubleBufferMetrics(); + final OzoneManagerDoubleBufferMetrics metrics = doubleBuffer.getMetrics(); metrics.setMaxNumberOfTransactionsFlushedInOneIteration(0); metrics.setAvgFlushTransactionsInOneIteration(0); metrics.incrTotalSizeOfFlushedTransactions( @@ -368,10 +365,7 @@ public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { } } - private void processSuccessSecretRequest( - String userPrincipalId, - int txLogIndex, - boolean shouldHaveResponse) throws IOException { + private void processSuccessSecretRequest(String userPrincipalId, int txLogIndex) throws IOException { S3GetSecretRequest s3GetSecretRequest = new S3GetSecretRequest( new S3GetSecretRequest( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index 635f86f3ab5e..ee2e9043a362 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -96,8 +96,7 @@ public void stop() { public void testDoubleBufferWithDummyResponse() throws Exception { String volumeName = UUID.randomUUID().toString(); int bucketCount = 100; - OzoneManagerDoubleBufferMetrics metrics = - doubleBuffer.getOzoneManagerDoubleBufferMetrics(); + final OzoneManagerDoubleBufferMetrics metrics = doubleBuffer.getMetrics(); // As we have not flushed/added any transactions, all metrics should have // value zero. 
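The patch above folds the old updateMetrics() logic into OzoneManagerDoubleBufferMetrics.updateFlush(), which the dummy-response test below asserts against. A minimal standalone sketch of the same bookkeeping, using hypothetical names (FlushStats, record) rather than the real metrics class: the average is re-derived from the two cumulative totals on each flush, and the maximum is advanced by the positive delta because counter-style metrics such as MutableCounterLong can only be incremented.

final class FlushStats {
  private long totalFlushOperations;
  private long totalFlushedTransactions;
  private long maxFlushedInOneIteration;
  private float avgFlushedPerIteration;

  // Mirrors the arithmetic of updateFlush(): cumulative totals drive the average,
  // and the max only ever moves forward by the difference to the new peak.
  void record(int flushedInThisIteration) {
    totalFlushOperations++;
    totalFlushedTransactions += flushedInThisIteration;
    avgFlushedPerIteration = totalFlushedTransactions / (float) totalFlushOperations;
    if (flushedInThisIteration > maxFlushedInOneIteration) {
      maxFlushedInOneIteration += flushedInThisIteration - maxFlushedInOneIteration;
    }
  }

  public static void main(String[] args) {
    FlushStats stats = new FlushStats();
    stats.record(10);
    stats.record(4);
    // Two flush operations, 14 transactions in total, average 7.0, maximum 10.
    System.out.println(stats.totalFlushOperations + " " + stats.totalFlushedTransactions
        + " " + stats.avgFlushedPerIteration + " " + stats.maxFlushedInOneIteration);
  }
}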
@@ -113,11 +112,11 @@ public void testDoubleBufferWithDummyResponse() throws Exception { 100, 60000); assertThat(metrics.getTotalNumOfFlushOperations()).isGreaterThan(0); - assertEquals(bucketCount, doubleBuffer.getFlushedTransactionCount()); + assertEquals(bucketCount, doubleBuffer.getFlushedTransactionCountForTesting()); assertThat(metrics.getMaxNumberOfTransactionsFlushedInOneIteration()).isGreaterThan(0); assertEquals(bucketCount, omMetadataManager.countRowsInTable( omMetadataManager.getBucketTable())); - assertThat(doubleBuffer.getFlushIterations()).isGreaterThan(0); + assertThat(doubleBuffer.getFlushIterationsForTesting()).isGreaterThan(0); assertThat(metrics.getFlushTime().lastStat().numSamples()).isGreaterThan(0); assertThat(metrics.getAvgFlushTransactionsInOneIteration()).isGreaterThan(0); assertEquals(bucketCount, (long) metrics.getQueueSize().lastStat().total()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 7178868dcf3f..006777141a6c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -173,9 +173,9 @@ public void testDoubleBufferWithMixOfTransactions() throws Exception { final int deleteCount = 5; // We are doing +1 for volume transaction. - GenericTestUtils.waitFor(() -> - doubleBuffer.getFlushedTransactionCount() == - (bucketCount + deleteCount + 1), 100, 120000); + GenericTestUtils.waitFor( + () -> doubleBuffer.getFlushedTransactionCountForTesting() == bucketCount + deleteCount + 1, + 100, 120000); assertEquals(1, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); @@ -251,8 +251,9 @@ public void testDoubleBufferWithMixOfTransactionsParallel() throws Exception { final int deleteCount = 10; // We are doing +1 for volume transaction. - GenericTestUtils.waitFor(() -> doubleBuffer.getFlushedTransactionCount() - == (bucketCount + deleteCount + 2), 100, 120000); + GenericTestUtils.waitFor( + () -> doubleBuffer.getFlushedTransactionCountForTesting() == bucketCount + deleteCount + 2, + 100, 120000); assertEquals(2, omMetadataManager.countRowsInTable( omMetadataManager.getVolumeTable())); @@ -398,8 +399,8 @@ private void testDoubleBuffer(int volumeCount, int bucketsPerVolume) int expectedBuckets = bucketsPerVolume * volumeCount; long expectedTransactions = volumeCount + expectedBuckets; - GenericTestUtils.waitFor(() -> - expectedTransactions == doubleBuffer.getFlushedTransactionCount(), + GenericTestUtils.waitFor( + () -> expectedTransactions == doubleBuffer.getFlushedTransactionCountForTesting(), 100, volumeCount * 500); GenericTestUtils.waitFor(() -> @@ -411,7 +412,7 @@ private void testDoubleBuffer(int volumeCount, int bucketsPerVolume) assertRowCount(expectedBuckets, omMetadataManager.getBucketTable()), 300, volumeCount * 300); - assertThat(doubleBuffer.getFlushIterations()).isGreaterThan(0); + assertThat(doubleBuffer.getFlushIterationsForTesting()).isGreaterThan(0); } private boolean assertRowCount(int expected, Table table) { From b493cdb9faa2d48228686c0d605dcb8cf5362933 Mon Sep 17 00:00:00 2001 From: Ritesh H Shukla Date: Fri, 19 Jan 2024 13:08:27 -0800 Subject: [PATCH 16/43] HDDS-10154. 
isKeyPresentInTable should use iterator constructor with prefix (#6022) --- .../apache/hadoop/ozone/om/OmMetadataManagerImpl.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 0f298577237a..1f8c3ba3cd97 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -1032,10 +1032,13 @@ private boolean isKeyPresentInTableCache(String keyPrefix, */ private boolean isKeyPresentInTable(String keyPrefix, Table table) - throws IOException { + throws IOException { try (TableIterator> - keyIter = table.iterator()) { - KeyValue kv = keyIter.seek(keyPrefix); + keyIter = table.iterator(keyPrefix)) { + KeyValue kv = null; + if (keyIter.hasNext()) { + kv = keyIter.next(); + } // Iterate through all the entries in the table which start with // the current bucket's prefix. From 7550a9cb75cdc64db44e5b6e1b773e953a5a5730 Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Sat, 20 Jan 2024 13:25:18 +0530 Subject: [PATCH 17/43] HDDS-9968. Avoid using Files.createTempDirectory in AbstractReconSqlDBTest (#6034) --- .../hadoop/ozone/recon/ReconTestInjector.java | 2 +- .../recon/api/TestContainerStateCounts.java | 4 ++++ .../hadoop/ozone/recon/api/TestEndpoints.java | 4 ++++ .../ozone/recon/api/TestOmDBInsightEndPoint.java | 4 ++++ .../ozone/recon/api/TestTaskStatusService.java | 4 ++++ .../recon/fsck/TestContainerHealthTask.java | 4 ++++ .../persistence/AbstractReconSqlDBTest.java | 16 ++++++++++++---- .../TestReconInternalSchemaDefinition.java | 4 ++++ .../TestReconWithDifferentSqlDBs.java | 2 +- .../recon/persistence/TestSqlSchemaSetup.java | 4 ++++ .../persistence/TestStatsSchemaDefinition.java | 4 ++++ .../TestUtilizationSchemaDefinition.java | 4 ++++ .../recon/tasks/TestContainerSizeCountTask.java | 4 ++++ .../ozone/recon/tasks/TestFileSizeCountTask.java | 4 ++++ .../recon/tasks/TestOmTableInsightTask.java | 4 ++++ .../recon/tasks/TestReconTaskControllerImpl.java | 4 ++++ 16 files changed, 66 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java index 6800604248b3..f295f4d1ff0e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/ReconTestInjector.java @@ -203,7 +203,7 @@ protected void configure() { } if (withReconSqlDb) { - reconSqlDB = new AbstractReconSqlDBTest(); + reconSqlDB = new AbstractReconSqlDBTest(tmpDir.toPath()); modules.addAll(reconSqlDB.getReconSqlDBModules()); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerStateCounts.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerStateCounts.java index db6693ea5345..08d1a73dc057 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerStateCounts.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerStateCounts.java @@ -74,6 +74,10 @@ public class TestContainerStateCounts extends AbstractReconSqlDBTest { private static final int NUM_DELETED_CONTAINERS = 
4; private static final int NUM_CLOSED_CONTAINERS = 3; + public TestContainerStateCounts() { + super(); + } + @BeforeEach public void setUp() throws Exception { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 6bd0cba4df17..26d5044a7528 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -189,6 +189,10 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private PipelineManager pipelineManager; private ReconPipelineManager reconPipelineManager; + public TestEndpoints() { + super(); + } + private void initializeInjector() throws Exception { reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 2c6253cc781e..85a6d9ec98ed 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -92,6 +92,10 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest { private OzoneConfiguration ozoneConfiguration; private Set generatedIds = new HashSet<>(); + public TestOmDBInsightEndPoint() { + super(); + } + private long generateUniqueRandomLong() { long newValue; do { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java index 5c071d353b5f..741dcf3be4cb 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java @@ -39,6 +39,10 @@ public class TestTaskStatusService extends AbstractReconSqlDBTest { private TaskStatusService taskStatusService; + public TestTaskStatusService() { + super(); + } + @BeforeEach public void setUp() { Injector parentInjector = getInjector(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 358799cc0330..2200cf3fca24 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -69,6 +69,10 @@ */ public class TestContainerHealthTask extends AbstractReconSqlDBTest { + public TestContainerHealthTask() { + super(); + } + @SuppressWarnings("checkstyle:methodlength") @Test public void testRun() throws Exception { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java index d30a6232712b..d007fbb1cf7b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java +++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java @@ -46,21 +46,24 @@ import com.google.inject.Injector; import com.google.inject.Module; import com.google.inject.Provider; +import org.junit.jupiter.api.io.TempDir; +import org.springframework.util.FileSystemUtils; /** * Class that provides a Recon SQL DB with all the tables created, and APIs * to access the DAOs easily. */ public class AbstractReconSqlDBTest { - private Path temporaryFolder; - private Injector injector; private DSLContext dslContext; private Provider configurationProvider; public AbstractReconSqlDBTest() { + } + + public void init(Path temporaryFolder) { try { - temporaryFolder = Files.createTempDirectory("JunitConfig"); + FileSystemUtils.deleteRecursively(temporaryFolder.resolve("Config")); configurationProvider = new DerbyDataSourceConfigurationProvider(Files.createDirectory( temporaryFolder.resolve("Config")).toFile()); @@ -69,12 +72,17 @@ public AbstractReconSqlDBTest() { } } + public AbstractReconSqlDBTest(Path temporaryFolder) { + init(temporaryFolder); + } + protected AbstractReconSqlDBTest(Provider provider) { configurationProvider = provider; } @BeforeEach - public void createReconSchemaForTest() throws IOException { + public void createReconSchemaForTest(@TempDir Path temporaryFolder) throws IOException { + init(temporaryFolder); injector = Guice.createInjector(getReconSqlDBModules()); dslContext = DSL.using(new DefaultConfiguration().set( injector.getInstance(DataSource.class))); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java index 5570484c0f95..1230a9ba543e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java @@ -40,6 +40,10 @@ */ public class TestReconInternalSchemaDefinition extends AbstractReconSqlDBTest { + public TestReconInternalSchemaDefinition() { + super(); + } + @Test public void testSchemaCreated() throws Exception { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java index b4cf76895723..180536884683 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java @@ -63,7 +63,7 @@ public static Stream parametersSource() throws IOException { public void testSchemaSetup(Provider provider) throws SQLException, IOException { AbstractReconSqlDBTest reconSqlDB = new AbstractReconSqlDBTest(provider); - reconSqlDB.createReconSchemaForTest(); + reconSqlDB.createReconSchemaForTest(temporaryFolder); assertNotNull(reconSqlDB.getInjector()); assertNotNull(reconSqlDB.getConfiguration()); assertNotNull(reconSqlDB.getDslContext()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java index 498faa3d4316..4c6ae91998c7 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java @@ -33,6 +33,10 @@ */ public class TestSqlSchemaSetup extends AbstractReconSqlDBTest { + public TestSqlSchemaSetup() { + super(); + } + /** * Make sure schema was created correctly. * @throws SQLException diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java index 6407e64bf154..36e7edc56670 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java @@ -40,6 +40,10 @@ */ public class TestStatsSchemaDefinition extends AbstractReconSqlDBTest { + public TestStatsSchemaDefinition() { + super(); + } + @Test public void testIfStatsSchemaCreated() throws Exception { Connection connection = getConnection(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java index 1c692b6a4887..7745aac9d3fd 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java @@ -52,6 +52,10 @@ */ public class TestUtilizationSchemaDefinition extends AbstractReconSqlDBTest { + public TestUtilizationSchemaDefinition() { + super(); + } + @Test public void testReconSchemaCreated() throws Exception { Connection connection = getConnection(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java index 9b5dc3bc9bca..eff330a796c9 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java @@ -54,6 +54,10 @@ public class TestContainerSizeCountTask extends AbstractReconSqlDBTest { private ContainerSizeCountTask task; private DSLContext dslContext; + public TestContainerSizeCountTask() { + super(); + } + @BeforeEach public void setUp() { utilizationSchemaDefinition = diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java index 2ded437301c7..badbb37fac5f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java @@ -60,6 +60,10 @@ public class TestFileSizeCountTask extends AbstractReconSqlDBTest { private FileSizeCountTask fileSizeCountTask; private DSLContext dslContext; + public TestFileSizeCountTask() { + super(); + } + @BeforeEach public void setUp() { fileCountBySizeDao = getDao(FileCountBySizeDao.class); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index 9c0193e5020b..df014f4276fa 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -77,6 +77,10 @@ public class TestOmTableInsightTask extends AbstractReconSqlDBTest { private boolean isSetupDone = false; private ReconOMMetadataManager reconOMMetadataManager; + public TestOmTableInsightTask() { + super(); + } + private void initializeInjector() throws IOException { reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java index e6cdae9b85f3..4d2203dc5120 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java @@ -47,6 +47,10 @@ public class TestReconTaskControllerImpl extends AbstractReconSqlDBTest { private ReconTaskController reconTaskController; private ReconTaskStatusDao reconTaskStatusDao; + public TestReconTaskControllerImpl() { + super(); + } + @BeforeEach public void setUp() { OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); From 0eadbfd23d105976fdca4f3d18d6641e25c2d1d7 Mon Sep 17 00:00:00 2001 From: Hongbing Wang <284734261@qq.com> Date: Sat, 20 Jan 2024 17:30:01 +0800 Subject: [PATCH 18/43] HDDS-10162. Fix metric names in OMPerformanceMetrics (#6035) --- .../apache/hadoop/ozone/om/OMPerformanceMetrics.java | 10 +++++----- .../ozone/protocolPB/OzoneManagerRequestHandler.java | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java index 7ac97a538cae..d118e2f4ecc9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java @@ -108,10 +108,10 @@ public static void unregister() { private MutableRate createRatisRequestLatencyNs; @Metric(about = "Convert ratis response to om response nano seconds") - private MutableRate createOmResoonseLatencyNs; + private MutableRate createOmResponseLatencyNs; @Metric(about = "Ratis local command execution latency in nano seconds") - private MutableRate validateAndUpdateCacneLatencyNs; + private MutableRate validateAndUpdateCacheLatencyNs; @Metric(about = "ACLs check latency in listKeys") private MutableRate listKeysAclCheckLatencyNs; @@ -209,11 +209,11 @@ public MutableRate getCreateRatisRequestLatencyNs() { } public MutableRate getCreateOmResponseLatencyNs() { - return createOmResoonseLatencyNs; + return createOmResponseLatencyNs; } - public MutableRate getValidateAndUpdateCacneLatencyNs() { - return validateAndUpdateCacneLatencyNs; + public MutableRate getValidateAndUpdateCacheLatencyNs() { + return validateAndUpdateCacheLatencyNs; } public MutableRate getListKeysAclCheckLatencyNs() { diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index d96637b46172..2795f3716db8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -397,7 +397,7 @@ public OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIn OMClientRequest omClientRequest = OzoneManagerRatisUtils.createClientRequest(omRequest, impl); return captureLatencyNs( - impl.getPerfMetrics().getValidateAndUpdateCacneLatencyNs(), + impl.getPerfMetrics().getValidateAndUpdateCacheLatencyNs(), () -> { OMClientResponse omClientResponse = omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex); From 05c7329d8d8dfcccea63f67d6d4f6b828140804a Mon Sep 17 00:00:00 2001 From: Tsz-Wo Nicholas Sze Date: Sat, 20 Jan 2024 03:08:00 -0800 Subject: [PATCH 19/43] HDDS-10171. Fix checkstyle:parameternumber in OzoneManagerDoubleBuffer. (#6038) --- .../om/ratis/OzoneManagerDoubleBuffer.java | 122 ++++++++++-------- .../om/ratis/OzoneManagerStateMachine.java | 12 +- ...ManagerProtocolServerSideTranslatorPB.java | 65 ++++------ .../ratis/TestOzoneManagerDoubleBuffer.java | 16 +-- ...eManagerDoubleBufferWithDummyResponse.java | 4 +- ...zoneManagerDoubleBufferWithOMResponse.java | 4 +- 6 files changed, 105 insertions(+), 118 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index fd7b18bb6810..2c1276c43e73 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.ratis; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -53,6 +52,7 @@ import org.apache.hadoop.util.Time; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.ExitUtils; +import org.apache.ratis.util.Preconditions; import org.apache.ratis.util.function.CheckedRunnable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -94,32 +94,11 @@ OMClientResponse getResponse() { } } - // Taken unbounded queue, if sync thread is taking too long time, we - // might end up taking huge memory to add entries to the buffer. - // TODO: We can avoid this using unbounded queue and use queue with - // capacity, if queue is full we can wait for sync to be completed to - // add entries. But in this also we might block rpc handlers, as we - // clear entries after sync. Or we can come up with a good approach to - // solve this. 
- private Queue currentBuffer; - private Queue readyBuffer; - - private final Daemon daemon; - private final OMMetadataManager omMetadataManager; - private final AtomicBoolean isRunning = new AtomicBoolean(false); - - private final Consumer updateLastAppliedIndex; - private final boolean isRatisEnabled; - private final boolean isTracingEnabled; - private final Semaphore unFlushedTransactions; - private final FlushNotifier flushNotifier; - private final S3SecretManager s3SecretManager; - /** * Builder for creating OzoneManagerDoubleBuffer. */ - public static class Builder { - private OMMetadataManager mm; + public static final class Builder { + private OMMetadataManager omMetadataManager; private Consumer updateLastAppliedIndex = termIndex -> { }; private boolean isRatisEnabled = false; private boolean isTracingEnabled = false; @@ -128,9 +107,10 @@ public static class Builder { private S3SecretManager s3SecretManager; private String threadPrefix = ""; + private Builder() { } - public Builder setOmMetadataManager(OMMetadataManager omm) { - this.mm = omm; + public Builder setOmMetadataManager(OMMetadataManager omMetadataManager) { + this.omMetadataManager = omMetadataManager; return this; } @@ -149,8 +129,8 @@ public Builder enableTracing(boolean enableTracing) { return this; } - public Builder setmaxUnFlushedTransactionCount(int size) { - this.maxUnFlushedTransactionCount = size; + public Builder setMaxUnFlushedTransactionCount(int maxUnFlushedTransactionCount) { + this.maxUnFlushedTransactionCount = maxUnFlushedTransactionCount; return this; } @@ -170,21 +150,48 @@ public Builder setS3SecretManager(S3SecretManager s3SecretManager) { } public OzoneManagerDoubleBuffer build() { - if (isRatisEnabled) { - Preconditions.checkState(maxUnFlushedTransactionCount > 0L, - "when ratis is enable, maxUnFlushedTransactions " + - "should be bigger than 0"); - } + Preconditions.assertTrue(isRatisEnabled == maxUnFlushedTransactionCount > 0L, + () -> "Ratis is " + (isRatisEnabled ? "enabled" : "disabled") + + " but maxUnFlushedTransactionCount = " + maxUnFlushedTransactionCount); if (flushNotifier == null) { flushNotifier = new FlushNotifier(); } - return new OzoneManagerDoubleBuffer(mm, updateLastAppliedIndex, isRatisEnabled, - isTracingEnabled, maxUnFlushedTransactionCount, - flushNotifier, s3SecretManager, threadPrefix); + return new OzoneManagerDoubleBuffer(this); } } + public static Builder newBuilder() { + return new Builder(); + } + + static Semaphore newSemaphore(int permits) { + return permits > 0 ? new Semaphore(permits) : null; + } + + private Queue currentBuffer; + private Queue readyBuffer; + /** + * Limit the number of un-flushed transactions for {@link OzoneManagerStateMachine}. + * It is set to null if ratis is disabled; see {@link #isRatisEnabled()}. + */ + private final Semaphore unFlushedTransactions; + + /** To flush the buffers. */ + private final Daemon daemon; + /** Is the {@link #daemon} running? */ + private final AtomicBoolean isRunning = new AtomicBoolean(false); + /** Notify flush operations are completed by the {@link #daemon}. */ + private final FlushNotifier flushNotifier; + + private final OMMetadataManager omMetadataManager; + + private final Consumer updateLastAppliedIndex; + + private final S3SecretManager s3SecretManager; + + private final boolean isTracingEnabled; + private final OzoneManagerDoubleBufferMetrics metrics = OzoneManagerDoubleBufferMetrics.create(); /** Accumulative count (for testing and debug only). 
*/ @@ -192,27 +199,27 @@ public OzoneManagerDoubleBuffer build() { /** The number of flush iterations (for testing and debug only). */ private final AtomicLong flushIterations = new AtomicLong(); - @SuppressWarnings("checkstyle:parameternumber") - private OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, - Consumer updateLastAppliedIndex, - boolean isRatisEnabled, boolean isTracingEnabled, - int maxUnFlushedTransactions, - FlushNotifier flushNotifier, S3SecretManager s3SecretManager, - String threadPrefix) { + private OzoneManagerDoubleBuffer(Builder b) { this.currentBuffer = new ConcurrentLinkedQueue<>(); this.readyBuffer = new ConcurrentLinkedQueue<>(); - this.isRatisEnabled = isRatisEnabled; - this.isTracingEnabled = isTracingEnabled; - this.unFlushedTransactions = new Semaphore(maxUnFlushedTransactions); - this.omMetadataManager = omMetadataManager; - this.updateLastAppliedIndex = updateLastAppliedIndex; - this.flushNotifier = flushNotifier; + + this.omMetadataManager = b.omMetadataManager; + this.s3SecretManager = b.s3SecretManager; + this.updateLastAppliedIndex = b.updateLastAppliedIndex; + this.flushNotifier = b.flushNotifier; + this.unFlushedTransactions = newSemaphore(b.maxUnFlushedTransactionCount); + + this.isTracingEnabled = b.isTracingEnabled; + isRunning.set(true); // Daemon thread which runs in background and flushes transactions to DB. daemon = new Daemon(this::flushTransactions); - daemon.setName(threadPrefix + "OMDoubleBufferFlushThread"); + daemon.setName(b.threadPrefix + "OMDoubleBufferFlushThread"); daemon.start(); - this.s3SecretManager = s3SecretManager; + } + + private boolean isRatisEnabled() { + return unFlushedTransactions != null; } /** @@ -220,6 +227,7 @@ private OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, * blocking until all are available, or the thread is interrupted. */ public void acquireUnFlushedTransactions(int n) throws InterruptedException { + Preconditions.assertTrue(isRatisEnabled(), "Ratis is not enabled"); unFlushedTransactions.acquire(n); } @@ -227,7 +235,7 @@ public void acquireUnFlushedTransactions(int n) throws InterruptedException { * Releases the given number of permits, * returning them to the unFlushedTransactions. */ - public void releaseUnFlushedTransactions(int n) { + void releaseUnFlushedTransactions(int n) { unFlushedTransactions.release(n); } @@ -360,7 +368,7 @@ private void flushBatch(Queue buffer) throws IOException { // Complete futures first and then do other things. // So that handler threads will be released. - if (!isRatisEnabled) { + if (!isRatisEnabled()) { buffer.stream() .map(Entry::getResponse) .map(OMClientResponse::getFlushFuture) @@ -375,7 +383,7 @@ private void flushBatch(Queue buffer) throws IOException { // Clean up committed transactions. cleanupCache(cleanupEpochs); - if (isRatisEnabled) { + if (isRatisEnabled()) { releaseUnFlushedTransactions(flushedTransactionsSize); } // update the last updated index in OzoneManagerStateMachine. 
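The buffer queues themselves remain unbounded; backpressure comes from the unFlushedTransactions semaphore, which newSemaphore() only creates when a positive permit count is configured, and isRatisEnabled() is now derived from whether that semaphore exists. A simplified sketch of the permit flow under those assumptions, with hypothetical names (BoundedBuffer, submit, flush) and without the flush daemon, batching, futures, or metrics of the real class:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Semaphore;

final class BoundedBuffer<T> {
  private final Queue<T> queue = new ConcurrentLinkedQueue<>();
  // Null when Ratis is disabled, mirroring newSemaphore(permits) above.
  private final Semaphore permits;

  BoundedBuffer(int maxUnFlushed) {
    this.permits = maxUnFlushed > 0 ? new Semaphore(maxUnFlushed) : null;
  }

  boolean isRatisEnabled() {
    return permits != null;
  }

  // Write path: with Ratis enabled, block once maxUnFlushed entries are pending,
  // so the unbounded queue cannot grow without limit.
  void submit(T entry) throws InterruptedException {
    if (isRatisEnabled()) {
      permits.acquire();
    }
    queue.add(entry);
  }

  // Flush path: drain whatever is queued, then hand the permits back.
  int flush() {
    int flushed = 0;
    while (queue.poll() != null) {
      flushed++;
    }
    if (isRatisEnabled()) {
      permits.release(flushed);
    }
    return flushed;
  }
}

In the real class the same pairing is visible as acquireUnFlushedTransactions() and the releaseUnFlushedTransactions(flushedTransactionsSize) call made after the batch commits.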
@@ -537,7 +545,7 @@ public synchronized void add(OMClientResponse response, TermIndex termIndex) { currentBuffer.add(new Entry(termIndex, response)); notify(); - if (!isRatisEnabled) { + if (!isRatisEnabled()) { response.setFlushFuture(new CompletableFuture<>()); } } @@ -639,7 +647,7 @@ private CompletableFuture await() { } private int complete() { - Preconditions.checkState(future.complete(count)); + Preconditions.assertTrue(future.complete(count)); return future.join(); } } @@ -654,7 +662,7 @@ synchronized CompletableFuture await() { final int flush = flushCount + 2; LOG.debug("await flush {}", flush); final Entry entry = flushFutures.computeIfAbsent(flush, key -> new Entry()); - Preconditions.checkState(flushFutures.size() <= 2); + Preconditions.assertTrue(flushFutures.size() <= 2); return entry.await(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 62e320e1e069..90fcba40f5d0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -392,11 +392,11 @@ public CompletableFuture query(Message request) { public synchronized void pause() { LOG.info("OzoneManagerStateMachine is pausing"); statePausedCount.incrementAndGet(); - if (getLifeCycleState() == LifeCycle.State.PAUSED) { + final LifeCycle.State state = getLifeCycleState(); + if (state == LifeCycle.State.PAUSED) { return; } - final LifeCycle lc = getLifeCycle(); - if (lc.getCurrentState() != LifeCycle.State.NEW) { + if (state != LifeCycle.State.NEW) { getLifeCycle().transition(LifeCycle.State.PAUSING); getLifeCycle().transition(LifeCycle.State.PAUSED); } @@ -423,13 +423,13 @@ public synchronized void unpause(long newLastAppliedSnaphsotIndex, } public OzoneManagerDoubleBuffer buildDoubleBufferForRatis() { - int maxUnflushedTransactionSize = ozoneManager.getConfiguration() + final int maxUnFlushedTransactionCount = ozoneManager.getConfiguration() .getInt(OMConfigKeys.OZONE_OM_UNFLUSHED_TRANSACTION_MAX_COUNT, OMConfigKeys.OZONE_OM_UNFLUSHED_TRANSACTION_MAX_COUNT_DEFAULT); - return new OzoneManagerDoubleBuffer.Builder() + return OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(ozoneManager.getMetadataManager()) .setUpdateLastAppliedIndex(this::updateLastAppliedTermIndex) - .setmaxUnFlushedTransactionCount(maxUnflushedTransactionSize) + .setMaxUnFlushedTransactionCount(maxUnFlushedTransactionCount) .setThreadPrefix(threadPrefix) .setS3SecretManager(ozoneManager.getS3SecretManager()) .enableRatis(true) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index a4ec2c2f200a..cf9bb4f0bbce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -62,21 +62,20 @@ import org.slf4j.LoggerFactory; /** - * This class is the server-side translator that forwards requests received on - * {@link OzoneManagerProtocolPB} - * to the OzoneManagerService server implementation. 
+ * This is the server-side translator that forwards requests received + * from {@link OzoneManagerProtocolPB} to {@link OzoneManager}. */ -public class OzoneManagerProtocolServerSideTranslatorPB implements - OzoneManagerProtocolPB { - private static final Logger LOG = LoggerFactory - .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class); - private static final String OM_REQUESTS_PACKAGE = - "org.apache.hadoop.ozone"; +public class OzoneManagerProtocolServerSideTranslatorPB implements OzoneManagerProtocolPB { + private static final Logger LOG = LoggerFactory .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class); + private static final String OM_REQUESTS_PACKAGE = "org.apache.hadoop.ozone"; private final OzoneManagerRatisServer omRatisServer; private final RequestHandler handler; - private final boolean isRatisEnabled; private final OzoneManager ozoneManager; + /** + * Only used to handle write requests when ratis is disabled. + * When ratis is enabled, write requests are handled by the state machine. + */ private final OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private final AtomicLong transactionIndex; private final OzoneProtocolMessageDispatcher("OzoneProtocol", metrics, LOG, OMPBHelper::processForDebug, OMPBHelper::processForDebug); + // TODO: make this injectable for testing... - requestValidations = - new RequestValidations() - .fromPackage(OM_REQUESTS_PACKAGE) - .withinContext( - ValidationContext.of(ozoneManager.getVersionManager(), - ozoneManager.getMetadataManager())) - .load(); + this.requestValidations = new RequestValidations() + .fromPackage(OM_REQUESTS_PACKAGE) + .withinContext(ValidationContext.of(ozoneManager.getVersionManager(), ozoneManager.getMetadataManager())) + .load(); + } + private boolean isRatisEnabled() { + return ozoneManagerDoubleBuffer == null; } /** @@ -197,7 +188,7 @@ private OMResponse internalProcessRequest(OMRequest request) throws } } - if (!isRatisEnabled) { + if (!isRatisEnabled()) { return submitRequestDirectlyToOM(request); } @@ -320,13 +311,7 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { return omClientResponse.getOMResponse(); } - /** - * Create OMResponse from the specified OMRequest and exception. - * - * @param omRequest - * @param exception - * @return OMResponse - */ + /** @return an {@link OMResponse} from the given {@link OMRequest} and the given exception. 
*/ private OMResponse createErrorResponse( OMRequest omRequest, IOException exception) { // Added all write command types here, because in future if any of the @@ -344,7 +329,7 @@ private OMResponse createErrorResponse( } public void stop() { - if (!isRatisEnabled) { + if (ozoneManagerDoubleBuffer != null) { ozoneManagerDoubleBuffer.stop(); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 21205c4dc334..c7d15bfe5f23 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -131,10 +131,10 @@ public void setup() throws IOException { flushNotifier = new OzoneManagerDoubleBuffer.FlushNotifier(); spyFlushNotifier = spy(flushNotifier); - doubleBuffer = new OzoneManagerDoubleBuffer.Builder() + doubleBuffer = OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(omMetadataManager) .setS3SecretManager(secretManager) - .setmaxUnFlushedTransactionCount(1000) + .setMaxUnFlushedTransactionCount(1000) .enableRatis(true) .setFlushNotifier(spyFlushNotifier) .build(); @@ -289,7 +289,7 @@ public void testAwaitFlush() throws Exception { doubleBuffer.getCurrentBufferSize()); // Start double buffer and wait for flush. - final Future await = awaitFlush(); + final Future await = doubleBuffer.awaitFlushAsync(); Future flusher = flushTransactions(executorService); await.get(); @@ -302,7 +302,7 @@ public void testAwaitFlush() throws Exception { assertEquals(0, doubleBuffer.getReadyBufferSize()); // Run again to make sure it works when double buffer is empty - awaitFlush().get(); + doubleBuffer.awaitFlushAsync().get(); // Clean up. flusher.cancel(false); @@ -323,8 +323,7 @@ public void testS3SecretCacheSizePostDoubleBufferFlush() throws IOException { "RULE:[2:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" + "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\n" + "DEFAULT"); - UserGroupInformation ugiAlice; - ugiAlice = UserGroupInformation.createRemoteUser(userPrincipalId1); + final UserGroupInformation ugiAlice = UserGroupInformation.createRemoteUser(userPrincipalId1); UserGroupInformation.createRemoteUser(userPrincipalId2); UserGroupInformation.createRemoteUser(userPrincipalId3); assertEquals("alice", ugiAlice.getShortUserName()); @@ -393,11 +392,6 @@ private OzoneManagerProtocolProtos.OMRequest s3GetSecretRequest( ).build(); } - // Return a future that waits for the flush. 
- private Future awaitFlush() { - return doubleBuffer.awaitFlushAsync(); - } - private Future flushTransactions(ExecutorService executorService) { return executorService.submit(() -> { doubleBuffer.resume(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index ee2e9043a362..dd8e642721e6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -75,9 +75,9 @@ public void setup() throws IOException { folder.toAbsolutePath().toString()); omMetadataManager = new OmMetadataManagerImpl(configuration, null); - doubleBuffer = new OzoneManagerDoubleBuffer.Builder() + doubleBuffer = OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(omMetadataManager) - .setmaxUnFlushedTransactionCount(10000) + .setMaxUnFlushedTransactionCount(10000) .enableRatis(true) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 006777141a6c..e8674b616377 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -107,9 +107,9 @@ public void setup() throws IOException { auditLogger = mock(AuditLogger.class); when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - doubleBuffer = new OzoneManagerDoubleBuffer.Builder() + doubleBuffer = OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(omMetadataManager) - .setmaxUnFlushedTransactionCount(100000) + .setMaxUnFlushedTransactionCount(100000) .enableRatis(true) .build(); } From 88afd8f51284f4d31f7ce36efa37345da0fb36af Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Sat, 20 Jan 2024 21:00:59 +0530 Subject: [PATCH 20/43] HDDS-9051. 
Change level of log in NetworkTopologyImpl when no nodes are available to INFO (#5942) --- .../org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 5e0697eaafd2..2dc86c1b6856 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -657,7 +657,7 @@ private Node chooseNodeInternal(String scope, int leafIndex, ancestorGen); if (availableNodes <= 0) { - LOG.warn("No available node in (scope=\"{}\" excludedScope=\"{}\" " + + LOG.info("No available node in (scope=\"{}\" excludedScope=\"{}\" " + "excludedNodes=\"{}\" ancestorGen=\"{}\").", scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes, ancestorGen); From c75e24a4f620d71a48f0eb01b121599129edf4b6 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sun, 21 Jan 2024 15:42:31 +0100 Subject: [PATCH 21/43] HDDS-10168. Add Ozone 1.4.0 to compatibility acceptance tests (#6040) --- hadoop-ozone/dist/src/main/compose/upgrade/test.sh | 5 +++-- hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml | 7 +++++++ hadoop-ozone/dist/src/main/compose/xcompat/test.sh | 6 +++--- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh index a267080bb190..9d7ec5d4e604 100755 --- a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh +++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh @@ -35,8 +35,9 @@ RESULT_DIR="$ALL_RESULT_DIR" create_results_dir # This is the version of Ozone that should use the runner image to run the # code that was built. Other versions will pull images from docker hub. 
-export OZONE_CURRENT_VERSION=1.4.0 -run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION" +export OZONE_CURRENT_VERSION=1.5.0 +run_test ha non-rolling-upgrade 1.4.0 "$OZONE_CURRENT_VERSION" +# run_test ha non-rolling-upgrade 1.3.0 "$OZONE_CURRENT_VERSION" # run_test ha non-rolling-upgrade 1.2.1 "$OZONE_CURRENT_VERSION" # run_test om-ha non-rolling-upgrade 1.1.0 "$OZONE_CURRENT_VERSION" diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml index 15d4c7e427da..2057cdd8a993 100644 --- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml +++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml @@ -45,6 +45,13 @@ services: volumes: - ../..:/opt/ozone command: ["sleep","1000000"] + old_client_1_4_0: + image: apache/ozone:1.4.0 + env_file: + - docker-config + volumes: + - ../..:/opt/ozone + command: ["sleep","1000000"] new_client: image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} env_file: diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh index baa239d56a82..419d397c19ec 100755 --- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh +++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh @@ -21,8 +21,8 @@ COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export COMPOSE_DIR basename=$(basename ${COMPOSE_DIR}) -current_version=1.4.0 -old_versions="1.0.0 1.1.0 1.2.1 1.3.0" # container is needed for each version in clients.yaml +current_version=1.5.0 +old_versions="1.0.0 1.1.0 1.2.1 1.3.0 1.4.0" # container is needed for each version in clients.yaml # shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh source "${COMPOSE_DIR}/../testlib.sh" @@ -77,7 +77,7 @@ test_cross_compatibility() { test_ec_cross_compatibility() { echo "Running Erasure Coded storage backward compatibility tests." - local cluster_versions_with_ec="1.3.0" + local cluster_versions_with_ec="1.3.0 1.4.0" local non_ec_client_versions="1.0.0 1.1.0 1.2.1" for cluster_version in ${cluster_versions_with_ec}; do From d160eaaa8b53553242ad0b4c5b198ae865383d00 Mon Sep 17 00:00:00 2001 From: Will Xiao Date: Mon, 22 Jan 2024 00:07:49 +0800 Subject: [PATCH 22/43] HDDS-10151. 
Replace single-use Random objects with RandomUtils in test classes (#6041) --- .../hdds/scm/storage/TestBlockInputStream.java | 5 ++--- .../TestECBlockReconstructedStripeInputStream.java | 4 ++-- .../hadoop/ozone/common/TestChecksumByteBuffer.java | 8 +++----- .../hdds/server/http/TestHttpServer2Metrics.java | 12 +++++------- .../hadoop/hdds/scm/block/TestDeletedBlockLog.java | 13 +++++-------- .../TestSCMContainerPlacementRackAware.java | 8 ++++---- .../TestReplicatedBlockChecksumComputer.java | 11 ++++------- .../fs/contract/AbstractContractSeekTest.java | 7 +++---- .../fs/ozone/AbstractRootedOzoneFileSystemTest.java | 5 ++--- .../hdds/scm/pipeline/TestLeaderChoosePolicy.java | 5 ++--- .../apache/hadoop/ozone/TestMultipartObjectGet.java | 5 ++--- .../hadoop/ozone/client/rpc/TestWatchForCommit.java | 6 +++--- .../ozone/shell/TestDeletedBlocksTxnShell.java | 7 +++---- .../apache/hadoop/ozone/recon/TestReconUtils.java | 5 ++--- 14 files changed, 42 insertions(+), 59 deletions(-) diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index 3dc5a82b3355..ae8c114d8138 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.storage; import com.google.common.primitives.Bytes; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -50,7 +51,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; @@ -186,9 +186,8 @@ public void testSeek() throws Exception { assertThrows(EOFException.class, () -> seekAndVerify(finalPos)); // Seek to random positions between 0 and the block size. 
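    // A minimal sketch of the commons-lang3 replacements applied throughout this
    // patch (assuming org.apache.commons.lang3.RandomUtils; the two-argument bound
    // is end-exclusive, matching the old java.util.Random#nextInt(bound)):
    //   int pos    = RandomUtils.nextInt(0, blockSize); // was: new Random().nextInt(blockSize)
    //   long id    = RandomUtils.nextLong();            // was: new Random().nextLong()
    //   byte[] buf = RandomUtils.nextBytes(32);         // was: new byte[32] filled via nextBytes()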
- Random random = new Random(); for (int i = 0; i < 10; i++) { - pos = random.nextInt(blockSize); + pos = RandomUtils.nextInt(0, blockSize); seekAndVerify(pos); } } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java index c708fc28ddbf..c32cea095187 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.client.io; import com.google.common.collect.ImmutableSet; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -41,7 +42,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.Set; import java.util.SplittableRandom; import java.util.concurrent.ExecutorService; @@ -645,7 +645,7 @@ public void testSeekToPartialOffsetFails() { } private Integer getRandomStreamIndex(Set set) { - return set.stream().skip(new Random().nextInt(set.size())) + return set.stream().skip(RandomUtils.nextInt(0, set.size())) .findFirst().orElse(null); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java index 5b88f5cb3003..9567fa2c281e 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java @@ -22,7 +22,7 @@ import org.junit.jupiter.api.Test; import java.nio.charset.StandardCharsets; -import java.util.Random; +import org.apache.commons.lang3.RandomUtils; import java.util.zip.Checksum; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -59,11 +59,9 @@ void testCorrectness() { checkBytes("hello world!".getBytes(StandardCharsets.UTF_8)); - final Random random = new Random(); - final byte[] bytes = new byte[1 << 10]; + final int len = 1 << 10; for (int i = 0; i < 1000; i++) { - random.nextBytes(bytes); - checkBytes(bytes, random.nextInt(bytes.length)); + checkBytes(RandomUtils.nextBytes(len), RandomUtils.nextInt(0, len)); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java index 257c543d22ce..3f00bc53d249 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2Metrics.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.server.http; +import org.apache.commons.lang3.RandomUtils; import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerIdleThreadCount; import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerMaxThreadCount; import static org.apache.hadoop.hdds.server.http.HttpServer2Metrics.HttpServer2MetricsInfo.HttpServerThreadCount; @@ -36,8 +37,6 @@ import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Random; - /** * Testing HttpServer2Metrics. */ @@ -57,11 +56,10 @@ public void setup() { @Test public void testMetrics() { // crate mock metrics - Random random = new Random(); - int threadCount = random.nextInt(); - int maxThreadCount = random.nextInt(); - int idleThreadCount = random.nextInt(); - int threadQueueWaitingTaskCount = random.nextInt(); + int threadCount = RandomUtils.nextInt(); + int maxThreadCount = RandomUtils.nextInt(); + int idleThreadCount = RandomUtils.nextInt(); + int threadQueueWaitingTaskCount = RandomUtils.nextInt(); String name = "s3g"; when(threadPool.getThreads()).thenReturn(threadCount); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index be57aa8ea6a3..9292ffa865c7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.block; +import org.apache.commons.lang3.RandomUtils; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -70,7 +71,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeoutException; @@ -218,9 +218,8 @@ private Map> generateData(int dataSize) throws IOException { private Map> generateData(int dataSize, HddsProtos.LifeCycleState state) throws IOException { Map> blockMap = new HashMap<>(); - Random random = new Random(1); - int continerIDBase = random.nextInt(100); - int localIDBase = random.nextInt(1000); + int continerIDBase = RandomUtils.nextInt(0, 100); + int localIDBase = RandomUtils.nextInt(0, 1000); for (int i = 0; i < dataSize; i++) { long containerID = continerIDBase + i; updateContainerMetadata(containerID, state); @@ -692,13 +691,12 @@ public void testInadequateReplicaCommit() throws Exception { @Test public void testRandomOperateTransactions() throws Exception { mockContainerHealthResult(true); - Random random = new Random(); int added = 0, committed = 0; List blocks = new ArrayList<>(); List txIDs; // Randomly add/get/commit/increase transactions. 
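    // Behavioural note on the migration: RandomUtils.nextInt()/nextLong() return only
    // non-negative values, unlike the unbounded java.util.Random calls they replace,
    // and the seeded new Random(1) previously used in generateData() becomes an
    // unseeded call, so the generated container/local IDs are no longer reproducible
    // between runs.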
for (int i = 0; i < 100; i++) { - int state = random.nextInt(4); + int state = RandomUtils.nextInt(0, 4); if (state == 0) { addTransactions(generateData(10), true); added += 10; @@ -803,8 +801,7 @@ public void testDeletedBlockTransactions() // add two transactions for same container containerID = blocks.get(0).getContainerID(); Map> deletedBlocksMap = new HashMap<>(); - Random random = new Random(); - long localId = random.nextLong(); + long localId = RandomUtils.nextLong(); deletedBlocksMap.put(containerID, new LinkedList<>( Collections.singletonList(localId))); addTransactions(deletedBlocksMap, true); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index 39e19135efa2..3ed6ac89d6fb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -19,9 +19,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Random; import java.util.stream.IntStream; +import org.apache.commons.lang3.RandomUtils; +import org.apache.commons.lang3.StringUtils; + import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -48,8 +50,6 @@ import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; -import org.apache.commons.lang3.StringUtils; - import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; @@ -625,7 +625,7 @@ public void testOutOfServiceNodesNotSelected(int datanodeCount) { for (int i = 0; i < 10; i++) { // Set a random DN to in_service and ensure it is always picked - int index = new Random().nextInt(dnInfos.size()); + int index = RandomUtils.nextInt(0, dnInfos.size()); dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy()); try { List datanodeDetails = diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java index 5cf4401bae25..6162f1ae5a41 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.client.checksum; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.DataChecksum; @@ -27,7 +28,6 @@ import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; -import java.util.Random; import static org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.COMPOSITE_CRC; import static 
org.apache.hadoop.hdds.scm.OzoneClientConfig.ChecksumCombineMode.MD5MD5CRC; @@ -40,9 +40,8 @@ public class TestReplicatedBlockChecksumComputer { @Test public void testComputeMd5Crc() throws IOException { final int lenOfBytes = 32; - byte[] randomChunkChecksum = new byte[lenOfBytes]; - Random r = new Random(); - r.nextBytes(randomChunkChecksum); + byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes); + MD5Hash emptyBlockMD5 = MD5Hash.digest(randomChunkChecksum); byte[] emptyBlockMD5Hash = emptyBlockMD5.getDigest(); AbstractBlockChecksumComputer computer = @@ -56,9 +55,7 @@ public void testComputeMd5Crc() throws IOException { @Test public void testComputeCompositeCrc() throws IOException { final int lenOfBytes = 32; - byte[] randomChunkChecksum = new byte[lenOfBytes]; - Random r = new Random(); - r.nextBytes(randomChunkChecksum); + byte[] randomChunkChecksum = RandomUtils.nextBytes(lenOfBytes); CrcComposer crcComposer = CrcComposer.newCrcComposer(DataChecksum.Type.CRC32C, 4); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java index 49c693268e70..618025dc06f7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.contract; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataInputStream; @@ -31,7 +32,6 @@ import java.io.EOFException; import java.io.IOException; -import java.util.Random; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; @@ -341,15 +341,14 @@ public void testRandomSeeks() throws Throwable { byte[] buf = dataset(filesize, 0, 255); Path randomSeekFile = path("testrandomseeks.bin"); createFile(getFileSystem(), randomSeekFile, true, buf); - Random r = new Random(); // Record the sequence of seeks and reads which trigger a failure. 
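    // The end-exclusive upper bound keeps seekOff inside the file, and the read length
    // never exceeds the bytes remaining after the seek (capped at 32000).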
int[] seeks = new int[10]; int[] reads = new int[10]; try (FSDataInputStream stm = getFileSystem().open(randomSeekFile)) { for (int i = 0; i < limit; i++) { - int seekOff = r.nextInt(buf.length); - int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000)); + int seekOff = RandomUtils.nextInt(0, buf.length); + int toRead = RandomUtils.nextInt(0, Math.min(buf.length - seekOff, 32000)); seeks[i % seeks.length] = seekOff; reads[i % reads.length] = toRead; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index 61b0281c659a..1675807d230f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -96,7 +96,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Optional; -import java.util.Random; import java.util.Set; import java.util.TreeSet; import java.util.UUID; @@ -2000,7 +1999,7 @@ private void checkInvalidPath(Path path) { @Test void testRenameFile() throws Exception { - final String dir = "/dir" + new Random().nextInt(1000); + final String dir = "/dir" + RandomUtils.nextInt(0, 1000); Path dirPath = new Path(getBucketPath() + dir); Path file1Source = new Path(getBucketPath() + dir + "/file1_Copy"); @@ -2026,7 +2025,7 @@ void testRenameFile() throws Exception { */ @Test void testRenameFileToDir() throws Exception { - final String dir = "/dir" + new Random().nextInt(1000); + final String dir = "/dir" + RandomUtils.nextInt(0, 1000); Path dirPath = new Path(getBucketPath() + dir); getFs().mkdirs(dirPath); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index 725b17ee9d64..439b563d6330 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.pipeline; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @@ -34,7 +35,6 @@ import java.util.concurrent.TimeUnit; import java.util.HashMap; import java.util.Map; -import java.util.Random; import java.util.UUID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; @@ -172,7 +172,6 @@ public void testMinLeaderCountChoosePolicy() throws Exception { // each datanode has leaderNumOfEachDn leaders after balance checkLeaderBalance(dnNum, leaderNumOfEachDn); - Random r = new Random(0); for (int i = 0; i < 10; i++) { // destroy some pipelines, wait new pipelines created, // then check leader balance @@ -181,7 +180,7 @@ public void testMinLeaderCountChoosePolicy() throws Exception { .getPipelines(RatisReplicationConfig.getInstance( ReplicationFactor.THREE), Pipeline.PipelineState.OPEN); - int destroyNum = r.nextInt(pipelines.size()); + int destroyNum = RandomUtils.nextInt(0, pipelines.size()); for (int k = 0; k <= destroyNum; k++) { 
pipelineManager.closePipeline(pipelines.get(k), false); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 0dae8a8b0dc6..c2e671b896e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.IOUtils; @@ -41,7 +42,6 @@ import javax.ws.rs.core.UriInfo; import java.io.IOException; import java.io.ByteArrayInputStream; -import java.security.SecureRandom; import java.util.ArrayList; import java.util.UUID; import java.util.List; @@ -217,8 +217,7 @@ public void testMultipart() throws Exception { private static String generateRandomContent(int sizeInMB) { int bytesToGenerate = sizeInMB * 1024 * 1024; - byte[] randomBytes = new byte[bytesToGenerate]; - new SecureRandom().nextBytes(randomBytes); + byte[] randomBytes = RandomUtils.nextBytes(bytesToGenerate); return Base64.getEncoder().encodeToString(randomBytes); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index d03c57bf4e4f..b053a4394bf9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -22,7 +22,7 @@ import java.time.Duration; import java.util.ArrayList; import java.util.List; -import java.util.Random; +import org.apache.commons.lang3.RandomUtils; import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -280,7 +280,7 @@ public void testWatchForCommitForRetryfailure() throws Exception { // as well as there is no logIndex generate in Ratis. // The basic idea here is just to test if its throws an exception. xceiverClient - .watchForCommit(index + new Random().nextInt(100) + 10); + .watchForCommit(index + RandomUtils.nextInt(0, 100) + 10); fail("expected exception not thrown"); } catch (Exception e) { assertInstanceOf(ExecutionException.class, e); @@ -374,7 +374,7 @@ public void testWatchForCommitForGroupMismatchException() throws Exception { // The basic idea here is just to test if its throws an exception. 
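    // The fixed +10 on top of the random increment keeps the watched index strictly
    // ahead of the last returned log index, so the watch below should always fail as
    // the test intends.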
xceiverClient .watchForCommit(reply.getLogIndex() + - new Random().nextInt(100) + 10); + RandomUtils.nextInt(0, 100) + 10); fail("Expected exception not thrown"); } catch (Exception e) { assertInstanceOf(GroupMismatchException.class, HddsClientUtils.checkForException(e)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index e74041ceafb7..36b970f4ee9d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.shell; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -52,7 +53,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.regex.Matcher; @@ -129,9 +129,8 @@ public void shutdown() { //> private Map> generateData(int dataSize) throws Exception { Map> blockMap = new HashMap<>(); - Random random = new Random(1); - int continerIDBase = random.nextInt(100); - int localIDBase = random.nextInt(1000); + int continerIDBase = RandomUtils.nextInt(0, 100); + int localIDBase = RandomUtils.nextInt(0, 1000); for (int i = 0; i < dataSize; i++) { long containerID = continerIDBase + i; updateContainerMetadata(containerID); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java index b34c8d31c6fa..07196e29eaad 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java @@ -41,8 +41,8 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.net.URL; -import java.util.Random; +import org.apache.commons.lang3.RandomUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -206,9 +206,8 @@ public void testNextClosestPowerIndexOfTwo() { assertNextClosestPowerIndexOfTwo(n - 1); } - final Random random = new Random(); for (int i = 0; i < 10; i++) { - assertNextClosestPowerIndexOfTwo(random.nextLong()); + assertNextClosestPowerIndexOfTwo(RandomUtils.nextLong()); } } From a72cd72a399c74cc5991fc4473bc5fc1f3fa834f Mon Sep 17 00:00:00 2001 From: Maksim Myskov Date: Sun, 21 Jan 2024 21:40:05 +0300 Subject: [PATCH 23/43] HDDS-10014. 
Fixed internal error on generating S3 secret via HTTP (#5887) --- .../dist/src/main/smoketest/commonlib.robot | 2 + .../src/main/smoketest/s3/commonawslib.robot | 6 +++ .../main/smoketest/s3/secretgenerate.robot | 37 +++++++++++-------- .../src/main/smoketest/s3/secretrevoke.robot | 27 +++++++------- .../s3secret/S3SecretManagementEndpoint.java | 30 ++++++++++----- .../ozone/s3secret/TestSecretGenerate.java | 18 +++++++++ 6 files changed, 83 insertions(+), 37 deletions(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot index 7d9edcdef448..55ed9ddf5044 100644 --- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot @@ -32,10 +32,12 @@ Get test user principal [return] ${user}/${instance}@EXAMPLE.COM Kinit HTTP user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster ${principal} = Get test user principal HTTP Wait Until Keyword Succeeds 2min 10sec Execute kinit -k -t /etc/security/keytabs/HTTP.keytab ${principal} Kinit test user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skip in unsecure cluster [arguments] ${user} ${keytab} ${TEST_USER} = Get test user principal ${user} Set Suite Variable ${TEST_USER} diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index c0b2c9f7bfae..840fb963d8d1 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -207,3 +207,9 @@ Verify Multipart Upload ${tmp} = Catenate @{files} Execute cat ${tmp} > /tmp/original${random} Compare files /tmp/original${random} /tmp/verify${random} + +Revoke S3 secrets + Execute and Ignore Error ozone s3 revokesecret -y + Execute and Ignore Error ozone s3 revokesecret -y -u testuser + Execute and Ignore Error ozone s3 revokesecret -y -u testuser2 + diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot index b9f6993f45e2..70dcfa1abede 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot @@ -21,30 +21,37 @@ Library String Resource ../commonlib.robot Resource ./commonawslib.robot Test Timeout 5 minutes -Suite Setup Setup s3 tests Default Tags no-bucket-type +Test Setup Run Keywords Kinit test user testuser testuser.keytab +... AND Revoke S3 secrets +Test Teardown Run Keyword Revoke S3 secrets *** Variables *** ${ENDPOINT_URL} http://s3g:9878 +${SECURITY_ENABLED} true *** Test Cases *** S3 Gateway Generate Secret - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - Should Match Regexp ${result} .*.* - ELSE - Should contain ${result} S3 Secret endpoint is disabled. 
- END + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + Should Match Regexp ${result} .*.* + +S3 Gateway Secret Already Exists + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret ${OM_HA_PARAM} + ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret + Should contain ${result} HTTP/1.1 400 S3_SECRET_ALREADY_EXISTS ignore_case=True S3 Gateway Generate Secret By Username - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + Should Match Regexp ${result} .*.* + +S3 Gateway Generate Secret By Username For Other User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled ${result} = Execute curl -X PUT --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - Should Match Regexp ${result} .*.* - ELSE - Should contain ${result} S3 Secret endpoint is disabled. - END + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + Should Match Regexp ${result} .*.* \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot index 27b4580f419b..0f15f23067b0 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot @@ -21,8 +21,9 @@ Library String Resource ../commonlib.robot Resource ./commonawslib.robot Test Timeout 5 minutes -Suite Setup Setup s3 tests Default Tags no-bucket-type +Test Setup Run Keywords Kinit test user testuser testuser.keytab +... AND Revoke S3 secrets *** Variables *** ${ENDPOINT_URL} http://s3g:9878 @@ -31,19 +32,19 @@ ${SECURITY_ENABLED} true *** Test Cases *** S3 Gateway Revoke Secret - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - ELSE - Should contain ${result} S3 Secret endpoint is disabled. - END + Should contain ${result} HTTP/1.1 200 OK ignore_case=True S3 Gateway Revoke Secret By Username - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret -u testuser ${OM_HA_PARAM} + ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser + Should contain ${result} HTTP/1.1 200 OK ignore_case=True + +S3 Gateway Revoke Secret By Username For Other User + Pass Execution If '${SECURITY_ENABLED}' == 'false' Skipping this check as security is not enabled + Execute ozone s3 getsecret -u testuser2 ${OM_HA_PARAM} ${result} = Execute curl -X DELETE --negotiate -u : -v ${ENDPOINT_URL}/secret/testuser2 - IF '${SECURITY_ENABLED}' == 'true' - Should contain ${result} HTTP/1.1 200 OK ignore_case=True - ELSE - Should contain ${result} S3 Secret endpoint is disabled. 
- END \ No newline at end of file + Should contain ${result} HTTP/1.1 200 OK ignore_case=True \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java index 3c932da57d77..a86a92820c06 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java @@ -32,6 +32,7 @@ import javax.ws.rs.core.Response; import java.io.IOException; +import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static javax.ws.rs.core.Response.Status.NOT_FOUND; /** @@ -55,15 +56,26 @@ public Response generate(@PathParam("username") String username) return generateInternal(username); } - private Response generateInternal(@Nullable String username) - throws IOException { - S3SecretResponse s3SecretResponse = new S3SecretResponse(); - S3SecretValue s3SecretValue = generateS3Secret(username); - s3SecretResponse.setAwsSecret(s3SecretValue.getAwsSecret()); - s3SecretResponse.setAwsAccessKey(s3SecretValue.getAwsAccessKey()); - AUDIT.logReadSuccess(buildAuditMessageForSuccess( - S3GAction.GENERATE_SECRET, getAuditParameters())); - return Response.ok(s3SecretResponse).build(); + private Response generateInternal(@Nullable String username) throws IOException { + try { + S3SecretValue s3SecretValue = generateS3Secret(username); + + S3SecretResponse s3SecretResponse = new S3SecretResponse(); + s3SecretResponse.setAwsSecret(s3SecretValue.getAwsSecret()); + s3SecretResponse.setAwsAccessKey(s3SecretValue.getAwsAccessKey()); + AUDIT.logWriteSuccess(buildAuditMessageForSuccess( + S3GAction.GENERATE_SECRET, getAuditParameters())); + return Response.ok(s3SecretResponse).build(); + } catch (OMException e) { + AUDIT.logWriteFailure(buildAuditMessageForFailure( + S3GAction.GENERATE_SECRET, getAuditParameters(), e)); + if (e.getResult() == OMException.ResultCodes.S3_SECRET_ALREADY_EXISTS) { + return Response.status(BAD_REQUEST.getStatusCode(), e.getResult().toString()).build(); + } else { + LOG.error("Can't execute get secret request: ", e); + return Response.serverError().build(); + } + } } private S3SecretValue generateS3Secret(@Nullable String username) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java index f3c17d5807ef..e6ff4024d1e1 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3secret/TestSecretGenerate.java @@ -22,6 +22,7 @@ import java.security.Principal; import javax.ws.rs.container.ContainerRequestContext; import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.Response; import javax.ws.rs.core.SecurityContext; import javax.ws.rs.core.UriInfo; @@ -30,6 +31,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -38,6 +40,7 @@ import org.mockito.invocation.InvocationOnMock; import 
org.mockito.junit.jupiter.MockitoExtension; +import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; @@ -96,6 +99,21 @@ void testSecretGenerate() throws IOException { assertEquals(USER_NAME, response.getAwsAccessKey()); } + @Test + void testIfSecretAlreadyExists() throws IOException { + when(principal.getName()).thenReturn(USER_NAME); + when(securityContext.getUserPrincipal()).thenReturn(principal); + when(context.getSecurityContext()).thenReturn(securityContext); + when(proxy.getS3Secret(any())).thenThrow(new OMException("Secret already exists", + OMException.ResultCodes.S3_SECRET_ALREADY_EXISTS)); + + Response response = endpoint.generate(); + + assertEquals(BAD_REQUEST.getStatusCode(), response.getStatus()); + assertEquals(OMException.ResultCodes.S3_SECRET_ALREADY_EXISTS.toString(), + response.getStatusInfo().getReasonPhrase()); + } + @Test void testSecretGenerateWithUsername() throws IOException { S3SecretResponse response = From 69c4629ca01c12594d7da1e5ef041d7b7788c2ed Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 22 Jan 2024 09:30:35 +0100 Subject: [PATCH 24/43] HDDS-10080. Let junit.sh fail if no tests were matched in repeated run (#6036) --- hadoop-ozone/dev-support/checks/junit.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh index 417ae35e5e42..768a1f32a38b 100755 --- a/hadoop-ozone/dev-support/checks/junit.sh +++ b/hadoop-ozone/dev-support/checks/junit.sh @@ -79,6 +79,12 @@ for i in $(seq 1 ${ITERATIONS}); do fi if [[ ${ITERATIONS} -gt 1 ]]; then + if ! grep -q "Tests run: [^0]" "${REPORT_DIR}/output.log"; then + echo "No tests were run" >> "${REPORT_DIR}/summary.txt" + irc=1 + FAIL_FAST=true + fi + REPORT_DIR="${original_report_dir}" echo "Iteration ${i} exit code: ${irc}" | tee -a "${REPORT_FILE}" fi From ed2488cddf11cf0c8264563b7efa3fab21559c73 Mon Sep 17 00:00:00 2001 From: jianghuazhu <740087514@qq.com> Date: Mon, 22 Jan 2024 18:02:05 +0800 Subject: [PATCH 25/43] HDDS-9988. Show used storage percent in SCM UI (#5882) --- .../hadoop/hdds/scm/node/SCMNodeManager.java | 102 ++++++++++++++++-- .../resources/webapps/scm/scm-overview.html | 6 ++ .../src/main/resources/webapps/scm/scm.js | 2 + .../hdds/scm/node/TestSCMNodeManager.java | 46 ++++++++ 4 files changed, 150 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index e9b7d220e1f2..cc5fb9aa776e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -69,7 +69,9 @@ import javax.management.ObjectName; import java.io.IOException; +import java.math.RoundingMode; import java.net.InetAddress; +import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -138,9 +140,11 @@ public class SCMNodeManager implements NodeManager { * consistent view of the node state. 
*/ private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - private final String opeState = "OPSTATE"; - private final String comState = "COMSTATE"; - private final String lastHeartbeat = "LASTHEARTBEAT"; + private static final String OPESTATE = "OPSTATE"; + private static final String COMSTATE = "COMSTATE"; + private static final String LASTHEARTBEAT = "LASTHEARTBEAT"; + private static final String USEDSPACEPERCENT = "USEDSPACEPERCENT"; + private static final String TOTALCAPACITY = "CAPACITY"; /** * Constructs SCM machine Manager. */ @@ -1103,9 +1107,9 @@ public Map> getNodeStatusInfo() { heartbeatTimeDiff = getLastHeartbeatTimeDiff(dni.getLastHeartbeatTime()); } Map map = new HashMap<>(); - map.put(opeState, opstate); - map.put(comState, healthState); - map.put(lastHeartbeat, heartbeatTimeDiff); + map.put(OPESTATE, opstate); + map.put(COMSTATE, healthState); + map.put(LASTHEARTBEAT, heartbeatTimeDiff); if (httpPort != null) { map.put(httpPort.getName().toString(), httpPort.getValue().toString()); } @@ -1113,11 +1117,97 @@ public Map> getNodeStatusInfo() { map.put(httpsPort.getName().toString(), httpsPort.getValue().toString()); } + String capacity = calculateStorageCapacity(dni.getStorageReports()); + map.put(TOTALCAPACITY, capacity); + String[] storagePercentage = calculateStoragePercentage( + dni.getStorageReports()); + String scmUsedPerc = storagePercentage[0]; + String nonScmUsedPerc = storagePercentage[1]; + map.put(USEDSPACEPERCENT, + "Ozone: " + scmUsedPerc + "%, other: " + nonScmUsedPerc + "%"); nodes.put(hostName, map); } return nodes; } + /** + * Calculate the storage capacity of the DataNode node. + * @param storageReports Calculate the storage capacity corresponding + * to the storage collection. + * @return + */ + public static String calculateStorageCapacity( + List storageReports) { + long capacityByte = 0; + if (storageReports != null && !storageReports.isEmpty()) { + for (StorageReportProto storageReport : storageReports) { + capacityByte += storageReport.getCapacity(); + } + } + + double ua = capacityByte; + StringBuilder unit = new StringBuilder("B"); + if (ua > 1024) { + ua = ua / 1024; + unit.replace(0, 1, "KB"); + } + if (ua > 1024) { + ua = ua / 1024; + unit.replace(0, 2, "MB"); + } + if (ua > 1024) { + ua = ua / 1024; + unit.replace(0, 2, "GB"); + } + if (ua > 1024) { + ua = ua / 1024; + unit.replace(0, 2, "TB"); + } + + DecimalFormat decimalFormat = new DecimalFormat("#0.0"); + decimalFormat.setRoundingMode(RoundingMode.HALF_UP); + String capacity = decimalFormat.format(ua); + return capacity + unit.toString(); + } + + /** + * Calculate the storage usage percentage of a DataNode node. + * @param storageReports Calculate the storage percentage corresponding + * to the storage collection. 
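+   * The returned array has two elements: index 0 is the percentage of capacity used
+   * by Ozone (scmUsed) and index 1 the percentage used by non-Ozone data; both are
+   * "N/A" when no storage reports are available.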
+ * @return + */ + public static String[] calculateStoragePercentage( + List storageReports) { + String[] storagePercentage = new String[2]; + String usedPercentage = "N/A"; + String nonUsedPercentage = "N/A"; + if (storageReports != null && !storageReports.isEmpty()) { + long capacity = 0; + long scmUsed = 0; + long remaining = 0; + for (StorageReportProto storageReport : storageReports) { + capacity += storageReport.getCapacity(); + scmUsed += storageReport.getScmUsed(); + remaining += storageReport.getRemaining(); + } + long scmNonUsed = capacity - scmUsed - remaining; + + DecimalFormat decimalFormat = new DecimalFormat("#0.00"); + decimalFormat.setRoundingMode(RoundingMode.HALF_UP); + + double usedPerc = ((double) scmUsed / capacity) * 100; + usedPerc = usedPerc > 100.0 ? 100.0 : usedPerc; + double nonUsedPerc = ((double) scmNonUsed / capacity) * 100; + nonUsedPerc = nonUsedPerc > 100.0 ? 100.0 : nonUsedPerc; + usedPercentage = decimalFormat.format(usedPerc); + nonUsedPercentage = decimalFormat.format(nonUsedPerc); + } + + storagePercentage[0] = usedPercentage; + storagePercentage[1] = nonUsedPercentage; + return storagePercentage; + } + /** * Based on the current time and the last heartbeat, calculate the time difference * and get a string of the relative value. E.g. "2s ago", "1m 2s ago", etc. diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html index 214a2ad7868a..fdd8de15b6a9 100644 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html @@ -48,6 +48,10 @@
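The new "Used Space Percent" and "Capacity" columns rendered by this hunk are fed by the two helper methods above. As an illustration of the arithmetic (it mirrors the first parameterized scenario added to TestSCMNodeManager below), a datanode reporting one volume with capacity=600 B, scmUsed=65 B and remaining=500 B yields:

    capacity       -> "600.0B"              (volume capacities summed, then scaled in 1024 steps)
    non-Ozone used =  600 - 65 - 500 = 35 B
    Ozone %        =  65 / 600 * 100 = 10.83
    other %        =  35 / 600 * 100 = 5.83

so the column reads "Ozone: 10.83%, other: 5.83%".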

      <h2>Node Status</h2>
      <thead>
        <tr>
HostName + Used Space Percent + Capacity Operational State element.key === "USEDSPACEPERCENT").value, + capacity: value && value.find((element) => element.key === "CAPACITY").value, comstate: value && value.find((element) => element.key === "COMSTATE").value, lastheartbeat: value && value.find((element) => element.key === "LASTHEARTBEAT").value, port: portSpec.port, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 85a70b646739..930774a54bf3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -86,6 +86,7 @@ import java.util.Map; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyList; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -123,6 +124,8 @@ import static org.mockito.Mockito.eq; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import org.mockito.ArgumentCaptor; import org.slf4j.Logger; @@ -1572,6 +1575,49 @@ public void testScmStatsFromNodeReport() } } + private List generateStorageReportProto( + int volumeCount, UUID dnId, long capacity, long used, long remaining) { + List reports = new ArrayList<>(volumeCount); + boolean failed = true; + for (int x = 0; x < volumeCount; x++) { + String storagePath = testDir.getAbsolutePath() + "/" + dnId; + reports.add(HddsTestUtils + .createStorageReport(dnId, storagePath, capacity, + used, remaining, null, failed)); + failed = !failed; + } + return reports; + } + + private static Stream calculateStoragePercentageScenarios() { + return Stream.of( + Arguments.of(600, 65, 500, 1, "600.0B", "10.83", "5.83"), + Arguments.of(10000, 1000, 8800, 12, "117.2KB", "10.00", "2.00"), + Arguments.of(100000000, 1000, 899999, 12, "1.1GB", "0.00", "99.10"), + Arguments.of(10000, 1000, 0, 0, "0.0B", "N/A", "N/A"), + Arguments.of(0, 0, 0, 0, "0.0B", "N/A", "N/A"), + Arguments.of(1010, 547, 400, 5, "4.9KB", "54.16", "6.24") + ); + } + + @ParameterizedTest + @MethodSource("calculateStoragePercentageScenarios") + public void testCalculateStoragePercentage(long perCapacity, + long used, long remaining, int volumeCount, String totalCapacity, + String scmUsedPerc, String nonScmUsedPerc) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + UUID dnId = dn.getUuid(); + List reports = volumeCount > 0 ? + generateStorageReportProto(volumeCount, dnId, perCapacity, + used, remaining) : null; + String capacityResult = SCMNodeManager.calculateStorageCapacity(reports); + assertEquals(totalCapacity, capacityResult); + String[] storagePercentage = SCMNodeManager.calculateStoragePercentage( + reports); + assertEquals(scmUsedPerc, storagePercentage[0]); + assertEquals(nonScmUsedPerc, storagePercentage[1]); + } + /** * Test multiple nodes sending initial heartbeat with their node report * with multiple volumes. From 3bd5e3616101d28d8f16279362fef18095e7163f Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 22 Jan 2024 12:21:15 +0100 Subject: [PATCH 26/43] HDDS-10178. 
Shaded Jar build failure in case-insensitive filesystem (#6045) --- .github/workflows/ci.yml | 6 +++++- pom.xml | 8 ++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d209d766a356..ea56d8f0c532 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -135,13 +135,17 @@ jobs: - build-info - build - basic - runs-on: ubuntu-20.04 timeout-minutes: 30 if: needs.build-info.outputs.needs-compile == 'true' strategy: matrix: java: [ 11, 17, 21 ] + include: + - os: ubuntu-20.04 + - java: 8 + os: macos-12 fail-fast: false + runs-on: ${{ matrix.os }} steps: - name: Download Ozone source tarball uses: actions/download-artifact@v3 diff --git a/pom.xml b/pom.xml index 2d6e825e5181..e3fb473b80d7 100644 --- a/pom.xml +++ b/pom.xml @@ -273,6 +273,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.8.1 1.9 3.6.1 + 4.2.2 0.29.0 1.3.1 2.3.0 @@ -1731,6 +1732,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.maven.plugins maven-dependency-plugin ${maven-dependency-plugin.version} + + + org.codehaus.plexus + plexus-archiver + ${plexus-archiver.version} + + org.apache.maven.plugins From c3be4e92a8cbeb1e9997510118995d1cf9871e04 Mon Sep 17 00:00:00 2001 From: Will Xiao Date: Mon, 22 Jan 2024 20:55:26 +0800 Subject: [PATCH 27/43] HDDS-6796. Extract method for building OMRequest in TrashOzoneFileSystem (#6048) --- .../hadoop/ozone/om/TrashOzoneFileSystem.java | 31 +++++++++---------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java index 5ada61373464..6e1c9da34cbc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java @@ -460,6 +460,18 @@ List listKeys(String volumeName, String bucketName, String startKey, } } + /** + * Returns a OMRequest builder with specified type. 
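+   * The returned builder is pre-populated with the trash client ID, the current
+   * client version and the calling user's info; callers only set the request body.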
+ * @param cmdType type of the request + */ + private OzoneManagerProtocolProtos.OMRequest.Builder + createOMRequest(OzoneManagerProtocolProtos.Type cmdType) throws IOException { + return OzoneManagerProtocolProtos.OMRequest.newBuilder() + .setClientId(CLIENT_ID.toString()) + .setVersion(ClientVersion.CURRENT_VERSION) + .setUserInfo(getUserInfo()) + .setCmdType(cmdType); + } private OzoneManagerProtocolProtos.OMRequest getRenameKeyRequest( @@ -483,12 +495,8 @@ List listKeys(String volumeName, String bucketName, String startKey, OzoneManagerProtocolProtos.OMRequest omRequest = null; try { - omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder() - .setClientId(CLIENT_ID.toString()) - .setVersion(ClientVersion.CURRENT_VERSION) - .setUserInfo(getUserInfo()) + omRequest = createOMRequest(OzoneManagerProtocolProtos.Type.RenameKey) .setRenameKeyRequest(renameKeyRequest) - .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey) .build(); } catch (IOException e) { LOG.error("Couldn't get userinfo", e); @@ -549,13 +557,8 @@ private OzoneManagerProtocolProtos.OMRequest getDeleteKeyRequest( OzoneManagerProtocolProtos.OMRequest omRequest = null; try { - omRequest = - OzoneManagerProtocolProtos.OMRequest.newBuilder() - .setClientId(CLIENT_ID.toString()) - .setVersion(ClientVersion.CURRENT_VERSION) - .setUserInfo(getUserInfo()) + omRequest = createOMRequest(OzoneManagerProtocolProtos.Type.DeleteKey) .setDeleteKeyRequest(deleteKeyRequest) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) .build(); } catch (IOException e) { LOG.error("Couldn't get userinfo", e); @@ -619,12 +622,8 @@ boolean processKeyPath(List keyPathList) { OzoneManagerProtocolProtos.OMRequest omRequest = null; try { - omRequest = OzoneManagerProtocolProtos.OMRequest.newBuilder() - .setClientId(CLIENT_ID.toString()) - .setVersion(ClientVersion.CURRENT_VERSION) - .setUserInfo(getUserInfo()) + omRequest = createOMRequest(OzoneManagerProtocolProtos.Type.DeleteKeys) .setDeleteKeysRequest(deleteKeysRequest) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKeys) .build(); } catch (IOException e) { LOG.error("Couldn't get userinfo", e); From 227b64b5a958ec7debbd2cabbf134490ad38013f Mon Sep 17 00:00:00 2001 From: Hongbing Wang <284734261@qq.com> Date: Tue, 23 Jan 2024 01:11:32 +0800 Subject: [PATCH 28/43] HDDS-9345. 
Add CapacityPipelineChoosePolicy considering datanode storage space (#5354) --- .../org/apache/hadoop/hdds/scm/ScmConfig.java | 32 ++++- .../hadoop/hdds/scm/PipelineChoosePolicy.java | 10 ++ .../placement/metrics/SCMNodeMetric.java | 16 ++- .../placement/metrics/SCMNodeStat.java | 9 ++ .../CapacityPipelineChoosePolicy.java | 136 ++++++++++++++++++ .../PipelineChoosePolicyFactory.java | 10 +- .../scm/server/StorageContainerManager.java | 4 +- .../TestWritableECContainerProvider.java | 24 +++- .../TestCapacityPipelineChoosePolicy.java | 107 ++++++++++++++ .../TestPipelineChoosePolicyFactory.java | 19 ++- 10 files changed, 340 insertions(+), 27 deletions(-) rename hadoop-hdds/{common => server-scm}/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java (86%) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java index 46816a63d349..2fc04e00f23b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java @@ -73,8 +73,17 @@ public class ScmConfig extends ReconfigurableConfig { + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. " + "The class decides which pipeline will be used to find or " + "allocate Ratis containers. If not set, " - + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms. " - + "RandomPipelineChoosePolicy will be used as default value." + + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "RandomPipelineChoosePolicy will be used as default value. " + + "The following values can be used, " + + "(1) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "RandomPipelineChoosePolicy : random choose one pipeline. " + + "(2) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "HealthyPipelineChoosePolicy : random choose one healthy pipeline. " + + "(3) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "CapacityPipelineChoosePolicy : choose the pipeline with lower " + + "utilization from the two pipelines. Note that random choose " + + "method will be executed twice in this policy." ) private String pipelineChoosePolicyName; @@ -85,11 +94,20 @@ public class ScmConfig extends ReconfigurableConfig { tags = { ConfigTag.SCM, ConfigTag.PIPELINE }, description = "The full name of class which implements " - + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. " - + "The class decides which pipeline will be used when " - + "selecting an EC Pipeline. If not set, " - + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms. " - + "RandomPipelineChoosePolicy will be used as default value." + + "org.apache.hadoop.hdds.scm.PipelineChoosePolicy. " + + "The class decides which pipeline will be used when " + + "selecting an EC Pipeline. If not set, " + + "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "RandomPipelineChoosePolicy will be used as default value. " + + "The following values can be used, " + + "(1) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "RandomPipelineChoosePolicy : random choose one pipeline. " + + "(2) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." 
+ + "HealthyPipelineChoosePolicy : random choose one healthy pipeline. " + + "(3) org.apache.hadoop.hdds.scm.pipeline.choose.algorithms." + + "CapacityPipelineChoosePolicy : choose the pipeline with lower " + + "utilization from the two pipelines. Note that random choose " + + "method will be executed twice in this policy." ) private String ecPipelineChoosePolicyName; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java similarity index 86% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java index 76439a78464f..e1d0fdd35aa1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/PipelineChoosePolicy.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hdds.scm; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import java.util.List; @@ -26,6 +27,15 @@ */ public interface PipelineChoosePolicy { + /** + * Updates the policy with NodeManager. + * @return updated policy. + */ + default PipelineChoosePolicy init(final NodeManager nodeManager) { + // override if the policy requires nodeManager + return this; + } + /** * Given an initial list of pipelines, return one of the pipelines. * diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index 330bf67416ae..094e535dcbd9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -23,7 +23,8 @@ /** * SCM Node Metric that is used in the placement classes. */ -public class SCMNodeMetric implements DatanodeMetric { +public class SCMNodeMetric implements DatanodeMetric, + Comparable { private SCMNodeStat stat; /** @@ -195,12 +196,12 @@ public void subtract(SCMNodeStat value) { * @throws ClassCastException if the specified object's type prevents it * from being compared to this object. */ - //@Override - public int compareTo(SCMNodeStat o) { - if (isEqual(o)) { + @Override + public int compareTo(SCMNodeMetric o) { + if (isEqual(o.get())) { return 0; } - if (isGreater(o)) { + if (isGreater(o.get())) { return 1; } else { return -1; @@ -225,4 +226,9 @@ public boolean equals(Object o) { public int hashCode() { return stat != null ? 
stat.hashCode() : 0; } + + @Override + public String toString() { + return "SCMNodeMetric{" + stat.toString() + '}'; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index 2a848a04eff5..5456e6ee5273 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -174,4 +174,13 @@ public int hashCode() { return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() ^ committed.get() ^ freeSpaceToSpare.get()); } + + @Override + public String toString() { + return "SCMNodeStat{" + + "capacity=" + capacity.get() + + ", scmUsed=" + scmUsed.get() + + ", remaining=" + remaining.get() + + '}'; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java new file mode 100644 index 000000000000..a95a473de6d4 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/CapacityPipelineChoosePolicy.java @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms; + +import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; +import org.apache.hadoop.hdds.scm.PipelineRequestInformation; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Deque; +import java.util.List; +import java.util.Objects; + +/** + * Pipeline choose policy that randomly choose pipeline with relatively + * lower utilization. + *

+ * The algorithm is as follows: pick 2 random pipelines from a given pool of + * pipelines and then pick the one with the lower utilization. + * This gives pipelines with lower utilization a higher probability + * of being picked. + *

+ * For background on why we choose two pipelines at random and then pick the + * one with the lower utilization, see the links to the original papers in + * HDFS-11564. + * The same algorithm also applies to SCMContainerPlacementCapacity. + *

+ */ +public class CapacityPipelineChoosePolicy implements PipelineChoosePolicy { + + private static final Logger LOG = + LoggerFactory.getLogger(PipelineChoosePolicy.class); + + private NodeManager nodeManager; + + private final PipelineChoosePolicy healthPolicy; + + public CapacityPipelineChoosePolicy() { + healthPolicy = new HealthyPipelineChoosePolicy(); + } + + @Override + public PipelineChoosePolicy init(final NodeManager scmNodeManager) { + this.nodeManager = scmNodeManager; + return this; + } + + @Override + public Pipeline choosePipeline(List pipelineList, + PipelineRequestInformation pri) { + Pipeline pipeline1 = healthPolicy.choosePipeline(pipelineList, pri); + Pipeline pipeline2 = healthPolicy.choosePipeline(pipelineList, pri); + + int result = new CapacityPipelineComparator(this) + .compare(pipeline1, pipeline2); + + LOG.debug("Chosen the {} pipeline", result <= 0 ? "first" : "second"); + return result <= 0 ? pipeline1 : pipeline2; + } + + @Override + public int choosePipelineIndex(List pipelineList, + PipelineRequestInformation pri) { + List mutableList = new ArrayList<>(pipelineList); + Pipeline pipeline = choosePipeline(mutableList, pri); + return pipelineList.indexOf(pipeline); + } + + /** + * Return a list of SCMNodeMetrics corresponding to the DataNodes in the + * pipeline, sorted in descending order based on scm used storage. + * @param pipeline pipeline + * @return sorted SCMNodeMetrics corresponding the pipeline + */ + private Deque getSortedNodeFromPipeline(Pipeline pipeline) { + Deque sortedNodeStack = new ArrayDeque<>(); + pipeline.getNodes().stream() + .map(nodeManager::getNodeStat) + .filter(Objects::nonNull) + .sorted() + .forEach(sortedNodeStack::push); + return sortedNodeStack; + } + + static class CapacityPipelineComparator implements Comparator { + private final CapacityPipelineChoosePolicy policy; + + CapacityPipelineComparator(CapacityPipelineChoosePolicy policy) { + this.policy = policy; + } + @Override + public int compare(Pipeline p1, Pipeline p2) { + if (p1.getId().equals(p2.getId())) { + LOG.debug("Compare the same pipeline {}", p1); + return 0; + } + Deque sortedNodes1 = policy.getSortedNodeFromPipeline(p1); + Deque sortedNodes2 = policy.getSortedNodeFromPipeline(p2); + + // Compare the scmUsed weight of the node in the two sorted node stacks + LOG.debug("Compare scmUsed weight in pipelines, first : {}, second : {}", + sortedNodes1, sortedNodes2); + int result = 0; + int count = 0; + while (result == 0 && + !sortedNodes1.isEmpty() && !sortedNodes2.isEmpty()) { + count++; + LOG.debug("Compare {} round", count); + result = sortedNodes1.pop().compareTo(sortedNodes2.pop()); + } + return result; + } + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java index d040dbe2bcaf..90736a018132 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,14 
+49,14 @@ public final class PipelineChoosePolicyFactory { private PipelineChoosePolicyFactory() { } - public static PipelineChoosePolicy getPolicy( + public static PipelineChoosePolicy getPolicy(final NodeManager nodeManager, ScmConfig scmConfig, boolean forEC) throws SCMException { Class policyClass = null; String policyName = forEC ? scmConfig.getECPipelineChoosePolicyName() : scmConfig.getPipelineChoosePolicyName(); try { policyClass = getClass(policyName, PipelineChoosePolicy.class); - return createPipelineChoosePolicyFromClass(policyClass); + return createPipelineChoosePolicyFromClass(nodeManager, policyClass); } catch (Exception e) { Class defaultPolicy = forEC ? OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT : @@ -64,13 +65,14 @@ public static PipelineChoosePolicy getPolicy( LOG.error("Met an exception while create pipeline choose policy " + "for the given class {}. Fallback to the default pipeline " + " choose policy {}", policyName, defaultPolicy, e); - return createPipelineChoosePolicyFromClass(defaultPolicy); + return createPipelineChoosePolicyFromClass(nodeManager, defaultPolicy); } throw e; } } private static PipelineChoosePolicy createPipelineChoosePolicyFromClass( + final NodeManager nodeManager, Class policyClass) throws SCMException { Constructor constructor; try { @@ -86,7 +88,7 @@ private static PipelineChoosePolicy createPipelineChoosePolicyFromClass( } try { - return constructor.newInstance(); + return constructor.newInstance().init(nodeManager); } catch (Exception e) { throw new RuntimeException("Failed to instantiate class " + policyClass.getCanonicalName() + " for " + e.getMessage()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 1a3ea2515f2d..046be68760c6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -804,9 +804,9 @@ private void initializeSystemManagers(OzoneConfiguration conf, ScmConfig scmConfig = conf.getObject(ScmConfig.class); pipelineChoosePolicy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, false); + .getPolicy(scmNodeManager, scmConfig, false); ecPipelineChoosePolicy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, true); + .getPolicy(scmNodeManager, scmConfig, true); if (configurator.getWritableContainerFactory() != null) { writableContainerFactory = configurator.getWritableContainerFactory(); } else { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java index 54d2ffed8284..4f86450d03e7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableECContainerProvider.java @@ -34,7 +34,11 @@ import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; +import org.apache.hadoop.hdds.scm.net.NodeSchema; +import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; import 
org.apache.hadoop.hdds.scm.pipeline.WritableECContainerProvider.WritableECContainerProviderConfig; +import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.CapacityPipelineChoosePolicy; import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.HealthyPipelineChoosePolicy; import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -54,8 +58,13 @@ import java.util.Map; import java.util.NavigableSet; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import static org.apache.hadoop.hdds.conf.StorageUnit.BYTES; +import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.CLOSED; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -84,7 +93,7 @@ public class TestWritableECContainerProvider { private OzoneConfiguration conf; private DBStore dbStore; private SCMHAManager scmhaManager; - private MockNodeManager nodeManager; + private static MockNodeManager nodeManager; private WritableContainerProvider provider; private ECReplicationConfig repConfig; @@ -93,8 +102,20 @@ public class TestWritableECContainerProvider { public static Collection policies() { Collection policies = new ArrayList<>(); + // init nodeManager + NodeSchemaManager.getInstance().init(new NodeSchema[] + {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}, true); + NetworkTopologyImpl cluster = + new NetworkTopologyImpl(NodeSchemaManager.getInstance()); + int count = 10; + List datanodes = IntStream.range(0, count) + .mapToObj(i -> MockDatanodeDetails.randomDatanodeDetails()) + .collect(Collectors.toList()); + nodeManager = new MockNodeManager(cluster, datanodes, false, count); + policies.add(new RandomPipelineChoosePolicy()); policies.add(new HealthyPipelineChoosePolicy()); + policies.add(new CapacityPipelineChoosePolicy().init(nodeManager)); return policies; } @@ -110,7 +131,6 @@ void setup(@TempDir File testDir) throws IOException { dbStore = DBStoreBuilder.createDBStore( conf, new SCMDBDefinition()); scmhaManager = SCMHAManagerStub.getInstance(true); - nodeManager = new MockNodeManager(true, 10); pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java new file mode 100644 index 000000000000..421d2396bfaf --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.pipeline.choose.algorithms; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; +import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test for the capacity pipeline choose policy. + */ +public class TestCapacityPipelineChoosePolicy { + + @Test + public void testChoosePipeline() throws Exception { + + // given 4 datanode + List datanodes = new ArrayList<>(); + for (int i = 0; i < 4; i++) { + datanodes.add(MockDatanodeDetails.randomDatanodeDetails()); + } + // dn0 dn1 dn2 dn3 + // used 0 10 20 30 + NodeManager mockNodeManager = mock(NodeManager.class); + when(mockNodeManager.getNodeStat(datanodes.get(0))) + .thenReturn(new SCMNodeMetric(100L, 0, 100L, 0, 0)); + when(mockNodeManager.getNodeStat(datanodes.get(1))) + .thenReturn(new SCMNodeMetric(100L, 10L, 90L, 0, 0)); + when(mockNodeManager.getNodeStat(datanodes.get(2))) + .thenReturn(new SCMNodeMetric(100L, 20L, 80L, 0, 0)); + when(mockNodeManager.getNodeStat(datanodes.get(3))) + .thenReturn(new SCMNodeMetric(100L, 30L, 70L, 0, 0)); + + PipelineChoosePolicy policy = new CapacityPipelineChoosePolicy().init(mockNodeManager); + + // generate 4 pipelines, and every pipeline has 3 datanodes + // + // pipeline0 dn1 dn2 dn3 + // pipeline1 dn0 dn2 dn3 + // pipeline2 dn0 dn1 dn3 + // pipeline3 dn0 dn1 dn2 + // + // In the above scenario, pipeline0 vs pipeline1 runs through three rounds + // of comparisons, (dn3 <-> dn3) -> (dn2 <-> dn2 ) -> (dn1 <-> dn0), + // finally comparing dn0 and dn1, and dn0 wins, so pipeline1 is selected. 
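[A minimal, self-contained sketch, not part of this patch, of the round-by-round comparison described in the comment above. It hard-codes the scmUsed values 0/10/20/30 from the table and uses an illustrative helper name, compareByDescendingUsage, instead of the real CapacityPipelineComparator.]

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;

public final class PipelineComparisonSketch {

  // Sort each pipeline's per-node scmUsed descending, then compare
  // front-to-front until one side differs (mirrors the comparator above).
  static int compareByDescendingUsage(long[] usedA, long[] usedB) {
    Deque<Long> a = toDescendingStack(usedA);
    Deque<Long> b = toDescendingStack(usedB);
    int result = 0;
    while (result == 0 && !a.isEmpty() && !b.isEmpty()) {
      result = Long.compare(a.pop(), b.pop());
    }
    return result;
  }

  // Pushing ascending values onto a stack yields descending pops.
  static Deque<Long> toDescendingStack(long[] used) {
    Deque<Long> stack = new ArrayDeque<>();
    Arrays.stream(used).sorted().forEach(stack::push);
    return stack;
  }

  public static void main(String[] args) {
    long[] pipeline0 = {10, 20, 30}; // dn1, dn2, dn3
    long[] pipeline1 = {0, 20, 30};  // dn0, dn2, dn3
    // Rounds: 30 vs 30, 20 vs 20, 10 vs 0 -> pipeline0 compares greater
    // (more used), so the policy would pick pipeline1.
    System.out.println(compareByDescendingUsage(pipeline0, pipeline1)); // 1
  }
}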
+ // + List pipelines = new ArrayList<>(); + for (int i = 0; i < 4; i++) { + List dns = new ArrayList<>(); + for (int j = 0; j < datanodes.size(); j++) { + if (i != j) { + dns.add(datanodes.get(j)); + } + } + Pipeline pipeline = MockPipeline.createPipeline(dns); + MockRatisPipelineProvider.markPipelineHealthy(pipeline); + pipelines.add(pipeline); + } + + Map selectedCount = new HashMap<>(); + for (Pipeline pipeline : pipelines) { + selectedCount.put(pipeline, 0); + } + for (int i = 0; i < 1000; i++) { + // choosePipeline + Pipeline pipeline = policy.choosePipeline(pipelines, null); + assertNotNull(pipeline); + selectedCount.put(pipeline, selectedCount.get(pipeline) + 1); + } + + // The selected count from most to least should be : + // pipeline3 > pipeline2 > pipeline1 > pipeline0 + assertThat(selectedCount.get(pipelines.get(3))).isGreaterThan(selectedCount.get(pipelines.get(2))); + assertThat(selectedCount.get(pipelines.get(2))).isGreaterThan(selectedCount.get(pipelines.get(1))); + assertThat(selectedCount.get(pipelines.get(1))).isGreaterThan(selectedCount.get(pipelines.get(0))); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java index 7d0a72ed2fb8..82fed5953aa8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java @@ -21,7 +21,9 @@ import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; import org.apache.hadoop.hdds.scm.PipelineRequestInformation; import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -42,17 +44,20 @@ public class TestPipelineChoosePolicyFactory { private ScmConfig scmConfig; + private NodeManager nodeManager; + @BeforeEach public void setup() { //initialize network topology instance conf = new OzoneConfiguration(); scmConfig = conf.getObject(ScmConfig.class); + nodeManager = new MockNodeManager(true, 5); } @Test public void testDefaultPolicy() throws IOException { PipelineChoosePolicy policy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, false); + .getPolicy(nodeManager, scmConfig, false); assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); } @@ -60,7 +65,7 @@ public void testDefaultPolicy() throws IOException { @Test public void testDefaultPolicyEC() throws IOException { PipelineChoosePolicy policy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, true); + .getPolicy(nodeManager, scmConfig, true); assertSame(OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); } @@ -69,7 +74,7 @@ public void testDefaultPolicyEC() throws IOException { public void testNonDefaultPolicyEC() throws IOException { scmConfig.setECPipelineChoosePolicyName(DummyGoodImpl.class.getName()); PipelineChoosePolicy policy = PipelineChoosePolicyFactory - .getPolicy(scmConfig, true); + .getPolicy(nodeManager, scmConfig, true); assertSame(DummyGoodImpl.class, policy.getClass()); } @@ -121,10 +126,10 @@ public void 
testConstructorNotFound() throws SCMException { scmConfig.setPipelineChoosePolicyName(DummyImpl.class.getName()); scmConfig.setECPipelineChoosePolicyName(DummyImpl.class.getName()); PipelineChoosePolicy policy = - PipelineChoosePolicyFactory.getPolicy(scmConfig, false); + PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, false); assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); - policy = PipelineChoosePolicyFactory.getPolicy(scmConfig, true); + policy = PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, true); assertSame(OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); } @@ -137,10 +142,10 @@ public void testClassNotImplemented() throws SCMException { scmConfig.setECPipelineChoosePolicyName( "org.apache.hadoop.hdds.scm.pipeline.choose.policy.HelloWorld"); PipelineChoosePolicy policy = - PipelineChoosePolicyFactory.getPolicy(scmConfig, false); + PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, false); assertSame(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); - policy = PipelineChoosePolicyFactory.getPolicy(scmConfig, true); + policy = PipelineChoosePolicyFactory.getPolicy(nodeManager, scmConfig, true); assertSame(OZONE_SCM_EC_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, policy.getClass()); } From 0c7561096846c82b667a59f8a922da172898409e Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 22 Jan 2024 19:01:22 +0100 Subject: [PATCH 29/43] Revert "HDDS-9426. Calculate Exclusive size for deep cleaned snapshot's deleted directories. (#5579)" (#6051) Reason for revert: incompatible proto changes This reverts commit 05284942fa5ec8026de0618f61af9b7cd758a90b. This reverts commit d969689e83241e2d5bec2c878324ee1ce84c0fe4. --- .../src/main/resources/ozone-default.xml | 19 - .../apache/hadoop/ozone/om/OMConfigKeys.java | 12 - .../hadoop/ozone/om/helpers/SnapshotInfo.java | 44 +- .../ozone/om/helpers/TestOmSnapshotInfo.java | 11 +- .../TestSnapshotDirectoryCleaningService.java | 272 --------- .../src/main/proto/OmClientProtocol.proto | 10 +- .../apache/hadoop/ozone/om/KeyManager.java | 7 - .../hadoop/ozone/om/KeyManagerImpl.java | 30 - .../snapshot/OMSnapshotPurgeRequest.java | 21 +- .../OMSnapshotSetPropertyRequest.java | 35 +- .../service/AbstractKeyDeletingService.java | 100 ---- .../ozone/om/service/KeyDeletingService.java | 151 +++-- .../SnapshotDirectoryCleaningService.java | 515 ------------------ ...SnapshotSetPropertyRequestAndResponse.java | 8 +- .../om/service/TestKeyDeletingService.java | 10 +- 15 files changed, 158 insertions(+), 1087 deletions(-) delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 6a29dc81ec94..7debcd479ec4 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -3600,25 +3600,6 @@ - - ozone.snapshot.directory.service.timeout - 300s - OZONE, PERFORMANCE, OM - - Timeout value for SnapshotDirectoryCleaningService. - - - - - ozone.snapshot.directory.service.interval - 24h - OZONE, PERFORMANCE, OM - - The time interval between successive SnapshotDirectoryCleaningService - thread run. 
- - - ozone.scm.event.ContainerReport.thread.pool.size 10 diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 5dd7579eb916..58f341b74aca 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -390,18 +390,6 @@ private OMConfigKeys() { public static final String OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT = "60s"; - /** - * Configuration properties for Snapshot Directory Service. - */ - public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL = - "ozone.snapshot.directory.service.interval"; - public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT - = "24h"; - public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT = - "ozone.snapshot.directory.service.timeout"; - public static final String - OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT = "300s"; - public static final String OZONE_PATH_DELETING_LIMIT_PER_TASK = "ozone.path.deleting.limit.per.task"; // default is 6000 taking account of 32MB buffer size, and assuming diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 56103ccb3ab8..8ee9c6ee1f52 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -123,7 +123,6 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) { private long referencedReplicatedSize; private long exclusiveSize; private long exclusiveReplicatedSize; - private boolean deepCleanedDeletedDir; /** * Private constructor, constructed via builder. 
@@ -163,8 +162,7 @@ private SnapshotInfo(UUID snapshotId, long referencedSize, long referencedReplicatedSize, long exclusiveSize, - long exclusiveReplicatedSize, - boolean deepCleanedDeletedDir) { + long exclusiveReplicatedSize) { this.snapshotId = snapshotId; this.name = name; this.volumeName = volumeName; @@ -183,7 +181,6 @@ private SnapshotInfo(UUID snapshotId, this.referencedReplicatedSize = referencedReplicatedSize; this.exclusiveSize = exclusiveSize; this.exclusiveReplicatedSize = exclusiveReplicatedSize; - this.deepCleanedDeletedDir = deepCleanedDeletedDir; } public void setName(String name) { @@ -288,7 +285,7 @@ public void setSstFiltered(boolean sstFiltered) { } public SnapshotInfo.Builder toBuilder() { - return new Builder() + return new SnapshotInfo.Builder() .setSnapshotId(snapshotId) .setName(name) .setVolumeName(volumeName) @@ -305,8 +302,7 @@ public SnapshotInfo.Builder toBuilder() { .setReferencedSize(referencedSize) .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) - .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setDeepCleanedDeletedDir(deepCleanedDeletedDir); + .setExclusiveReplicatedSize(exclusiveReplicatedSize); } /** @@ -331,7 +327,6 @@ public static class Builder { private long referencedReplicatedSize; private long exclusiveSize; private long exclusiveReplicatedSize; - private boolean deepCleanedDeletedDir; public Builder() { // default values @@ -428,11 +423,6 @@ public Builder setExclusiveReplicatedSize(long exclusiveReplicatedSize) { return this; } - public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { - this.deepCleanedDeletedDir = deepCleanedDeletedDir; - return this; - } - public SnapshotInfo build() { Preconditions.checkNotNull(name); return new SnapshotInfo( @@ -453,8 +443,7 @@ public SnapshotInfo build() { referencedSize, referencedReplicatedSize, exclusiveSize, - exclusiveReplicatedSize, - deepCleanedDeletedDir + exclusiveReplicatedSize ); } } @@ -476,8 +465,7 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { .setReferencedSize(referencedSize) .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) - .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setDeepCleanedDeletedDir(deepCleanedDeletedDir); + .setExclusiveReplicatedSize(exclusiveReplicatedSize); if (pathPreviousSnapshotId != null) { sib.setPathPreviousSnapshotID(toProtobuf(pathPreviousSnapshotId)); @@ -550,11 +538,6 @@ public static SnapshotInfo getFromProtobuf( snapshotInfoProto.getExclusiveReplicatedSize()); } - if (snapshotInfoProto.hasDeepCleanedDeletedDir()) { - osib.setDeepCleanedDeletedDir( - snapshotInfoProto.getDeepCleanedDeletedDir()); - } - osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath()) .setCheckpointDir(snapshotInfoProto.getCheckpointDir()) .setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber()); @@ -639,14 +622,6 @@ public long getExclusiveReplicatedSize() { return exclusiveReplicatedSize; } - public boolean getDeepCleanedDeletedDir() { - return deepCleanedDeletedDir; - } - - public void setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { - this.deepCleanedDeletedDir = deepCleanedDeletedDir; - } - /** * Generate default name of snapshot, (used if user doesn't provide one). 
*/ @@ -680,8 +655,7 @@ public static SnapshotInfo newInstance(String volumeName, .setSnapshotPath(volumeName + OM_KEY_PREFIX + bucketName) .setVolumeName(volumeName) .setBucketName(bucketName) - .setDeepClean(false) - .setDeepCleanedDeletedDir(false); + .setDeepClean(true); if (snapshotId != null) { builder.setCheckpointDir(getCheckpointDirName(snapshotId)); @@ -714,8 +688,7 @@ public boolean equals(Object o) { referencedSize == that.referencedSize && referencedReplicatedSize == that.referencedReplicatedSize && exclusiveSize == that.exclusiveSize && - exclusiveReplicatedSize == that.exclusiveReplicatedSize && - deepCleanedDeletedDir == that.deepCleanedDeletedDir; + exclusiveReplicatedSize == that.exclusiveReplicatedSize; } @Override @@ -726,7 +699,7 @@ public int hashCode() { globalPreviousSnapshotId, snapshotPath, checkpointDir, deepClean, sstFiltered, referencedSize, referencedReplicatedSize, - exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir); + exclusiveSize, exclusiveReplicatedSize); } /** @@ -753,7 +726,6 @@ public SnapshotInfo copyObject() { .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveReplicatedSize) - .setDeepCleanedDeletedDir(deepCleanedDeletedDir) .build(); } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index dd9cf34c8e29..6dc3f913d0f8 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -66,13 +66,12 @@ private SnapshotInfo createSnapshotInfo() { .setSnapshotPath(SNAPSHOT_PATH) .setCheckpointDir(CHECKPOINT_DIR) .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) - .setDeepClean(false) + .setDeepClean(true) .setSstFiltered(false) .setReferencedSize(2000L) .setReferencedReplicatedSize(6000L) .setExclusiveSize(1000L) .setExclusiveReplicatedSize(3000L) - .setDeepCleanedDeletedDir(false) .build(); } @@ -90,13 +89,12 @@ private OzoneManagerProtocolProtos.SnapshotInfo createSnapshotInfoProto() { .setSnapshotPath(SNAPSHOT_PATH) .setCheckpointDir(CHECKPOINT_DIR) .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) - .setDeepClean(false) + .setDeepClean(true) .setSstFiltered(false) .setReferencedSize(2000L) .setReferencedReplicatedSize(6000L) .setExclusiveSize(1000L) .setExclusiveReplicatedSize(3000L) - .setDeepCleanedDeletedDir(false) .build(); } @@ -142,9 +140,6 @@ public void testSnapshotInfoToProto() { assertEquals( snapshotInfoEntryExpected.getExclusiveReplicatedSize(), snapshotInfoEntryActual.getExclusiveReplicatedSize()); - assertEquals( - snapshotInfoEntryExpected.getDeepCleanedDeletedDir(), - snapshotInfoEntryActual.getDeepCleanedDeletedDir()); assertEquals(snapshotInfoEntryExpected, snapshotInfoEntryActual); } @@ -181,8 +176,6 @@ public void testSnapshotInfoProtoToSnapshotInfo() { snapshotInfoActual.getExclusiveSize()); assertEquals(snapshotInfoExpected.getExclusiveReplicatedSize(), snapshotInfoActual.getExclusiveReplicatedSize()); - assertEquals(snapshotInfoExpected.getDeepCleanedDeletedDir(), - snapshotInfoActual.getDeepCleanedDeletedDir()); assertEquals(snapshotInfoExpected, snapshotInfoActual); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java deleted file mode 100644 index 6b39b76c5466..000000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; -import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; - -/** - * Test Snapshot Directory Service. 
- */ -@Timeout(300) -public class TestSnapshotDirectoryCleaningService { - - private static final Logger LOG = - LoggerFactory.getLogger(TestSnapshotDirectoryCleaningService.class); - - private static MiniOzoneCluster cluster; - private static FileSystem fs; - private static String volumeName; - private static String bucketName; - private static OzoneClient client; - - @BeforeAll - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, 2500); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 2500, - TimeUnit.MILLISECONDS); - conf.setBoolean(OZONE_ACL_ENABLED, true); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); - cluster.waitForClusterToBeReady(); - client = cluster.newClient(); - - // create a volume and a bucket to be used by OzoneFileSystem - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, - BucketLayout.FILE_SYSTEM_OPTIMIZED); - volumeName = bucket.getVolumeName(); - bucketName = bucket.getName(); - - String rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); - - // Set the fs.defaultFS and start the filesystem - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - // Set the number of keys to be processed during batch operate. - conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); - - fs = FileSystem.get(conf); - } - - @AfterAll - public static void teardown() { - IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } - IOUtils.closeQuietly(fs); - } - - @AfterEach - public void cleanup() { - try { - Path root = new Path("/"); - FileStatus[] fileStatuses = fs.listStatus(root); - for (FileStatus fileStatus : fileStatuses) { - fs.delete(fileStatus.getPath(), true); - } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } - } - - @SuppressWarnings("checkstyle:LineLength") - @Test - public void testExclusiveSizeWithDirectoryDeepClean() throws Exception { - - Table deletedDirTable = - cluster.getOzoneManager().getMetadataManager().getDeletedDirTable(); - Table keyTable = - cluster.getOzoneManager().getMetadataManager() - .getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); - Table dirTable = - cluster.getOzoneManager().getMetadataManager().getDirectoryTable(); - Table deletedKeyTable = - cluster.getOzoneManager().getMetadataManager().getDeletedTable(); - Table snapshotInfoTable = - cluster.getOzoneManager().getMetadataManager().getSnapshotInfoTable(); - SnapshotDirectoryCleaningService snapshotDirectoryCleaningService = - cluster.getOzoneManager().getKeyManager().getSnapshotDirectoryService(); - - /* DirTable - /v/b/snapDir - /v/b/snapDir/appRoot0-2/ - /v/b/snapDir/appRoot0-2/parentDir0-2/ - FileTable - /v/b/snapDir/testKey0 - testKey4 = 5 keys - /v/b/snapDir/appRoot0-2/parentDir0-2/childFile = 9 keys - /v/b/snapDir/appRoot0/parentDir0-2/childFile0-4 = 15 keys - */ - - Path root = new Path("/snapDir"); - // Create parent dir from root. 
- fs.mkdirs(root); - - // Add 5 files inside root dir - // Creates /v/b/snapDir/testKey0 - testKey4 - for (int i = 0; i < 5; i++) { - Path path = new Path(root, "testKey" + i); - try (FSDataOutputStream stream = fs.create(path)) { - stream.write(1); - } - } - - // Creates /v/b/snapDir/appRoot0-2/parentDir0-2/childFile - for (int i = 0; i < 3; i++) { - for (int j = 0; j < 3; j++) { - Path appRoot = new Path(root, "appRoot" + j); - Path parent = new Path(appRoot, "parentDir" + i); - Path child = new Path(parent, "childFile"); - try (FSDataOutputStream stream = fs.create(child)) { - stream.write(1); - } - } - } - - assertTableRowCount(keyTable, 14); - assertTableRowCount(dirTable, 13); - // Create snapshot - client.getObjectStore().createSnapshot(volumeName, bucketName, "snap1"); - - // Creates /v/b/snapDir/appRoot0/parentDir0-2/childFile0-4 - for (int i = 0; i < 3; i++) { - Path appRoot = new Path(root, "appRoot0"); - Path parent = new Path(appRoot, "parentDir" + i); - for (int j = 0; j < 5; j++) { - Path child = new Path(parent, "childFile" + j); - try (FSDataOutputStream stream = fs.create(child)) { - stream.write(1); - } - } - } - - for (int i = 5; i < 10; i++) { - Path path = new Path(root, "testKey" + i); - try (FSDataOutputStream stream = fs.create(path)) { - stream.write(1); - } - } - - assertTableRowCount(deletedDirTable, 0); - assertTableRowCount(keyTable, 34); - assertTableRowCount(dirTable, 13); - Path appRoot0 = new Path(root, "appRoot0"); - // Only parentDir0-2/childFile under appRoot0 is exclusive for snap1 - fs.delete(appRoot0, true); - assertTableRowCount(deletedDirTable, 1); - client.getObjectStore().createSnapshot(volumeName, bucketName, "snap2"); - - // Delete testKey0-9 - for (int i = 0; i < 10; i++) { - Path testKey = new Path(root, "testKey" + i); - fs.delete(testKey, false); - } - - fs.delete(root, true); - assertTableRowCount(deletedKeyTable, 10); - client.getObjectStore().createSnapshot(volumeName, bucketName, "snap3"); - long prevRunCount = snapshotDirectoryCleaningService.getRunCount().get(); - GenericTestUtils.waitFor(() -> snapshotDirectoryCleaningService.getRunCount().get() - > prevRunCount + 1, 100, 10000); - - Thread.sleep(2000); - Map expectedSize = new HashMap() {{ - // /v/b/snapDir/appRoot0/parentDir0-2/childFile contribute - // exclusive size, /v/b/snapDir/appRoot0/parentDir0-2/childFile0-4 - // are deep cleaned and hence don't contribute to size. - put("snap1", 3L); - // Only testKey5-9 contribute to the exclusive size - put("snap2", 5L); - put("snap3", 0L); - }}; - Thread.sleep(500); - try (TableIterator> - iterator = snapshotInfoTable.iterator()) { - while (iterator.hasNext()) { - Table.KeyValue snapshotEntry = iterator.next(); - String snapshotName = snapshotEntry.getValue().getName(); - assertEquals(expectedSize.get(snapshotName), snapshotEntry.getValue(). 
- getExclusiveSize()); - // Since for the test we are using RATIS/THREE - assertEquals(expectedSize.get(snapshotName) * 3, - snapshotEntry.getValue().getExclusiveReplicatedSize()); - - } - } - } - - private void assertTableRowCount(Table table, int count) - throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor(() -> assertTableRowCount(count, table), 1000, - 120000); // 2 minutes - } - - private boolean assertTableRowCount(int expectedCount, - Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); - LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; - } -} diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index e49f23b11528..fd83981507c6 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -851,7 +851,6 @@ message SnapshotInfo { // snapshot exclusive size after replication optional uint64 exclusiveReplicatedSize = 18; // note: shared sizes can be calculated from: referenced - exclusive - optional bool deepCleanedDeletedDir = 19; } message SnapshotDiffJobProto { @@ -1892,16 +1891,15 @@ message SnapshotMoveKeyInfos { message SnapshotPurgeRequest { repeated string snapshotDBKeys = 1; + repeated string updatedSnapshotDBKey = 2; } message SetSnapshotPropertyRequest { - optional string snapshotKey = 1; - optional SnapshotSize snapshotSize = 2; - optional bool deepCleanedDeletedDir = 3; - optional bool deepCleanedDeletedKey = 4; + optional SnapshotProperty snapshotProperty = 1; } -message SnapshotSize { +message SnapshotProperty { + optional string snapshotKey = 1; optional uint64 exclusiveSize = 2; optional uint64 exclusiveReplicatedSize = 3; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 4378701426c2..0fe1cdbe8031 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; -import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import java.io.IOException; @@ -286,10 +285,4 @@ List getPendingDeletionSubFiles(long volumeId, * @return Background service. */ SnapshotDeletingService getSnapshotDeletingService(); - - /** - * Returns the instance of Snapshot Directory service. - * @return Background service. 
- */ - SnapshotDirectoryCleaningService getSnapshotDirectoryService(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 9cfd4043146a..cac2aa53f6fa 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -86,7 +86,6 @@ import org.apache.hadoop.ozone.om.service.MultipartUploadCleanupService; import org.apache.hadoop.ozone.om.service.OpenKeyCleanupService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; -import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; @@ -132,10 +131,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OzoneManagerUtils.getBucketLayout; @@ -186,7 +181,6 @@ public class KeyManagerImpl implements KeyManager { private BackgroundService openKeyCleanupService; private BackgroundService multipartUploadCleanupService; - private SnapshotDirectoryCleaningService snapshotDirectoryCleaningService; public KeyManagerImpl(OzoneManager om, ScmClient scmClient, OzoneConfiguration conf, OMPerformanceMetrics metrics) { @@ -306,22 +300,6 @@ public void start(OzoneConfiguration configuration) { } } - if (snapshotDirectoryCleaningService == null && - ozoneManager.isFilesystemSnapshotEnabled()) { - long dirDeleteInterval = configuration.getTimeDuration( - OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, - OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - long serviceTimeout = configuration.getTimeDuration( - OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT, - OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - snapshotDirectoryCleaningService = new SnapshotDirectoryCleaningService( - dirDeleteInterval, TimeUnit.MILLISECONDS, serviceTimeout, - ozoneManager, scmClient.getBlockClient()); - snapshotDirectoryCleaningService.start(); - } - if (multipartUploadCleanupService == null) { long serviceInterval = configuration.getTimeDuration( OZONE_OM_MPU_CLEANUP_SERVICE_INTERVAL, @@ -368,10 +346,6 @@ public void stop() throws IOException { multipartUploadCleanupService.shutdown(); multipartUploadCleanupService = null; } - if (snapshotDirectoryCleaningService != null) { - 
snapshotDirectoryCleaningService.shutdown(); - snapshotDirectoryCleaningService = null; - } } private OmBucketInfo getBucketInfo(String volumeName, String bucketName) @@ -707,10 +681,6 @@ public SnapshotDeletingService getSnapshotDeletingService() { return snapshotDeletingService; } - public SnapshotDirectoryCleaningService getSnapshotDirectoryService() { - return snapshotDirectoryCleaningService; - } - public boolean isSstFilteringSvcEnabled() { long serviceInterval = ozoneManager.getConfiguration() .getTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index b7dba8260269..09711c704567 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -72,10 +72,22 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn try { List snapshotDbKeys = snapshotPurgeRequest .getSnapshotDBKeysList(); + List snapInfosToUpdate = snapshotPurgeRequest + .getUpdatedSnapshotDBKeyList(); Map updatedSnapInfos = new HashMap<>(); Map updatedPathPreviousAndGlobalSnapshots = new HashMap<>(); + // Snapshots that are already deepCleaned by the KeyDeletingService + // can be marked as deepCleaned. + for (String snapTableKey : snapInfosToUpdate) { + SnapshotInfo snapInfo = omMetadataManager.getSnapshotInfoTable() + .get(snapTableKey); + + updateSnapshotInfoAndCache(snapInfo, omMetadataManager, + trxnLogIndex, updatedSnapInfos, false); + } + // Snapshots that are purged by the SnapshotDeletingService // will update the next snapshot so that is can be deep cleaned // by the KeyDeletingService in the next run. @@ -88,7 +100,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn snapshotChainManager, omSnapshotManager); updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, - trxnLogIndex, updatedSnapInfos); + trxnLogIndex, updatedSnapInfos, true); updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex, updatedPathPreviousAndGlobalSnapshots); ozoneManager.getOmSnapshotManager().getSnapshotCache() @@ -108,12 +120,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManagerImpl omMetadataManager, long trxnLogIndex, - Map updatedSnapInfos) { + Map updatedSnapInfos, boolean deepClean) { if (snapInfo != null) { - // Setting next snapshot deep clean to false, Since the - // current snapshot is deleted. We can potentially - // reclaim more keys in the next snapshot. 
- snapInfo.setDeepClean(false); + snapInfo.setDeepClean(deepClean); // Update table cache first omMetadataManager.getSnapshotInfoTable().addCacheEntry( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index b3dd5206c993..966a265af806 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotSetPropertyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotProperty; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,10 +60,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OzoneManagerProtocolProtos.SetSnapshotPropertyRequest setSnapshotPropertyRequest = getOmRequest() .getSetSnapshotPropertyRequest(); + + SnapshotProperty snapshotProperty = setSnapshotPropertyRequest + .getSnapshotProperty(); SnapshotInfo updatedSnapInfo = null; try { - String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + String snapshotKey = snapshotProperty.getSnapshotKey(); + long exclusiveSize = snapshotProperty.getExclusiveSize(); + long exclusiveReplicatedSize = snapshotProperty + .getExclusiveReplicatedSize(); updatedSnapInfo = metadataManager.getSnapshotInfoTable() .get(snapshotKey); @@ -73,28 +79,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn " is not found", INVALID_SNAPSHOT_ERROR); } - if (setSnapshotPropertyRequest.hasDeepCleanedDeletedDir()) { - updatedSnapInfo.setDeepCleanedDeletedDir(setSnapshotPropertyRequest - .getDeepCleanedDeletedDir()); - } - - if (setSnapshotPropertyRequest.hasDeepCleanedDeletedKey()) { - updatedSnapInfo.setDeepClean(setSnapshotPropertyRequest - .getDeepCleanedDeletedKey()); - } - - if (setSnapshotPropertyRequest.hasSnapshotSize()) { - SnapshotSize snapshotSize = setSnapshotPropertyRequest - .getSnapshotSize(); - long exclusiveSize = updatedSnapInfo.getExclusiveSize() + - snapshotSize.getExclusiveSize(); - long exclusiveReplicatedSize = updatedSnapInfo - .getExclusiveReplicatedSize() + snapshotSize - .getExclusiveReplicatedSize(); - // Set Exclusive size. - updatedSnapInfo.setExclusiveSize(exclusiveSize); - updatedSnapInfo.setExclusiveReplicatedSize(exclusiveReplicatedSize); - } + // Set Exclusive size. 
+ updatedSnapInfo.setExclusiveSize(exclusiveSize); + updatedSnapInfo.setExclusiveReplicatedSize(exclusiveReplicatedSize); // Update Table Cache metadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(snapshotKey), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 21ad0872769a..1091053ebdc4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -469,106 +469,6 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, return remainNum; } - /** - * To calculate Exclusive Size for current snapshot, Check - * the next snapshot deletedTable if the deleted key is - * referenced in current snapshot and not referenced in the - * previous snapshot then that key is exclusive to the current - * snapshot. Here since we are only iterating through - * deletedTable we can check the previous and previous to - * previous snapshot to achieve the same. - * previousSnapshot - Snapshot for which exclusive size is - * getting calculating. - * currSnapshot - Snapshot's deletedTable is used to calculate - * previousSnapshot snapshot's exclusive size. - * previousToPrevSnapshot - Snapshot which is used to check - * if key is exclusive to previousSnapshot. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - public void calculateExclusiveSize( - SnapshotInfo previousSnapshot, - SnapshotInfo previousToPrevSnapshot, - OmKeyInfo keyInfo, - OmBucketInfo bucketInfo, long volumeId, - Table snapRenamedTable, - Table previousKeyTable, - Table prevRenamedTable, - Table previousToPrevKeyTable, - Map exclusiveSizeMap, - Map exclusiveReplicatedSizeMap) throws IOException { - String prevSnapKey = previousSnapshot.getTableKey(); - long exclusiveReplicatedSize = - exclusiveReplicatedSizeMap.getOrDefault( - prevSnapKey, 0L) + keyInfo.getReplicatedSize(); - long exclusiveSize = exclusiveSizeMap.getOrDefault( - prevSnapKey, 0L) + keyInfo.getDataSize(); - - // If there is no previous to previous snapshot, then - // the previous snapshot is the first snapshot. - if (previousToPrevSnapshot == null) { - exclusiveSizeMap.put(prevSnapKey, exclusiveSize); - exclusiveReplicatedSizeMap.put(prevSnapKey, - exclusiveReplicatedSize); - } else { - OmKeyInfo keyInfoPrevSnapshot = getPreviousSnapshotKeyName( - keyInfo, bucketInfo, volumeId, - snapRenamedTable, previousKeyTable); - OmKeyInfo keyInfoPrevToPrevSnapshot = getPreviousSnapshotKeyName( - keyInfoPrevSnapshot, bucketInfo, volumeId, - prevRenamedTable, previousToPrevKeyTable); - // If the previous to previous snapshot doesn't - // have the key, then it is exclusive size for the - // previous snapshot. 
- if (keyInfoPrevToPrevSnapshot == null) { - exclusiveSizeMap.put(prevSnapKey, exclusiveSize); - exclusiveReplicatedSizeMap.put(prevSnapKey, - exclusiveReplicatedSize); - } - } - } - - private OmKeyInfo getPreviousSnapshotKeyName( - OmKeyInfo keyInfo, OmBucketInfo bucketInfo, long volumeId, - Table snapRenamedTable, - Table previousKeyTable) throws IOException { - - if (keyInfo == null) { - return null; - } - - String dbKeyPrevSnap; - if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { - dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzonePathKey( - volumeId, - bucketInfo.getObjectID(), - keyInfo.getParentObjectID(), - keyInfo.getFileName()); - } else { - dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzoneKey( - keyInfo.getVolumeName(), - keyInfo.getBucketName(), - keyInfo.getKeyName()); - } - - String dbRenameKey = getOzoneManager().getMetadataManager().getRenameKey( - keyInfo.getVolumeName(), - keyInfo.getBucketName(), - keyInfo.getObjectID()); - - String renamedKey = snapRenamedTable.getIfExist(dbRenameKey); - OmKeyInfo prevKeyInfo = renamedKey != null ? - previousKeyTable.get(renamedKey) : - previousKeyTable.get(dbKeyPrevSnap); - - if (prevKeyInfo == null || - prevKeyInfo.getObjectID() != keyInfo.getObjectID()) { - return null; - } - - return isBlockLocationInfoSame(prevKeyInfo, keyInfo) ? - prevKeyInfo : null; - } - protected boolean isBufferLimitCrossed( int maxLimit, int cLimit, int increment) { return cLimit + increment >= maxLimit; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index e89608e82db2..6dcc2544b4da 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -48,7 +48,8 @@ import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotProperty; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.hdds.utils.BackgroundTask; @@ -97,7 +98,6 @@ public class KeyDeletingService extends AbstractKeyDeletingService { private final Map exclusiveSizeMap; private final Map exclusiveReplicatedSizeMap; private final Set completedExclusiveSizeSet; - private final Map snapshotSeekMap; public KeyDeletingService(OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient, @@ -116,7 +116,6 @@ public KeyDeletingService(OzoneManager ozoneManager, this.exclusiveSizeMap = new HashMap<>(); this.exclusiveReplicatedSizeMap = new HashMap<>(); this.completedExclusiveSizeSet = new HashSet<>(); - this.snapshotSeekMap = new HashMap<>(); } /** @@ -259,8 +258,8 @@ private void processSnapshotDeepClean(int delCount) // Deep clean only on active snapshot. Deleted Snapshots will be // cleaned up by SnapshotDeletingService. 
- if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || - currSnapInfo.getDeepClean()) { + if (!currSnapInfo.getSnapshotStatus().equals(SNAPSHOT_ACTIVE) || + !currSnapInfo.getDeepClean()) { continue; } @@ -343,22 +342,11 @@ private void processSnapshotDeepClean(int delCount) RepeatedOmKeyInfo>> deletedIterator = snapDeletedTable .iterator()) { - String lastKeyInCurrentRun = null; - String deletedTableSeek = snapshotSeekMap.getOrDefault( - currSnapInfo.getTableKey(), snapshotBucketKey); - deletedIterator.seek(deletedTableSeek); - // To avoid processing the last key from the previous - // run again. - if (!deletedTableSeek.equals(snapshotBucketKey) && - deletedIterator.hasNext()) { - deletedIterator.next(); - } - + deletedIterator.seek(snapshotBucketKey); while (deletedIterator.hasNext() && delCount < keyLimitPerTask) { Table.KeyValue deletedKeyValue = deletedIterator.next(); String deletedKey = deletedKeyValue.getKey(); - lastKeyInCurrentRun = deletedKey; // Exit if it is out of the bucket scope. if (!deletedKey.startsWith(snapshotBucketKey)) { @@ -378,8 +366,7 @@ private void processSnapshotDeepClean(int delCount) calculateExclusiveSize(previousSnapshot, previousToPrevSnapshot, keyInfo, bucketInfo, volumeId, snapRenamedTable, previousKeyTable, prevRenamedTable, - previousToPrevKeyTable, exclusiveSizeMap, - exclusiveReplicatedSizeMap); + previousToPrevKeyTable); } if (isKeyReclaimable(previousKeyTable, snapRenamedTable, @@ -419,15 +406,6 @@ private void processSnapshotDeepClean(int delCount) completedExclusiveSizeSet.add( previousSnapshot.getTableKey()); } - - snapshotSeekMap.remove(currSnapInfo.getTableKey()); - } else { - // There are keys that still needs processing - // we can continue from it in the next iteration - if (lastKeyInCurrentRun != null) { - snapshotSeekMap.put(currSnapInfo.getTableKey(), - lastKeyInCurrentRun); - } } if (!keysToPurge.isEmpty()) { @@ -442,8 +420,98 @@ private void processSnapshotDeepClean(int delCount) } } - updateDeepCleanedSnapshots(deepCleanedSnapshots); updateSnapshotExclusiveSize(); + updateDeepCleanedSnapshots(deepCleanedSnapshots); + } + + /** + * To calculate Exclusive Size for current snapshot, Check + * the next snapshot deletedTable if the deleted key is + * referenced in current snapshot and not referenced in the + * previous snapshot then that key is exclusive to the current + * snapshot. Here since we are only iterating through + * deletedTable we can check the previous and previous to + * previous snapshot to achieve the same. + * previousSnapshot - Snapshot for which exclusive size is + * getting calculating. + * currSnapshot - Snapshot's deletedTable is used to calculate + * previousSnapshot snapshot's exclusive size. + * previousToPrevSnapshot - Snapshot which is used to check + * if key is exclusive to previousSnapshot. 
+ */ + @SuppressWarnings("checkstyle:ParameterNumber") + private void calculateExclusiveSize( + SnapshotInfo previousSnapshot, + SnapshotInfo previousToPrevSnapshot, + OmKeyInfo keyInfo, + OmBucketInfo bucketInfo, long volumeId, + Table snapRenamedTable, + Table previousKeyTable, + Table prevRenamedTable, + Table previousToPrevKeyTable) throws IOException { + String prevSnapKey = previousSnapshot.getTableKey(); + long exclusiveReplicatedSize = + exclusiveReplicatedSizeMap.getOrDefault( + prevSnapKey, 0L) + keyInfo.getReplicatedSize(); + long exclusiveSize = exclusiveSizeMap.getOrDefault( + prevSnapKey, 0L) + keyInfo.getDataSize(); + + // If there is no previous to previous snapshot, then + // the previous snapshot is the first snapshot. + if (previousToPrevSnapshot == null) { + exclusiveSizeMap.put(prevSnapKey, exclusiveSize); + exclusiveReplicatedSizeMap.put(prevSnapKey, + exclusiveReplicatedSize); + } else { + OmKeyInfo keyInfoPrevSnapshot = getPreviousSnapshotKeyName( + keyInfo, bucketInfo, volumeId, + snapRenamedTable, previousKeyTable); + OmKeyInfo keyInfoPrevToPrevSnapshot = getPreviousSnapshotKeyName( + keyInfoPrevSnapshot, bucketInfo, volumeId, + prevRenamedTable, previousToPrevKeyTable); + // If the previous to previous snapshot doesn't + // have the key, then it is exclusive size for the + // previous snapshot. + if (keyInfoPrevToPrevSnapshot == null) { + exclusiveSizeMap.put(prevSnapKey, exclusiveSize); + exclusiveReplicatedSizeMap.put(prevSnapKey, + exclusiveReplicatedSize); + } + } + } + + private OmKeyInfo getPreviousSnapshotKeyName( + OmKeyInfo keyInfo, OmBucketInfo bucketInfo, long volumeId, + Table snapRenamedTable, + Table previousKeyTable) throws IOException { + + if (keyInfo == null) { + return null; + } + + String dbKeyPrevSnap; + if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { + dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzonePathKey( + volumeId, + bucketInfo.getObjectID(), + keyInfo.getParentObjectID(), + keyInfo.getFileName()); + } else { + dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzoneKey( + keyInfo.getVolumeName(), + keyInfo.getBucketName(), + keyInfo.getKeyName()); + } + + String dbRenameKey = getOzoneManager().getMetadataManager().getRenameKey( + keyInfo.getVolumeName(), + keyInfo.getBucketName(), + keyInfo.getObjectID()); + + String renamedKey = snapRenamedTable.getIfExist(dbRenameKey); + dbKeyPrevSnap = renamedKey != null ? 
renamedKey : dbKeyPrevSnap; + + return previousKeyTable.get(dbKeyPrevSnap); } private void updateSnapshotExclusiveSize() { @@ -457,15 +525,15 @@ private void updateSnapshotExclusiveSize() { while (completedSnapshotIterator.hasNext()) { ClientId clientId = ClientId.randomId(); String dbKey = completedSnapshotIterator.next(); - SnapshotSize snapshotSize = SnapshotSize.newBuilder() - .setExclusiveSize(exclusiveSizeMap.getOrDefault(dbKey, 0L)) + SnapshotProperty snapshotProperty = SnapshotProperty.newBuilder() + .setSnapshotKey(dbKey) + .setExclusiveSize(exclusiveSizeMap.get(dbKey)) .setExclusiveReplicatedSize( - exclusiveReplicatedSizeMap.getOrDefault(dbKey, 0L)) + exclusiveReplicatedSizeMap.get(dbKey)) .build(); SetSnapshotPropertyRequest setSnapshotPropertyRequest = SetSnapshotPropertyRequest.newBuilder() - .setSnapshotKey(dbKey) - .setSnapshotSize(snapshotSize) + .setSnapshotProperty(snapshotProperty) .build(); OMRequest omRequest = OMRequest.newBuilder() @@ -481,17 +549,16 @@ private void updateSnapshotExclusiveSize() { } private void updateDeepCleanedSnapshots(List deepCleanedSnapshots) { - for (String deepCleanedSnapshot: deepCleanedSnapshots) { + if (!deepCleanedSnapshots.isEmpty()) { ClientId clientId = ClientId.randomId(); - SetSnapshotPropertyRequest setSnapshotPropertyRequest = - SetSnapshotPropertyRequest.newBuilder() - .setSnapshotKey(deepCleanedSnapshot) - .setDeepCleanedDeletedKey(true) - .build(); + SnapshotPurgeRequest snapshotPurgeRequest = SnapshotPurgeRequest + .newBuilder() + .addAllUpdatedSnapshotDBKey(deepCleanedSnapshots) + .build(); OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SetSnapshotProperty) - .setSetSnapshotPropertyRequest(setSnapshotPropertyRequest) + .setCmdType(Type.SnapshotPurge) + .setSnapshotPurgeRequest(snapshotPurgeRequest) .setClientId(clientId.toString()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java deleted file mode 100644 index 9a60f6303861..000000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ /dev/null @@ -1,515 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.service; - -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.ServiceException; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.utils.BackgroundTask; -import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; -import org.apache.hadoop.hdds.utils.BackgroundTaskResult; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.IOmMetadataReader; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.SnapshotChainManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; -import org.apache.hadoop.ozone.om.request.file.OMFileRequest; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Stack; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; - -/** - * Snapshot BG Service for deleted directory deep clean and exclusive size - * calculation for deleted directories. - */ -public class SnapshotDirectoryCleaningService - extends AbstractKeyDeletingService { - // Use only a single thread for DirDeletion. Multiple threads would read - // or write to same tables and can send deletion requests for same key - // multiple times. 
- private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1; - - private final AtomicBoolean suspended; - private final Map exclusiveSizeMap; - private final Map exclusiveReplicatedSizeMap; - - public SnapshotDirectoryCleaningService(long interval, TimeUnit unit, - long serviceTimeout, - OzoneManager ozoneManager, - ScmBlockLocationProtocol scmClient) { - super(SnapshotDirectoryCleaningService.class.getSimpleName(), - interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout, - ozoneManager, scmClient); - this.suspended = new AtomicBoolean(false); - this.exclusiveSizeMap = new HashMap<>(); - this.exclusiveReplicatedSizeMap = new HashMap<>(); - } - - private boolean shouldRun() { - if (getOzoneManager() == null) { - // OzoneManager can be null for testing - return true; - } - return getOzoneManager().isLeaderReady() && !suspended.get(); - } - - /** - * Suspend the service. - */ - @VisibleForTesting - public void suspend() { - suspended.set(true); - } - - /** - * Resume the service if suspended. - */ - @VisibleForTesting - public void resume() { - suspended.set(false); - } - - @Override - public BackgroundTaskQueue getTasks() { - BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask()); - return queue; - } - - private class SnapshotDirTask implements BackgroundTask { - - @Override - public BackgroundTaskResult call() { - if (!shouldRun()) { - return BackgroundTaskResult.EmptyTaskResult.newResult(); - } - LOG.debug("Running SnapshotDirectoryCleaningService"); - - getRunCount().incrementAndGet(); - OmSnapshotManager omSnapshotManager = - getOzoneManager().getOmSnapshotManager(); - Table snapshotInfoTable = - getOzoneManager().getMetadataManager().getSnapshotInfoTable(); - OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) - getOzoneManager().getMetadataManager(); - SnapshotChainManager snapChainManager = metadataManager - .getSnapshotChainManager(); - - try (TableIterator> iterator = snapshotInfoTable.iterator()) { - - while (iterator.hasNext()) { - SnapshotInfo currSnapInfo = iterator.next().getValue(); - - // Expand deleted dirs only on active snapshot. Deleted Snapshots - // will be cleaned up by SnapshotDeletingService. - if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || - currSnapInfo.getDeepCleanedDeletedDir()) { - continue; - } - - ReferenceCounted - rcPrevOmSnapshot = null; - ReferenceCounted - rcPrevToPrevOmSnapshot = null; - try { - long volumeId = metadataManager - .getVolumeId(currSnapInfo.getVolumeName()); - // Get bucketInfo for the snapshot bucket to get bucket layout. - String dbBucketKey = metadataManager - .getBucketKey(currSnapInfo.getVolumeName(), - currSnapInfo.getBucketName()); - OmBucketInfo bucketInfo = metadataManager - .getBucketTable().get(dbBucketKey); - - if (bucketInfo == null) { - throw new IllegalStateException("Bucket " + "/" + - currSnapInfo.getVolumeName() + "/" + currSnapInfo - .getBucketName() + - " is not found. BucketInfo should not be " + - "null for snapshotted bucket. 
The OM is in " + - "unexpected state."); - } - - SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( - currSnapInfo, snapChainManager, omSnapshotManager); - SnapshotInfo previousToPrevSnapshot = null; - - Table previousKeyTable = null; - Table prevRenamedTable = null; - - if (previousSnapshot != null) { - rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot( - previousSnapshot.getVolumeName(), - previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), false); - OmSnapshot omPreviousSnapshot = (OmSnapshot) - rcPrevOmSnapshot.get(); - - previousKeyTable = omPreviousSnapshot.getMetadataManager() - .getKeyTable(bucketInfo.getBucketLayout()); - prevRenamedTable = omPreviousSnapshot - .getMetadataManager().getSnapshotRenamedTable(); - previousToPrevSnapshot = getPreviousActiveSnapshot( - previousSnapshot, snapChainManager, omSnapshotManager); - } - - Table previousToPrevKeyTable = null; - if (previousToPrevSnapshot != null) { - rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot( - previousToPrevSnapshot.getVolumeName(), - previousToPrevSnapshot.getBucketName(), - getSnapshotPrefix(previousToPrevSnapshot.getName()), false); - OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot) - rcPrevToPrevOmSnapshot.get(); - - previousToPrevKeyTable = omPreviousToPrevSnapshot - .getMetadataManager() - .getKeyTable(bucketInfo.getBucketLayout()); - } - - String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager, - currSnapInfo.getVolumeName(), currSnapInfo.getBucketName()); - try (ReferenceCounted - rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot( - currSnapInfo.getVolumeName(), - currSnapInfo.getBucketName(), - getSnapshotPrefix(currSnapInfo.getName()), - false)) { - - OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get(); - Table snapDeletedDirTable = - currOmSnapshot.getMetadataManager().getDeletedDirTable(); - - try (TableIterator> deletedDirIterator = snapDeletedDirTable - .iterator(dbBucketKeyForDir)) { - - while (deletedDirIterator.hasNext()) { - Table.KeyValue deletedDirInfo = - deletedDirIterator.next(); - - // For each deleted directory we do an in-memory DFS and - // do a deep clean and exclusive size calculation. - iterateDirectoryTree(deletedDirInfo, volumeId, bucketInfo, - previousSnapshot, previousToPrevSnapshot, - currOmSnapshot, previousKeyTable, prevRenamedTable, - previousToPrevKeyTable, dbBucketKeyForDir); - } - updateDeepCleanSnapshotDir(currSnapInfo.getTableKey()); - if (previousSnapshot != null) { - updateExclusiveSize(previousSnapshot.getTableKey()); - } - } - } - } finally { - IOUtils.closeQuietly(rcPrevOmSnapshot, rcPrevToPrevOmSnapshot); - } - } - } catch (IOException ex) { - LOG.error("Error while running directory deep clean on snapshots." 
+ - " Will retry at next run.", ex); - } - return BackgroundTaskResult.EmptyTaskResult.newResult(); - } - } - - @SuppressWarnings("checkstyle:ParameterNumber") - private void iterateDirectoryTree( - Table.KeyValue deletedDirInfo, long volumeId, - OmBucketInfo bucketInfo, - SnapshotInfo previousSnapshot, - SnapshotInfo previousToPrevSnapshot, - OmSnapshot currOmSnapshot, - Table previousKeyTable, - Table prevRenamedTable, - Table previousToPrevKeyTable, - String dbBucketKeyForDir) throws IOException { - - Table snapDirTable = - currOmSnapshot.getMetadataManager().getDirectoryTable(); - Table snapRenamedTable = - currOmSnapshot.getMetadataManager().getSnapshotRenamedTable(); - - Stack stackNodes = new Stack<>(); - OmDirectoryInfo omDeletedDirectoryInfo = - getDirectoryInfo(deletedDirInfo.getValue()); - String dirPathDbKey = currOmSnapshot.getMetadataManager() - .getOzonePathKey(volumeId, bucketInfo.getObjectID(), - omDeletedDirectoryInfo); - // Stack Init - StackNode topLevelDir = new StackNode(); - topLevelDir.setDirKey(dirPathDbKey); - topLevelDir.setDirValue(omDeletedDirectoryInfo); - stackNodes.push(topLevelDir); - - try (TableIterator> - directoryIterator = snapDirTable.iterator(dbBucketKeyForDir)) { - - while (!stackNodes.isEmpty()) { - StackNode stackTop = stackNodes.peek(); - // First process all the files in the current directory - // and then do a DFS for directory. - if (StringUtils.isEmpty(stackTop.getSubDirSeek())) { - processFilesUnderDir(previousSnapshot, - previousToPrevSnapshot, - volumeId, - bucketInfo, - stackTop.getDirValue(), - currOmSnapshot.getMetadataManager(), - snapRenamedTable, - previousKeyTable, - prevRenamedTable, - previousToPrevKeyTable); - // Format : /volId/bucketId/parentId/ - String seekDirInDB = currOmSnapshot.getMetadataManager() - .getOzonePathKey(volumeId, bucketInfo.getObjectID(), - stackTop.getDirValue().getObjectID(), ""); - stackTop.setSubDirSeek(seekDirInDB); - } else { - // Adding \0 to seek the next greater element. - directoryIterator.seek(stackTop.getSubDirSeek() + "\0"); - if (directoryIterator.hasNext()) { - - Table.KeyValue deletedSubDirInfo = directoryIterator.next(); - String deletedSubDirKey = deletedSubDirInfo.getKey(); - String prefixCheck = currOmSnapshot.getMetadataManager() - .getOzoneDeletePathDirKey(stackTop.getSubDirSeek()); - // Exit if it is out of the sub dir prefix scope. 
- if (!deletedSubDirKey.startsWith(prefixCheck)) { - stackNodes.pop(); - } else { - stackTop.setSubDirSeek(deletedSubDirKey); - StackNode nextSubDir = new StackNode(); - nextSubDir.setDirKey(deletedSubDirInfo.getKey()); - nextSubDir.setDirValue(deletedSubDirInfo.getValue()); - stackNodes.push(nextSubDir); - } - } else { - stackNodes.pop(); - } - } - } - } - } - - private void updateExclusiveSize(String prevSnapshotKeyTable) { - ClientId clientId = ClientId.randomId(); - SnapshotSize snapshotSize = SnapshotSize.newBuilder() - .setExclusiveSize( - exclusiveSizeMap.getOrDefault(prevSnapshotKeyTable, 0L)) - .setExclusiveReplicatedSize( - exclusiveReplicatedSizeMap.getOrDefault( - prevSnapshotKeyTable, 0L)) - .build(); - exclusiveSizeMap.remove(prevSnapshotKeyTable); - exclusiveReplicatedSizeMap.remove(prevSnapshotKeyTable); - SetSnapshotPropertyRequest - setSnapshotPropertyRequest = - SetSnapshotPropertyRequest.newBuilder() - .setSnapshotKey(prevSnapshotKeyTable) - .setSnapshotSize(snapshotSize) - .build(); - - OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SetSnapshotProperty) - .setSetSnapshotPropertyRequest(setSnapshotPropertyRequest) - .setClientId(clientId.toString()) - .build(); - - submitRequest(omRequest, clientId); - } - - @SuppressWarnings("checkstyle:ParameterNumber") - private void processFilesUnderDir( - SnapshotInfo previousSnapshot, - SnapshotInfo previousToPrevSnapshot, - long volumeId, - OmBucketInfo bucketInfo, - OmDirectoryInfo parentInfo, - OMMetadataManager metadataManager, - Table snapRenamedTable, - Table previousKeyTable, - Table prevRenamedTable, - Table previousToPrevKeyTable) - throws IOException { - String seekFileInDB = metadataManager.getOzonePathKey(volumeId, - bucketInfo.getObjectID(), - parentInfo.getObjectID(), ""); - List blocksForKeyDelete = new ArrayList<>(); - - Table fileTable = metadataManager.getFileTable(); - try (TableIterator> - iterator = fileTable.iterator(seekFileInDB)) { - - while (iterator.hasNext()) { - Table.KeyValue entry = iterator.next(); - OmKeyInfo fileInfo = entry.getValue(); - if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(), - parentInfo.getObjectID())) { - break; - } - - String ozoneDeletePathKey = metadataManager - .getOzoneDeletePathKey(fileInfo.getObjectID(), entry.getKey()); - if (isKeyReclaimable(previousKeyTable, snapRenamedTable, - fileInfo, bucketInfo, volumeId, null)) { - for (OmKeyLocationInfoGroup keyLocations : - fileInfo.getKeyLocationVersions()) { - List item = keyLocations.getLocationList().stream() - .map(b -> new BlockID(b.getContainerID(), b.getLocalID())) - .collect(Collectors.toList()); - BlockGroup keyBlocks = BlockGroup.newBuilder() - .setKeyName(ozoneDeletePathKey) - .addAllBlockIDs(item) - .build(); - blocksForKeyDelete.add(keyBlocks); - } - // TODO: Add Retry mechanism. 
- getScmClient().deleteKeyBlocks(blocksForKeyDelete); - } else if (previousSnapshot != null) { - calculateExclusiveSize(previousSnapshot, previousToPrevSnapshot, - fileInfo, bucketInfo, volumeId, snapRenamedTable, - previousKeyTable, prevRenamedTable, previousToPrevKeyTable, - exclusiveSizeMap, exclusiveReplicatedSizeMap); - } - } - } - } - - private void updateDeepCleanSnapshotDir(String snapshotKeyTable) { - ClientId clientId = ClientId.randomId(); - SetSnapshotPropertyRequest setSnapshotPropertyRequest = - SetSnapshotPropertyRequest.newBuilder() - .setSnapshotKey(snapshotKeyTable) - .setDeepCleanedDeletedDir(true) - .build(); - - OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SetSnapshotProperty) - .setSetSnapshotPropertyRequest(setSnapshotPropertyRequest) - .setClientId(clientId.toString()) - .build(); - - submitRequest(omRequest, clientId); - } - - public void submitRequest(OMRequest omRequest, ClientId clientId) { - try { - if (isRatisEnabled()) { - OzoneManagerRatisServer server = - getOzoneManager().getOmRatisServer(); - - RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() - .setClientId(clientId) - .setServerId(server.getRaftPeerId()) - .setGroupId(server.getRaftGroupId()) - .setCallId(getRunCount().get()) - .setMessage(Message.valueOf( - OMRatisHelper.convertRequestToByteString(omRequest))) - .setType(RaftClientRequest.writeRequestType()) - .build(); - - server.submitRequest(omRequest, raftClientRequest); - } else { - getOzoneManager().getOmServerProtocol() - .submitRequest(null, omRequest); - } - } catch (ServiceException e) { - LOG.error("Snapshot deep cleaning request failed. " + - "Will retry at next run.", e); - } - } - - /** - * Stack node data for directory deep clean for snapshot. - */ - private static class StackNode { - private String dirKey; - private OmDirectoryInfo dirValue; - private String subDirSeek; - - public String getDirKey() { - return dirKey; - } - - public void setDirKey(String dirKey) { - this.dirKey = dirKey; - } - - public OmDirectoryInfo getDirValue() { - return dirValue; - } - - public void setDirValue(OmDirectoryInfo dirValue) { - this.dirValue = dirValue; - } - - public String getSubDirSeek() { - return subDirSeek; - } - - public void setSubDirSeek(String subDirSeek) { - this.subDirSeek = subDirSeek; - } - - @Override - public String toString() { - return "StackNode{" + - "dirKey='" + dirKey + '\'' + - ", dirObjectId=" + dirValue.getObjectID() + - ", subDirSeek='" + subDirSeek + '\'' + - '}'; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index 09ce3ac3bffd..f643688f8b83 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotProperty; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.BeforeEach; @@ -150,14 +150,14 @@ private List createSnapshotUpdateSizeRequest() iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { String snapDbKey = iterator.next().getKey(); - SnapshotSize snapshotSize = SnapshotSize.newBuilder() + SnapshotProperty snapshotSize = SnapshotProperty.newBuilder() + .setSnapshotKey(snapDbKey) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveSizeAfterRepl) .build(); SetSnapshotPropertyRequest snapshotUpdateSizeRequest = SetSnapshotPropertyRequest.newBuilder() - .setSnapshotKey(snapDbKey) - .setSnapshotSize(snapshotSize) + .setSnapshotProperty(snapshotSize) .build(); OMRequest omRequest = OMRequest.newBuilder() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index cc8dee24bd48..df27bae1ba5b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -121,7 +121,7 @@ private OzoneConfiguration createConfAndInitValues() throws IOException { } System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString()); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1000, + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); @@ -207,7 +207,7 @@ public void checkIfDeleteServiceWithFailingSCM() // Make sure that we have run the background thread 5 times more GenericTestUtils.waitFor( () -> keyDeletingService.getRunCount().get() >= 5, - 100, 10000); + 100, 1000); // Since SCM calls are failing, deletedKeyCount should be zero. assertEquals(0, keyDeletingService.getDeletedKeyCount().get()); assertEquals(keyCount, keyManager @@ -544,7 +544,7 @@ public void testSnapshotDeepClean() throws Exception { // Create Snap3, traps all the deleted keys. writeClient.createSnapshot(volumeName, bucketName, "snap3"); assertTableRowCount(snapshotInfoTable, 3, metadataManager); - checkSnapDeepCleanStatus(snapshotInfoTable, false); + checkSnapDeepCleanStatus(snapshotInfoTable, true); keyDeletingService.resume(); @@ -564,8 +564,9 @@ volumeName, bucketName, getSnapshotPrefix("snap3"), true)) { assertTableRowCount(snap3deletedTable, 0, metadataManager); assertTableRowCount(deletedTable, 0, metadataManager); - checkSnapDeepCleanStatus(snapshotInfoTable, true); + checkSnapDeepCleanStatus(snapshotInfoTable, false); } + } @Test @@ -669,7 +670,6 @@ public void testSnapshotExclusiveSize() throws Exception { iterator = snapshotInfoTable.iterator()) { while (iterator.hasNext()) { Table.KeyValue snapshotEntry = iterator.next(); - System.out.println(snapshotEntry.getValue()); String snapshotName = snapshotEntry.getValue().getName(); assertEquals(expectedSize.get(snapshotName), snapshotEntry.getValue(). From 7bb17c707253fef03bec3cccba9fd44a38cba63d Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Mon, 22 Jan 2024 10:13:31 -0800 Subject: [PATCH 30/43] HDDS-9288. 
Intermittent failure in TestSnapshotDeletingService#testMultipleSnapshotKeyReclaim --- .../org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java index 12844c23cd7b..f1f5aabe38ac 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java @@ -197,7 +197,6 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { } @SuppressWarnings("checkstyle:MethodLength") - @Flaky("HDDS-9023") @Test public void testSnapshotWithFSO() throws Exception { Table dirTable = From 51d175472dcc26162fff715c07db23dfedc3c0f1 Mon Sep 17 00:00:00 2001 From: Raju Balpande <146973984+raju-balpande@users.noreply.github.com> Date: Tue, 23 Jan 2024 01:46:10 +0530 Subject: [PATCH 31/43] HDDS-9998. Add static import for assertions and mocks in remaining ozone modules (#6056) --- ...estVulnerableUnhealthyReplicasHandler.java | 3 +- .../scm/node/TestDatanodeAdminMonitor.java | 5 +- .../ozone/insight/TestBaseInsightPoint.java | 16 ++-- .../insight/TestConfigurationSubCommand.java | 15 +-- .../ozone/insight/TestLogSubcommand.java | 5 +- .../container/TestECContainerRecovery.java | 9 +- .../commandhandler/TestBlockDeletion.java | 92 ++++++++++--------- .../TestCloseContainerHandler.java | 7 +- .../TestDeleteContainerHandler.java | 85 +++++++---------- .../TestRefreshVolumeUsageHandler.java | 6 +- .../om/helpers/TestOmPrefixInfoCodec.java | 4 +- .../om/helpers/TestS3SecretValueCodec.java | 9 +- .../request/key/TestOMKeyDeleteRequest.java | 3 +- .../key/TestOMKeyDeleteRequestWithFSO.java | 28 ++---- .../acl/TestSharedTmpDirAuthorizer.java | 14 +-- .../TestGeneratorDatanode.java | 5 +- .../scm/TestDecommissionScmSubcommand.java | 10 +- .../hadoop/ozone/shell/TestOzoneAddress.java | 58 ++++++------ .../shell/TestOzoneAddressClientCreation.java | 47 +++++----- .../shell/keys/TestChecksumKeyHandler.java | 30 +++--- 20 files changed, 219 insertions(+), 232 deletions(-) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java index 8fa4c974e1ba..28eccd5211c3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestVulnerableUnhealthyReplicasHandler.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import java.util.Collections; import java.util.HashSet; @@ -190,7 +189,7 @@ public void testReturnsTrueForQuasiClosedContainerWithVulnerableReplicaWhenAllRe ContainerReplica unhealthy = createContainerReplica(container.containerID(), 0, DECOMMISSIONING, State.UNHEALTHY, sequenceId); replicas.add(unhealthy); - Mockito.when(replicationManager.getNodeStatus(Mockito.any(DatanodeDetails.class))) + 
when(replicationManager.getNodeStatus(any(DatanodeDetails.class))) .thenAnswer(invocation -> { DatanodeDetails dn = invocation.getArgument(0); if (dn.equals(unhealthy.getDatanodeDetails())) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 523d4226cb43..a2df04742f55 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -41,7 +41,6 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import java.io.IOException; import java.util.Collections; @@ -406,7 +405,7 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN replicas.add(unhealthy); nodeManager.setContainers(dn1, ImmutableSet.of(containerID)); - Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID))) + when(repManager.getContainerReplicaCount(eq(containerID))) .thenReturn(new RatisContainerReplicaCount(container, replicas, Collections.emptyList(), 2, false)); DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, true); @@ -430,7 +429,7 @@ public void testDecommissionWaitsForUnhealthyReplicaWithUniqueOriginToReplicateN .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) .build(); replicas.add(copyOfUnhealthyOnNewNode); - Mockito.when(repManager.getContainerReplicaCount(Mockito.eq(containerID))) + when(repManager.getContainerReplicaCount(eq(containerID))) .thenReturn(new RatisContainerReplicaCount(container, replicas, Collections.emptyList(), 2, false)); DatanodeAdminMonitorTestUtil.mockCheckContainerState(repManager, false); diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java index b2d68545d06f..85faf99419a4 100644 --- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java +++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestBaseInsightPoint.java @@ -17,12 +17,14 @@ */ package org.apache.hadoop.ozone.insight; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.HashMap; import java.util.Map; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Test common insight point utility methods. 
*/ @@ -42,14 +44,14 @@ public String getDescription() { Map filters = new HashMap<>(); filters.put("datanode", "123"); - Assertions.assertTrue(insightPoint + assertTrue(insightPoint .filterLog(filters, "This a log specific to [datanode=123]")); - Assertions.assertFalse(insightPoint + assertFalse(insightPoint .filterLog(filters, "This a log specific to [datanode=234]")); //with empty filters - Assertions.assertTrue(insightPoint + assertTrue(insightPoint .filterLog(new HashMap<>(), "This a log specific to [datanode=234]")); //with multiple filters @@ -57,14 +59,14 @@ public String getDescription() { filters.put("datanode", "123"); filters.put("pipeline", "abcd"); - Assertions.assertFalse(insightPoint + assertFalse(insightPoint .filterLog(filters, "This a log specific to [datanode=123]")); - Assertions.assertTrue(insightPoint + assertTrue(insightPoint .filterLog(filters, "This a log specific to [datanode=123] [pipeline=abcd]")); - Assertions.assertFalse(insightPoint + assertFalse(insightPoint .filterLog(filters, "This a log specific to [datanode=456] [pipeline=abcd]")); diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java index 9be82ebc41d6..701652bee09c 100644 --- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java +++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java @@ -27,11 +27,12 @@ import org.apache.hadoop.hdds.conf.ConfigTag; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; +import static org.assertj.core.api.Assertions.assertThat; + /** * Test insight report which prints out configs. 
*/ @@ -60,12 +61,12 @@ public void testPrintConfig() throws UnsupportedEncodingException { subCommand.printConfig(CustomConfig.class, conf); final String output = out.toString(StandardCharsets.UTF_8.name()); - Assertions.assertTrue(output.contains(">>> ozone.scm.client.address")); - Assertions.assertTrue(output.contains("default: localhost")); - Assertions.assertTrue(output.contains("current: omclient")); - Assertions.assertTrue(output.contains(">>> ozone.scm.client.secure")); - Assertions.assertTrue(output.contains("default: true")); - Assertions.assertTrue(output.contains("current: true")); + assertThat(output).contains(">>> ozone.scm.client.address"); + assertThat(output).contains("default: localhost"); + assertThat(output).contains("current: omclient"); + assertThat(output).contains(">>> ozone.scm.client.secure"); + assertThat(output).contains("default: true"); + assertThat(output).contains("current: true"); } /** diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java index 01402085861d..f895a91c5376 100644 --- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java +++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestLogSubcommand.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.ozone.insight; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Testing utility methods of the log subcommand test. */ @@ -36,6 +37,6 @@ public void filterLog() { + "storageLocation: \"/tmp/hadoop-neo/dfs/data\"\\n capacity: " + "250438021120\\n scmUsed: 16384\\n remaining: 212041244672\\n " + "storageType: DISK\\n failed: false\\n}\\n"); - Assertions.assertEquals(10, result.split("\n").length); + assertEquals(10, result.split("\n").length); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java index b50f2ac8d68a..5a7e404f507b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java @@ -52,7 +52,6 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,6 +71,8 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doAnswer; /** * Tests the EC recovery and over replication processing. 
@@ -308,7 +309,7 @@ public void testECContainerRecoveryWithTimedOutRecovery() throws Exception { .mockFieldReflection(handler, "coordinator"); - Mockito.doAnswer(invocation -> { + doAnswer(invocation -> { GenericTestUtils.waitFor(() -> dn.getDatanodeStateMachine() .getContainer() @@ -320,8 +321,8 @@ public void testECContainerRecoveryWithTimedOutRecovery() throws Exception { reconstructedDN.set(dn); invocation.callRealMethod(); return null; - }).when(coordinator).reconstructECBlockGroup(Mockito.any(), Mockito.any(), - Mockito.any(), Mockito.any()); + }).when(coordinator).reconstructECBlockGroup(any(), any(), + any(), any()); } // Shutting down DN triggers close pipeline and close container. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 5dec1799b406..e5c0d5d2532e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -72,7 +72,6 @@ import org.apache.ozone.test.GenericTestUtils.LogCapturer; import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -95,14 +94,19 @@ import static java.lang.Math.max; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds - .HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.ozone - .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Tests for Block deletion. @@ -246,23 +250,23 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { } }, 1000, 10000); // No containers with deleted blocks - Assertions.assertTrue(containerIdsWithDeletedBlocks.isEmpty()); + assertTrue(containerIdsWithDeletedBlocks.isEmpty()); // Delete transactionIds for the containers should be 0. // NOTE: this test assumes that all the container is KetValueContainer. If // other container types is going to be added, this test should be checked. 
matchContainerTransactionIds(); - Assertions.assertEquals(0L, + assertEquals(0L, metrics.getNumBlockDeletionTransactionCreated()); writeClient.deleteKey(keyArgs); Thread.sleep(5000); // The blocks should not be deleted in the DN as the container is open - Throwable e = Assertions.assertThrows(AssertionError.class, + Throwable e = assertThrows(AssertionError.class, () -> verifyBlocksDeleted(omKeyLocationInfoGroupList)); - Assertions.assertTrue( + assertTrue( e.getMessage().startsWith("expected: but was:")); - Assertions.assertEquals(0L, metrics.getNumBlockDeletionTransactionSent()); + assertEquals(0L, metrics.getNumBlockDeletionTransactionSent()); // close the containers which hold the blocks for the key OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm); @@ -291,7 +295,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { }, 2000, 30000); // Few containers with deleted blocks - Assertions.assertFalse(containerIdsWithDeletedBlocks.isEmpty()); + assertFalse(containerIdsWithDeletedBlocks.isEmpty()); // Containers in the DN and SCM should have same delete transactionIds matchContainerTransactionIds(); @@ -312,12 +316,12 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { cluster.restartHddsDatanode(0, true); matchContainerTransactionIds(); - Assertions.assertEquals(metrics.getNumBlockDeletionTransactionCreated(), + assertEquals(metrics.getNumBlockDeletionTransactionCreated(), metrics.getNumBlockDeletionTransactionCompleted()); - Assertions.assertTrue(metrics.getNumBlockDeletionCommandSent() >= + assertTrue(metrics.getNumBlockDeletionCommandSent() >= metrics.getNumBlockDeletionCommandSuccess() + metrics.getBNumBlockDeletionCommandFailure()); - Assertions.assertTrue(metrics.getNumBlockDeletionTransactionSent() >= + assertTrue(metrics.getNumBlockDeletionTransactionSent() >= metrics.getNumBlockDeletionTransactionFailure() + metrics.getNumBlockDeletionTransactionSuccess()); LOG.info(metrics.toString()); @@ -326,7 +330,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { for (int i = 5; i >= 0; i--) { if (logCapturer.getOutput().contains("1(" + i + ")")) { for (int j = 0; j <= i; j++) { - Assertions.assertTrue(logCapturer.getOutput() + assertTrue(logCapturer.getOutput() .contains("1(" + i + ")")); } break; @@ -367,8 +371,8 @@ public void testContainerStatisticsAfterDelete() throws Exception { final int valueSize = value.getBytes(UTF_8).length; final int keyCount = 1; containerInfos.stream().forEach(container -> { - Assertions.assertEquals(valueSize, container.getUsedBytes()); - Assertions.assertEquals(keyCount, container.getNumberOfKeys()); + assertEquals(valueSize, container.getUsedBytes()); + assertEquals(keyCount, container.getNumberOfKeys()); }); OzoneTestUtils.closeAllContainers(scm.getEventQueue(), scm); @@ -389,7 +393,7 @@ public void testContainerStatisticsAfterDelete() throws Exception { containerMap.values().forEach(container -> { KeyValueContainerData containerData = (KeyValueContainerData)container.getContainerData(); - Assertions.assertEquals(0, containerData.getNumPendingDeletionBlocks()); + assertEquals(0, containerData.getNumPendingDeletionBlocks()); }); }); @@ -398,7 +402,7 @@ public void testContainerStatisticsAfterDelete() throws Exception { ((EventQueue)scm.getEventQueue()).processAll(1000); containerInfos = scm.getContainerManager().getContainers(); containerInfos.stream().forEach(container -> - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETING, + 
assertEquals(HddsProtos.LifeCycleState.DELETING, container.getState())); LogCapturer logCapturer = LogCapturer.captureLogs( legacyEnabled ? LegacyReplicationManager.LOG : ReplicationManager.LOG); @@ -422,14 +426,14 @@ public void testContainerStatisticsAfterDelete() throws Exception { List infos = scm.getContainerManager().getContainers(); try { infos.stream().forEach(container -> { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); } catch (IOException e) { - Assertions.fail( + fail( "Container from SCM DB should be marked as DELETED"); } }); @@ -477,8 +481,8 @@ public void testContainerStateAfterDNRestart() throws Exception { final int keyCount = 1; List containerIdList = new ArrayList<>(); containerInfos.stream().forEach(container -> { - Assertions.assertEquals(valueSize, container.getUsedBytes()); - Assertions.assertEquals(keyCount, container.getNumberOfKeys()); + assertEquals(valueSize, container.getUsedBytes()); + assertEquals(keyCount, container.getNumberOfKeys()); containerIdList.add(container.getContainerID()); }); @@ -499,14 +503,14 @@ public void testContainerStateAfterDNRestart() throws Exception { ContainerID containerId = ContainerID.valueOf( containerInfos.get(0).getContainerID()); // Before restart container state is non-empty - Assertions.assertFalse(getContainerFromDN( + assertFalse(getContainerFromDN( cluster.getHddsDatanodes().get(0), containerId.getId()) .getContainerData().isEmpty()); // Restart DataNode cluster.restartHddsDatanode(0, true); // After restart also container state remains non-empty. 
- Assertions.assertFalse(getContainerFromDN( + assertFalse(getContainerFromDN( cluster.getHddsDatanodes().get(0), containerId.getId()) .getContainerData().isEmpty()); @@ -526,14 +530,14 @@ public void testContainerStateAfterDNRestart() throws Exception { 100, 10 * 1000); // Container state should be empty now as key got deleted - Assertions.assertTrue(getContainerFromDN( + assertTrue(getContainerFromDN( cluster.getHddsDatanodes().get(0), containerId.getId()) .getContainerData().isEmpty()); // Restart DataNode cluster.restartHddsDatanode(0, true); // Container state should be empty even after restart - Assertions.assertTrue(getContainerFromDN( + assertTrue(getContainerFromDN( cluster.getHddsDatanodes().get(0), containerId.getId()) .getContainerData().isEmpty()); @@ -543,14 +547,14 @@ public void testContainerStateAfterDNRestart() throws Exception { List infos = scm.getContainerManager().getContainers(); try { infos.stream().forEach(container -> { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); } catch (IOException e) { - Assertions.fail( + fail( "Container from SCM DB should be marked as DELETED"); } }); @@ -607,8 +611,8 @@ public void testContainerDeleteWithInvalidKeyCount() final int keyCount = 1; List containerIdList = new ArrayList<>(); containerInfos.stream().forEach(container -> { - Assertions.assertEquals(valueSize, container.getUsedBytes()); - Assertions.assertEquals(keyCount, container.getNumberOfKeys()); + assertEquals(valueSize, container.getUsedBytes()); + assertEquals(keyCount, container.getNumberOfKeys()); containerIdList.add(container.getContainerID()); }); @@ -635,7 +639,7 @@ public void testContainerDeleteWithInvalidKeyCount() = scm.getContainerManager().getContainerReplicas(containerId); // Ensure for all replica isEmpty are false in SCM - Assertions.assertTrue(scm.getContainerManager().getContainerReplicas( + assertTrue(scm.getContainerManager().getContainerReplicas( containerId).stream(). 
allMatch(replica -> !replica.isEmpty())); @@ -680,14 +684,14 @@ public void testContainerDeleteWithInvalidKeyCount() List infos = scm.getContainerManager().getContainers(); try { infos.stream().forEach(container -> { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { - Assertions.assertEquals(HddsProtos.LifeCycleState.DELETED, + assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); } catch (IOException e) { - Assertions.fail( + fail( "Container from SCM DB should be marked as DELETED"); } }); @@ -702,7 +706,7 @@ public void testContainerDeleteWithInvalidKeyCount() private void verifyTransactionsCommitted() throws IOException { scm.getScmBlockManager().getDeletedBlockLog(); for (long txnID = 1; txnID <= maxTransactionId; txnID++) { - Assertions.assertNull( + assertNull( scm.getScmMetadataStore().getDeletedBlocksTXTable().get(txnID)); } } @@ -716,15 +720,15 @@ private void matchContainerTransactionIds() throws IOException { for (ContainerData containerData : containerDataList) { long containerId = containerData.getContainerID(); if (containerIdsWithDeletedBlocks.contains(containerId)) { - Assertions.assertTrue( + assertTrue( scm.getContainerInfo(containerId).getDeleteTransactionId() > 0); maxTransactionId = max(maxTransactionId, scm.getContainerInfo(containerId).getDeleteTransactionId()); } else { - Assertions.assertEquals( + assertEquals( scm.getContainerInfo(containerId).getDeleteTransactionId(), 0); } - Assertions.assertEquals( + assertEquals( ((KeyValueContainerData) dnContainerSet.getContainer(containerId) .getContainerData()).getDeleteTransactionId(), scm.getContainerInfo(containerId).getDeleteTransactionId()); @@ -741,7 +745,7 @@ private void verifyBlocksCreated( KeyValueContainerData cData = (KeyValueContainerData) dnContainerSet .getContainer(blockID.getContainerID()).getContainerData(); try (DBHandle db = BlockUtils.getDB(cData, conf)) { - Assertions.assertNotNull(db.getStore().getBlockDataTable() + assertNotNull(db.getStore().getBlockDataTable() .get(cData.getBlockKey(blockID.getLocalID()))); } }, omKeyLocationInfoGroups); @@ -763,11 +767,11 @@ private void verifyBlocksDeleted( String blockKey = cData.getBlockKey(blockID.getLocalID()); BlockData blockData = blockDataTable.get(blockKey); - Assertions.assertNull(blockData); + assertNull(blockData); String deletingKey = cData.getDeletingBlockKey( blockID.getLocalID()); - Assertions.assertNull(blockDataTable.get(deletingKey)); + assertNull(blockDataTable.get(deletingKey)); } containerIdsWithDeletedBlocks.add(blockID.getContainerID()); }, omKeyLocationInfoGroups); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index ec47c76d94d2..7cb3c7797fa0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -45,9 +45,10 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -117,7 +118,7 @@ public void test() throws Exception { Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); - Assertions.assertFalse(isContainerClosed(cluster, containerId.getId())); + assertFalse(isContainerClosed(cluster, containerId.getId())); DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0).getDatanodeDetails(); @@ -135,7 +136,7 @@ public void test() throws Exception { 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(cluster, containerId.getId())); + assertTrue(isContainerClosed(cluster, containerId.getId())); } private static Boolean isContainerClosed(MiniOzoneCluster cluster, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index 332683658b14..b74e6f3c9116 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java @@ -57,7 +57,6 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -77,6 +76,9 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests DeleteContainerCommand Handler. 
@@ -165,8 +167,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); @@ -189,8 +190,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId())); // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) @@ -217,8 +217,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() lingeringBlock.createNewFile(); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // Set container blockCount to 0 to mock that it is empty as per RocksDB getContainerfromDN(hddsDatanodeService, containerId.getId()) @@ -243,10 +242,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() contains("Files still part of the container on delete"), 500, 5 * 2000); - Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService, - containerId.getId())); - Assertions.assertTrue(beforeDeleteFailedCount < - metrics.getContainerDeleteFailedNonEmpty()); + assertTrue(!isContainerDeleted(hddsDatanodeService, containerId.getId())); + assertTrue(beforeDeleteFailedCount < metrics.getContainerDeleteFailedNonEmpty()); // Send the delete command. It should pass with force flag. 
// Deleting a non-empty container should pass on the DN when the force flag // is true @@ -260,10 +257,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() GenericTestUtils.waitFor(() -> isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, - containerId.getId())); - Assertions.assertTrue(beforeForceCount < - metrics.getContainerForceDelete()); + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); + assertTrue(beforeForceCount < metrics.getContainerForceDelete()); kv.setCheckChunksFilePath(false); } @@ -297,8 +292,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); @@ -313,8 +307,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId())); // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) @@ -341,8 +334,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() lingeringBlock.createNewFile(); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // send delete container to the datanode SCMCommand command = new DeleteContainerCommand(containerId.getId(), @@ -357,8 +349,7 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() GenericTestUtils.waitFor(() -> isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); } @Test @@ -384,8 +375,7 @@ public void testDeleteNonEmptyContainerBlockTable() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); @@ -418,11 +408,11 @@ public void testDeleteNonEmptyContainerBlockTable() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, + assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId())); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); long containerDeleteFailedNonEmptyBlockDB = @@ -446,9 +436,9 @@ public void testDeleteNonEmptyContainerBlockTable() contains("the container is not empty with blockCount"), 500, 5 * 2000); - Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService, + assertTrue(!isContainerDeleted(hddsDatanodeService, containerId.getId())); - 
Assertions.assertTrue(containerDeleteFailedNonEmptyBlockDB < + assertTrue(containerDeleteFailedNonEmptyBlockDB < metrics.getContainerDeleteFailedNonEmpty()); // Now empty the container Dir and try with a non-empty block table @@ -470,8 +460,7 @@ public void testDeleteNonEmptyContainerBlockTable() cluster.getStorageContainerManager().getScmContext().getTermOfLeader()); nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), command); Thread.sleep(5000); - Assertions.assertTrue(!isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // Send the delete command. It should pass with force flag. long beforeForceCount = metrics.getContainerForceDelete(); command = new DeleteContainerCommand(containerId.getId(), true); @@ -483,9 +472,9 @@ public void testDeleteNonEmptyContainerBlockTable() GenericTestUtils.waitFor(() -> isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); - Assertions.assertTrue(beforeForceCount < + assertTrue(beforeForceCount < metrics.getContainerForceDelete()); } @@ -507,8 +496,7 @@ public void testContainerDeleteWithInvalidBlockCount() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); NodeManager nodeManager = @@ -525,12 +513,10 @@ public void testContainerDeleteWithInvalidBlockCount() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId())); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // Clear block table clearBlocksTable(getContainerfromDN(hddsDatanodeService, @@ -561,8 +547,7 @@ public void testContainerDeleteWithInvalidBlockCount() GenericTestUtils.waitFor(() -> isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); } @@ -612,8 +597,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() HddsDatanodeService hddsDatanodeService = cluster.getHddsDatanodes().get(0); - Assertions.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertFalse(isContainerClosed(hddsDatanodeService, containerId.getId())); DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); @@ -630,12 +614,10 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() 500, 5 * 1000); //double check if it's really closed (waitFor also throws an exception) - Assertions.assertTrue(isContainerClosed(hddsDatanodeService, - containerId.getId())); + assertTrue(isContainerClosed(hddsDatanodeService, containerId.getId())); // Check container exists before sending delete container command - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); + 
assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); // send delete container to the datanode SCMCommand command = new DeleteContainerCommand(containerId.getId(), @@ -656,8 +638,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() ContainerMetrics metrics = hddsDatanodeService .getDatanodeStateMachine().getContainer().getMetrics(); - Assertions.assertEquals(1, - metrics.getContainerDeleteFailedNonEmpty()); + assertEquals(1, metrics.getContainerDeleteFailedNonEmpty()); // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) @@ -678,7 +659,7 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); } @@ -723,7 +704,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer() } } - Assertions.assertFalse(isContainerDeleted(hddsDatanodeService, + assertFalse(isContainerDeleted(hddsDatanodeService, containerId.getId())); @@ -738,7 +719,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer() isContainerDeleted(hddsDatanodeService, containerId.getId()), 500, 5 * 1000); - Assertions.assertTrue(isContainerDeleted(hddsDatanodeService, + assertTrue(isContainerDeleted(hddsDatanodeService, containerId.getId())); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java index c47f09930993..23382b2abe6c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestRefreshVolumeUsageHandler.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -43,6 +42,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test the behaviour of the datanode and scm when communicating @@ -101,7 +101,7 @@ public void test() throws Exception { //a new key is created, but the datanode default REFRESH_PERIOD is 1 hour, //still the cache is updated, so the scm will eventually get the new //used space from the datanode through node report. - Assertions.assertTrue(cluster.getStorageContainerManager() + assertTrue(cluster.getStorageContainerManager() .getScmNodeManager().getUsageInfo(datanodeDetails) .getScmNodeStat().getScmUsed().isEqual(currentScmUsed)); @@ -116,7 +116,7 @@ public void test() throws Exception { //after waiting for several node report , this usage info //in SCM should be updated as we have updated the DN's cached usage info. 
- Assertions.assertTrue(cluster.getStorageContainerManager() + assertTrue(cluster.getStorageContainerManager() .getScmNodeManager().getUsageInfo(datanodeDetails) .getScmNodeStat().getScmUsed().isGreater(currentScmUsed)); diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java index 6015491468c0..f3ad1d8c7628 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java @@ -22,7 +22,6 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -30,6 +29,7 @@ import java.util.List; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test {@link OmPrefixInfo#getCodec()}. @@ -57,6 +57,6 @@ public void testToAndFromPersistedFormat() throws IOException { OmPrefixInfo opiLoad = codec.fromPersistedFormat( codec.toPersistedFormat(opiSave)); - Assertions.assertEquals(opiSave, opiLoad, "Loaded not equals to saved"); + assertEquals(opiSave, opiLoad, "Loaded not equals to saved"); } } diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java index 19cb68619715..d1c4372ca915 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestS3SecretValueCodec.java @@ -22,9 +22,12 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.Proto2CodecTestBase; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + + /** * Test {@link S3SecretValue#getCodec()}. 
*/ @@ -44,10 +47,10 @@ public void testCodecWithCorrectData() throws Exception { UUID.randomUUID().toString()); byte[] data = codec.toPersistedFormat(s3SecretValue); - Assertions.assertNotNull(data); + assertNotNull(data); S3SecretValue docdedS3Secret = codec.fromPersistedFormat(data); - Assertions.assertEquals(s3SecretValue, docdedS3Secret); + assertEquals(s3SecretValue, docdedS3Secret); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index cb585caefd94..00d1883d749c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -28,7 +28,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -52,7 +51,7 @@ public void testPreExecute(String testKeyName) throws Exception { OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(testKeyName); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); - Assertions.assertNotNull(omKeyInfo); + assertNotNull(omKeyInfo); doPreExecute(createDeleteKeyRequest(testKeyName)); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java index 96483fb587ce..9dafab090295 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java @@ -21,8 +21,9 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; + import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.OzonePrefixPathImpl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -33,7 +34,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.security.acl.OzonePrefixPath; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -112,7 +112,7 @@ public void testPreExecute(String testKeyName) throws Exception { OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey); - Assertions.assertNotNull(omKeyInfo); + assertNotNull(omKeyInfo); doPreExecute(createDeleteKeyRequest()); } @@ -142,19 
+142,14 @@ public void testOzonePrefixPathViewer() throws Exception { verifyPath(ozonePrefixPath, "c/d", "c/d/e"); verifyPath(ozonePrefixPath, "c/d/e", "c/d/e/file1"); - try { - ozonePrefixPath.getChildren("c/d/e/file1"); - fail("Should throw INVALID_KEY_NAME as the given " + - "path is a file."); - } catch (OMException ome) { - assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, - ome.getResult()); - } + OMException ome = assertThrows(OMException.class, () -> ozonePrefixPath.getChildren("c/d/e/file1"), + "Should throw INVALID_KEY_NAME as the given path is a file."); + assertEquals(OMException.ResultCodes.INVALID_KEY_NAME, ome.getResult()); // OzonePrefixPathImpl on a file - ozonePrefixPath = new OzonePrefixPathImpl(volumeName, + OzonePrefixPathImpl ozonePrefixPathFile1 = new OzonePrefixPathImpl(volumeName, bucketName, "c/d/e/file1", keyManager); - status = ozonePrefixPath.getOzoneFileStatus(); + status = ozonePrefixPathFile1.getOzoneFileStatus(); assertNotNull(status); assertEquals("c/d/e/file1", status.getTrimmedName()); assertEquals("c/d/e/file1", status.getKeyInfo().getKeyName()); @@ -168,12 +163,7 @@ private void verifyPath(OzonePrefixPath ozonePrefixPath, String pathName, pathName); assertTrue(pathItr.hasNext(), "Failed to list keyPaths"); assertEquals(expectedPath, pathItr.next().getTrimmedName()); - try { - pathItr.next(); - fail("Reached end of the list!"); - } catch (NoSuchElementException nse) { - // expected - } + assertThrows(NoSuchElementException.class, () -> pathItr.next(), "Reached end of the list!"); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestSharedTmpDirAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestSharedTmpDirAuthorizer.java index ff3609b810cd..ce5aa12c568b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestSharedTmpDirAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestSharedTmpDirAuthorizer.java @@ -22,10 +22,12 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.Mockito; import java.util.stream.Stream; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + /** * Tests for {@link SharedTmpDirAuthorizer}. 
*/ @@ -37,8 +39,8 @@ public class TestSharedTmpDirAuthorizer { @BeforeAll public static void setUp() { - nativeAuthorizer = Mockito.mock(OzoneNativeAuthorizer.class); - authorizer = Mockito.mock(TestOzoneAuthorizerFactory + nativeAuthorizer = mock(OzoneNativeAuthorizer.class); + authorizer = mock(TestOzoneAuthorizerFactory .MockThirdPartyAuthorizer.class); sharedTmpDirAuthorizer = @@ -65,13 +67,13 @@ public void testCheckAccess(String volumeName, .setKeyName("key1") .build(); - RequestContext context = Mockito.mock(RequestContext.class); + RequestContext context = mock(RequestContext.class); sharedTmpDirAuthorizer.checkAccess(objInfo, context); if (isNative) { - Mockito.verify(nativeAuthorizer).checkAccess(objInfo, context); + verify(nativeAuthorizer).checkAccess(objInfo, context); } else { - Mockito.verify(authorizer).checkAccess(objInfo, context); + verify(authorizer).checkAccess(objInfo, context); } } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java index 7792e03e1160..35a1ba20bb58 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/containergenerator/TestGeneratorDatanode.java @@ -16,12 +16,13 @@ */ package org.apache.hadoop.ozone.freon.containergenerator; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.Arrays; import java.util.HashSet; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Test datanode container generation placement. */ @@ -64,7 +65,7 @@ public void compare( int maxDatanodes, int overlap, Integer... 
expectations) { - Assertions.assertEquals( + assertEquals( new HashSet(Arrays.asList(expectations)), GeneratorDatanode.getPlacement(containerId, maxDatanodes, overlap)); } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java index 2cb3be1d0b48..77191df75a2e 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java @@ -25,14 +25,14 @@ import java.io.IOException; import java.util.UUID; - import org.junit.jupiter.api.Test; -import static org.junit.jupiter.api.Assertions.assertTrue; -import org.mockito.Mockito; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import picocli.CommandLine; /** @@ -66,7 +66,7 @@ public void testDecommissionScmInputParams() throws Exception { .setSuccess(true) .build(); - Mockito.when(client.decommissionScm(any())) + when(client.decommissionScm(any())) .thenAnswer(invocation -> ( response)); @@ -94,7 +94,7 @@ public void testDecommissionScmScmRemoveErrors() throws Exception { .setErrorMsg("Cannot remove current leader.") .build(); - Mockito.when(client.decommissionScm(any())) + when(client.decommissionScm(any())) .thenAnswer(invocation -> ( response)); diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java index e831bf5f671a..8ace369b8c9f 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java @@ -23,10 +23,14 @@ import org.apache.hadoop.ozone.client.OzoneClientException; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Test ozone URL parsing. 
@@ -60,7 +64,7 @@ public void checkRootUrlType(String prefix) throws OzoneClientException { public void checkVolumeUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1"); address.ensureVolumeAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); + assertEquals("vol1", address.getVolumeName()); } @ParameterizedTest @@ -68,13 +72,13 @@ public void checkVolumeUrlType(String prefix) throws OzoneClientException { public void checkBucketUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1/bucket"); address.ensureBucketAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); address = new OzoneAddress(prefix + "vol1/bucket/"); address.ensureBucketAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); } @ParameterizedTest @@ -82,22 +86,22 @@ public void checkBucketUrlType(String prefix) throws OzoneClientException { public void checkKeyUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1/bucket/key"); address.ensureKeyAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); - Assertions.assertEquals("key", address.getKeyName()); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); + assertEquals("key", address.getKeyName()); address = new OzoneAddress(prefix + "vol1/bucket/key/"); address.ensureKeyAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); - Assertions.assertEquals("key/", address.getKeyName()); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); + assertEquals("key/", address.getKeyName()); address = new OzoneAddress(prefix + "vol1/bucket/key1/key3/key"); address.ensureKeyAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); - Assertions.assertEquals("key1/key3/key", address.getKeyName()); - Assertions.assertFalse(address.isPrefix(), "this should not be a prefix"); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); + assertEquals("key1/key3/key", address.getKeyName()); + assertFalse(address.isPrefix(), "this should not be a prefix"); } @ParameterizedTest @@ -105,10 +109,10 @@ public void checkKeyUrlType(String prefix) throws OzoneClientException { public void checkPrefixUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1/bucket/prefix"); address.ensurePrefixAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); - Assertions.assertEquals("prefix", address.getKeyName()); - Assertions.assertTrue(address.isPrefix(), "this should be a prefix"); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); + assertEquals("prefix", address.getKeyName()); + assertTrue(address.isPrefix(), "this should be a prefix"); } @ParameterizedTest @@ -116,11 +120,10 @@ public void 
checkPrefixUrlType(String prefix) throws OzoneClientException { public void checkSnapshotUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1/bucket/.snapshot/snap1"); address.ensureSnapshotAddress(); - Assertions.assertEquals("vol1", address.getVolumeName()); - Assertions.assertEquals("bucket", address.getBucketName()); - Assertions.assertEquals(".snapshot/snap1", - address.getSnapshotNameWithIndicator()); - Assertions.assertEquals(".snapshot/snap1", address.getKeyName()); + assertEquals("vol1", address.getVolumeName()); + assertEquals("bucket", address.getBucketName()); + assertEquals(".snapshot/snap1", address.getSnapshotNameWithIndicator()); + assertEquals(".snapshot/snap1", address.getKeyName()); String message = "Only a snapshot name with " + @@ -128,9 +131,8 @@ public void checkSnapshotUrlType(String prefix) throws OzoneClientException { address = new OzoneAddress(prefix + "vol1/bucket/.snapshot"); - OzoneClientException exception = Assertions - .assertThrows(OzoneClientException.class, + OzoneClientException exception = assertThrows(OzoneClientException.class, () -> address.ensureSnapshotAddress()); - Assertions.assertTrue(exception.getMessage().contains(message)); + assertTrue(exception.getMessage().contains(message)); } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java index 02bc2cade061..2457a00fe52c 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java @@ -27,7 +27,10 @@ import org.apache.hadoop.hdds.conf.InMemoryConfiguration; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; -import org.junit.jupiter.api.Assertions; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.junit.jupiter.api.Test; @@ -41,7 +44,7 @@ public void implicitNonHA() throws OzoneClientException, IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); address.createClient(new InMemoryConfiguration()); - Assertions.assertTrue(address.simpleCreation); + assertTrue(address.simpleCreation); } @Test @@ -51,8 +54,8 @@ public void implicitHAOneServiceId() new TestableOzoneAddress("/vol1/bucket1/key1"); address.createClient( new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1")); - Assertions.assertFalse(address.simpleCreation); - Assertions.assertEquals("service1", address.serviceId); + assertFalse(address.simpleCreation); + assertEquals("service1", address.serviceId); } @Test @@ -60,7 +63,7 @@ public void implicitHaMultipleServiceId() throws OzoneClientException, IOException { TestableOzoneAddress address = new TestableOzoneAddress("/vol1/bucket1/key1"); - Assertions.assertThrows(OzoneClientException.class, () -> + assertThrows(OzoneClientException.class, () -> address.createClient(new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1,service2"))); } @@ -73,8 +76,8 @@ public void explicitHaMultipleServiceId() address.createClient( new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1,service2")); - Assertions.assertFalse(address.simpleCreation); - 
Assertions.assertEquals("service1", address.serviceId); + assertFalse(address.simpleCreation); + assertEquals("service1", address.serviceId); } @Test @@ -82,9 +85,9 @@ public void explicitNonHAHostPort() throws OzoneClientException, IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); address.createClient(new InMemoryConfiguration()); - Assertions.assertFalse(address.simpleCreation); - Assertions.assertEquals("om", address.host); - Assertions.assertEquals(9862, address.port); + assertFalse(address.simpleCreation); + assertEquals("om", address.host); + assertEquals(9862, address.port); } @Test @@ -94,9 +97,9 @@ public void explicitHAHostPortWithServiceId() new TestableOzoneAddress("o3://om:9862/vol1/bucket1/key1"); address.createClient( new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1")); - Assertions.assertFalse(address.simpleCreation); - Assertions.assertEquals("om", address.host); - Assertions.assertEquals(9862, address.port); + assertFalse(address.simpleCreation); + assertEquals("om", address.host); + assertEquals(9862, address.port); } @Test @@ -107,9 +110,9 @@ public void explicitAHostPortWithServiceIds() address.createClient( new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1,service2")); - Assertions.assertFalse(address.simpleCreation); - Assertions.assertEquals("om", address.host); - Assertions.assertEquals(9862, address.port); + assertFalse(address.simpleCreation); + assertEquals("om", address.host); + assertEquals(9862, address.port); } @Test @@ -118,8 +121,8 @@ public void explicitNonHAHost() throws OzoneClientException, IOException { new TestableOzoneAddress("o3://om/vol1/bucket1/key1"); address.createClient( new InMemoryConfiguration(OZONE_OM_SERVICE_IDS_KEY, "service1")); - Assertions.assertFalse(address.simpleCreation); - Assertions.assertEquals("om", address.host); + assertFalse(address.simpleCreation); + assertEquals("om", address.host); } @Test @@ -127,16 +130,16 @@ public void explicitHAHostPort() throws OzoneClientException, IOException { TestableOzoneAddress address = new TestableOzoneAddress("o3://om:1234/vol1/bucket1/key1"); address.createClient(new InMemoryConfiguration()); - Assertions.assertFalse(address.simpleCreation); - Assertions.assertEquals("om", address.host); - Assertions.assertEquals(1234, address.port); + assertFalse(address.simpleCreation); + assertEquals("om", address.host); + assertEquals(1234, address.port); } @Test public void explicitWrongScheme() throws OzoneClientException, IOException { TestableOzoneAddress address = new TestableOzoneAddress("ssh://host/vol1/bucket1/key1"); - Assertions.assertThrows(OzoneClientException.class, () -> + assertThrows(OzoneClientException.class, () -> address.createClient(new InMemoryConfiguration())); } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java index c55cb9f55ac4..d4fa929614fb 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java @@ -32,10 +32,8 @@ import org.apache.hadoop.ozone.client.checksum.CrcUtil; import org.apache.hadoop.util.DataChecksum; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import 
org.mockito.Mockito; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -43,8 +41,10 @@ import java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; -import static org.mockito.ArgumentMatchers.anyString; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Tests for ChecksumKeyHandler. @@ -91,32 +91,30 @@ public void testChecksumKeyHandler() ObjectStore objectStore = mock(ObjectStore.class); OzoneClient client = mock(OzoneClient.class); - Mockito.when(client.getObjectStore()).thenReturn(objectStore); + when(client.getObjectStore()).thenReturn(objectStore); OzoneVolume volume = mock(OzoneVolume.class); OzoneBucket bucket = mock(OzoneBucket.class); OzoneKeyDetails key = mock(OzoneKeyDetails.class); - Mockito.when(volume.getBucket(anyString())).thenReturn(bucket); - Mockito.when(bucket.getKey(anyString())) - .thenReturn(key); - Mockito.when(objectStore.getVolume(anyString())). - thenReturn(volume); - Mockito.when(key.getDataSize()).thenReturn(keySize); + when(volume.getBucket(anyString())).thenReturn(bucket); + when(bucket.getKey(anyString())).thenReturn(key); + when(objectStore.getVolume(anyString())).thenReturn(volume); + when(key.getDataSize()).thenReturn(keySize); cmd.execute(client, address); ObjectMapper mapper = new ObjectMapper(); JsonNode json = mapper.readTree(outContent.toString("UTF-8")); - Assertions.assertEquals("volume", json.get("volumeName").asText()); - Assertions.assertEquals("bucket", json.get("bucketName").asText()); - Assertions.assertEquals("key", json.get("name").asText()); - Assertions.assertEquals(keySize, json.get("dataSize").asLong()); - Assertions.assertEquals("COMPOSITE-CRC32", json.get("algorithm").asText()); + assertEquals("volume", json.get("volumeName").asText()); + assertEquals("bucket", json.get("bucketName").asText()); + assertEquals("key", json.get("name").asText()); + assertEquals(keySize, json.get("dataSize").asLong()); + assertEquals("COMPOSITE-CRC32", json.get("algorithm").asText()); String expectedChecksum = javax.xml.bind.DatatypeConverter.printHexBinary( CrcUtil.intToBytes(Integer.valueOf(CHECKSUM))); - Assertions.assertEquals(expectedChecksum, json.get("checksum").asText()); + assertEquals(expectedChecksum, json.get("checksum").asText()); } } From fab5107cd8c8e9077642ef6ee143f88f616eb36f Mon Sep 17 00:00:00 2001 From: david1859168 <71422636+david1859168@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:36:01 +1100 Subject: [PATCH 32/43] HDDS-9181. Provide documentation for Decommissioning in Ozone in Mandarin (#6047) Co-authored-by: David Wang --- .../docs/content/feature/Decommission.zh.md | 96 +++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 hadoop-hdds/docs/content/feature/Decommission.zh.md diff --git a/hadoop-hdds/docs/content/feature/Decommission.zh.md b/hadoop-hdds/docs/content/feature/Decommission.zh.md new file mode 100644 index 000000000000..ad959469b953 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/Decommission.zh.md @@ -0,0 +1,96 @@ +--- +title: "Decommissioning" +weight: 1 +menu: + main: + parent: 特性 +summary: Decommissioning of SCM, OM and Datanode. 
+--- + + +# DataNode Decommission + +DataNode Decommission是从Ozone集群中删除现有DataNode的过程中,同时确保新数据不会被写入正在Decommission的DataNode。当你启动DataNode Decommission的操作时候,Ozone会自动确保在Decommission完成之前,该数据节点上的所有Storage containers都在另一个DataNode上创建了额外的副本。因此,DataNode在Decommission完成后可以继续运行,并可用于读取,但不能用于写入,直到手动停止DataNode的服务。 + +当我们启动Decommission时,这个操作首先要检查节点的当前状态,理想情况下应该是 "IN_SERVICE",然后将其状态更改为 "DECOMMISSIONING",并启动Decommission的流程: + +1. 首先它会触发一个事件,关闭节点上的所有Pipelines,同时关闭所有Containers。 + +2. 然后获取节点上的Container信息,并检查是否需要新的副本。如果需要,创建新的副本的任务就会被调度起来。 + +3. 复制任务被调度后,节点仍处于待处理状态,直到复制任务完成。 + +4. 在此阶段,节点将完成Decommission的过程,然后节点状态将更改为 "DECOMMISSIONED"。 + +要检查DataNode的当前状态,可以执行以下命令, +```shell +ozone admin datanode list +``` + +要decommission某台datanode的时候,可以执行下面的命令, + +```shell +ozone admin datanode decommission [-hV] [-id=] + [--scm=] [...] +``` +您可以输入多个主机,以便一起Decommission多个DataNode。 + +**Note:** 要Recommission某台DataNode的时候,可在命令行执行以下命令, +```shell +ozone admin datanode recommission [-hV] [-id=] + [--scm=] [...] +``` + +# OM Decommission + +Ozone Manager(OM)Decommissioning是指从 OM HA Ring 中从容地(gracefully)移除一个 OM 的过程。 + +要Decommission OM 并将这个节点从 OM HA ring中移除,需要执行以下步骤。 +1. 将要被Decommission的 OM 节点的 _OM NodeId_ 添加到所有其他 OM 的 _ozone-site.xml_ 中的 _ozone.om.decommissioned.nodes._ 属性中。 +2. 运行以下命令Decommission这台 OM 节点. +```shell +ozone admin om decommission -id= -nodeid= -hostname= [optional --force] +``` + _force选项将跳过检查 _ozone-site.xml_ 中的 OM 配置是否已更新,并将Decommission节点添加至 _**ozone.om.decommissioned.nodes**_ 配置中.
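例如，假设 OM 的 service id 为 omservice，要下线的 OM 节点的 NodeId 为 om3、主机名为 om3.example.com（以上均为假设的示意值，并非来自实际集群），命令大致如下：
```shell
# 示例（以下 service id、nodeid 与主机名均为假设值，仅作说明，请替换为实际集群信息）
ozone admin om decommission -id=omservice -nodeid=om3 -hostname=om3.example.com
```
执行前请确认第 1 步中所有其他 OM 的 _ozone-site.xml_ 配置均已更新。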

**Note -** 建议在Decommissioning一个 OM 节点之前bootstrap另一个 OM 节点，以保持OM的高可用性(HA).

+ +# SCM Decommission + +存储容器管理器 (SCM) Decommissioning 是允许您从容地(gracefully)将一个 SCM 从 SCM HA Ring 中移除的过程。 + +在Decommission一台SCM,并将其从SCM HA ring中移除时,需要执行以下步骤。 +```shell +ozone admin scm decommission [-hV] [--service-id=] -nodeid= +``` +执行以下命令可获得 "nodeId": **"ozone admin scm roles "** + +### Leader SCM +如果需要decommission **leader** SCM, 您必须先将leader的角色转移到另一个 scm,然后再Decommission这个节点。 + +您可以使用以下的命令来转移leader的角色, +```shell +ozone admin scm transfer [--service-id=] -n= +``` +在Leader的角色成功地转移之后,您可以继续decommission的操作。 + +### Primordial SCM +如果要decommission **primordial** scm,必须更改 _ozone.scm.primordial.node.id_ 的属性,使其指向不同的 SCM,然后再继续decommissioning。 + +### 注意 +在运行SCM decommissioning的操作期间,应手动删除decommissioned SCM的私钥。私钥可在 _hdds.metadata.dir_ 中找到。 + +在支持证书吊销之前(HDDS-8399),需要手动删除decommissioned SCM上的证书。 From d7146e8211b2ccf79445dcacc27e77ef290d57bf Mon Sep 17 00:00:00 2001 From: david1859168 <71422636+david1859168@users.noreply.github.com> Date: Tue, 23 Jan 2024 16:36:32 +1100 Subject: [PATCH 33/43] HDDS-7557. Translate "Merge Container RocksDB in DN" doc into Mandarin Chinese (#6039) Co-authored-by: David Wang --- .../content/feature/dn-merge-rocksdb.zh.md | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md diff --git a/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md new file mode 100644 index 000000000000..cd3eb5fbdc52 --- /dev/null +++ b/hadoop-hdds/docs/content/feature/dn-merge-rocksdb.zh.md @@ -0,0 +1,70 @@ +--- +title: "在DataNode上合并Container的RocksDB" +weight: 2 +menu: + main: + parent: 特性 +summary: Ozone DataNode Container模式简介V3 +--- + + +在 Ozone 中,用户数据被分割成blocks并存储在 HDDS Container中。Container是 Ozone/HDDS 的基本复制单元。每个Container都有自己的元数据和数据, 数据以文件形式保存在磁盘上,元数据保存在RocksDB中。 + +目前,数据节点上的每个Container都有一个RocksDB。随着用户数据的不断增长,一个DataNode上将会有成百上千个RocksDB实例。在一个JVM中管理如此多的RocksDB实例是一个巨大的挑战。 + +与当前使用方法不同,"Merge Container RocksDB in DN"功能将为每个Volume只使用一个RocksDB,并在此RocksDB中保存所有Container的元数据。 + +## 配置 + +这主要是DataNode的功能,不需要太多配置。 + +如果更倾向于为每个Container使用一个RocksDB的模式,那么这下面的配置可以禁用上面所介绍的功能。请注意,一旦启用该功能,强烈建议以后不要再禁用。 + +```XML + + hdds.datanode.container.schema.v3.enabled + false + Disable or enable this feature. + +``` + +无需任何特殊配置,单个RocksDB将会被创建在"hdds.datanode.dir"中所配置的数据卷下。 + +对于一些有高性能要求的高级集群管理员,他/她可以利用快速存储来保存RocksDB。在这种情况下,请配置下面这两个属性。 + +```XML + + hdds.datanode.container.db.dir + + This setting is optional. Specify where the per-disk rocksdb instances will be stored. + + + hdds.datanode.failed.db.volumes.tolerated + -1 + The number of db volumes that are allowed to fail before a datanode stops offering service. + Default -1 means unlimited, but we should have at least one good volume left. + +``` + +### 向后兼容性 + +Existing containers each has one RocksDB for them will be still accessible after this feature is enabled. All container data will co-exist in an existing Ozone cluster. + +## 参考文献 + + * [设计文档]({{< ref path="design/dn-merge-rocksdb.md" lang="en">}}) \ No newline at end of file From 06c8546c84d96b04a0caa88f8dfdf34790f7c42b Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 23 Jan 2024 12:54:11 +0100 Subject: [PATCH 34/43] HDDS-10186. 
Add static import for assertions and mocks (#6067) --- .../TestRatisUnderReplicationHandler.java | 6 ++--- .../replication/TestReplicationManager.java | 7 +++--- .../server/ratis/TestCSMMetrics.java | 5 ++-- .../metrics/TestContainerMetrics.java | 7 +++--- .../metrics/TestDatanodeQueueMetrics.java | 16 ++++++------- .../ozoneimpl/TestOzoneContainerWithTLS.java | 22 +++++++---------- .../container/server/TestContainerServer.java | 4 ++-- .../server/TestSecureContainerServer.java | 16 ++++++------- .../ozone/dn/ratis/TestDnRatisLogParser.java | 8 +++---- ...stDatanodeHddsVolumeFailureToleration.java | 6 ++--- .../freon/TestOmBucketReadWriteFileOps.java | 14 ++++++----- .../shell/TestDeletedBlocksTxnShell.java | 19 ++++++++------- .../shell/TestOzoneContainerUpgradeShell.java | 9 +++---- .../ozone/shell/TestOzoneDebugShell.java | 17 ++++++------- .../shell/TestTransferLeadershipShell.java | 24 +++++++++++-------- .../helpers/TestOmMultipartKeyInfoCodec.java | 7 +++--- .../ozone/om/helpers/TestOmPrefixInfo.java | 22 ++++++++--------- 17 files changed, 103 insertions(+), 106 deletions(-) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java index ca86cb689fb0..3f724ba44a86 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisUnderReplicationHandler.java @@ -39,11 +39,9 @@ import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.ratis.protocol.exceptions.NotLeaderException; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; import java.io.IOException; import java.util.ArrayList; @@ -605,11 +603,11 @@ public void testUnderReplicationWithVulnerableReplicasOnUniqueOrigins() throws I DECOMMISSIONING, State.UNHEALTHY, sequenceID); replicas.add(unhealthyReplica); UnderReplicatedHealthResult result = getUnderReplicatedHealthResult(); - Mockito.when(result.hasVulnerableUnhealthy()).thenReturn(true); + when(result.hasVulnerableUnhealthy()).thenReturn(true); final Set>> commands = testProcessing(replicas, Collections.emptyList(), result, 2, 1); - Assertions.assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey()); + assertEquals(unhealthyReplica.getDatanodeDetails(), commands.iterator().next().getKey()); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index fe1cdcc06957..c67008c097ba 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -56,7 +56,6 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; import java.io.IOException; import java.time.Instant; @@ -530,7 +529,7 
@@ public void testQuasiClosedContainerWithUnhealthyReplicaOnDecommissioningNodeWit ContainerReplicaProto.State.UNHEALTHY); replicas.add(unhealthy); storeContainerAndReplicas(container, replicas); - Mockito.when(replicationManager.getNodeStatus(any(DatanodeDetails.class))) + when(replicationManager.getNodeStatus(any(DatanodeDetails.class))) .thenAnswer(invocation -> { DatanodeDetails dn = invocation.getArgument(0); if (dn.equals(unhealthy.getDatanodeDetails())) { @@ -550,9 +549,9 @@ public void testQuasiClosedContainerWithUnhealthyReplicaOnDecommissioningNodeWit assertEquals(0, repQueue.overReplicatedQueueSize()); // next, this test sets up some mocks to test if RatisUnderReplicationHandler will handle this container correctly - Mockito.when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(), + when(ratisPlacementPolicy.chooseDatanodes(anyList(), anyList(), eq(null), eq(1), anyLong(), anyLong())).thenAnswer(invocation -> ImmutableList.of(MockDatanodeDetails.randomDatanodeDetails())); - Mockito.when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any())) + when(nodeManager.getTotalDatanodeCommandCounts(any(DatanodeDetails.class), any(), any())) .thenAnswer(invocation -> { Map map = new HashMap<>(); map.put(SCMCommandProto.Type.replicateContainerCommand, 0); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index cd6dfb171c05..76a0f1ed2142 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -66,7 +66,6 @@ import org.apache.ratis.util.function.CheckedBiFunction; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Assertions; /** * This class tests the metrics of ContainerStateMachine. 
@@ -142,7 +141,7 @@ static void runContainerStateMachineMetrics( pipeline, blockID, 1024); ContainerCommandResponseProto response = client.sendCommand(writeChunkRequest); - Assertions.assertEquals(ContainerProtos.Result.SUCCESS, + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); metric = getMetrics(CSMMetrics.SOURCE_NAME + @@ -160,7 +159,7 @@ static void runContainerStateMachineMetrics( ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest .getWriteChunk()); response = client.sendCommand(readChunkRequest); - Assertions.assertEquals(ContainerProtos.Result.SUCCESS, + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); metric = getMetrics(CSMMetrics.SOURCE_NAME + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index 0b83c650fe0a..d4900bb48783 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -55,7 +55,8 @@ import static org.apache.ozone.test.MetricsAsserts.assertCounter; import static org.apache.ozone.test.MetricsAsserts.assertQuantileGauges; import static org.apache.ozone.test.MetricsAsserts.getMetrics; -import org.junit.jupiter.api.Assertions; +import static org.junit.jupiter.api.Assertions.assertEquals; + import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -121,7 +122,7 @@ public void testContainerMetrics() throws Exception { pipeline, blockID, 1024); ContainerCommandResponseProto response = client.sendCommand(writeChunkRequest); - Assertions.assertEquals(ContainerProtos.Result.SUCCESS, + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); //Read Chunk @@ -129,7 +130,7 @@ public void testContainerMetrics() throws Exception { ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest .getWriteChunk()); response = client.sendCommand(readChunkRequest); - Assertions.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); MetricsRecordBuilder containerMetrics = getMetrics( "StorageContainerMetrics"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java index 06e1f933749a..b3f3030d70ad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java @@ -17,14 +17,12 @@ package org.apache.hadoop.ozone.container.metrics; -import org.apache.commons.text.WordUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; 
import org.junit.jupiter.api.Timeout; @@ -34,10 +32,12 @@ import java.io.IOException; import java.util.UUID; +import static org.apache.commons.text.WordUtils.capitalize; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.COMMAND_DISPATCHER_QUEUE_PREFIX; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.STATE_CONTEXT_COMMAND_QUEUE_PREFIX; import static org.apache.ozone.test.MetricsAsserts.getLongGauge; import static org.apache.ozone.test.MetricsAsserts.getMetrics; +import static org.assertj.core.api.Assertions.assertThat; /** * Test for queue metrics of datanodes. @@ -89,14 +89,12 @@ public void init() throws Exception { @Test public void testQueueMetrics() { - for (SCMCommandProto.Type type: SCMCommandProto.Type.values()) { - Assertions.assertTrue( - getGauge(STATE_CONTEXT_COMMAND_QUEUE_PREFIX + - WordUtils.capitalize(String.valueOf(type)) + "Size") >= 0); - Assertions.assertTrue( - getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX + - WordUtils.capitalize(String.valueOf(type)) + "Size") >= 0); + String typeSize = capitalize(String.valueOf(type)) + "Size"; + assertThat(getGauge(STATE_CONTEXT_COMMAND_QUEUE_PREFIX + typeSize)) + .isGreaterThanOrEqualTo(0); + assertThat(getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX + typeSize)) + .isGreaterThanOrEqualTo(0); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java index 841f344fc346..b3c8b732c16c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java @@ -42,7 +42,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils.LogCapturer; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -352,20 +351,15 @@ private Token createContainer( } private long createAndCloseContainer( - XceiverClientSpi client, boolean useToken) { + XceiverClientSpi client, boolean useToken) throws IOException { long id = getTestContainerID(); - try { - Token - token = createContainer(client, useToken, id); - - ContainerCommandRequestProto request = - getCloseContainer(client.getPipeline(), id, token); - ContainerCommandResponseProto response = client.sendCommand(request); - assertNotNull(response); - assertSame(response.getResult(), ContainerProtos.Result.SUCCESS); - } catch (Exception e) { - Assertions.fail(e); - } + Token token = createContainer(client, useToken, id); + + ContainerCommandRequestProto request = + getCloseContainer(client.getPipeline(), id, token); + ContainerCommandResponseProto response = client.sendCommand(request); + assertNotNull(response); + assertSame(response.getResult(), ContainerProtos.Result.SUCCESS); return id; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 0451ba5c98e1..2e3cefb94fe7 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -67,7 +67,6 @@ import org.apache.ratis.util.function.CheckedBiConsumer; import org.apache.ratis.util.function.CheckedBiFunction; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -75,6 +74,7 @@ import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * Test Containers. @@ -170,7 +170,7 @@ static void runTestClientServer( ContainerTestHelper .getCreateContainerRequest( ContainerTestHelper.getTestContainerID(), pipeline); - Assertions.assertNotNull(request.getTraceID()); + assertNotNull(request.getTraceID()); client.sendCommand(request); } finally { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 2880d90db2fa..3e2e092c2f5a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -95,24 +95,22 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.commons.lang3.exception.ExceptionUtils; import org.apache.ratis.rpc.RpcType; - -import static org.apache.ratis.rpc.SupportedRpcType.GRPC; - import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.function.CheckedBiConsumer; import org.apache.ratis.util.function.CheckedBiFunction; + +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import static org.apache.ratis.rpc.SupportedRpcType.GRPC; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; - /** * Test Container servers when security is enabled. 
*/ @@ -320,7 +318,7 @@ private static void assertFailsTokenVerification(XceiverClientSpi client, String msg = response.getMessage(); assertTrue(msg.contains(BLOCK_TOKEN_VERIFICATION_FAILED.name()), msg); } else { - final Throwable t = Assertions.assertThrows(Throwable.class, + final Throwable t = assertThrows(Throwable.class, () -> client.sendCommand(request)); assertRootCauseMessage(BLOCK_TOKEN_VERIFICATION_FAILED.name(), t); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index eae12fd4dc92..cca47e17e407 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -30,12 +30,12 @@ import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.assertj.core.api.Assertions.assertThat; /** * Test Datanode Ratis log parser. @@ -78,14 +78,14 @@ public void testRatisLogParsing() throws Exception { File currentDir = new File(pipelineDir, "current"); File logFile = new File(currentDir, "log_inprogress_0"); GenericTestUtils.waitFor(logFile::exists, 100, 15000); - Assertions.assertTrue(logFile.isFile()); + assertThat(logFile).isFile(); DatanodeRatisLogParser datanodeRatisLogParser = new DatanodeRatisLogParser(); datanodeRatisLogParser.setSegmentFile(logFile); datanodeRatisLogParser.parseRatisLogs( DatanodeRatisLogParser::smToContainerLogString); - Assertions.assertTrue(out.toString(StandardCharsets.UTF_8.name()) - .contains("Num Total Entries:")); + assertThat(out.toString(StandardCharsets.UTF_8.name())) + .contains("Num Total Entries:"); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java index 719c38816f4c..0273deb50e61 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java @@ -34,7 +34,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.GenericTestUtils.LogCapturer; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -56,6 +55,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; +import static org.assertj.core.api.Assertions.assertThat; /** * This class tests datanode can tolerate configured num of failed volumes. @@ -141,8 +141,8 @@ public void testDNCorrectlyHandlesVolumeFailureOnStartup() throws Exception { // cluster. 
GenericTestUtils.waitFor(() -> exitCapturer.getOutput() .contains("Exiting with status 1: ExitException"), 500, 60000); - Assertions.assertTrue(dsmCapturer.getOutput() - .contains("DatanodeStateMachine Shutdown due to too many bad volumes")); + assertThat(dsmCapturer.getOutput()) + .contains("DatanodeStateMachine Shutdown due to too many bad volumes"); // restore bad volumes DatanodeTestUtils.restoreBadRootDir(volRootDir0); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java index 045470647351..5244bb857905 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteFileOps.java @@ -34,7 +34,6 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -46,6 +45,9 @@ import java.io.IOException; import java.net.URI; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Test for OmBucketReadWriteFileOps. */ @@ -207,7 +209,7 @@ private void verifyFileCreation(int expectedCount, FileStatus[] fileStatuses, } } } - Assertions.assertEquals(expectedCount, actual, "Mismatch Count!"); + assertEquals(expectedCount, actual, "Mismatch Count!"); } private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { @@ -218,7 +220,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestReadLockWaitingTimeMs()); int readWaitingSamples = Integer.parseInt(readLockWaitingTimeMsStat.split(" ")[2]); - Assertions.assertTrue(readWaitingSamples > 0, "Read Lock Waiting Samples should be positive"); + assertThat(readWaitingSamples).isPositive(); String readLockHeldTimeMsStat = omLockMetrics.getReadLockHeldTimeMsStat(); LOG.info("Read Lock Held Time Stat: " + readLockHeldTimeMsStat); @@ -226,7 +228,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestReadLockHeldTimeMs()); int readHeldSamples = Integer.parseInt(readLockHeldTimeMsStat.split(" ")[2]); - Assertions.assertTrue(readHeldSamples > 0, "Read Lock Held Samples should be positive"); + assertThat(readHeldSamples).isPositive(); String writeLockWaitingTimeMsStat = omLockMetrics.getWriteLockWaitingTimeMsStat(); @@ -235,7 +237,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestWriteLockWaitingTimeMs()); int writeWaitingSamples = Integer.parseInt(writeLockWaitingTimeMsStat.split(" ")[2]); - Assertions.assertTrue(writeWaitingSamples > 0, "Write Lock Waiting Samples should be positive"); + assertThat(writeWaitingSamples).isPositive(); String writeLockHeldTimeMsStat = omLockMetrics.getWriteLockHeldTimeMsStat(); LOG.info("Write Lock Held Time Stat: " + writeLockHeldTimeMsStat); @@ -243,7 +245,7 @@ private void verifyOMLockMetrics(OMLockMetrics omLockMetrics) { omLockMetrics.getLongestWriteLockHeldTimeMs()); int writeHeldSamples = Integer.parseInt(writeLockHeldTimeMsStat.split(" ")[2]); - Assertions.assertTrue(writeHeldSamples > 0, "Write Lock Held Samples should be positive"); + assertThat(writeHeldSamples).isPositive(); } 
private static class ParameterBuilder { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 36b970f4ee9d..3b525cfa1019 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.admin.scm.GetFailedDeletedBlocksTxnSubcommand; import org.apache.hadoop.ozone.admin.scm.ResetDeletedBlockRetryCountSubcommand; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -61,6 +60,8 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test for DeletedBlocksTxnSubcommand Cli. @@ -193,7 +194,7 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(30, currentValidTxnNum); + assertEquals(30, currentValidTxnNum); // let the first 20 txns be failed List txIds = new ArrayList<>(); @@ -207,7 +208,7 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(10, currentValidTxnNum); + assertEquals(10, currentValidTxnNum); ContainerOperationClient scmClient = new ContainerOperationClient(conf); CommandLine cmd; @@ -223,12 +224,12 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { while (m.find()) { matchCount += 1; } - Assertions.assertEquals(20, matchCount); + assertEquals(20, matchCount); // print the first 10 failed txns info into file cmd.parseArgs("-o", txnFile.getAbsolutePath(), "-c", "10"); getCommand.execute(scmClient); - Assertions.assertTrue(txnFile.exists()); + assertThat(txnFile).exists(); ResetDeletedBlockRetryCountSubcommand resetCommand = new ResetDeletedBlockRetryCountSubcommand(); @@ -240,7 +241,7 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(20, currentValidTxnNum); + assertEquals(20, currentValidTxnNum); // reset the given txIds list cmd.parseArgs("-l", "11,12,13,14,15"); @@ -248,7 +249,7 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(25, currentValidTxnNum); + assertEquals(25, currentValidTxnNum); // reset the non-existing txns and valid txns, should do nothing cmd.parseArgs("-l", "1,2,3,4,5,100,101,102,103,104,105"); @@ -256,7 +257,7 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", 
currentValidTxnNum); - Assertions.assertEquals(25, currentValidTxnNum); + assertEquals(25, currentValidTxnNum); // reset all the result expired txIds, all transactions should be available cmd.parseArgs("-a"); @@ -264,6 +265,6 @@ public void testDeletedBlocksTxnSubcommand() throws Exception { flush(); currentValidTxnNum = deletedBlockLog.getNumOfValidTransactions(); LOG.info("Valid num of txns: {}", currentValidTxnNum); - Assertions.assertEquals(30, currentValidTxnNum); + assertEquals(30, currentValidTxnNum); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java index 5f2380e97e99..153d97e4d3dc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java @@ -45,7 +45,6 @@ import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -72,6 +71,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test Ozone Container upgrade shell. @@ -157,7 +158,7 @@ public void testNormalContainerUpgrade() throws Exception { String[] args = new String[]{"upgrade", "--yes"}; int exitCode = commandLine.execute(args); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); // datanode2 NodeOperationalState is IN_SERVICE upgrade fail. 
OzoneConfiguration datanode2Conf = datanodeConfigs.get(1); @@ -169,9 +170,9 @@ public void testNormalContainerUpgrade() throws Exception { String[] args2 = new String[]{"upgrade", "--yes"}; int exit2Code = commandLine2.execute(args2); - Assertions.assertEquals(0, exit2Code); + assertEquals(0, exit2Code); String cmdOut = stdout2.toString(); - Assertions.assertTrue(cmdOut.contains("IN_MAINTENANCE")); + assertThat(cmdOut).contains("IN_MAINTENANCE"); } private CommandLine upgradeCommand(PrintWriter pstdout) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index e89e1217fb44..05e26200aa0d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -44,7 +44,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import picocli.CommandLine; @@ -69,6 +68,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test Ozone Debug shell. @@ -127,12 +128,12 @@ public void testChunkInfoCmdBeforeAfterCloseContainer() throws Exception { writeKey(volumeName, bucketName, keyName); int exitCode = runChunkInfoCommand(volumeName, bucketName, keyName); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); closeContainerForKey(volumeName, bucketName, keyName); exitCode = runChunkInfoCommand(volumeName, bucketName, keyName); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); } @Test @@ -142,7 +143,7 @@ public void testChunkInfoVerifyPathsAreDifferent() throws Exception { final String keyName = UUID.randomUUID().toString(); writeKey(volumeName, bucketName, keyName); int exitCode = runChunkInfoAndVerifyPaths(volumeName, bucketName, keyName); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); } @Test @@ -163,7 +164,7 @@ public void testLdbCliForOzoneSnapshot() throws Exception { OzoneSnapshot snapshot = client.getObjectStore().listSnapshot(volumeName, bucketName, null, null) .next(); - Assertions.assertEquals(snapshotName, snapshot.getName()); + assertEquals(snapshotName, snapshot.getName()); String dbPath = getSnapshotDBPath(snapshot.getCheckpointDir()); String snapshotCurrent = dbPath + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils @@ -171,9 +172,9 @@ public void testLdbCliForOzoneSnapshot() throws Exception { String[] args = new String[] {"--db=" + dbPath, "scan", "--cf", "keyTable"}; int exitCode = cmd.execute(args); - Assertions.assertEquals(0, exitCode); + assertEquals(0, exitCode); String cmdOut = stdout.toString(); - Assertions.assertTrue(cmdOut.contains(keyName)); + assertThat(cmdOut).contains(keyName); } private static String getSnapshotDBPath(String checkPointDir) { @@ -233,7 +234,7 @@ private int runChunkInfoAndVerifyPaths(String volumeName, String bucketName, // DN storage directories are set differently for each DN // in MiniOzoneCluster as 
datanode-0,datanode-1,datanode-2 which is why // we expect 3 paths here in the set. - Assertions.assertEquals(3, blockFilePaths.size()); + assertEquals(3, blockFilePaths.size()); } return exitCode; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java index 3e5377ce228d..f27e22e9065b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java @@ -27,7 +27,6 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.ratis.protocol.RaftPeer; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -37,6 +36,11 @@ import java.util.List; import java.util.UUID; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; + /** * Test transferLeadership with SCM HA setup. */ @@ -90,7 +94,7 @@ public void shutdown() { public void testOmTransfer() throws Exception { OzoneManager oldLeader = cluster.getOMLeader(); List omList = new ArrayList<>(cluster.getOzoneManagersList()); - Assertions.assertTrue(omList.contains(oldLeader)); + assertThat(omList).contains(oldLeader); omList.remove(oldLeader); OzoneManager newLeader = omList.get(0); cluster.waitForClusterToBeReady(); @@ -98,14 +102,14 @@ public void testOmTransfer() throws Exception { String[] args1 = {"om", "transfer", "-n", newLeader.getOMNodeId()}; ozoneAdmin.execute(args1); Thread.sleep(3000); - Assertions.assertEquals(newLeader, cluster.getOMLeader()); + assertEquals(newLeader, cluster.getOMLeader()); assertOMResetPriorities(); oldLeader = cluster.getOMLeader(); String[] args3 = {"om", "transfer", "-r"}; ozoneAdmin.execute(args3); Thread.sleep(3000); - Assertions.assertNotSame(oldLeader, cluster.getOMLeader()); + assertNotSame(oldLeader, cluster.getOMLeader()); assertOMResetPriorities(); } @@ -114,7 +118,7 @@ public void testScmTransfer() throws Exception { StorageContainerManager oldLeader = getScmLeader(cluster); List scmList = new ArrayList<>(cluster. 
getStorageContainerManagersList()); - Assertions.assertTrue(scmList.contains(oldLeader)); + assertThat(scmList).contains(oldLeader); scmList.remove(oldLeader); StorageContainerManager newLeader = scmList.get(0); @@ -122,14 +126,14 @@ public void testScmTransfer() throws Exception { String[] args1 = {"scm", "transfer", "-n", newLeader.getScmId()}; ozoneAdmin.execute(args1); cluster.waitForClusterToBeReady(); - Assertions.assertEquals(newLeader, getScmLeader(cluster)); + assertEquals(newLeader, getScmLeader(cluster)); assertSCMResetPriorities(); oldLeader = getScmLeader(cluster); String[] args3 = {"scm", "transfer", "-r"}; ozoneAdmin.execute(args3); cluster.waitForClusterToBeReady(); - Assertions.assertNotSame(oldLeader, getScmLeader(cluster)); + assertNotSame(oldLeader, getScmLeader(cluster)); assertSCMResetPriorities(); } @@ -141,14 +145,14 @@ private void assertOMResetPriorities() { .getPeers(); for (RaftPeer raftPeer: raftPeers) { - Assertions.assertEquals(RatisHelper.NEUTRAL_PRIORITY, + assertEquals(RatisHelper.NEUTRAL_PRIORITY, raftPeer.getPriority()); } } private void assertSCMResetPriorities() { StorageContainerManager scm = getScmLeader(cluster); - Assertions.assertNotNull(scm); + assertNotNull(scm); Collection raftPeers = scm .getScmHAManager() .getRatisServer() @@ -156,7 +160,7 @@ private void assertSCMResetPriorities() { .getGroup() .getPeers(); for (RaftPeer raftPeer: raftPeers) { - Assertions.assertEquals(RatisHelper.NEUTRAL_PRIORITY, + assertEquals(RatisHelper.NEUTRAL_PRIORITY, raftPeer.getPriority()); } } diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java index 31846c44a7f1..fc209624d85d 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java @@ -23,13 +23,14 @@ import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.Proto2CodecTestBase; import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.UUID; import static java.nio.charset.StandardCharsets.UTF_8; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; /** * Test {@link OmMultipartKeyInfo#getCodec()}. @@ -58,7 +59,7 @@ public void testOmMultipartKeyInfoCodec() { } catch (java.io.IOException e) { e.printStackTrace(); } - Assertions.assertNotNull(data); + assertNotNull(data); OmMultipartKeyInfo multipartKeyInfo = null; try { @@ -66,7 +67,7 @@ public void testOmMultipartKeyInfoCodec() { } catch (java.io.IOException e) { e.printStackTrace(); } - Assertions.assertEquals(omMultipartKeyInfo, multipartKeyInfo); + assertEquals(omMultipartKeyInfo, multipartKeyInfo); // When random byte data passed returns null. 
try { diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java index 914697b3a621..5226f315c8be 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java @@ -23,14 +23,14 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import java.util.Collections; import java.util.HashMap; - import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; /** * Class to test OmPrefixInfo. @@ -91,7 +91,7 @@ public void testCopyObject() { ACCESS); OmPrefixInfo clonePrefixInfo = omPrefixInfo.copyObject(); - Assertions.assertEquals(omPrefixInfo, clonePrefixInfo); + assertEquals(omPrefixInfo, clonePrefixInfo); // Change acls and check. @@ -99,7 +99,7 @@ public void testCopyObject() { IAccessAuthorizer.ACLIdentityType.USER, username, IAccessAuthorizer.ACLType.READ, ACCESS)); - Assertions.assertNotEquals(omPrefixInfo, clonePrefixInfo); + assertNotEquals(omPrefixInfo, clonePrefixInfo); } @@ -116,10 +116,10 @@ public void testgetFromProtobufOneMetadataOneAcl() { OmPrefixInfo ompri = OmPrefixInfo.getFromProtobuf(prefixInfo); - Assertions.assertEquals(prefixInfoPath, ompri.getName()); - Assertions.assertEquals(1, ompri.getMetadata().size()); - Assertions.assertEquals(metaval, ompri.getMetadata().get(metakey)); - Assertions.assertEquals(1, ompri.getAcls().size()); + assertEquals(prefixInfoPath, ompri.getName()); + assertEquals(1, ompri.getMetadata().size()); + assertEquals(metaval, ompri.getMetadata().get(metakey)); + assertEquals(1, ompri.getAcls().size()); } @Test @@ -133,8 +133,8 @@ public void testGetProtobuf() { omPrefixInfo.getMetadata().put("key", "value"); OzoneManagerStorageProtos.PersistedPrefixInfo pi = omPrefixInfo.getProtobuf(); - Assertions.assertEquals(testPath, pi.getName()); - Assertions.assertEquals(1, pi.getAclsCount()); - Assertions.assertEquals(1, pi.getMetadataCount()); + assertEquals(testPath, pi.getName()); + assertEquals(1, pi.getAclsCount()); + assertEquals(1, pi.getMetadataCount()); } } From 759f68f35021b45ad2a7b0cc6bcfd46088ddb692 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Jan 2024 17:54:10 +0100 Subject: [PATCH 35/43] HDDS-10193. Bump maven-checkstyle-plugin to 3.3.1 (#6066) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index e3fb473b80d7..f9639c2d454a 100644 --- a/pom.xml +++ b/pom.xml @@ -279,7 +279,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.3.0 1.0-beta-1 1.0-alpha-11 - 3.1.2 + 3.3.1 3.9.1 3.1.0 9.3 From 8cc876d311f75a1f1a86390abe9b99abbbf66a1c Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Tue, 23 Jan 2024 11:38:38 -0800 Subject: [PATCH 36/43] HDDS-8005. 
Fixed intermittent failure in TestOmSnapshot.testSnapDiffWithMultipleSSTs (#6071) --- .../ozone/om/snapshot/TestOmSnapshot.java | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 6d8bc353eb1b..6b19c257de63 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -87,7 +87,6 @@ import org.apache.ozone.rocksdiff.CompactionNode; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.tag.Slow; -import org.apache.ozone.test.tag.Unhealthy; import org.jetbrains.annotations.NotNull; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; @@ -1784,9 +1783,7 @@ public void testListSnapshotDiffWithInvalidParameters() * sst filtering code path. */ @Test - @Unhealthy("HDDS-8005") - public void testSnapDiffWithMultipleSSTs() - throws Exception { + public void testSnapDiffWithMultipleSSTs() throws Exception { // Create a volume and 2 buckets String volumeName1 = "vol-" + counter.incrementAndGet(); String bucketName1 = "buck1"; @@ -1800,29 +1797,27 @@ public void testSnapDiffWithMultipleSSTs() String keyPrefix = "key-"; // add file to bucket1 and take snapshot createFileKeyWithPrefix(bucket1, keyPrefix); + int keyTableSize = getKeyTableSstFiles().size(); String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volumeName1, bucketName1, snap1); // 1.sst - assertEquals(1, getKeyTableSstFiles().size()); + assertEquals(1, (getKeyTableSstFiles().size() - keyTableSize)); // add files to bucket2 and flush twice to create 2 sst files for (int i = 0; i < 5; i++) { createFileKeyWithPrefix(bucket2, keyPrefix); } flushKeyTable(); // 1.sst 2.sst - assertEquals(2, getKeyTableSstFiles().size()); + assertEquals(2, (getKeyTableSstFiles().size() - keyTableSize)); for (int i = 0; i < 5; i++) { createFileKeyWithPrefix(bucket2, keyPrefix); } flushKeyTable(); // 1.sst 2.sst 3.sst - assertEquals(3, getKeyTableSstFiles().size()); + assertEquals(3, (getKeyTableSstFiles().size() - keyTableSize)); // add a file to bucket1 and take second snapshot createFileKeyWithPrefix(bucket1, keyPrefix); String snap2 = "snap" + counter.incrementAndGet(); createSnapshot(volumeName1, bucketName1, snap2); // 1.sst 2.sst 3.sst 4.sst - assertEquals(4, getKeyTableSstFiles().size()); - SnapshotDiffReportOzone diff1 = - store.snapshotDiff(volumeName1, bucketName1, snap1, snap2, - null, 0, forceFullSnapshotDiff, disableNativeDiff) - .getSnapshotDiffReport(); + assertEquals(4, (getKeyTableSstFiles().size() - keyTableSize)); + SnapshotDiffReportOzone diff1 = getSnapDiffReport(volumeName1, bucketName1, snap1, snap2); assertEquals(1, diff1.getDiffList().size()); } From ce9620391f7073ca0a38cb2229afed8f87e5a967 Mon Sep 17 00:00:00 2001 From: Chandrakant Vankayalapati <104664857+ceekay47@users.noreply.github.com> Date: Tue, 23 Jan 2024 12:53:57 -0800 Subject: [PATCH 37/43] HDDS-8649. 
Remove duplicate helper methods getDBMultipartOpenKey() (#6057) --- ...TestOzoneClientMultipartUploadWithFSO.java | 33 ++----------------- .../hadoop/ozone/om/OMMetadataManager.java | 13 ++++++++ .../hadoop/ozone/om/KeyManagerImpl.java | 11 ++----- .../ozone/om/OmMetadataManagerImpl.java | 15 +++++++++ .../key/OMKeyCreateRequestWithFSO.java | 14 +------- ...MultipartUploadCompleteRequestWithFSO.java | 17 +--------- .../request/util/OMMultipartUploadUtils.java | 32 ++---------------- 7 files changed, 36 insertions(+), 99 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 4867be49066f..2de5e83a4e9f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -58,7 +58,6 @@ import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; @@ -568,8 +567,7 @@ public void testAbortUploadSuccessWithParts() throws Exception { bucket.abortMultipartUpload(keyName, uploadID); String multipartOpenKey = - getMultipartOpenKey(uploadID, volumeName, bucketName, keyName, - metadataMgr); + metadataMgr.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID); OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable(bucketLayout).get(multipartOpenKey); OmMultipartKeyInfo omMultipartKeyInfo = @@ -853,8 +851,7 @@ private String verifyUploadedPart(String uploadID, String partName, ozoneManager.getMetadataManager().getBucketTable().get(buckKey); BucketLayout bucketLayout = buckInfo.getBucketLayout(); String multipartOpenKey = - getMultipartOpenKey(uploadID, volumeName, bucketName, keyName, - metadataMgr); + metadataMgr.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID); String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName, keyName, uploadID); @@ -881,32 +878,6 @@ private String verifyUploadedPart(String uploadID, String partName, return multipartKey; } - private String getMultipartOpenKey(String multipartUploadID, - String volName, String buckName, String kName, - OMMetadataManager omMetadataManager) throws IOException { - - String fileName = OzoneFSUtils.getFileName(kName); - final long volumeId = omMetadataManager.getVolumeId(volName); - final long bucketId = omMetadataManager.getBucketId(volName, - buckName); - long parentID = getParentID(volName, buckName, kName, - omMetadataManager); - - String multipartKey = omMetadataManager.getMultipartKey(volumeId, bucketId, - parentID, fileName, multipartUploadID); - - return multipartKey; - } - - private long getParentID(String volName, String buckName, - String kName, OMMetadataManager omMetadataManager) throws IOException { - final long volumeId = omMetadataManager.getVolumeId(volName); - final long bucketId = omMetadataManager.getBucketId(volName, - buckName); - return OMFileRequest.getParentID(volumeId, bucketId, - kName, omMetadataManager); - } - private String initiateMultipartUpload(OzoneBucket oBucket, String kName, ReplicationType 
replicationType, ReplicationFactor replicationFactor) throws IOException { diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 68c5cf758eb5..9651c16175a9 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -391,6 +391,19 @@ List getExpiredMultipartUploads( String getMultipartKey(String volume, String bucket, String key, String uploadId); + /** + * Returns the DB key name of a multipart upload key in OM metadata store + * for FSO-enabled buckets. + * + * @param volume - volume name + * @param bucket - bucket name + * @param key - key name + * @param uploadId - the upload id for this key + * @return bytes of DB key. + */ + String getMultipartKeyFSO(String volume, String bucket, String key, String + uploadId) throws IOException; + /** * Gets the multipart info table which holds the information about diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index cac2aa53f6fa..18cfa8010c0c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -806,8 +806,8 @@ public OmMultipartUploadListParts listParts(String volumeName, //if there are no parts, use the replicationType from the open key. if (isBucketFSOptimized(volumeName, bucketName)) { multipartKey = - getMultipartOpenKeyFSO(volumeName, bucketName, keyName, - uploadID); + OMMultipartUploadUtils.getMultipartOpenKey(volumeName, bucketName, keyName, uploadID, + metadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED); } OmKeyInfo omKeyInfo = metadataManager.getOpenKeyTable(bucketLayout) @@ -877,13 +877,6 @@ private String getPartName(PartKeyInfo partKeyInfo, String volName, return partName; } - private String getMultipartOpenKeyFSO(String volumeName, String bucketName, - String keyName, String uploadID) throws IOException { - OMMetadataManager metaMgr = metadataManager; - return OMMultipartUploadUtils.getMultipartOpenKeyFSO( - volumeName, bucketName, keyName, uploadID, metaMgr); - } - /** * Returns list of ACLs for given Ozone object. 
* diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 1f8c3ba3cd97..c3ed18d25484 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -86,6 +86,7 @@ import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; @@ -863,6 +864,20 @@ public String getMultipartKey(String volume, String bucket, String key, return OmMultipartUpload.getDbKey(volume, bucket, key, uploadId); } + @Override + public String getMultipartKeyFSO(String volume, String bucket, String key, String uploadId) throws IOException { + final long volumeId = getVolumeId(volume); + final long bucketId = getBucketId(volume, + bucket); + long parentId = + OMFileRequest.getParentID(volumeId, bucketId, key, this); + + String fileName = OzoneFSUtils.getFileName(key); + + return getMultipartKey(volumeId, bucketId, parentId, + fileName, uploadId); + } + /** * Returns the OzoneManagerLock used on Metadata DB. * diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index 65a485305d3f..0dec9fa459f6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; @@ -54,7 +53,6 @@ import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getParentId; /** * Handles CreateKey request layout version1. 
@@ -253,16 +251,6 @@ protected String getDBMultipartOpenKey(String volumeName, String bucketName, String keyName, String uploadID, OMMetadataManager omMetadataManager) throws IOException { - - final long volumeId = omMetadataManager.getVolumeId(volumeName); - final long bucketId = omMetadataManager.getBucketId(volumeName, - bucketName); - long parentId = - getParentId(omMetadataManager, volumeName, bucketName, keyName); - - String fileName = OzoneFSUtils.getFileName(keyName); - - return omMetadataManager.getMultipartKey(volumeId, bucketId, parentId, - fileName, uploadID); + return omMetadataManager.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java index 35867bb84e85..c224786b108a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java @@ -130,22 +130,7 @@ protected String getDBOzoneKey(OMMetadataManager omMetadataManager, protected String getDBMultipartOpenKey(String volumeName, String bucketName, String keyName, String uploadID, OMMetadataManager omMetadataManager) throws IOException { - - long parentId = - getParentId(omMetadataManager, volumeName, bucketName, keyName); - - String fileName = keyName; - Path filePath = Paths.get(keyName).getFileName(); - if (filePath != null) { - fileName = filePath.toString(); - } - - final long volumeId = omMetadataManager.getVolumeId(volumeName); - final long bucketId = omMetadataManager.getBucketId(volumeName, - bucketName); - - return omMetadataManager.getMultipartKey(volumeId, bucketId, - parentId, fileName, uploadID); + return omMetadataManager.getMultipartKeyFSO(volumeName, bucketName, keyName, uploadID); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMMultipartUploadUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMMultipartUploadUtils.java index 4d8e466fd34d..42c651887823 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMMultipartUploadUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMMultipartUploadUtils.java @@ -23,8 +23,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import java.io.IOException; import java.util.UUID; @@ -93,38 +91,12 @@ public static String getMultipartOpenKey(String volumeName, OMMetadataManager omMetadataManager, BucketLayout bucketLayout) throws IOException { if (bucketLayout == BucketLayout.FILE_SYSTEM_OPTIMIZED) { - return getMultipartOpenKeyFSO(volumeName, bucketName, - keyName, multipartUploadId, omMetadataManager); + return omMetadataManager.getMultipartKeyFSO(volumeName, bucketName, keyName, multipartUploadId); } else { - return getMultipartOpenKey(volumeName, bucketName, - keyName, multipartUploadId, omMetadataManager); + return 
omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, multipartUploadId); } } - public static String getMultipartOpenKey(String volumeName, - String bucketName, String keyName, String multipartUploadId, - OMMetadataManager omMetadataManager) { - return omMetadataManager.getMultipartKey( - volumeName, bucketName, keyName, multipartUploadId); - } - - public static String getMultipartOpenKeyFSO(String volumeName, - String bucketName, String keyName, String uploadID, - OMMetadataManager metaMgr) throws IOException { - String fileName = OzoneFSUtils.getFileName(keyName); - - final long volumeId = metaMgr.getVolumeId(volumeName); - final long bucketId = metaMgr.getBucketId(volumeName, bucketName); - long parentID = - OMFileRequest.getParentID(volumeId, bucketId, - keyName, metaMgr); - - String multipartKey = metaMgr.getMultipartKey(volumeId, bucketId, - parentID, fileName, uploadID); - - return multipartKey; - } - /** * Check whether key's isMultipartKey flag is set. From fb28d4dfbc81041e478b7e55b1e135bb15ee3e06 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 24 Jan 2024 06:08:15 +0100 Subject: [PATCH 38/43] HDDS-10175. Pre-build Ozone in flaky-test-check (#6043) --- .github/workflows/intermittent-test-check.yml | 63 +++++++++++++++---- 1 file changed, 51 insertions(+), 12 deletions(-) diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml index d686ec41aaa0..bc3d762a5f2a 100644 --- a/.github/workflows/intermittent-test-check.yml +++ b/.github/workflows/intermittent-test-check.yml @@ -92,8 +92,41 @@ jobs: printf -v x "%s," "${splits[@]}" split_matrix="[${x%,}]" echo "matrix=$split_matrix" >> $GITHUB_OUTPUT + build: + needs: + - prepare-job + runs-on: ubuntu-20.04 + timeout-minutes: 60 + steps: + - name: Checkout project + uses: actions/checkout@v3 + - name: Cache for maven dependencies + uses: actions/cache@v3 + with: + path: | + ~/.m2/repository + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + restore-keys: | + maven-repo- + - name: Setup java + uses: actions/setup-java@v3 + with: + distribution: 'temurin' + java-version: 8 + - name: Build (most) of Ozone + run: hadoop-ozone/dev-support/checks/build.sh -Dskip.npx -Dskip.installnpx -DskipShade + - name: Store Maven repo for tests + uses: actions/upload-artifact@v3 + with: + name: ozone-repo + path: | + ~/.m2/repository/org/apache/ozone + retention-days: 1 run-test: - needs: prepare-job + needs: + - prepare-job + - build name: Run-Split runs-on: ubuntu-20.04 strategy: @@ -105,14 +138,22 @@ jobs: with: ref: ${{ github.event.inputs.ref }} - name: Cache for maven dependencies - uses: actions/cache@v3 + uses: actions/cache/restore@v3 with: - path: ~/.m2/repository - key: maven-repo-${{ hashFiles('**/pom.xml') }}-8-single + path: | + ~/.m2/repository + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | - maven-repo-${{ hashFiles('**/pom.xml') }}-8 - maven-repo-${{ hashFiles('**/pom.xml') }} maven-repo- + - name: Download Ozone repo + id: download-ozone-repo + uses: actions/download-artifact@v3 + with: + name: ozone-repo + path: | + ~/.m2/repository/org/apache/ozone + continue-on-error: true - name: Setup java uses: actions/setup-java@v3 with: @@ -120,6 +161,10 @@ jobs: java-version: 8 - name: Execute tests run: | + if [[ -e "${{ steps.download-ozone-repo.outputs.download-path }}" ]]; then + export OZONE_REPO_CACHED=true + fi + test_type=${{ 
needs.prepare-job.outputs.test_type }} args="-DexcludedGroups=unhealthy" if [ "$test_type" = "integration" ]; then @@ -145,12 +190,6 @@ jobs: with: name: result-${{ env.TEST_CLASS }}-split-${{ matrix.split }} path: target/${{ needs.prepare-job.outputs.test_type }} - - name: Delete temporary build artifacts before caching - run: | - #Never cache local artifacts - rm -rf ~/.m2/repository/org/apache/ozone/hdds* - rm -rf ~/.m2/repository/org/apache/ozone/ozone* - if: always() count-failures: if: ${{ always() }} needs: run-test From a322aafa07c3d3589c32c9fba387a2a879b5d494 Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Wed, 24 Jan 2024 11:15:11 +0530 Subject: [PATCH 39/43] HDDS-10042. Show IDs of under-replicated and unclosed containers for decommissioning nodes (#5929) --- .../hadoop/hdds/scm/client/ScmClient.java | 10 ++++ .../StorageContainerLocationProtocol.java | 9 ++++ ...ocationProtocolClientSideTranslatorPB.java | 21 ++++++++ .../src/main/proto/ScmAdminProtocol.proto | 16 ++++++ .../hdds/scm/node/DatanodeAdminMonitor.java | 6 +++ .../scm/node/DatanodeAdminMonitorImpl.java | 35 +++++++++---- .../scm/node/NodeDecommissionManager.java | 7 +++ ...ocationProtocolServerSideTranslatorPB.java | 26 ++++++++++ .../scm/server/SCMClientProtocolServer.java | 9 ++++ .../scm/node/TestDatanodeAdminMonitor.java | 44 ++++++++++++++++ .../scm/cli/ContainerOperationClient.java | 5 ++ .../DecommissionStatusSubCommand.java | 3 ++ .../TestDecommissionStatusSubCommand.java | 51 +++++++++++++++++++ 13 files changed, 233 insertions(+), 9 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 120535405ecd..402398e36c3f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -20,10 +20,12 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto; import org.apache.hadoop.hdds.scm.DatanodeAdminError; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -179,6 +181,14 @@ ContainerWithPipeline createContainer(HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor replicationFactor, String owner) throws IOException; + /** + * Gets the list of underReplicated and unClosed containers on a decommissioning node. + * + * @param dn - Datanode detail + * @return Lists of underReplicated and Unclosed containers + */ + Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException; + /** * Returns a set of Nodes that meet a query criteria. Passing null for opState * or nodeState acts like a wild card, returning all nodes in that state. 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index be0f41b62295..dabdc0b82298 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -19,6 +19,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; @@ -220,6 +221,14 @@ List listContainer(long startContainerID, */ void deleteContainer(long containerID) throws IOException; + /** + * Gets the list of underReplicated and unClosed containers on a decommissioning node. + * + * @param dn - Datanode detail + * @return Lists of underReplicated and unClosed containers + */ + Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException; + /** * Queries a list of Node Statuses. Passing a null for either opState or * state acts like a wildcard returning all nodes in that state. diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index eb3f419e48d1..84a0fa4886ce 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicatedReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto; @@ -55,6 +56,9 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainersOnDecomNodeProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenResponseProto; import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineBatchRequestProto; @@ -459,6 +463,23 @@ public void deleteContainer(long containerID) } + @Override + public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException { + GetContainersOnDecomNodeRequestProto request = GetContainersOnDecomNodeRequestProto.newBuilder() + .setDatanodeDetails(dn.getProtoBufMessage()).build(); + GetContainersOnDecomNodeResponseProto response = submitRequest(Type.GetContainersOnDecomNode, + builder -> builder.setGetContainersOnDecomNodeRequest(request)).getGetContainersOnDecomNodeResponse(); + Map> containerMap = new HashMap<>(); + for (ContainersOnDecomNodeProto containersProto : response.getContainersOnDecomNodeList()) { + List containerIds = new ArrayList<>(); + for (HddsProtos.ContainerID id : containersProto.getIdList()) { + containerIds.add(ContainerID.getFromProtobuf(id)); + } + containerMap.put(containersProto.getName(), containerIds); + } + return containerMap; + } + /** * Queries a list of Nodes based on their operational state or health state. * Passing a null for either value acts as a wildcard for that state. diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index 6cfddcc2f6c4..6adca817ed1d 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -82,6 +82,7 @@ message ScmContainerLocationRequest { optional GetFailedDeletedBlocksTxnRequestProto getFailedDeletedBlocksTxnRequest = 43; optional DecommissionScmRequestProto decommissionScmRequest = 44; optional SingleNodeQueryRequestProto singleNodeQueryRequest = 45; + optional GetContainersOnDecomNodeRequestProto getContainersOnDecomNodeRequest = 46; } message ScmContainerLocationResponse { @@ -135,6 +136,7 @@ message ScmContainerLocationResponse { optional GetFailedDeletedBlocksTxnResponseProto getFailedDeletedBlocksTxnResponse = 43; optional DecommissionScmResponseProto decommissionScmResponse = 44; optional SingleNodeQueryResponseProto singleNodeQueryResponse = 45; + optional GetContainersOnDecomNodeResponseProto getContainersOnDecomNodeResponse = 46; enum Status { OK = 1; @@ -187,6 +189,7 @@ enum Type { GetFailedDeletedBlocksTransaction = 39; DecommissionScm = 40; SingleNodeQuery = 41; + GetContainersOnDecomNode = 42; } /** @@ -602,6 +605,19 @@ message DecommissionScmResponseProto { optional string errorMsg = 2; } +message GetContainersOnDecomNodeRequestProto { + required DatanodeDetailsProto datanodeDetails = 1; +} + +message ContainersOnDecomNodeProto { + required string name = 1; + repeated ContainerID id = 2; +} + +message GetContainersOnDecomNodeResponseProto { + repeated ContainersOnDecomNodeProto containersOnDecomNode = 1; +} + /** * Protocol used from an HDFS node to StorageContainerManager. See the request * and response messages for details of the RPC calls. 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java index e0b4c3ce543c..fbfbb49c2521 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java @@ -18,7 +18,11 @@ package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import java.util.List; +import java.util.Map; import java.util.Set; /** @@ -31,4 +35,6 @@ public interface DatanodeAdminMonitor extends Runnable { void stopMonitoring(DatanodeDetails dn); Set getTrackedNodes(); void setMetrics(NodeDecommissionMetrics metrics); + Map> getContainersReplicatedOnNode(DatanodeDetails dn) + throws NodeNotFoundException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java index 51c6d12dea92..d7975ff1e58e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java @@ -96,8 +96,8 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor { public static final class TrackedNode { private DatanodeDetails datanodeDetails; - private long startTime = 0L; + private Map> containersReplicatedOnNode = new ConcurrentHashMap<>(); public TrackedNode(DatanodeDetails datanodeDetails, long startTime) { this.datanodeDetails = datanodeDetails; @@ -122,6 +122,15 @@ public DatanodeDetails getDatanodeDetails() { public long getStartTime() { return startTime; } + + public Map> getContainersReplicatedOnNode() { + return containersReplicatedOnNode; + } + + public void setContainersReplicatedOnNode(List underReplicated, List unClosed) { + this.containersReplicatedOnNode.put("UnderReplicated", Collections.unmodifiableList(underReplicated)); + this.containersReplicatedOnNode.put("UnClosed", Collections.unmodifiableList(unClosed)); + } } private Map containerStateByHost; @@ -423,9 +432,7 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) boolean isHealthy = replicaSet.isHealthyEnoughForOffline(); if (!isHealthy) { - if (LOG.isDebugEnabled()) { - unClosedIDs.add(cid); - } + unClosedIDs.add(cid); if (unclosed < containerDetailsLoggingLimit || LOG.isDebugEnabled()) { LOG.info("Unclosed Container {} {}; {}", cid, replicaSet, replicaDetails(replicaSet.getReplicas())); @@ -448,20 +455,18 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) replicationManager.checkContainerStatus(replicaSet.getContainer(), report); replicatedOK = report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED) == 0; } - if (replicatedOK) { sufficientlyReplicated++; } else { - if (LOG.isDebugEnabled()) { - underReplicatedIDs.add(cid); - } + underReplicatedIDs.add(cid); if (underReplicated < containerDetailsLoggingLimit || LOG.isDebugEnabled()) { LOG.info("Under Replicated Container {} {}; {}", cid, replicaSet, replicaDetails(replicaSet.getReplicas())); } underReplicated++; } } catch (ContainerNotFoundException e) { - LOG.warn("ContainerID {} present in node list for {} but 
not found in containerManager", cid, dn); + LOG.warn("ContainerID {} present in node list for {} but not found in containerManager", cid, + dn.getDatanodeDetails()); } } LOG.info("{} has {} sufficientlyReplicated, {} deleting, {} " + @@ -485,9 +490,21 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) unclosed, unClosedIDs.stream().map( Object::toString).collect(Collectors.joining(", "))); } + dn.setContainersReplicatedOnNode(underReplicatedIDs, unClosedIDs); return underReplicated == 0 && unclosed == 0; } + public Map> getContainersReplicatedOnNode(DatanodeDetails dn) { + Iterator iterator = trackedNodes.iterator(); + while (iterator.hasNext()) { + TrackedNode trackedNode = iterator.next(); + if (trackedNode.equals(new TrackedNode(dn, 0L))) { + return trackedNode.getContainersReplicatedOnNode(); + } + } + return new HashMap<>(); + } + private String replicaDetails(Collection replicas) { StringBuilder sb = new StringBuilder(); sb.append("Replicas{"); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java index c98cc63c4668..38e59b89e767 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -40,6 +41,7 @@ import java.util.Comparator; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -292,6 +294,11 @@ public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, TimeUnit.SECONDS); } + public Map> getContainersReplicatedOnNode(DatanodeDetails dn) + throws NodeNotFoundException { + return getMonitor().getContainersReplicatedOnNode(dn); + } + @VisibleForTesting public DatanodeAdminMonitor getMonitor() { return monitor; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 6d47a78a7d77..f402b9309fe4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.TransferLeadershipRequestProto; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.TransferLeadershipResponseProto; @@ -51,6 +52,9 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainersOnDecomNodeProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainersOnDecomNodeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto; @@ -614,6 +618,12 @@ public ScmContainerLocationResponse processRequest( .setDecommissionNodesResponse(decommissionNodes( request.getDecommissionNodesRequest())) .build(); + case GetContainersOnDecomNode: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setGetContainersOnDecomNodeResponse(getContainersOnDecomNode(request.getGetContainersOnDecomNodeRequest())) + .build(); case RecommissionNodes: return ScmContainerLocationResponse.newBuilder() .setCmdType(request.getCmdType()) @@ -1160,6 +1170,22 @@ public DecommissionNodesResponseProto decommissionNodes( return response.build(); } + public GetContainersOnDecomNodeResponseProto getContainersOnDecomNode(GetContainersOnDecomNodeRequestProto request) + throws IOException { + Map> containerMap = impl.getContainersOnDecomNode( + DatanodeDetails.getFromProtoBuf(request.getDatanodeDetails())); + List containersProtoList = new ArrayList<>(); + for (Map.Entry> containerList : containerMap.entrySet()) { + List containerIdsProto = new ArrayList<>(); + for (ContainerID id : containerList.getValue()) { + containerIdsProto.add(id.getProtobuf()); + } + containersProtoList.add(ContainersOnDecomNodeProto.newBuilder().setName(containerList.getKey()) + .addAllId(containerIdsProto).build()); + } + return GetContainersOnDecomNodeResponseProto.newBuilder().addAllContainersOnDecomNode(containersProtoList).build(); + } + public RecommissionNodesResponseProto recommissionNodes( RecommissionNodesRequestProto request) throws IOException { List errors = diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index ac92ea893dbb..13bef8590b79 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -588,6 +588,15 @@ public void deleteContainer(long containerID) throws IOException { } } + @Override + public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException { + try { + return scm.getScmDecommissionManager().getContainersReplicatedOnNode(dn); + } catch (NodeNotFoundException e) { + throw new IOException("Failed to get containers list. 
Unable to find required node", e); + } + } + @Override public List queryNode( HddsProtos.NodeOperationalState opState, HddsProtos.NodeState state, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index a2df04742f55..f4002a7da1ed 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -836,6 +836,50 @@ public void testCancelledNodesMovedToInService() nodeManager.getNodeStatus(dn1).getOperationalState()); } + @Test + public void testContainersReplicatedOnDecomDnAPI() + throws NodeNotFoundException, ContainerNotFoundException { + conf.setBoolean("hdds.scm.replication.enable.legacy", false); + + DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails(); + nodeManager.register(dn1, + new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING, + HddsProtos.NodeState.HEALTHY)); + + Set containers = new HashSet<>(); + containers.add(ContainerID.valueOf(1)); + containers.add(ContainerID.valueOf(2)); + nodeManager.setContainers(dn1, containers); + DatanodeAdminMonitorTestUtil + .mockGetContainerReplicaCount(repManager, + true, + HddsProtos.LifeCycleState.CLOSED, + DECOMMISSIONING, + IN_SERVICE, + IN_SERVICE); + + monitor.startMonitoring(dn1); + monitor.run(); + assertEquals(1, monitor.getTrackedNodeCount()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dn1).getOperationalState()); + assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 2); + assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 0); + + DatanodeAdminMonitorTestUtil + .mockGetContainerReplicaCount(repManager, + true, + HddsProtos.LifeCycleState.OPEN, + IN_SERVICE); + + monitor.run(); + assertEquals(1, monitor.getTrackedNodeCount()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dn1).getOperationalState()); + assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 0); + assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 2); + } + /** * Generate a set of ContainerID, starting from an ID of zero up to the given * count minus 1. 
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 1daffbb9b940..d07e696e7ef0 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -216,6 +216,11 @@ public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type, } } + @Override + public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException { + return storageContainerLocationClient.getContainersOnDecomNode(dn); + } + @Override public List queryNode( HddsProtos.NodeOperationalState opState, diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index eaf112430984..0e9cbe5f5ef8 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.server.http.HttpConfig; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import picocli.CommandLine; @@ -123,6 +124,8 @@ public void execute(ScmClient scmClient) throws IOException { node.getNodeID()); printDetails(datanode); printCounts(datanode, counts, numDecomNodes); + Map> containers = scmClient.getContainersOnDecomNode(datanode); + System.out.println(containers); } } private void printDetails(DatanodeDetails datanode) { diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index acd2ee52b0d3..a26088bb31ed 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; @@ -24,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.junit.jupiter.api.AfterAll; @@ -39,7 +41,9 @@ import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -66,6 +70,7 @@ public class TestDecommissionStatusSubCommand { private final PrintStream originalErr = System.err; private DecommissionStatusSubCommand cmd; 
private List nodes = getNodeDetails(2); + private Map> containerOnDecom = getContainersOnDecomNodes(); private static HttpServer httpServer; private static OzoneConfiguration conf; @@ -106,6 +111,7 @@ public void setup() throws UnsupportedEncodingException { conf.set(ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY, "localhost"); cmd.setParent(conf); } + @AfterEach public void tearDown() { System.setOut(originalOut); @@ -117,6 +123,7 @@ public void testSuccessWhenDecommissionStatus() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning + when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -128,9 +135,15 @@ public void testSuccessWhenDecommissionStatus() throws IOException { p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); } @Test @@ -139,6 +152,7 @@ public void testNoNodesWhenDecommissionStatus() throws IOException { // No nodes in decommissioning. No error is printed when(scmClient.queryNode(any(), any(), any(), any())) .thenReturn(new ArrayList<>()); + when(scmClient.getContainersOnDecomNode(any())).thenReturn(new HashMap<>()); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -160,6 +174,7 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning + when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(0).getNodeID().getUuid()); @@ -168,11 +183,17 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException { Pattern p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); // as uuid of only host0 is passed, host1 should NOT be displayed p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); + p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertFalse(m.find()); } @Test @@ -180,6 +201,10 @@ public void testIdOptionDecommissionStatusFail() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes.subList(0, 1)); // host0 decommissioning + when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(0).getNodeID()))) + .thenReturn(containerOnDecom); + 
when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) + .thenReturn(new HashMap<>()); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(1).getNodeID().getUuid()); @@ -204,6 +229,7 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning + when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); @@ -212,11 +238,17 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException { Pattern p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); // as IpAddress of only host1 is passed, host0 should NOT be displayed p = Pattern.compile("Datanode:\\s.*host0.\\)", Pattern.MULTILINE); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); + p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertFalse(m.find()); } @Test @@ -224,6 +256,10 @@ public void testIpOptionDecommissionStatusFail() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes.subList(0, 1)); // host0 decommissioning + when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(0).getNodeID()))) + .thenReturn(containerOnDecom); + when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) + .thenReturn(new HashMap<>()); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); @@ -267,4 +303,19 @@ private List getNodeDetails(int n) { return nodesList; } + private Map> getContainersOnDecomNodes() { + Map> containerMap = new HashMap<>(); + List underReplicated = new ArrayList<>(); + underReplicated.add(new ContainerID(1L)); + underReplicated.add(new ContainerID(2L)); + underReplicated.add(new ContainerID(3L)); + containerMap.put("UnderReplicated", underReplicated); + List unclosed = new ArrayList<>(); + unclosed.add(new ContainerID(10L)); + unclosed.add(new ContainerID(11L)); + unclosed.add(new ContainerID(12L)); + containerMap.put("UnClosed", unclosed); + return containerMap; + } + } From 5b80ce62a129bf13c61dfdd4a8438072baaeb1cc Mon Sep 17 00:00:00 2001 From: tanvipenumudy <46785609+tanvipenumudy@users.noreply.github.com> Date: Wed, 24 Jan 2024 12:39:02 +0530 Subject: [PATCH 40/43] HDDS-10142. 
Add hidden command to set bucket encryption key to fix HDDS-7449 (#6020) --- .../hadoop/ozone/client/OzoneBucket.java | 6 ++ .../ozone/client/protocol/ClientProtocol.java | 18 +++++ .../hadoop/ozone/client/rpc/RpcClient.java | 16 ++++ .../hadoop/ozone/om/helpers/OmBucketArgs.java | 35 ++++++++ .../hadoop/ozone/shell/TestOzoneShellHA.java | 62 +++++++++++++- .../ozone/shell/TestOzoneShellHAWithFSO.java | 1 + .../src/main/proto/OmClientProtocol.proto | 1 + .../hadoop/ozone/common/BekInfoUtils.java | 70 ++++++++++++++++ .../request/bucket/OMBucketCreateRequest.java | 42 +--------- .../bucket/OMBucketSetPropertyRequest.java | 20 +++++ .../ozone/client/ClientProtocolStub.java | 8 ++ .../ozone/shell/bucket/BucketCommands.java | 3 +- .../ozone/shell/bucket/SetEncryptionKey.java | 81 +++++++++++++++++++ 13 files changed, 322 insertions(+), 41 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 441d9143b598..ca885b3b6b06 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -413,6 +413,12 @@ public void setListCacheSize(int listCacheSize) { this.listCacheSize = listCacheSize; } + @Deprecated + public void setEncryptionKey(String bekName) throws IOException { + proxy.setEncryptionKey(volumeName, name, bekName); + encryptionKeyName = bekName; + } + /** * Creates a new key in the bucket, with default replication type RATIS and * with replication factor THREE. diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 5316f7a99e9f..e455e3040adb 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -997,6 +997,24 @@ void setBucketQuota(String volumeName, String bucketName, void setReplicationConfig(String volumeName, String bucketName, ReplicationConfig replicationConfig) throws IOException; + /** + * Set Bucket Encryption Key (BEK). + * + * @param volumeName + * @param bucketName + * @param bekName + * @throws IOException + * @deprecated This functionality is deprecated as it is not intended for + * users to reset bucket encryption under normal circumstances and may be + * removed in the future. Users are advised to exercise caution and consider + * alternative approaches for managing bucket encryption unless HDDS-7449 or + * HDDS-7526 is encountered. As a result, the setter methods for this + * functionality have been marked as deprecated. + */ + @Deprecated + void setEncryptionKey(String volumeName, String bucketName, + String bekName) throws IOException; + /** * Returns OzoneKey that contains the application generated/visible * metadata for an Ozone Object. 
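Because the javadoc above only describes the contract, a short client-side sketch may help; it is not part of the patch, and the volume, bucket and key names ("vol1", "bucket1", "enckey1") are invented for illustration:

    // Uses the OzoneBucket#setEncryptionKey method added earlier in this patch
    // plus the standard OzoneClientFactory/ObjectStore entry points; assumed
    // to run inside a method that declares IOException.
    OzoneConfiguration conf = new OzoneConfiguration();
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1")
          .getBucket("bucket1");
      // Deprecated on purpose: only meant for buckets hit by HDDS-7449/HDDS-7526.
      bucket.setEncryptionKey("enckey1");
    }

The same reset is exposed through the shell later in this patch as a hidden command, e.g. "ozone sh bucket set-encryption-key /vol1/bucket1 -k enckey1".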
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 850ae0d19376..e14ae5828d70 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1213,6 +1213,22 @@ public void setBucketQuota(String volumeName, String bucketName, } + @Deprecated + @Override + public void setEncryptionKey(String volumeName, String bucketName, + String bekName) throws IOException { + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); + BucketEncryptionKeyInfo bek = new BucketEncryptionKeyInfo.Builder() + .setKeyName(bekName).build(); + builder.setVolumeName(volumeName) + .setBucketName(bucketName) + .setBucketEncryptionKey(bek); + OmBucketArgs finalArgs = builder.build(); + ozoneManagerClient.setBucketProperty(finalArgs); + } + @Override public void setReplicationConfig( String volumeName, String bucketName, ReplicationConfig replicationConfig) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index f8c752aab271..e382377dff45 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; /** * A class that encapsulates Bucket Arguments. @@ -50,6 +51,10 @@ public final class OmBucketArgs extends WithMetadata implements Auditable { */ private StorageType storageType; + /** + * Bucket encryption key info if encryption is enabled. + */ + private BucketEncryptionKeyInfo bekInfo; private long quotaInBytes = OzoneConsts.QUOTA_RESET; private long quotaInNamespace = OzoneConsts.QUOTA_RESET; private boolean quotaInBytesSet = false; @@ -150,6 +155,10 @@ public DefaultReplicationConfig getDefaultReplicationConfig() { return defaultReplicationConfig; } + public BucketEncryptionKeyInfo getBucketEncryptionKeyInfo() { + return bekInfo; + } + /** * Sets the Bucket default replication config. */ @@ -168,6 +177,12 @@ private void setQuotaInNamespace(long quotaInNamespace) { this.quotaInNamespace = quotaInNamespace; } + @Deprecated + private void setBucketEncryptionKey( + BucketEncryptionKeyInfo bucketEncryptionKey) { + this.bekInfo = bucketEncryptionKey; + } + /** * Returns Bucket Owner Name. 
* @@ -216,6 +231,7 @@ public static class Builder { private long quotaInBytes; private boolean quotaInNamespaceSet = false; private long quotaInNamespace; + private BucketEncryptionKeyInfo bekInfo; private DefaultReplicationConfig defaultReplicationConfig; private String ownerName; /** @@ -241,6 +257,12 @@ public Builder setIsVersionEnabled(Boolean versionFlag) { return this; } + @Deprecated + public Builder setBucketEncryptionKey(BucketEncryptionKeyInfo info) { + this.bekInfo = info; + return this; + } + public Builder addMetadata(Map metadataMap) { this.metadata = metadataMap; return this; @@ -291,6 +313,9 @@ public OmBucketArgs build() { if (quotaInNamespaceSet) { omBucketArgs.setQuotaInNamespace(quotaInNamespace); } + if (bekInfo != null && bekInfo.getKeyName() != null) { + omBucketArgs.setBucketEncryptionKey(bekInfo); + } return omBucketArgs; } } @@ -322,6 +347,11 @@ public BucketArgs getProtobuf() { if (ownerName != null) { builder.setOwnerName(ownerName); } + + if (bekInfo != null && bekInfo.getKeyName() != null) { + builder.setBekInfo(OMPBHelper.convert(bekInfo)); + } + return builder.build(); } @@ -355,6 +385,11 @@ public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { if (bucketArgs.hasQuotaInNamespace()) { omBucketArgs.setQuotaInNamespace(bucketArgs.getQuotaInNamespace()); } + + if (bucketArgs.hasBekInfo()) { + omBucketArgs.setBucketEncryptionKey( + OMPBHelper.convert(bucketArgs.getBekInfo())); + } return omBucketArgs; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index dd84489b68f4..de25f24f33c8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -29,12 +29,17 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.kms.KMSClientProvider; +import org.apache.hadoop.crypto.key.kms.server.MiniKMS; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.TrashPolicy; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.client.ReplicationType; @@ -60,7 +65,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.fs.TrashPolicy; import org.apache.hadoop.ozone.om.TrashPolicyOzone; import com.google.common.base.Strings; @@ -81,6 +85,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -117,6 +122,8 @@ public class TestOzoneShellHA { private static File testFile; private static String testFilePathString; private static MiniOzoneCluster cluster = null; + private 
static File testDir; + private static MiniKMS miniKMS; private static OzoneClient client; private OzoneShell ozoneShell = null; private OzoneAdmin ozoneAdminShell = null; @@ -140,9 +147,20 @@ public class TestOzoneShellHA { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + startKMS(); startCluster(conf); } + protected static void startKMS() throws Exception { + testDir = GenericTestUtils.getTestDir( + TestOzoneShellHA.class.getSimpleName()); + File kmsDir = new File(testDir, UUID.randomUUID().toString()); + assertTrue(kmsDir.mkdirs()); + MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder(); + miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build(); + miniKMS.start(); + } + protected static void startCluster(OzoneConfiguration conf) throws Exception { String path = GenericTestUtils.getTempPath( TestOzoneShellHA.class.getSimpleName()); @@ -160,6 +178,8 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); final int numDNs = 5; + conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, + getKeyProviderURI(miniKMS)); cluster = MiniOzoneCluster.newOMHABuilder(conf) .setClusterId(clusterId) .setScmId(scmId) @@ -181,9 +201,17 @@ public static void shutdown() { cluster.shutdown(); } + if (miniKMS != null) { + miniKMS.stop(); + } + if (baseDir != null) { FileUtil.fullyDelete(baseDir, true); } + + if (testDir != null) { + FileUtil.fullyDelete(testDir, true); + } } @BeforeEach @@ -1322,6 +1350,33 @@ public void testSetECReplicationConfigOnBucket() throws Exception { } } + @Test + public void testSetEncryptionKey() throws Exception { + final String volumeName = "volume111"; + getVolume(volumeName); + String bucketPath = "/volume111/bucket0"; + String[] args = new String[]{"bucket", "create", bucketPath}; + execute(ozoneShell, args); + + OzoneVolume volume = + client.getObjectStore().getVolume(volumeName); + OzoneBucket bucket = volume.getBucket("bucket0"); + assertNull(bucket.getEncryptionKeyName()); + String newEncKey = "enckey1"; + + KeyProvider provider = cluster.getOzoneManager().getKmsProvider(); + KeyProvider.Options options = KeyProvider.options(cluster.getConf()); + options.setDescription(newEncKey); + options.setBitLength(128); + provider.createKey(newEncKey, options); + provider.flush(); + + args = new String[]{"bucket", "set-encryption-key", bucketPath, "-k", + newEncKey}; + execute(ozoneShell, args); + assertEquals(newEncKey, volume.getBucket("bucket0").getEncryptionKeyName()); + } + @Test public void testCreateBucketWithECReplicationConfigWithoutReplicationParam() { getVolume("volume102"); @@ -1935,4 +1990,9 @@ public void testLinkedAndNonLinkedBucketMetaData() new String[]{"volume", "delete", "/volume1"}); out.reset(); } + + private static String getKeyProviderURI(MiniKMS kms) { + return KMSClientProvider.SCHEME_NAME + "://" + + kms.getKMSUrl().toExternalForm().replace("://", "@"); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java index 0c6a5e814380..3d1757ecbd9c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHAWithFSO.java @@ -37,6 +37,7 @@ public static void init() 
throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED); + startKMS(); startCluster(conf); } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index fd83981507c6..19844eed01be 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -805,6 +805,7 @@ message BucketArgs { optional uint64 quotaInNamespace = 9; optional string ownerName = 10; optional hadoop.hdds.DefaultReplicationConfig defaultReplicationConfig = 11; + optional BucketEncryptionInfoProto bekInfo = 12; } message PrefixInfo { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java new file mode 100644 index 000000000000..7cfad3b8a33a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/common/BekInfoUtils.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.common; + +import org.apache.hadoop.crypto.CipherSuite; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketEncryptionInfoProto; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CryptoProtocolVersionProto.ENCRYPTION_ZONES; + +/** + * Utility class for common bucket encryption key operations. + */ +public final class BekInfoUtils { + + private BekInfoUtils() { + } + + public static BucketEncryptionInfoProto getBekInfo( + KeyProviderCryptoExtension kmsProvider, BucketEncryptionInfoProto bek) + throws IOException { + BucketEncryptionInfoProto.Builder bekb = null; + if (kmsProvider == null) { + throw new OMException("Invalid KMS provider, check configuration " + + CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, + OMException.ResultCodes.INVALID_KMS_PROVIDER); + } + if (bek.getKeyName() == null) { + throw new OMException("Bucket encryption key needed.", OMException + .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // Talk to KMS to retrieve the bucket encryption key info. + KeyProvider.Metadata metadata = kmsProvider.getMetadata( + bek.getKeyName()); + if (metadata == null) { + throw new OMException("Bucket encryption key " + bek.getKeyName() + + " doesn't exist.", + OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); + } + // If the provider supports pool for EDEKs, this will fill in the pool + kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); + bekb = BucketEncryptionInfoProto.newBuilder() + .setKeyName(bek.getKeyName()) + .setCryptoProtocolVersion(ENCRYPTION_ZONES) + .setSuite(OMPBHelper.convert( + CipherSuite.convert(metadata.getCipher()))); + return bekb.build(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index de8152af4687..7cce3ac456f9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -18,10 +18,7 @@ package org.apache.hadoop.ozone.om.request.bucket; -import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -33,6 +30,7 @@ import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.common.BekInfoUtils; import 
org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; @@ -52,14 +50,12 @@ import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketEncryptionInfoProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.util.Time; @@ -75,7 +71,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CryptoProtocolVersionProto.ENCRYPTION_ZONES; /** * Handles CreateBucket Request. @@ -116,7 +111,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .setModificationTime(initialTime); if (bucketInfo.hasBeinfo()) { - newBucketInfo.setBeinfo(getBeinfo(kmsProvider, bucketInfo)); + newBucketInfo.setBeinfo( + BekInfoUtils.getBekInfo(kmsProvider, bucketInfo.getBeinfo())); } boolean hasSourceVolume = bucketInfo.hasSourceVolume(); @@ -338,38 +334,6 @@ private void addDefaultAcls(OmBucketInfo omBucketInfo, omBucketInfo.setAcls(acls); } - private BucketEncryptionInfoProto getBeinfo( - KeyProviderCryptoExtension kmsProvider, BucketInfo bucketInfo) - throws IOException { - BucketEncryptionInfoProto bek = bucketInfo.getBeinfo(); - BucketEncryptionInfoProto.Builder bekb = null; - if (kmsProvider == null) { - throw new OMException("Invalid KMS provider, check configuration " + - CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, - OMException.ResultCodes.INVALID_KMS_PROVIDER); - } - if (bek.getKeyName() == null) { - throw new OMException("Bucket encryption key needed.", OMException - .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // Talk to KMS to retrieve the bucket encryption key info. - KeyProvider.Metadata metadata = kmsProvider.getMetadata( - bek.getKeyName()); - if (metadata == null) { - throw new OMException("Bucket encryption key " + bek.getKeyName() - + " doesn't exist.", - OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // If the provider supports pool for EDEKs, this will fill in the pool - kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); - bekb = BucketEncryptionInfoProto.newBuilder() - .setKeyName(bek.getKeyName()) - .setCryptoProtocolVersion(ENCRYPTION_ZONES) - .setSuite(OMPBHelper.convert( - CipherSuite.convert(metadata.getCipher()))); - return bekb.build(); - } - /** * Check namespace quota. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 821c374c2d10..9c7ef1087c10 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -23,12 +23,15 @@ import java.util.List; import com.google.common.base.Preconditions; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.common.BekInfoUtils; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator; import org.apache.hadoop.ozone.om.request.validation.RequestProcessingPhase; @@ -87,6 +90,18 @@ public OMRequest preExecute(OzoneManager ozoneManager) .getSetBucketPropertyRequest().toBuilder() .setModificationTime(modificationTime); + BucketArgs bucketArgs = + getOmRequest().getSetBucketPropertyRequest().getBucketArgs(); + + if (bucketArgs.hasBekInfo()) { + KeyProviderCryptoExtension kmsProvider = ozoneManager.getKmsProvider(); + BucketArgs.Builder bucketArgsBuilder = + setBucketPropertyRequestBuilder.getBucketArgsBuilder(); + bucketArgsBuilder.setBekInfo( + BekInfoUtils.getBekInfo(kmsProvider, bucketArgs.getBekInfo())); + setBucketPropertyRequestBuilder.setBucketArgs(bucketArgsBuilder.build()); + } + return getOmRequest().toBuilder() .setSetBucketPropertyRequest(setBucketPropertyRequestBuilder) .setUserInfo(getUserInfo()) @@ -190,6 +205,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn bucketInfoBuilder.setDefaultReplicationConfig(defaultReplicationConfig); } + BucketEncryptionKeyInfo bek = omBucketArgs.getBucketEncryptionKeyInfo(); + if (bek != null && bek.getKeyName() != null) { + bucketInfoBuilder.setBucketEncryptionKey(bek); + } + omBucketInfo = bucketInfoBuilder.build(); // Update table cache. 
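On the OM side, the preExecute change above routes the requested key through BekInfoUtils.getBekInfo so it is validated against the KMS before the property update proceeds. A small sketch of that behaviour, not taken from the patch and using a made-up key name:

    // "enckey1" is illustrative; ozoneManager.getKmsProvider() is the same
    // accessor used in OMBucketSetPropertyRequest#preExecute above.
    BucketEncryptionInfoProto requested = BucketEncryptionInfoProto.newBuilder()
        .setKeyName("enckey1")
        .build();
    // Fails with INVALID_KMS_PROVIDER when no KMS is configured, and with
    // BUCKET_ENCRYPTION_KEY_NOT_FOUND when "enckey1" does not exist in the KMS.
    BucketEncryptionInfoProto resolved =
        BekInfoUtils.getBekInfo(ozoneManager.getKmsProvider(), requested);
    // "resolved" now carries the cipher suite reported by the KMS and the
    // ENCRYPTION_ZONES crypto protocol version, ready to be stored on the bucket.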
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 174af69e255d..f0528facbb67 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -573,6 +573,14 @@ public void setReplicationConfig(String volumeName, String bucketName, } + @Deprecated + @Override + public void setEncryptionKey(String volumeName, String bucketName, + String bekName) + throws IOException { + + } + @Override public OzoneKey headObject(String volumeName, String bucketName, String keyName) throws IOException { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java index 454660e2ca32..80e26e044516 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java @@ -50,7 +50,8 @@ SetAclBucketHandler.class, ClearQuotaHandler.class, SetReplicationConfigHandler.class, - UpdateBucketHandler.class + UpdateBucketHandler.class, + SetEncryptionKey.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java new file mode 100644 index 000000000000..86a50e9df3c7 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell.bucket; + +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.shell.OzoneAddress; +import picocli.CommandLine; + +import java.io.IOException; + +/** + * Command-line tool to set the encryption key of a bucket. + * + * There are known bugs, HDDS-7449 and HDDS-7526, which could potentially result + * in the loss of bucket encryption properties when either quota or bucket + * replication properties are (re)set on an existing bucket, posing a critical + * issue. This may affect consumers using previous versions of Ozone. 
+ * + * To address this bug, this CLI tool provides the ability to (re)set the + * Bucket Encryption Key (BEK) for HDDS-7449/HDDS-7526 affected buckets using + * the Ozone shell. + * + * Users can execute the following command for setting BEK for a given bucket: + * "ozone sh bucket set-encryption-key -k /" + * + * Please note that this operation only resets the BEK and does not modify any + * other properties of the bucket or the existing keys within it. + * + * Existing keys in the bucket will retain their current properties, and any + * keys added before the BEK reset will remain unencrypted. Keys added after the + * BEK reset will be encrypted using the new BEK details provided. + * + * @deprecated This functionality is deprecated as it is not intended for users + * to reset bucket encryption post-bucket creation under normal circumstances + * and may be removed in the future. Users are advised to exercise caution and + * consider alternative approaches for managing bucket encryption unless + * HDDS-7449 or HDDS-7526 is encountered. As a result, the setter methods and + * this CLI functionality have been marked as deprecated, and the command has + * been hidden. + */ +@Deprecated +@CommandLine.Command(name = "set-encryption-key", + description = "Set Bucket Encryption Key (BEK) for a given bucket. Users " + + "are advised to exercise caution and consider alternative approaches " + + "for managing bucket encryption unless HDDS-7449 or HDDS-7526 is " + + "encountered.", + hidden = true) +public class SetEncryptionKey extends BucketHandler { + + @CommandLine.Option(names = {"--key", "-k"}, + description = "bucket encryption key name") + private String bekName; + + @Override + protected void execute(OzoneClient client, OzoneAddress address) + throws IOException, OzoneClientException { + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + OzoneBucket bucket = + client.getObjectStore().getVolume(volumeName).getBucket(bucketName); + bucket.setEncryptionKey(bekName); + } +} From e2a8c14a910dd582006ca447911145ceb2874612 Mon Sep 17 00:00:00 2001 From: tejaskriya Date: Thu, 18 Jan 2024 18:42:57 +0530 Subject: [PATCH 41/43] HDDS-9738. 
Display pipeline and container counts for decommissioning DN --- .../hdds/scm/cli/datanode/DecommissionStatusSubCommand.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index 0e9cbe5f5ef8..06b7d2d6ae49 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -27,6 +27,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.server.http.HttpConfig; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.server.http.HttpConfig; import org.apache.hadoop.hdfs.web.URLConnectionFactory; From f864494e23406cdb2d5b08511c7a8557d6e04139 Mon Sep 17 00:00:00 2001 From: tejaskriya Date: Wed, 24 Jan 2024 11:33:25 +0530 Subject: [PATCH 42/43] Fix TestDecommissionStatusSubCommand test cases --- .../cli/datanode/TestDecommissionStatusSubCommand.java | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index a26088bb31ed..7f88086495f1 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; @@ -25,6 +29,10 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -92,6 +100,7 @@ public void handle(HttpExchange exchange) throws IOException { }); httpServer.start(); } + @AfterAll public static void shutdownScmHttp() { if (httpServer != null) { From cfc84ff6d70d69799d9d528b409228c4c2fa8f6b Mon Sep 17 00:00:00 2001 From: tejaskriya Date: Wed, 24 Jan 2024 13:26:47 +0530 Subject: [PATCH 43/43] Fix merge issues --- .../scm/cli/datanode/DecommissionStatusSubCommand.java | 2 -- .../hadoop/hdds/scm/cli/datanode/StatusSubCommand.java | 2 +- .../cli/datanode/TestDecommissionStatusSubCommand.java | 8 -------- 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index 06b7d2d6ae49..be7ff94b2262 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -30,8 +30,6 @@ import org.apache.hadoop.hdds.server.http.HttpConfig; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.server.http.HttpConfig; -import org.apache.hadoop.hdfs.web.URLConnectionFactory; import picocli.CommandLine; import javax.net.ssl.HttpsURLConnection; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java index 740a136ff04a..9f5892f04bca 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java @@ -1,4 +1,3 @@ -package org.apache.hadoop.hdds.scm.cli.datanode; /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -16,6 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.hadoop.hdds.scm.cli.datanode; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.cli.GenericCli; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index 7f88086495f1..28dc60db8fc0 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -22,20 +22,12 @@ import com.sun.net.httpserver.HttpServer; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import com.sun.net.httpserver.HttpExchange; -import com.sun.net.httpserver.HttpHandler; -import com.sun.net.httpserver.HttpServer; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.AfterEach;