diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java
new file mode 100644
index 000000000000..6fa5e6b4fc14
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestNSSummaryEndPoint.java
@@ -0,0 +1,352 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.recon;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.apache.ozone.test.GenericTestUtils;
+import org.apache.ozone.test.LambdaTestUtils;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.slf4j.event.Level;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_RECON_HEARTBEAT_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY;
+import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Integration tests for NSSummaryEndPoint APIs.
+ */
+@Timeout(300)
+class TestNSSummaryEndPoint {
+
+ private static OzoneBucket legacyOzoneBucket;
+ private static OzoneBucket fsoOzoneBucket;
+ private static OzoneBucket obsOzoneBucket;
+ private static final OzoneConfiguration CONF = new OzoneConfiguration();
+ private static MiniOzoneCluster cluster;
+ private static NodeManager scmNodeManager;
+ private static ContainerManager scmContainerManager;
+
+ @BeforeAll
+ static void init() throws Exception {
+ setupConfigKeys();
+ cluster = MiniOzoneCluster.newBuilder(CONF)
+ .setNumDatanodes(5)
+ .includeRecon(true)
+ .build();
+ cluster.waitForClusterToBeReady();
+ GenericTestUtils.setLogLevel(ReconNodeManager.LOG, Level.DEBUG);
+
+ StorageContainerManager scm = cluster.getStorageContainerManager();
+ scmContainerManager = scm.getContainerManager();
+ scmNodeManager = scm.getScmNodeManager();
+
+ ReconStorageContainerManagerFacade reconScm =
+ (ReconStorageContainerManagerFacade)
+ cluster.getReconServer().getReconStorageContainerManager();
+ PipelineManager reconPipelineManager = reconScm.getPipelineManager();
+
+ LambdaTestUtils.await(60000, 5000,
+ () -> (reconPipelineManager.getPipelines().size() >= 4));
+
+ assertThat(scmContainerManager.getContainers()).isEmpty();
+
+ // Verify that all nodes are registered with Recon.
+ NodeManager reconNodeManager = reconScm.getScmNodeManager();
+ assertEquals(scmNodeManager.getAllNodes().size(),
+ reconNodeManager.getAllNodes().size());
+
+ OzoneClient client = cluster.newClient();
+ String volumeName = "vol1";
+ String fsoBucketName = "fso-bucket";
+ String legacyBucketName = "legacy-bucket";
+ String obsBucketName = "obs-bucket";
+
+ // create a volume and an FSO bucket
+ fsoOzoneBucket = TestDataUtil.createVolumeAndBucket(
+ client, volumeName, fsoBucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED);
+
+ BucketArgs bucketArgs = new BucketArgs.Builder()
+ .setBucketLayout(BucketLayout.LEGACY)
+ .build();
+ // create a LEGACY bucket
+ legacyOzoneBucket = TestDataUtil
+ .createBucket(client, volumeName, bucketArgs, legacyBucketName);
+
+ bucketArgs = new BucketArgs.Builder()
+ .setBucketLayout(BucketLayout.OBJECT_STORE)
+ .build();
+ // create an OBS bucket
+ obsOzoneBucket = TestDataUtil
+ .createBucket(client, volumeName, bucketArgs, obsBucketName);
+
+ buildNameSpaceTree(obsOzoneBucket);
+ buildNameSpaceTree(legacyOzoneBucket);
+ buildNameSpaceTree(fsoOzoneBucket);
+ }
+
+ /**
+ * Creates the following namespace tree in the given bucket:
+ * .
+ * └── volume
+ * └── bucket
+ * └── a1
+ * ├── b1
+ * │ ├── c1111.tx
+ * │ ├── c1222.tx
+ * │ ├── c1333.tx
+ * │ ├── c1444.tx
+ * │ ├── c1555.tx
+ * │ ├── c1
+ * │ │ └── c1.tx
+ * │ └── c12
+ * │ ├── c2.tx
+ * │ └── c3.tx
+ * ├── b2
+ * │ ├── d1
+ * │ │ └── d11.tx
+ * │ ├── d2
+ * │ │ ├── d21.tx
+ * │ │ └── d22.tx
+ * │ └── d3
+ * │ └── d31.tx
+ * └── b3
+ * ├── e1
+ * │ └── e11.tx
+ * ├── e2
+ * │ └── e21.tx
+ * └── e3
+ * └── e31.tx
+ */
+ private static void buildNameSpaceTree(OzoneBucket ozoneBucket)
+ throws Exception {
+ LinkedList<String> keys = new LinkedList<>();
+ keys.add("/a1/b1/c1111.tx");
+ keys.add("/a1/b1/c1222.tx");
+ keys.add("/a1/b1/c1333.tx");
+ keys.add("/a1/b1/c1444.tx");
+ keys.add("/a1/b1/c1555.tx");
+ keys.add("/a1/b1/c1/c1.tx");
+ keys.add("/a1/b1/c12/c2.tx");
+ keys.add("/a1/b1/c12/c3.tx");
+
+ keys.add("/a1/b2/d1/d11.tx");
+ keys.add("/a1/b2/d2/d21.tx");
+ keys.add("/a1/b2/d2/d22.tx");
+ keys.add("/a1/b2/d3/d31.tx");
+
+ keys.add("/a1/b3/e1/e11.tx");
+ keys.add("/a1/b3/e2/e21.tx");
+ keys.add("/a1/b3/e3/e31.tx");
+
+ createKeys(ozoneBucket, keys);
+ }
+
+ private static void createKeys(OzoneBucket ozoneBucket, List<String> keys)
+ throws Exception {
+ int length = 10;
+ byte[] input = new byte[length];
+ Arrays.fill(input, (byte) 96);
+ for (String key : keys) {
+ createKey(ozoneBucket, key, 10, input);
+ }
+ }
+
+ private static void createKey(OzoneBucket ozoneBucket, String key, int length,
+ byte[] input) throws Exception {
+
+ OzoneOutputStream ozoneOutputStream =
+ ozoneBucket.createKey(key, length);
+
+ ozoneOutputStream.write(input);
+ ozoneOutputStream.write(input, 0, 10);
+ ozoneOutputStream.close();
+
+ // Read the key with given key name.
+ OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key);
+ byte[] read = new byte[length];
+ ozoneInputStream.read(read, 0, length);
+ ozoneInputStream.close();
+
+ assertEquals(new String(input, StandardCharsets.UTF_8),
+ new String(read, StandardCharsets.UTF_8));
+ }
+
+ @AfterAll
+ static void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ private static void setupConfigKeys() {
+ CONF.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
+ 100, TimeUnit.MILLISECONDS);
+ CONF.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
+ CONF.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1);
+ CONF.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS);
+ CONF.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS);
+ CONF.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS);
+ CONF.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS);
+ CONF.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
+ CONF.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
+ CONF.setTimeDuration(OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL,
+ 1, SECONDS);
+ CONF.setTimeDuration(
+ ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL,
+ 1, SECONDS);
+ CONF.setTimeDuration(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
+ 0, SECONDS);
+ CONF.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s");
+ CONF.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s");
+ CONF.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
+
+ CONF.setTimeDuration(HDDS_RECON_HEARTBEAT_INTERVAL,
+ 1, TimeUnit.SECONDS);
+
+ CONF.setTimeDuration(OZONE_RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY,
+ 1, TimeUnit.SECONDS);
+ CONF.setTimeDuration(OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY,
+ 2, TimeUnit.SECONDS);
+ }
+
+ @Test
+ void testListKeysForFSOBucket() throws Exception {
+ assertDirectKeysInFSOBucket();
+ assertAllKeysRecursivelyInFSOBucket();
+ }
+
+ private static void assertDirectKeysInFSOBucket() throws JsonProcessingException, UnsupportedEncodingException {
+ // assert direct keys inside fsoBucket
+ DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/fso-bucket", false);
+ // The bucket has no direct keys. totalCount still reflects all keys under fso-bucket,
+ // but since recursive is false, duData contains no key entries.
+ assertEquals(0, response.getCount());
+ assertEquals(0, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ }
+
+ private static void assertAllKeysRecursivelyInFSOBucket()
+ throws JsonProcessingException, UnsupportedEncodingException {
+ // assert all keys recursively inside fso-bucket
+ DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/fso-bucket", true);
+ // With recursive set to true, duData lists every key under fso-bucket,
+ // so count, duData size and totalCount all match.
+ assertEquals(15, response.getCount());
+ assertEquals(15, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ assertEquals("vol1/fso-bucket/a1/b1/c12/c3.tx", response.getDuData().get(14).getSubpath());
+ assertEquals(300, response.getSize());
+ assertEquals(900, response.getSizeWithReplica());
+ }
+
+ @Test
+ void testListKeysForOBSBucket() throws Exception {
+ // Both assertions should give the same count of keys since the OBS bucket namespace is flat.
+ assertDirectKeysInOBSBucket();
+ assertAllKeysRecursivelyInOBSBucket();
+ }
+
+ private static void assertDirectKeysInOBSBucket() throws JsonProcessingException, UnsupportedEncodingException {
+ // assert direct keys inside obs-bucket
+ DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/obs-bucket", false);
+ assertEquals(15, response.getCount());
+ assertEquals(15, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ }
+
+ private static void assertAllKeysRecursivelyInOBSBucket()
+ throws JsonProcessingException, UnsupportedEncodingException {
+ // assert all keys inside obs-bucket
+ DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/obs-bucket", true);
+
+ assertEquals(15, response.getCount());
+ assertEquals(15, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ assertEquals("/a1/b3/e3/e31.tx", response.getDuData().get(14).getSubpath());
+ assertEquals(300, response.getSize());
+ assertEquals(900, response.getSizeWithReplica());
+ }
+
+ @Test
+ void testListKeysForLegacyBucket() throws Exception {
+ // Both assertions should give the same count of keys.
+ assertDirectKeysInLegacyBucket();
+ assertAllKeysInLegacyBucket();
+ }
+
+ private static void assertDirectKeysInLegacyBucket() throws JsonProcessingException, UnsupportedEncodingException {
+ // assert direct keys inside legacy-bucket
+ DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/legacy-bucket", false);
+ assertEquals(15, response.getCount());
+ assertEquals(15, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ }
+
+ private static void assertAllKeysInLegacyBucket()
+ throws JsonProcessingException, UnsupportedEncodingException {
+ // assert all keys inside legacy-bucket
+ DUResponse response = TestReconEndpointUtil.listKeysFromRecon(CONF, "/vol1/legacy-bucket", true);
+
+ assertEquals(15, response.getCount());
+ assertEquals(15, response.getDuData().size());
+ assertEquals(15, response.getTotalCount());
+ assertEquals("/a1/b3/e3/e31.tx", response.getDuData().get(14).getSubpath());
+ assertEquals(300, response.getSize());
+ assertEquals(900, response.getSizeWithReplica());
+ }
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java
index 002de94cb026..d61b35077fdb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconEndpointUtil.java
@@ -23,15 +23,18 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.server.http.HttpConfig;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainersResponse;
import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.URL;
+import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import static java.net.HttpURLConnection.HTTP_CREATED;
@@ -57,6 +60,7 @@ public final class TestReconEndpointUtil {
private static final String CONTAINER_ENDPOINT = "/api/v1/containers";
private static final String OM_DB_SYNC_ENDPOINT = "/api/v1/triggerdbsync/om";
+ private static final String LISTKEYS_ENDPOINT = "/api/v1/namespace/listKeys";
private TestReconEndpointUtil() {
}
@@ -102,6 +106,27 @@ public static UnhealthyContainersResponse getUnhealthyContainersFromRecon(
UnhealthyContainersResponse.class);
}
+ public static DUResponse listKeysFromRecon(OzoneConfiguration conf, String startPrefix, boolean recursive)
+ throws JsonProcessingException, UnsupportedEncodingException {
+ String encodedStartPrefix = URLEncoder.encode(startPrefix, "UTF-8");
+ String query = "?startPrefix=" + encodedStartPrefix + "&recursive=" + recursive;
+
+ StringBuilder urlBuilder = new StringBuilder();
+ urlBuilder.append(getReconWebAddress(conf))
+ .append(LISTKEYS_ENDPOINT)
+ .append(query);
+
+ String listKeysResponse = "";
+ try {
+ listKeysResponse = makeHttpCall(conf, urlBuilder);
+ } catch (Exception e) {
+ LOG.error("Error getting list keys response from Recon");
+ }
+
+ final ObjectMapper objectMapper = new ObjectMapper();
+ return objectMapper.readValue(listKeysResponse, DUResponse.class);
+ }
+
public static String makeHttpCall(OzoneConfiguration conf, StringBuilder url)
throws Exception {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
index 9c79a869c41d..6dbc4746acba 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
@@ -44,6 +44,7 @@ private ReconConstants() {
public static final String DEFAULT_OPEN_KEY_INCLUDE_NON_FSO = "false";
public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false";
public static final String DEFAULT_FETCH_COUNT = "1000";
+ public static final String DEFAULT_KEY_SIZE = "0";
public static final String DEFAULT_BATCH_NUMBER = "1";
public static final String RECON_QUERY_BATCH_PARAM = "batchNum";
public static final String RECON_QUERY_PREVKEY = "prevKey";
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index f154f024fbda..2511c5522b9f 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -29,8 +29,13 @@
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.Timestamp;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.time.Instant;
import java.util.ArrayList;
+import java.util.Date;
import java.util.List;
+import java.util.TimeZone;
import java.util.concurrent.BlockingQueue;
import java.util.stream.Collectors;
@@ -107,6 +112,30 @@ public static File getReconScmDbDir(ConfigurationSource conf) {
return queues;
}
+ /**
+ * Converts a date string in the provided format to epoch milliseconds in the given timezone.
+ *
+ * @param dateString the date string to parse
+ * @param dateFormat the SimpleDateFormat pattern the date string is expected to match
+ * @param timeZone the timezone in which to interpret the date string
+ * @return epoch milliseconds of the parsed date, or the current time if parsing fails
+ */
+ public static long convertToEpochMillis(String dateString, String dateFormat, TimeZone timeZone) {
+ try {
+ SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
+ sdf.setTimeZone(timeZone); // Set server's timezone
+ Date date = sdf.parse(dateString);
+ return date.getTime(); // Convert to epoch milliseconds
+ } catch (ParseException parseException) {
+ LOG.error("Date parse exception for date: {} in format: {} -> {}", dateString, dateFormat, parseException);
+ return Instant.now().toEpochMilli();
+ } catch (Exception exception) {
+ LOG.error("Unexpected error while parsing date: {} in format: {}", dateString, dateFormat);
+ return Instant.now().toEpochMilli();
+ }
+ }
+
/**
* Get configured Recon DB directory value based on config. If not present,
* fallback to ozone.metadata.dirs
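For illustration, a minimal standalone sketch of how the ReconUtils.convertToEpochMillis helper added above behaves, assuming the Recon jar is on the classpath. The class name, sample date and timezone are arbitrary choices for this example and are not part of the patch.

  import java.util.TimeZone;
  import org.apache.hadoop.ozone.recon.ReconUtils;

  public class EpochMillisExample {
    public static void main(String[] args) {
      // Parse a timestamp in the same "MM-dd-yyyy HH:mm:ss" pattern used by the listKeys filters.
      long millis = ReconUtils.convertToEpochMillis(
          "04-05-2024 10:15:30", "MM-dd-yyyy HH:mm:ss", TimeZone.getTimeZone("UTC"));
      System.out.println("Epoch millis: " + millis);

      // An unparsable date falls back to the current time instead of throwing.
      long fallback = ReconUtils.convertToEpochMillis(
          "not-a-date", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
      System.out.println("Fallback millis: " + fallback);
    }
  }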
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
index 71040b9fdf64..0149c2a54992 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
@@ -18,8 +18,10 @@
package org.apache.hadoop.ozone.recon.api;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse;
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
@@ -27,6 +29,7 @@
import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
import org.apache.hadoop.ozone.recon.api.types.EntityType;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
@@ -39,6 +42,14 @@
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
+import java.util.List;
+import java.util.TimeZone;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT;
+import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE;
/**
* REST APIs for namespace metadata summary.
@@ -128,8 +139,110 @@ public Response getDiskUsage(@QueryParam("path") String path,
reconNamespaceSummaryManager,
omMetadataManager, reconSCM, path);
- duResponse = handler.getDuResponse(listFile, withReplica, sortSubpaths);
+ duResponse = handler.getDuResponse(listFile, withReplica, sortSubpaths, false, new Stats(-1));
+
+ return Response.ok(duResponse).build();
+ }
+
+ /**
+ * This API lists at most 'count' keys after applying the filters passed as API parameters.
+ * Default values of the filter parameters:
+ * -- replicationType - empty string, so the filter is not applied and keys of every replication type
+ * are listed.
+ * -- creationDate - empty string, so the filter is not applied and keys of any age are listed.
+ * -- keySize - 0 bytes, so every key larger than zero bytes is listed, effectively all keys.
+ * -- startPrefix - /
+ * -- count - 1000
+ *
+ * @param replicationType Filter for RATIS or EC replication keys.
+ * @param creationDate Filter for keys created after creationDate, in "MM-dd-yyyy HH:mm:ss" format.
+ * @param keySize Filter for keys larger than keySize bytes.
+ * @param startPrefix Filter for the startPrefix path.
+ * @param limit Maximum number of keys to return.
+ * @param recursive Whether to list keys recursively for FSO buckets.
+ * @return the list of keys in below structured format:
+ * Response For OBS Bucket keys:
+ * ********************************************************
+ * {
+ * "status": "OK",
+ * "path": "/volume1/obs-bucket/",
+ * "size": 73400320,
+ * "sizeWithReplica": 81788928,
+ * "subPathCount": 1,
+ * "totalKeyCount": 7,
+ * "lastKey": "/volume1/obs-bucket/key7",
+ * "subPaths": [
+ * {
+ * "key": true,
+ * "path": "key1",
+ * "size": 10485760,
+ * "sizeWithReplica": 18874368,
+ * "isKey": true,
+ * "replicationType": "RATIS",
+ * "creationTime": 1712321367060,
+ * "modificationTime": 1712321368190
+ * },
+ * {
+ * "key": true,
+ * "path": "key7",
+ * "size": 10485760,
+ * "sizeWithReplica": 18874368,
+ * "isKey": true,
+ * "replicationType": "EC",
+ * "creationTime": 1713261005555,
+ * "modificationTime": 1713261006728
+ * }
+ * ],
+ * "sizeDirectKey": 73400320
+ * }
+ * ********************************************************
+ * @throws IOException
+ */
+ @GET
+ @Path("/listKeys")
+ @SuppressWarnings("methodlength")
+ public Response listKeysWithDu(@QueryParam("replicationType") String replicationType,
+ @QueryParam("creationDate") String creationDate,
+ @DefaultValue(DEFAULT_KEY_SIZE) @QueryParam("keySize") long keySize,
+ @DefaultValue(OM_KEY_PREFIX) @QueryParam("startPrefix") String startPrefix,
+ @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam("count") long limit,
+ @DefaultValue("false") @QueryParam("recursive") boolean recursive)
+ throws IOException {
+
+ if (startPrefix == null || startPrefix.length() == 0) {
+ return Response.status(Response.Status.BAD_REQUEST).build();
+ }
+ DUResponse duResponse = new DUResponse();
+ if (!isInitializationComplete()) {
+ duResponse.setStatus(ResponseStatus.INITIALIZING);
+ return Response.ok(duResponse).build();
+ }
+ EntityHandler handler = EntityHandler.getEntityHandler(
+ reconNamespaceSummaryManager,
+ omMetadataManager, reconSCM, startPrefix);
+
+ Stats stats = new Stats(limit);
+
+ duResponse = handler.getListKeysResponse(stats, recursive);
+
+ List<DUResponse.DiskUsage> keyListWithDu = duResponse.getDuData();
+
+ long epochMillis = ReconUtils.convertToEpochMillis(creationDate, "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
+ Predicate<DUResponse.DiskUsage> keyAgeFilter = keyData -> keyData.getCreationTime() >= epochMillis;
+ Predicate<DUResponse.DiskUsage> keyReplicationFilter =
+ keyData -> keyData.getReplicationType().equals(replicationType);
+ Predicate<DUResponse.DiskUsage> keySizeFilter = keyData -> keyData.getSize() > keySize;
+ Predicate<DUResponse.DiskUsage> keyFilter = keyData -> keyData.isKey();
+
+ List<DUResponse.DiskUsage> filteredKeyList = keyListWithDu.stream()
+ .filter(keyFilter)
+ .filter(keyData -> StringUtils.isEmpty(creationDate) || keyAgeFilter.test(keyData))
+ .filter(keyData -> StringUtils.isEmpty(replicationType) || keyReplicationFilter.test(keyData))
+ .filter(keySizeFilter)
+ .collect(Collectors.toList());
+ duResponse.setDuData(filteredKeyList);
+ duResponse.setCount(filteredKeyList.size());
return Response.ok(duResponse).build();
}
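For reference, a hedged sketch of how a client could build a request against the new /api/v1/namespace/listKeys endpoint defined above. The Recon address and the filter values are illustrative assumptions; only startPrefix, recursive, replicationType, creationDate, keySize and count come from the endpoint's parameters.

  import java.net.URLEncoder;
  import java.nio.charset.StandardCharsets;

  public class ListKeysRequestExample {
    public static void main(String[] args) throws Exception {
      String reconWebAddress = "http://localhost:9888"; // assumed default Recon HTTP address
      String startPrefix = URLEncoder.encode("/vol1/fso-bucket", StandardCharsets.UTF_8.name());
      String creationDate = URLEncoder.encode("04-05-2024 10:15:30", StandardCharsets.UTF_8.name());

      // Only RATIS keys larger than 1 KB created after the given date,
      // listed recursively under startPrefix, capped at 500 entries.
      String url = reconWebAddress + "/api/v1/namespace/listKeys"
          + "?startPrefix=" + startPrefix
          + "&recursive=true"
          + "&replicationType=RATIS"
          + "&creationDate=" + creationDate
          + "&keySize=1024"
          + "&count=500";
      System.out.println(url);
    }
  }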
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index baa9c522be10..f42f4e1216ea 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -26,9 +26,14 @@
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
@@ -53,6 +58,9 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.TimeZone;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
@@ -60,6 +68,7 @@
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE;
import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT;
+import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_KEY_SIZE;
import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT;
import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY;
import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_FSO;
@@ -701,6 +710,108 @@ private String createPath(OmKeyInfo omKeyInfo) {
omKeyInfo.getBucketName() + OM_KEY_PREFIX + omKeyInfo.getKeyName();
}
+ /**
+ * This API lists at most 'count' keys after applying the filters passed as API parameters.
+ * Default values of the filter parameters:
+ * -- replicationType - empty string, so the filter is not applied and keys of every replication type
+ * are listed.
+ * -- creationDate - empty string, so the filter is not applied and keys of any age are listed.
+ * -- keySize - 0 bytes, so every key larger than zero bytes is listed, effectively all keys.
+ * -- startPrefix - /
+ * -- count - 1000
+ *
+ * @param replicationType Filter for RATIS or EC replication keys.
+ * @param creationDate Filter for keys created after creationDate, in "MM-dd-yyyy HH:mm:ss" format.
+ * @param keySize Filter for keys larger than keySize bytes.
+ * @param startPrefix Filter for the startPrefix path.
+ * @param limit Maximum number of keys to return.
+ * @param recursive Whether to list keys recursively for FSO buckets.
+ * @return the list of keys in below structured format:
+ * Response For OBS Bucket keys:
+ * ********************************************************
+ * {
+ * "status": "OK",
+ * "path": "/volume1/obs-bucket/",
+ * "size": 73400320,
+ * "sizeWithReplica": 81788928,
+ * "subPathCount": 1,
+ * "totalKeyCount": 7,
+ * "lastKey": "/volume1/obs-bucket/key7",
+ * "subPaths": [
+ * {
+ * "key": true,
+ * "path": "key1",
+ * "size": 10485760,
+ * "sizeWithReplica": 18874368,
+ * "isKey": true,
+ * "replicationType": "RATIS",
+ * "creationTime": 1712321367060,
+ * "modificationTime": 1712321368190
+ * },
+ * {
+ * "key": true,
+ * "path": "key7",
+ * "size": 10485760,
+ * "sizeWithReplica": 18874368,
+ * "isKey": true,
+ * "replicationType": "EC",
+ * "creationTime": 1713261005555,
+ * "modificationTime": 1713261006728
+ * }
+ * ],
+ * "sizeDirectKey": 73400320
+ * }
+ * ********************************************************
+ * @throws IOException
+ */
+ @GET
+ @Path("/listKeys")
+ @SuppressWarnings("methodlength")
+ public Response listKeysWithDu(@QueryParam("replicationType") String replicationType,
+ @QueryParam("creationDate") String creationDate,
+ @DefaultValue(DEFAULT_KEY_SIZE) @QueryParam("keySize") long keySize,
+ @DefaultValue(OM_KEY_PREFIX) @QueryParam("startPrefix") String startPrefix,
+ @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam("count") long limit,
+ @DefaultValue("false") @QueryParam("recursive") boolean recursive)
+ throws IOException {
+
+ if (startPrefix == null || startPrefix.length() == 0) {
+ return Response.status(Response.Status.BAD_REQUEST).build();
+ }
+ DUResponse duResponse = new DUResponse();
+ if (!isInitializationComplete()) {
+ duResponse.setStatus(ResponseStatus.INITIALIZING);
+ return Response.ok(duResponse).build();
+ }
+ EntityHandler handler = EntityHandler.getEntityHandler(
+ reconNamespaceSummaryManager,
+ omMetadataManager, reconSCM, startPrefix);
+
+ Stats stats = new Stats(limit);
+
+ duResponse = handler.getListKeysResponse(stats, recursive);
+
+ List<DUResponse.DiskUsage> keyListWithDu = duResponse.getDuData();
+
+ long epochMillis = ReconUtils.convertToEpochMillis(creationDate, "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
+ Predicate<DUResponse.DiskUsage> keyAgeFilter = keyData -> keyData.getCreationTime() >= epochMillis;
+ Predicate<DUResponse.DiskUsage> keyReplicationFilter =
+ keyData -> keyData.getReplicationType().equals(replicationType);
+ Predicate<DUResponse.DiskUsage> keySizeFilter = keyData -> keyData.getSize() > keySize;
+ Predicate<DUResponse.DiskUsage> keyFilter = keyData -> keyData.isKey();
+
+ List<DUResponse.DiskUsage> filteredKeyList = keyListWithDu.stream()
+ .filter(keyFilter)
+ .filter(keyData -> StringUtils.isEmpty(creationDate) || keyAgeFilter.test(keyData))
+ .filter(keyData -> StringUtils.isEmpty(replicationType) || keyReplicationFilter.test(keyData))
+ .filter(keySizeFilter)
+ .collect(Collectors.toList());
+
+ duResponse.setDuData(filteredKeyList);
+ duResponse.setCount(filteredKeyList.size());
+ return Response.ok(duResponse).build();
+ }
+
@VisibleForTesting
public GlobalStatsDao getDao() {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
index 00cd9617b5d3..2956bb2f650b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
@@ -43,6 +44,7 @@
* Class for handling bucket entity type.
*/
public class BucketEntityHandler extends EntityHandler {
+
public BucketEntityHandler(
ReconNamespaceSummaryManager reconNamespaceSummaryManager,
ReconOMMetadataManager omMetadataManager,
@@ -90,7 +92,7 @@ private BucketObjectDBInfo getBucketObjDbInfo(String[] names)
@Override
public DUResponse getDuResponse(
- boolean listFile, boolean withReplica, boolean sortSubpaths)
+ boolean listFile, boolean withReplica, boolean sortSubpaths, boolean recursive, Stats stats)
throws IOException {
DUResponse duResponse = new DUResponse();
duResponse.setPath(getNormalizedPath());
@@ -112,6 +114,7 @@ public DUResponse getDuResponse(
long bucketDataSize = duResponse.getKeySize();
long bucketDataSizeWithReplica = 0L;
for (long subdirObjectId: bucketSubdirs) {
+ List<DUResponse.DiskUsage> diskUsageList = new ArrayList<>();
NSSummary subdirNSSummary = getReconNamespaceSummaryManager()
.getNSSummary(subdirObjectId);
@@ -125,20 +128,25 @@ public DUResponse getDuResponse(
long dataSize = getTotalSize(subdirObjectId);
bucketDataSize += dataSize;
+ stats.setCurrentCount(stats.getCurrentCount() + 1);
+
if (withReplica) {
long dirDU = getBucketHandler()
- .calculateDUUnderObject(subdirObjectId);
+ .calculateDUUnderObject(subdirObjectId, recursive, diskUsageList, stats);
diskUsage.setSizeWithReplica(dirDU);
bucketDataSizeWithReplica += dirDU;
}
diskUsage.setSize(dataSize);
dirDUData.add(diskUsage);
+ if (!diskUsageList.isEmpty()) {
+ dirDUData.addAll(diskUsageList);
+ }
}
// Either listFile or withReplica is enabled, we need the directKeys info
if (listFile || withReplica) {
bucketDataSizeWithReplica += getBucketHandler()
.handleDirectKeys(bucketObjectId, withReplica,
- listFile, dirDUData, getNormalizedPath());
+ listFile, dirDUData, getNormalizedPath(), stats);
}
if (withReplica) {
duResponse.setSizeWithReplica(bucketDataSizeWithReplica);
@@ -153,7 +161,8 @@ public DUResponse getDuResponse(
}
duResponse.setDuData(dirDUData);
-
+ duResponse.setTotalCount(stats.getTotalCount());
+ duResponse.setLastKey(stats.getLastKey());
return duResponse;
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
index 266caaa2d8e2..959271a97ea6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
import org.apache.hadoop.ozone.recon.api.types.EntityType;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
import org.slf4j.Logger;
@@ -77,17 +78,17 @@ public ReconNamespaceSummaryManager getReconNamespaceSummaryManager() {
public abstract EntityType determineKeyPath(String keyName)
throws IOException;
- public abstract long calculateDUUnderObject(long parentId)
- throws IOException;
-
- public abstract long handleDirectKeys(long parentId,
- boolean withReplica, boolean listFile,
- List duData,
- String normalizedPath) throws IOException;
+ public abstract long calculateDUUnderObject(long parentId, boolean recursive,
+ List<DUResponse.DiskUsage> diskUsageList, Stats stats) throws IOException;
public abstract long getDirObjectId(String[] names)
throws IOException;
+ public abstract long handleDirectKeys(long parentId,
+ boolean withReplica, boolean listFile,
+ List<DUResponse.DiskUsage> duData,
+ String normalizedPath, Stats stats) throws IOException;
+
public abstract long getDirObjectId(String[] names, int cutoff)
throws IOException;
@@ -232,4 +233,19 @@ public static BucketHandler getBucketHandler(
return getBucketHandler(reconNamespaceSummaryManager,
omMetadataManager, reconSCM, bucketInfo);
}
+
+ protected static void verifyStatsAndAddDURecord(List<DUResponse.DiskUsage> duData, Stats stats,
+ Table.KeyValue<String, OmKeyInfo> kv,
+ DUResponse.DiskUsage diskUsage) throws IOException {
+ if (stats.getLimit() == -1) {
+ duData.add(diskUsage);
+ } else {
+ if (stats.getCurrentCount() < stats.getLimit()) {
+ duData.add(diskUsage);
+ stats.setCurrentCount(stats.getCurrentCount() + 1);
+ stats.setLastKey(kv.getKey());
+ }
+ }
+ stats.setTotalCount(stats.getTotalCount() + 1);
+ }
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
index ae7181af70b4..a24b43dafc2d 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse;
import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
@@ -83,7 +84,7 @@ private ObjectDBInfo getDirectoryObjDbInfo(String[] names)
@Override
public DUResponse getDuResponse(
- boolean listFile, boolean withReplica, boolean sortSubPaths)
+ boolean listFile, boolean withReplica, boolean sortSubPaths, boolean recursive, Stats stats)
throws IOException {
DUResponse duResponse = new DUResponse();
duResponse.setPath(getNormalizedPath());
@@ -106,6 +107,7 @@ public DUResponse getDuResponse(
List subdirDUData = new ArrayList<>();
// iterate all subdirectories to get disk usage data
for (long subdirObjectId: subdirs) {
+ List<DUResponse.DiskUsage> diskUsageList = new ArrayList<>();
NSSummary subdirNSSummary =
getReconNamespaceSummaryManager().getNSSummary(subdirObjectId);
// for the subdirName we need the subdir filename, not the key name
@@ -136,20 +138,23 @@ public DUResponse getDuResponse(
if (withReplica) {
long subdirDU = getBucketHandler()
- .calculateDUUnderObject(subdirObjectId);
+ .calculateDUUnderObject(subdirObjectId, recursive, diskUsageList, stats);
diskUsage.setSizeWithReplica(subdirDU);
dirDataSizeWithReplica += subdirDU;
}
diskUsage.setSize(dataSize);
subdirDUData.add(diskUsage);
+ if (recursive) {
+ subdirDUData.addAll(diskUsageList);
+ }
}
// handle direct keys under directory
if (listFile || withReplica) {
dirDataSizeWithReplica += getBucketHandler()
.handleDirectKeys(dirObjectId, withReplica,
- listFile, subdirDUData, getNormalizedPath());
+ listFile, subdirDUData, getNormalizedPath(), stats);
}
if (withReplica) {
@@ -165,7 +170,8 @@ public DUResponse getDuResponse(
}
duResponse.setDuData(subdirDUData);
-
+ duResponse.setTotalCount(stats.getTotalCount());
+ duResponse.setLastKey(stats.getLastKey());
return duResponse;
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
index f2bcb58d3565..48e2f38d54b2 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
import org.apache.hadoop.ozone.recon.api.types.EntityType;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
@@ -79,9 +80,13 @@ public abstract NamespaceSummaryResponse getSummaryResponse()
throws IOException;
public abstract DUResponse getDuResponse(
- boolean listFile, boolean withReplica, boolean sort)
+ boolean listFile, boolean withReplica, boolean sort, boolean recursive, Stats stats)
throws IOException;
+ public DUResponse getListKeysResponse(Stats stats, boolean recursive) throws IOException {
+ return getDuResponse(true, true, false, recursive, stats);
+ }
+
public abstract QuotaUsageResponse getQuotaResponse()
throws IOException;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
index 8a1c5babe75e..dd8152cdeab3 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -28,10 +29,9 @@
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
import org.apache.hadoop.ozone.recon.api.types.EntityType;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.file.Paths;
@@ -45,11 +45,9 @@
* Class for handling FSO buckets NameSpaceSummaries.
*/
public class FSOBucketHandler extends BucketHandler {
- private static final Logger LOG =
- LoggerFactory.getLogger(FSOBucketHandler.class);
private final long volumeId;
private final long bucketId;
-
+
public FSOBucketHandler(
ReconNamespaceSummaryManager reconNamespaceSummaryManager,
ReconOMMetadataManager omMetadataManager,
@@ -75,7 +73,7 @@ public FSOBucketHandler(
*/
@Override
public EntityType determineKeyPath(String keyName)
- throws IOException {
+ throws IOException {
java.nio.file.Path keyPath = Paths.get(keyName);
Iterator elements = keyPath.iterator();
@@ -92,14 +90,14 @@ public EntityType determineKeyPath(String keyName)
String dbNodeName = getOmMetadataManager().getOzonePathKey(volumeId,
bucketId, lastKnownParentId, fileName);
omDirInfo = getOmMetadataManager().getDirectoryTable()
- .getSkipCache(dbNodeName);
+ .getSkipCache(dbNodeName);
if (omDirInfo != null) {
lastKnownParentId = omDirInfo.getObjectID();
} else if (!elements.hasNext()) {
// reached last path component. Check file exists for the given path.
OmKeyInfo omKeyInfo = getOmMetadataManager().getFileTable()
- .getSkipCache(dbNodeName);
+ .getSkipCache(dbNodeName);
// The path exists as a file
if (omKeyInfo != null) {
omKeyInfo.setKeyName(keyName);
@@ -121,13 +119,14 @@ public EntityType determineKeyPath(String keyName)
// FileTable's key is in the format of "volumeId/bucketId/parentId/fileName"
// Make use of RocksDB's order to seek to the prefix and avoid full iteration
@Override
- public long calculateDUUnderObject(long parentId)
+ public long calculateDUUnderObject(long parentId, boolean recursive, List<DUResponse.DiskUsage> diskUsageList,
+ Stats stats)
throws IOException {
Table keyTable = getOmMetadataManager().getFileTable();
long totalDU = 0L;
try (TableIterator>
- iterator = keyTable.iterator()) {
+ iterator = keyTable.iterator()) {
String seekPrefix = OM_KEY_PREFIX +
volumeId +
@@ -146,7 +145,19 @@ public long calculateDUUnderObject(long parentId)
break;
}
OmKeyInfo keyInfo = kv.getValue();
+ if (recursive) {
+ if (stats.getLimit() == -1) {
+ populateDiskUsage(keyInfo, diskUsageList);
+ } else {
+ if (stats.getCurrentCount() < stats.getLimit()) {
+ populateDiskUsage(keyInfo, diskUsageList);
+ stats.setCurrentCount(stats.getCurrentCount() + 1);
+ stats.setLastKey(kv.getKey());
+ }
+ }
+ }
if (keyInfo != null) {
+ stats.setTotalCount(stats.getTotalCount() + 1);
totalDU += keyInfo.getReplicatedSize();
}
}
@@ -154,7 +165,7 @@ public long calculateDUUnderObject(long parentId)
// handle nested keys (DFS)
NSSummary nsSummary = getReconNamespaceSummaryManager()
- .getNSSummary(parentId);
+ .getNSSummary(parentId);
// empty bucket
if (nsSummary == null) {
return 0;
@@ -162,19 +173,74 @@ public long calculateDUUnderObject(long parentId)
Set subDirIds = nsSummary.getChildDir();
for (long subDirId: subDirIds) {
- totalDU += calculateDUUnderObject(subDirId);
+ totalDU += calculateDUUnderObject(subDirId, recursive, diskUsageList, stats);
}
return totalDU;
}
+ private void populateDiskUsage(OmKeyInfo keyInfo, List<DUResponse.DiskUsage> diskUsageList) throws IOException {
+ DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
+ diskUsage.setKey(keyInfo.isFile());
+ diskUsage.setSubpath(constructFullPath(keyInfo, getReconNamespaceSummaryManager()));
+ diskUsage.setSize(keyInfo.getDataSize());
+ diskUsage.setSizeWithReplica(keyInfo.getReplicatedSize());
+ diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name());
+ diskUsage.setCreationTime(keyInfo.getCreationTime());
+ diskUsage.setModificationTime(keyInfo.getModificationTime());
+
+ diskUsageList.add(diskUsage);
+ }
+
+ /**
+ * Constructs the full path of a key from its OmKeyInfo using a bottom-up approach, starting from the leaf node.
+ *
+ * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched
+ * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from
+ * bottom to top, finally prepending the volume and bucket names to complete the full path.
+ *
+ * @param omKeyInfo The OmKeyInfo object for the key.
+ * @param reconNamespaceSummaryManager The namespace summary manager used to look up parent directories.
+ * @return The constructed full path of the key as a String.
+ * @throws IOException
+ */
+ public static String constructFullPath(OmKeyInfo omKeyInfo,
+ ReconNamespaceSummaryManager reconNamespaceSummaryManager)
+ throws IOException {
+ StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName());
+ long parentId = omKeyInfo.getParentObjectID();
+ boolean isDirectoryPresent = false;
+ while (parentId != -1) {
+ NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(parentId);
+ if (nsSummary == null) {
+ break;
+ }
+ // Prepend the directory name to the path
+ fullPath.insert(0, nsSummary.getDirName() + OM_KEY_PREFIX);
+
+ // Move to the parent ID of the current directory
+ parentId = nsSummary.getParentId();
+ isDirectoryPresent = true;
+ }
+
+ // Prepend the volume and bucket to the constructed path
+ String volumeName = omKeyInfo.getVolumeName();
+ String bucketName = omKeyInfo.getBucketName();
+ fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX);
+ if (isDirectoryPresent) {
+ return OmUtils.normalizeKey(fullPath.toString(), true);
+ }
+ return fullPath.toString();
+ }
+
/**
* This method handles disk usage of direct keys.
- * @param parentId parent directory/bucket
- * @param withReplica if withReplica is enabled, set sizeWithReplica
- * for each direct key's DU
- * @param listFile if listFile is enabled, append key DU as a subpath
- * @param duData the current DU data
+ *
+ * @param parentId parent directory/bucket
+ * @param withReplica if withReplica is enabled, set sizeWithReplica
+ * for each direct key's DU
+ * @param listFile if listFile is enabled, append key DU as a subpath
+ * @param duData the current DU data
* @param normalizedPath the normalized path request
+ * @param stats Statistics related to DU record count and limit.
* @return the total DU of all direct keys
* @throws IOException IOE
*/
@@ -182,13 +248,13 @@ public long calculateDUUnderObject(long parentId)
public long handleDirectKeys(long parentId, boolean withReplica,
boolean listFile,
List duData,
- String normalizedPath) throws IOException {
+ String normalizedPath, Stats stats) throws IOException {
Table keyTable = getOmMetadataManager().getFileTable();
long keyDataSizeWithReplica = 0L;
try (TableIterator>
- iterator = keyTable.iterator()) {
+ iterator = keyTable.iterator()) {
String seekPrefix = OM_KEY_PREFIX +
volumeId +
@@ -214,6 +280,9 @@ public long handleDirectKeys(long parentId, boolean withReplica,
diskUsage.setSubpath(subpath);
diskUsage.setKey(true);
diskUsage.setSize(keyInfo.getDataSize());
+ diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name());
+ diskUsage.setCreationTime(keyInfo.getCreationTime());
+ diskUsage.setModificationTime(keyInfo.getModificationTime());
if (withReplica) {
long keyDU = keyInfo.getReplicatedSize();
@@ -222,7 +291,7 @@ public long handleDirectKeys(long parentId, boolean withReplica,
}
// list the key as a subpath
if (listFile) {
- duData.add(diskUsage);
+ verifyStatsAndAddDURecord(duData, stats, kv, diskUsage);
}
}
}
@@ -257,9 +326,9 @@ public long getDirObjectId(String[] names, int cutoff) throws IOException {
String dirKey;
for (int i = 2; i < cutoff; ++i) {
dirKey = getOmMetadataManager().getOzonePathKey(getVolumeObjectId(names),
- getBucketObjectId(names), dirObjectId, names[i]);
+ getBucketObjectId(names), dirObjectId, names[i]);
OmDirectoryInfo dirInfo =
- getOmMetadataManager().getDirectoryTable().getSkipCache(dirKey);
+ getOmMetadataManager().getDirectoryTable().getSkipCache(dirKey);
if (null != dirInfo) {
dirObjectId = dirInfo.getObjectID();
}
@@ -279,7 +348,7 @@ public OmKeyInfo getKeyInfo(String[] names) throws IOException {
String fileName = names[names.length - 1];
String ozoneKey =
getOmMetadataManager().getOzonePathKey(volumeId, bucketId,
- parentObjectId, fileName);
+ parentObjectId, fileName);
return getOmMetadataManager().getFileTable().getSkipCache(ozoneKey);
}
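A simplified, self-contained sketch of the bottom-up path building performed by constructFullPath above, with the NSSummary lookup replaced by a plain in-memory map of object id to (directory name, parent id). The ids and names are invented for this illustration.

  import java.util.HashMap;
  import java.util.Map;

  public class FullPathSketch {
    // Minimal stand-in for an NSSummary entry: directory name plus parent object ID.
    private static final class DirEntry {
      final String name;
      final long parentId;
      DirEntry(String name, long parentId) {
        this.name = name;
        this.parentId = parentId;
      }
    }

    public static void main(String[] args) {
      // vol1/fso-bucket/a1/b1 hierarchy; -1 marks the bucket level.
      Map<Long, DirEntry> dirs = new HashMap<>();
      dirs.put(10L, new DirEntry("a1", -1L));
      dirs.put(11L, new DirEntry("b1", 10L));

      // Key "c1.tx" whose direct parent is directory b1 (object id 11).
      StringBuilder fullPath = new StringBuilder("c1.tx");
      long parentId = 11L;
      while (parentId != -1) {
        DirEntry dir = dirs.get(parentId);
        if (dir == null) {
          break;
        }
        fullPath.insert(0, dir.name + "/"); // prepend each parent directory name
        parentId = dir.parentId;
      }
      fullPath.insert(0, "vol1/fso-bucket/"); // finally prepend volume and bucket
      System.out.println(fullPath); // vol1/fso-bucket/a1/b1/c1.tx
    }
  }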
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java
index 8ea26fd2846e..04eebebec915 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java
@@ -28,10 +28,12 @@
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse;
import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
import java.io.IOException;
+import java.util.ArrayList;
/**
* Class for handling key entity type.
@@ -71,19 +73,32 @@ private ObjectDBInfo getKeyDbObjectInfo(String[] names)
@Override
public DUResponse getDuResponse(
- boolean listFile, boolean withReplica, boolean sort)
+ boolean listFile, boolean withReplica, boolean sort, boolean recursive, Stats stats)
throws IOException {
DUResponse duResponse = new DUResponse();
duResponse.setPath(getNormalizedPath());
- // DU for key doesn't have subpaths
- duResponse.setCount(0);
OmKeyInfo keyInfo = getBucketHandler().getKeyInfo(getNames());
-
+ duResponse.setKeySize(keyInfo.getDataSize());
duResponse.setSize(keyInfo.getDataSize());
if (withReplica) {
long keySizeWithReplica = keyInfo.getReplicatedSize();
duResponse.setSizeWithReplica(keySizeWithReplica);
}
+ if (listFile) {
+ duResponse.setCount(1);
+ DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
+ diskUsage.setKey(true);
+ diskUsage.setSubpath(getNormalizedPath());
+ diskUsage.setSize(keyInfo.getDataSize());
+ diskUsage.setSizeWithReplica(duResponse.getSizeWithReplica());
+ diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name());
+ diskUsage.setCreationTime(keyInfo.getCreationTime());
+ diskUsage.setModificationTime(keyInfo.getModificationTime());
+ ArrayList<DUResponse.DiskUsage> diskUsages = new ArrayList<>();
+ diskUsages.add(diskUsage);
+ duResponse.setTotalCount(diskUsages.size());
+ duResponse.setDuData(diskUsages);
+ }
return duResponse;
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
index 09f1c5bc7454..d818c9ce47c1 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
import org.apache.hadoop.ozone.recon.api.types.EntityType;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
import org.slf4j.Logger;
@@ -110,12 +111,17 @@ public EntityType determineKeyPath(String keyName)
* Make use of RocksDB's order to seek to the prefix and avoid full iteration.
* Calculating DU only for keys. Skipping any directories and
* handling only direct keys.
+ *
+ * @param parentId parent directory/bucket object ID.
+ * @param recursive Whether to add keys recursively or just immediate du records.
+ * @param diskUsageList List to add du records to.
+ * @param stats Statistics related to DU record count and limit.
* @return total DU of direct keys under object
* @throws IOException
*/
@Override
- public long calculateDUUnderObject(long parentId)
+ public long calculateDUUnderObject(long parentId, boolean recursive, List<DUResponse.DiskUsage> diskUsageList,
+ Stats stats)
throws IOException {
Table keyTable = getKeyTable();
@@ -174,19 +180,21 @@ public long calculateDUUnderObject(long parentId)
// handle nested keys (DFS)
Set subDirIds = nsSummary.getChildDir();
for (long subDirId: subDirIds) {
- totalDU += calculateDUUnderObject(subDirId);
+ totalDU += calculateDUUnderObject(subDirId, recursive, diskUsageList, stats);
}
return totalDU;
}
/**
* This method handles disk usage of direct keys.
- * @param parentId parent directory/bucket
- * @param withReplica if withReplica is enabled, set sizeWithReplica
- * for each direct key's DU
- * @param listFile if listFile is enabled, append key DU as a subpath
- * @param duData the current DU data
+ *
+ * @param parentId parent directory/bucket
+ * @param withReplica if withReplica is enabled, set sizeWithReplica
+ * for each direct key's DU
+ * @param listFile if listFile is enabled, append key DU as a subpath
+ * @param duData the current DU data
* @param normalizedPath the normalized path request
+ * @param stats Statistics related to DU record count and limit.
* @return the total DU of all direct keys
* @throws IOException IOE
*/
@@ -194,7 +202,7 @@ public long calculateDUUnderObject(long parentId)
public long handleDirectKeys(long parentId, boolean withReplica,
boolean listFile,
List duData,
- String normalizedPath) throws IOException {
+ String normalizedPath, Stats stats) throws IOException {
Table keyTable = getKeyTable();
long keyDataSizeWithReplica = 0L;
@@ -250,6 +258,9 @@ public long handleDirectKeys(long parentId, boolean withReplica,
diskUsage.setSubpath(subpath);
diskUsage.setKey(true);
diskUsage.setSize(keyInfo.getDataSize());
+ diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name());
+ diskUsage.setCreationTime(keyInfo.getCreationTime());
+ diskUsage.setModificationTime(keyInfo.getModificationTime());
if (withReplica) {
long keyDU = keyInfo.getReplicatedSize();
@@ -258,7 +269,7 @@ public long handleDirectKeys(long parentId, boolean withReplica,
}
// list the key as a subpath
if (listFile) {
- duData.add(diskUsage);
+ verifyStatsAndAddDURecord(duData, stats, kv, diskUsage);
}
}
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java
index 024eec989a10..af610370bf13 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
import org.apache.hadoop.ozone.recon.api.types.EntityType;
import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
@@ -97,6 +98,7 @@ public EntityType determineKeyPath(String keyName) throws IOException {
* keys
* @param duData the current DU data
* @param normalizedPath the normalized path request
+   * @param stats statistics tracking the DU record count and limit
* @return the total DU of all direct keys
* @throws IOException IOE
*/
@@ -104,7 +106,7 @@ public EntityType determineKeyPath(String keyName) throws IOException {
public long handleDirectKeys(long parentId, boolean withReplica,
boolean listFile,
List duData,
- String normalizedPath) throws IOException {
+ String normalizedPath, Stats stats) throws IOException {
NSSummary nsSummary = getReconNamespaceSummaryManager()
.getNSSummary(parentId);
@@ -145,6 +147,9 @@ public long handleDirectKeys(long parentId, boolean withReplica,
diskUsage.setSubpath(objectName);
diskUsage.setKey(true);
diskUsage.setSize(keyInfo.getDataSize());
+ diskUsage.setReplicationType(keyInfo.getReplicationConfig().getReplicationType().name());
+ diskUsage.setCreationTime(keyInfo.getCreationTime());
+ diskUsage.setModificationTime(keyInfo.getModificationTime());
if (withReplica) {
long keyDU = keyInfo.getReplicatedSize();
@@ -153,7 +158,7 @@ public long handleDirectKeys(long parentId, boolean withReplica,
}
// List all the keys for the OBS bucket if requested.
if (listFile) {
- duData.add(diskUsage);
+ verifyStatsAndAddDURecord(duData, stats, kv, diskUsage);
}
}
}
@@ -168,12 +173,17 @@ public long handleDirectKeys(long parentId, boolean withReplica,
* Since OBS buckets operate on a flat hierarchy, this method iterates through
* all the keys in the bucket without the need to traverse directories.
*
- * @param parentId The identifier for the parent bucket.
+ * @param parentId The identifier for the parent bucket.
+   * @param recursive     Whether to add keys recursively or just immediate DU records.
+   * @param diskUsageList List to which DU records are added.
+   * @param stats         Statistics tracking the DU record count and limit.
* @return The total disk usage of all keys within the specified OBS bucket.
* @throws IOException
*/
@Override
- public long calculateDUUnderObject(long parentId) throws IOException {
+ public long calculateDUUnderObject(long parentId, boolean recursive, List diskUsageList,
+ Stats stats)
+ throws IOException {
// Initialize the total disk usage variable.
long totalDU = 0L;
@@ -201,6 +211,7 @@ public long calculateDUUnderObject(long parentId) throws IOException {
// Sum the size of each key to the total disk usage.
OmKeyInfo keyInfo = kv.getValue();
if (keyInfo != null) {
+ stats.setTotalCount(stats.getTotalCount() + 1);
totalDU += keyInfo.getDataSize();
}
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java
index b67703257ac1..780128c4e8b8 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse;
import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
@@ -91,7 +92,7 @@ private ObjectDBInfo getPrefixObjDbInfo()
@Override
public DUResponse getDuResponse(
- boolean listFile, boolean withReplica, boolean sortSubPaths)
+ boolean listFile, boolean withReplica, boolean sortSubPaths, boolean recursive, Stats stats)
throws IOException {
DUResponse duResponse = new DUResponse();
duResponse.setPath(getNormalizedPath());
@@ -103,6 +104,7 @@ public DUResponse getDuResponse(
long totalDataSize = 0L;
long totalDataSizeWithReplica = 0L;
for (OmVolumeArgs volume: volumes) {
+ List diskUsageList = new ArrayList<>();
String volumeName = volume.getVolume();
String subpath = omMetadataManager.getVolumeKey(volumeName);
DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
@@ -122,7 +124,7 @@ public DUResponse getDuResponse(
BucketHandler.getBucketHandler(
getReconNamespaceSummaryManager(),
getOmMetadataManager(), getReconSCM(), bucket);
- volumeDU += bucketHandler.calculateDUUnderObject(bucketObjectID);
+ volumeDU += bucketHandler.calculateDUUnderObject(bucketObjectID, recursive, diskUsageList, stats);
}
}
totalDataSize += dataSize;
@@ -135,6 +137,7 @@ public DUResponse getDuResponse(
}
diskUsage.setSize(dataSize);
volumeDuData.add(diskUsage);
+ volumeDuData.addAll(diskUsageList);
}
if (withReplica) {
duResponse.setSizeWithReplica(totalDataSizeWithReplica);
@@ -148,18 +151,19 @@ public DUResponse getDuResponse(
}
duResponse.setDuData(volumeDuData);
-
+ duResponse.setTotalCount(stats.getTotalCount());
+ duResponse.setLastKey(stats.getLastKey());
return duResponse;
}
@Override
public QuotaUsageResponse getQuotaResponse()
- throws IOException {
+ throws IOException {
QuotaUsageResponse quotaUsageResponse = new QuotaUsageResponse();
SCMNodeStat stats = getReconSCM().getScmNodeManager().getStats();
long quotaInBytes = stats.getCapacity().get();
long quotaUsedInBytes =
- getDuResponse(true, true, false).getSizeWithReplica();
+ getDuResponse(true, true, false, false, new Stats(-1)).getSizeWithReplica();
quotaUsageResponse.setQuota(quotaInBytes);
quotaUsageResponse.setQuotaUsed(quotaUsedInBytes);
return quotaUsageResponse;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java
index ab61ec38e8bf..234ff0120f8b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse;
import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
@@ -51,7 +52,7 @@ public NamespaceSummaryResponse getSummaryResponse()
@Override
public DUResponse getDuResponse(
- boolean listFile, boolean withReplica, boolean sort)
+ boolean listFile, boolean withReplica, boolean sort, boolean recursive, Stats stats)
throws IOException {
DUResponse duResponse = new DUResponse();
duResponse.setStatus(ResponseStatus.PATH_NOT_FOUND);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java
index 2ca9c352ce77..b39180f446fe 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse;
import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse;
import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.api.types.VolumeObjectDBInfo;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
@@ -96,7 +97,7 @@ private VolumeObjectDBInfo getVolumeObjDbInfo(String[] names)
@Override
public DUResponse getDuResponse(
- boolean listFile, boolean withReplica, boolean sortSubPaths)
+ boolean listFile, boolean withReplica, boolean sortSubPaths, boolean recursive, Stats stats)
throws IOException {
DUResponse duResponse = new DUResponse();
duResponse.setPath(getNormalizedPath());
@@ -111,6 +112,7 @@ public DUResponse getDuResponse(
long volDataSize = 0L;
long volDataSizeWithReplica = 0L;
for (OmBucketInfo bucket: buckets) {
+ List diskUsageList = new ArrayList<>();
String bucketName = bucket.getBucketName();
long bucketObjectID = bucket.getObjectID();
String subpath = getOmMetadataManager().getBucketKey(volName, bucketName);
@@ -124,12 +126,13 @@ public DUResponse getDuResponse(
getReconNamespaceSummaryManager(),
getOmMetadataManager(), getReconSCM(), bucket);
long bucketDU = bucketHandler
- .calculateDUUnderObject(bucketObjectID);
+ .calculateDUUnderObject(bucketObjectID, recursive, diskUsageList, stats);
diskUsage.setSizeWithReplica(bucketDU);
volDataSizeWithReplica += bucketDU;
}
diskUsage.setSize(dataSize);
bucketDuData.add(diskUsage);
+ bucketDuData.addAll(diskUsageList);
}
if (withReplica) {
duResponse.setSizeWithReplica(volDataSizeWithReplica);
@@ -143,6 +146,8 @@ public DUResponse getDuResponse(
}
duResponse.setDuData(bucketDuData);
+ duResponse.setTotalCount(stats.getTotalCount());
+ duResponse.setLastKey(stats.getLastKey());
return duResponse;
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java
index b28d9d39c210..ad228a736dc1 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DUResponse.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.recon.api.types;
+import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.ArrayList;
@@ -47,6 +48,16 @@ public class DUResponse {
@JsonProperty("subPathCount")
private int count;
+ /** Total number of subpaths/keys under the requested startPrefix path. */
+ @JsonProperty("totalKeyCount")
+ @JsonInclude(JsonInclude.Include.NON_DEFAULT)
+ private long totalCount;
+
+  /** Last key sent in the response. */
+ @JsonProperty("lastKey")
+ @JsonInclude(JsonInclude.Include.NON_EMPTY)
+ private String lastKey;
+
/** Encapsulates a DU instance for a subpath. */
@JsonProperty("subPaths")
private List duData;
@@ -118,6 +129,22 @@ public void setKeySize(long keySize) {
this.keySize = keySize;
}
+ public long getTotalCount() {
+ return totalCount;
+ }
+
+ public void setTotalCount(long totalCount) {
+ this.totalCount = totalCount;
+ }
+
+ public String getLastKey() {
+ return lastKey;
+ }
+
+ public void setLastKey(String lastKey) {
+ this.lastKey = lastKey;
+ }
+
/**
* DU info for a path (path name, data size).
*/
@@ -138,6 +165,18 @@ public static class DiskUsage {
@JsonProperty("isKey")
private boolean isKey;
+    /** Indicates whether the key replication type is RATIS or EC. */
+ @JsonProperty("replicationType")
+ private String replicationType;
+
+ /** key creation time. */
+ @JsonProperty("creationTime")
+ private long creationTime;
+
+ /** key modification time. */
+ @JsonProperty("modificationTime")
+ private long modificationTime;
+
public DiskUsage() {
this.sizeWithReplica = -1L;
this.isKey = false;
@@ -174,5 +213,29 @@ public void setKey(boolean key) {
public boolean isKey() {
return isKey;
}
+
+ public String getReplicationType() {
+ return replicationType;
+ }
+
+ public void setReplicationType(String replicationType) {
+ this.replicationType = replicationType;
+ }
+
+ public long getCreationTime() {
+ return creationTime;
+ }
+
+ public void setCreationTime(long creationTime) {
+ this.creationTime = creationTime;
+ }
+
+ public long getModificationTime() {
+ return modificationTime;
+ }
+
+ public void setModificationTime(long modificationTime) {
+ this.modificationTime = modificationTime;
+ }
}
}
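
For orientation only (not part of the diff): with the fields added above, each DiskUsage entry can also carry the replication type and the creation/modification timestamps, while the top-level response exposes totalKeyCount and lastKey for pagination. Below is a minimal sketch of building and serializing one entry with Jackson's ObjectMapper (from jackson-databind); the values are fabricated and the surrounding wiring is assumed.

// Illustrative only: values are made up and the mapper uses default settings.
static String sampleDiskUsageJson() throws JsonProcessingException {
  DUResponse.DiskUsage usage = new DUResponse.DiskUsage();
  usage.setSubpath("vol2/bucket5/dir6/file12");
  usage.setKey(true);
  usage.setSize(2049L);
  usage.setReplicationType("RATIS");
  usage.setCreationTime(1712233800000L);
  usage.setModificationTime(1712320200000L);
  // The new fields serialize under the @JsonProperty names declared above, e.g.
  // ..."isKey":true,"replicationType":"RATIS","creationTime":1712233800000,...
  return new ObjectMapper().writeValueAsString(usage);
}
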
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
index c0f93aebe97d..fa16d03abe41 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java
@@ -36,22 +36,24 @@ public class NSSummary {
private int[] fileSizeBucket;
private Set childDir;
private String dirName;
+ private long parentId = -1;
public NSSummary() {
this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS],
- new HashSet<>(), "");
+        new HashSet<>(), "", -1); // -1 is the default, indicating no parent
}
public NSSummary(int numOfFiles,
long sizeOfFiles,
int[] bucket,
Set childDir,
- String dirName) {
+ String dirName, long parentId) {
this.numOfFiles = numOfFiles;
this.sizeOfFiles = sizeOfFiles;
setFileSizeBucket(bucket);
this.childDir = childDir;
this.dirName = dirName;
+ this.parentId = parentId;
}
public int getNumOfFiles() {
@@ -95,6 +97,14 @@ public void setDirName(String dirName) {
this.dirName = removeTrailingSlashIfNeeded(dirName);
}
+ public long getParentId() {
+ return parentId;
+ }
+
+ public void setParentId(long parentId) {
+ this.parentId = parentId;
+ }
+
public void addChildDir(long childId) {
if (this.childDir.contains(childId)) {
return;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/Stats.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/Stats.java
new file mode 100644
index 000000000000..f5dd27324002
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/Stats.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.recon.api.types;
+
+/**
+ * Wrapper for per-page record statistics in an API response.
+ */
+public class Stats {
+ /**
+ * Total count of the keys.
+ */
+ private long totalCount;
+
+  /** Last key sent in the response. */
+ private String lastKey;
+
+  /**
+   * Limit on the number of records to return in the API response.
+   */
+ private long limit;
+
+  /**
+   * Counter tracking the number of records added to the API response.
+   */
+ private long currentCount;
+
+ public Stats(long limit) {
+ this.limit = limit;
+ }
+
+ public long getTotalCount() {
+ return totalCount;
+ }
+
+ public void setTotalCount(long totalCount) {
+ this.totalCount = totalCount;
+ }
+
+ public String getLastKey() {
+ return lastKey;
+ }
+
+ public void setLastKey(String lastKey) {
+ this.lastKey = lastKey;
+ }
+
+ public long getLimit() {
+ return limit;
+ }
+
+ public void setLimit(long limit) {
+ this.limit = limit;
+ }
+
+ public long getCurrentCount() {
+ return currentCount;
+ }
+
+ public void setCurrentCount(long currentCount) {
+ this.currentCount = currentCount;
+ }
+}
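
Reviewer note, not part of the patch: Stats is the mutable carrier that threads paging state through the recursive DU walk. A minimal usage sketch under the semantics visible in this change (a limit of -1 disables paging, as in the internal getDuResponse(..., new Stats(-1)) calls); the entityHandler variable is assumed to be any EntityHandler obtained the usual way.

// Sketch only: page through a DU listing using the new Stats object.
Stats stats = new Stats(1000);                    // cap this page at 1000 records
DUResponse response = entityHandler.getDuResponse(
    true,    // listFile: include key entries as sub-paths
    false,   // withReplica
    false,   // sortSubPaths
    true,    // recursive: walk nested directories as well
    stats);
long totalKeys = response.getTotalCount();        // total matching keys, copied from stats
String resumeFrom = response.getLastKey();        // last key emitted; a client could resume here
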
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
index 09e0b2587934..c87f9a6e5d50 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java
@@ -65,9 +65,10 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException {
int stringLen = dirName.getBytes(StandardCharsets.UTF_8).length;
int numOfChildDirs = childDirs.size();
final int resSize = NUM_OF_INTS * Integer.BYTES
- + (numOfChildDirs + 1) * Long.BYTES // 1 long field + list size
+        + (numOfChildDirs + 1) * Long.BYTES // 1 long field (sizeOfFiles) + child dir list size
+ Short.BYTES // 2 dummy shorts to track length
- + stringLen; // directory name length
+ + stringLen // directory name length
+ + Long.BYTES; // Added space for parentId serialization
ByteArrayOutputStream out = new ByteArrayOutputStream(resSize);
out.write(integerCodec.toPersistedFormat(object.getNumOfFiles()));
@@ -84,6 +85,7 @@ public byte[] toPersistedFormat(NSSummary object) throws IOException {
}
out.write(integerCodec.toPersistedFormat(stringLen));
out.write(stringCodec.toPersistedFormat(dirName));
+ out.write(longCodec.toPersistedFormat(object.getParentId()));
return out.toByteArray();
}
@@ -110,6 +112,8 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException {
int strLen = in.readInt();
if (strLen == 0) {
+ long parentId = in.readLong(); // Deserialize parentId
+ res.setParentId(parentId);
return res;
}
byte[] buffer = new byte[strLen];
@@ -117,6 +121,8 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException {
assert (bytesRead == strLen);
String dirName = stringCodec.fromPersistedFormat(buffer);
res.setDirName(dirName);
+ long parentId = in.readLong();
+ res.setParentId(parentId);
return res;
}
@@ -128,6 +134,7 @@ public NSSummary copyObject(NSSummary object) {
copy.setFileSizeBucket(object.getFileSizeBucket());
copy.setChildDir(object.getChildDir());
copy.setDirName(object.getDirName());
+ copy.setParentId(object.getParentId());
return copy;
}
}
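
Reviewer illustration (not part of the patch): the codec now appends parentId as the final long of each persisted NSSummary record and reads it back after the directory name. A small round-trip sketch, assuming only the codec methods shown above; the method name and parent ID value are mine, and how the codec instance is obtained is left to the caller.

// Round-trip sketch for the extended record layout; the parent ID value is hypothetical.
static void roundTripParentId(NSSummaryCodec codec) throws IOException {
  NSSummary summary = new NSSummary();
  summary.setDirName("dir6");
  summary.setParentId(22L);                 // e.g. the enclosing bucket's object ID
  byte[] raw = codec.toPersistedFormat(summary);
  NSSummary restored = codec.fromPersistedFormat(raw);
  // parentId is written after the dir name, so it survives the round trip.
  assert restored.getParentId() == 22L;
  assert "dir6".equals(restored.getDirName());
}
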
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
index 57f7686263fa..8477c2c0fa41 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.ozone.recon.api.types.EntityMetaData;
import org.apache.hadoop.ozone.recon.api.types.EntityReadAccessHeatMapResponse;
import org.apache.hadoop.ozone.recon.api.types.ResponseStatus;
+import org.apache.hadoop.ozone.recon.api.types.Stats;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
import jakarta.annotation.Nonnull;
@@ -71,7 +72,7 @@ private long getEntitySize(String path) throws IOException {
EntityHandler.getEntityHandler(reconNamespaceSummaryManager,
omMetadataManager, reconSCM, path);
if (null != entityHandler) {
- DUResponse duResponse = entityHandler.getDuResponse(false, false, false);
+ DUResponse duResponse = entityHandler.getDuResponse(false, false, false, false, new Stats(-1));
if (null != duResponse && duResponse.getStatus() == ResponseStatus.OK) {
return duResponse.getSize();
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
index f00d83e64a52..b979307019ff 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
@@ -132,6 +132,10 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo,
curNSSummary = new NSSummary();
}
curNSSummary.setDirName(dirName);
+ // Set the parent directory ID
+ if (parentObjectId != -1) {
+ curNSSummary.setParentId(parentObjectId);
+ }
nsSummaryMap.put(objectId, curNSSummary);
// Write the child dir list to the parent directory
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
index a9ed342faad4..1f50ca6d06d0 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -208,13 +209,9 @@ public static void writeKeyToOm(OMMetadataManager omMetadataManager,
throws IOException {
// DB key in FileTable => "volumeId/bucketId/parentId/fileName"
// DB key in KeyTable => "/volume/bucket/key"
- String omKey;
- if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
- omKey = omMetadataManager.getOzonePathKey(volumeObjectId,
- bucketObjectId, parentObjectId, fileName);
- } else {
- omKey = omMetadataManager.getOzoneKey(volume, bucket, key);
- }
+ String omKey =
+ getKey(omMetadataManager, key, bucket, volume, fileName, parentObjectId, bucketObjectId, volumeObjectId,
+ bucketLayout);
omMetadataManager.getKeyTable(bucketLayout).put(omKey,
new OmKeyInfo.Builder()
.setBucketName(bucket)
@@ -228,6 +225,20 @@ public static void writeKeyToOm(OMMetadataManager omMetadataManager,
.build());
}
+ @SuppressWarnings("checkstyle:ParameterNumber")
+ private static String getKey(OMMetadataManager omMetadataManager, String key, String bucket, String volume,
+ String fileName, long parentObjectId, long bucketObjectId, long volumeObjectId,
+ BucketLayout bucketLayout) {
+ String omKey;
+ if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
+ omKey = omMetadataManager.getOzonePathKey(volumeObjectId,
+ bucketObjectId, parentObjectId, fileName);
+ } else {
+ omKey = omMetadataManager.getOzoneKey(volume, bucket, key);
+ }
+ return omKey;
+ }
+
@SuppressWarnings("checkstyle:parameternumber")
public static void writeKeyToOm(OMMetadataManager omMetadataManager,
String keyName,
@@ -243,13 +254,10 @@ public static void writeKeyToOm(OMMetadataManager omMetadataManager,
long dataSize)
throws IOException {
- String omKey;
- if (bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
- omKey = omMetadataManager.getOzonePathKey(volumeObjectId,
- bucketObjectId, parentObjectId, fileName);
- } else {
- omKey = omMetadataManager.getOzoneKey(volName, bucketName, keyName);
- }
+ String omKey =
+ getKey(omMetadataManager, keyName, bucketName, volName, fileName, parentObjectId, bucketObjectId,
+ volumeObjectId,
+ bucketLayout);
omMetadataManager.getKeyTable(bucketLayout).put(omKey,
new OmKeyInfo.Builder()
.setBucketName(bucketName)
@@ -264,6 +272,42 @@ public static void writeKeyToOm(OMMetadataManager omMetadataManager,
.build());
}
+ /**
+   * Write a key to the OM instance.
+   * @throws IOException if the key cannot be written.
+ */
+ @SuppressWarnings("checkstyle:parameternumber")
+ public static void writeKeyToOm(OMMetadataManager omMetadataManager,
+ String key,
+ String bucket,
+ String volume,
+ String fileName,
+ long objectID,
+ long parentObjectId,
+ long bucketObjectId,
+ long volumeObjectId,
+ long dataSize,
+ BucketLayout bucketLayout,
+ ReplicationConfig replicationConfig,
+ long creationTime, boolean isFile)
+ throws IOException {
+ String omKey =
+ getKey(omMetadataManager, key, bucket, volume, fileName, parentObjectId, bucketObjectId, volumeObjectId,
+ bucketLayout);
+ omMetadataManager.getKeyTable(bucketLayout).put(omKey,
+ new OmKeyInfo.Builder()
+ .setBucketName(bucket)
+ .setVolumeName(volume)
+ .setKeyName(key)
+ .setFile(isFile)
+ .setReplicationConfig(replicationConfig)
+ .setCreationTime(creationTime)
+ .setObjectID(objectID)
+ .setParentObjectID(parentObjectId)
+ .setDataSize(dataSize)
+ .build());
+ }
+
/**
* Write an open key to OM instance optimized for File System.
*
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
index a88064d565b9..7d6e86b6a8b9 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
@@ -19,12 +19,15 @@
package org.apache.hadoop.ozone.recon.api;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize;
import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -42,6 +45,7 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.recon.ReconConstants;
import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
import org.apache.hadoop.ozone.recon.api.types.DUResponse;
@@ -73,6 +77,7 @@
import java.util.ArrayList;
import java.util.Set;
import java.util.HashSet;
+import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
@@ -118,6 +123,13 @@ public class TestNSSummaryEndpointWithFSO {
private OzoneConfiguration ozoneConfiguration;
private CommonUtils commonUtils;
+ private ReplicationConfig ratisOne = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE);
+ private long epochMillis1 =
+ ReconUtils.convertToEpochMillis("04-04-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
+ private long epochMillis2 =
+ ReconUtils.convertToEpochMillis("04-05-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
+
private static final String TEST_PATH_UTILITY =
"/vol1/buck1/a/b/c/d/e/file1.txt";
private static final String PARENT_DIR = "vol1/buck1/a/b/c/d/e";
@@ -132,6 +144,7 @@ public class TestNSSummaryEndpointWithFSO {
private static final String BUCKET_TWO = "bucket2";
private static final String BUCKET_THREE = "bucket3";
private static final String BUCKET_FOUR = "bucket4";
+ private static final String BUCKET_FIVE = "bucket5";
private static final String KEY_ONE = "file1";
private static final String KEY_TWO = "dir1/dir2/file2";
private static final String KEY_THREE = "dir1/dir3/file3";
@@ -157,12 +170,21 @@ public class TestNSSummaryEndpointWithFSO {
private static final String FILE_NINE = "file9";
private static final String FILE_TEN = "file10";
private static final String FILE_ELEVEN = "file11";
+ private static final String FILE_TWELVE = "file12";
+ private static final String FILE_THIRTEEN = "file13";
+ private static final String FILE_FOURTEEN = "file14";
+ private static final String FILE_FIFTEEN = "file15";
+ private static final String FILE_SIXTEEN = "file16";
+ private static final String FILE_SEVENTEEN = "file17";
private static final String DIR_ONE = "dir1";
private static final String DIR_TWO = "dir2";
private static final String DIR_THREE = "dir3";
private static final String DIR_FOUR = "dir4";
private static final String DIR_FIVE = "dir5";
+ private static final String DIR_SIX = "dir6";
+ private static final String DIR_SEVEN = "dir7";
+ private static final String DIR_EIGHT = "dir8";
// objects IDs
private static final long VOL_OBJECT_ID = 0L;
private static final long BUCKET_ONE_OBJECT_ID = 1L;
@@ -179,6 +201,7 @@ public class TestNSSummaryEndpointWithFSO {
private static final long DIR_FOUR_OBJECT_ID = 12L;
private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L;
private static final long KEY_SEVEN_OBJECT_ID = 13L;
+
private static final long VOL_TWO_OBJECT_ID = 14L;
private static final long BUCKET_THREE_OBJECT_ID = 15L;
private static final long BUCKET_FOUR_OBJECT_ID = 16L;
@@ -188,6 +211,17 @@ public class TestNSSummaryEndpointWithFSO {
private static final long KEY_TEN_OBJECT_ID = 20L;
private static final long KEY_ELEVEN_OBJECT_ID = 21L;
+ private static final long BUCKET_FIVE_OBJECT_ID = 22L;
+ private static final long DIR_SIX_OBJECT_ID = 23L;
+ private static final long KEY_TWELVE_OBJECT_ID = 24L;
+ private static final long KEY_THIRTEEN_OBJECT_ID = 25L;
+ private static final long DIR_SEVEN_OBJECT_ID = 26L;
+ private static final long KEY_FOURTEEN_OBJECT_ID = 27L;
+ private static final long KEY_FIFTEEN_OBJECT_ID = 28L;
+ private static final long DIR_EIGHT_OBJECT_ID = 29L;
+ private static final long KEY_SIXTEEN_OBJECT_ID = 30L;
+ private static final long KEY_SEVENTEEN_OBJECT_ID = 31L;
+
// container IDs
private static final long CONTAINER_ONE_ID = 1L;
private static final long CONTAINER_TWO_ID = 2L;
@@ -225,6 +259,13 @@ public class TestNSSummaryEndpointWithFSO {
private static final long KEY_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
private static final long KEY_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1
+ private static final long KEY_TWELVE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
+ private static final long KEY_THIRTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
+ private static final long KEY_FOURTEEN_SIZE = OzoneConsts.KB + 1; // bin 1
+ private static final long KEY_FIFTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
+ private static final long KEY_SIXTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
+ private static final long KEY_SEVENTEEN_SIZE = OzoneConsts.KB + 1; // bin 1
+
private static final long FILE1_SIZE_WITH_REPLICA =
getReplicatedSize(KEY_ONE_SIZE,
StandaloneReplicationConfig.getInstance(ONE));
@@ -258,6 +299,24 @@ public class TestNSSummaryEndpointWithFSO {
private static final long FILE11_SIZE_WITH_REPLICA =
getReplicatedSize(KEY_ELEVEN_SIZE,
StandaloneReplicationConfig.getInstance(ONE));
+ private static final long FILE12_SIZE_WITH_REPLICA =
+ getReplicatedSize(KEY_TWELVE_SIZE,
+ StandaloneReplicationConfig.getInstance(ONE));
+ private static final long FILE13_SIZE_WITH_REPLICA =
+ getReplicatedSize(KEY_THIRTEEN_SIZE,
+ StandaloneReplicationConfig.getInstance(ONE));
+ private static final long FILE14_SIZE_WITH_REPLICA =
+ getReplicatedSize(KEY_FOURTEEN_SIZE,
+ StandaloneReplicationConfig.getInstance(ONE));
+ private static final long FILE15_SIZE_WITH_REPLICA =
+ getReplicatedSize(KEY_FIFTEEN_SIZE,
+ StandaloneReplicationConfig.getInstance(ONE));
+ private static final long FILE16_SIZE_WITH_REPLICA =
+ getReplicatedSize(KEY_SIXTEEN_SIZE,
+ StandaloneReplicationConfig.getInstance(ONE));
+ private static final long FILE17_SIZE_WITH_REPLICA =
+ getReplicatedSize(KEY_SEVENTEEN_SIZE,
+ StandaloneReplicationConfig.getInstance(ONE));
private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA
= FILE7_SIZE_WITH_REPLICA;
private static final long
@@ -272,7 +331,13 @@ public class TestNSSummaryEndpointWithFSO {
+ FILE8_SIZE_WITH_REPLICA
+ FILE9_SIZE_WITH_REPLICA
+ FILE10_SIZE_WITH_REPLICA
- + FILE11_SIZE_WITH_REPLICA;
+ + FILE11_SIZE_WITH_REPLICA
+ + FILE12_SIZE_WITH_REPLICA
+ + FILE13_SIZE_WITH_REPLICA
+ + FILE14_SIZE_WITH_REPLICA
+ + FILE15_SIZE_WITH_REPLICA
+ + FILE16_SIZE_WITH_REPLICA
+ + FILE17_SIZE_WITH_REPLICA;
private static final long
MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL
@@ -315,6 +380,7 @@ public class TestNSSummaryEndpointWithFSO {
private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB;
private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB;
private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB;
+ private static final long BUCKET_FIVE_QUOTA = OzoneConsts.MB;
// mock client's path requests
private static final String TEST_USER = "TestUser";
@@ -323,23 +389,29 @@ public class TestNSSummaryEndpointWithFSO {
private static final String VOL_TWO_PATH = "/vol2";
private static final String BUCKET_ONE_PATH = "/vol/bucket1";
private static final String BUCKET_TWO_PATH = "/vol/bucket2";
+ private static final String BUCKET_FIVE_PATH = "/vol2/bucket5";
private static final String DIR_ONE_PATH = "/vol/bucket1/dir1";
private static final String DIR_TWO_PATH = "/vol/bucket1/dir1/dir2";
private static final String DIR_THREE_PATH = "/vol/bucket1/dir1/dir3";
private static final String DIR_FOUR_PATH = "/vol/bucket1/dir1/dir4";
+ private static final String DIR_SIX_PATH = "/vol2/bucket5/dir6";
+ private static final String DIR_SEVEN_PATH = "/vol2/bucket5/dir6/dir7";
+ private static final String DIR_EIGHT_PATH = "/vol2/bucket5/dir6/dir7/dir8";
private static final String KEY_PATH = "/vol/bucket2/file4";
private static final String MULTI_BLOCK_KEY_PATH = "/vol/bucket1/dir1/file7";
private static final String INVALID_PATH = "/vol/path/not/found";
// some expected answers
- private static final long ROOT_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE +
- KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE +
- KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE;
+ private static final long ROOT_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_FOUR_SIZE +
+ KEY_FIVE_SIZE + KEY_SIX_SIZE + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE +
+ KEY_TWELVE_SIZE + KEY_THIRTEEN_SIZE + KEY_FOURTEEN_SIZE + KEY_FIFTEEN_SIZE + KEY_SIXTEEN_SIZE +
+ KEY_SEVENTEEN_SIZE;
private static final long VOL_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE +
KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE;
private static final long VOL_TWO_DATA_SIZE =
- KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE;
+ KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE + KEY_TWELVE_SIZE + KEY_THIRTEEN_SIZE +
+ KEY_FOURTEEN_SIZE + KEY_FIFTEEN_SIZE + KEY_SIXTEEN_SIZE + KEY_SEVENTEEN_SIZE;
private static final long BUCKET_ONE_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE +
KEY_THREE_SIZE + KEY_SIX_SIZE;
@@ -672,10 +744,21 @@ public void testQuotaUsage() throws Exception {
invalidResObj.getResponseCode());
}
+ @Test
+ public void testListKeysBucketFive() throws Exception {
+ // bucket level DU
+ Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS", "", 0, BUCKET_FIVE_PATH,
+ 1000, true);
+ DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ assertEquals(6, duBucketResponse.getCount());
+ DUResponse.DiskUsage duDir1 = duBucketResponse.getDuData().get(0);
+ assertEquals(DIR_SIX_PATH.substring(1) + OM_KEY_PREFIX + FILE_TWELVE, duDir1.getSubpath());
+ assertEquals("RATIS", duDir1.getReplicationType());
+ }
@Test
public void testFileSizeDist() throws Exception {
- checkFileSizeDist(ROOT_PATH, 2, 3, 4, 1);
+ checkFileSizeDist(ROOT_PATH, 2, 5, 8, 1);
checkFileSizeDist(VOL_PATH, 2, 1, 2, 1);
checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 1, 1);
checkFileSizeDist(DIR_ONE_PATH, 0, 1, 1, 1);
@@ -700,6 +783,7 @@ public void checkFileSizeDist(String path, int bin0,
* Write directories and keys info into OM DB.
* @throws Exception
*/
+ @SuppressWarnings("checkstyle:methodlength")
private void populateOMDB() throws Exception {
// write all directories
writeDirToOm(reconOMMetadataManager, DIR_ONE_OBJECT_ID,
@@ -717,6 +801,15 @@ private void populateOMDB() throws Exception {
writeDirToOm(reconOMMetadataManager, DIR_FIVE_OBJECT_ID,
BUCKET_THREE_OBJECT_ID, BUCKET_THREE_OBJECT_ID,
VOL_TWO_OBJECT_ID, DIR_FIVE);
+ writeDirToOm(reconOMMetadataManager, DIR_SIX_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID, BUCKET_FIVE_OBJECT_ID,
+ VOL_TWO_OBJECT_ID, DIR_SIX);
+ writeDirToOm(reconOMMetadataManager, DIR_SEVEN_OBJECT_ID,
+ DIR_SIX_OBJECT_ID, BUCKET_FIVE_OBJECT_ID,
+ VOL_TWO_OBJECT_ID, DIR_SEVEN);
+ writeDirToOm(reconOMMetadataManager, DIR_EIGHT_OBJECT_ID,
+ DIR_SEVEN_OBJECT_ID, BUCKET_FIVE_OBJECT_ID,
+ VOL_TWO_OBJECT_ID, DIR_EIGHT);
// write all keys
writeKeyToOm(reconOMMetadataManager,
@@ -829,6 +922,87 @@ private void populateOMDB() throws Exception {
VOL_TWO_OBJECT_ID,
KEY_ELEVEN_SIZE,
getBucketLayout());
+
+ writeKeyToOm(reconOMMetadataManager,
+ FILE_TWELVE,
+ BUCKET_FIVE,
+ VOL_TWO,
+ FILE_TWELVE,
+ KEY_TWELVE_OBJECT_ID,
+ DIR_SIX_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ VOL_TWO_OBJECT_ID,
+ KEY_TWELVE_SIZE,
+ getBucketLayout(),
+ ratisOne,
+ epochMillis1, true);
+ writeKeyToOm(reconOMMetadataManager,
+ FILE_THIRTEEN,
+ BUCKET_FIVE,
+ VOL_TWO,
+ FILE_THIRTEEN,
+ KEY_THIRTEEN_OBJECT_ID,
+ DIR_SIX_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ VOL_TWO_OBJECT_ID,
+ KEY_THIRTEEN_SIZE,
+ getBucketLayout(),
+ ratisOne,
+ epochMillis2, true);
+
+ writeKeyToOm(reconOMMetadataManager,
+ FILE_FOURTEEN,
+ BUCKET_FIVE,
+ VOL_TWO,
+ FILE_FOURTEEN,
+ KEY_FOURTEEN_OBJECT_ID,
+ DIR_SEVEN_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ VOL_TWO_OBJECT_ID,
+ KEY_FOURTEEN_SIZE,
+ getBucketLayout(),
+ ratisOne,
+ epochMillis1, true);
+ writeKeyToOm(reconOMMetadataManager,
+ FILE_FIFTEEN,
+ BUCKET_FIVE,
+ VOL_TWO,
+ FILE_FIFTEEN,
+ KEY_FIFTEEN_OBJECT_ID,
+ DIR_SEVEN_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ VOL_TWO_OBJECT_ID,
+ KEY_FIFTEEN_SIZE,
+ getBucketLayout(),
+ ratisOne,
+ epochMillis2, true);
+
+ writeKeyToOm(reconOMMetadataManager,
+ FILE_SIXTEEN,
+ BUCKET_FIVE,
+ VOL_TWO,
+ FILE_SIXTEEN,
+ KEY_SIXTEEN_OBJECT_ID,
+ DIR_EIGHT_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ VOL_TWO_OBJECT_ID,
+ KEY_SIXTEEN_SIZE,
+ getBucketLayout(),
+ ratisOne,
+ epochMillis1, true);
+ writeKeyToOm(reconOMMetadataManager,
+ FILE_SEVENTEEN,
+ BUCKET_FIVE,
+ VOL_TWO,
+ FILE_SEVENTEEN,
+ KEY_SEVENTEEN_OBJECT_ID,
+ DIR_EIGHT_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ VOL_TWO_OBJECT_ID,
+ KEY_SEVENTEEN_SIZE,
+ getBucketLayout(),
+ ratisOne,
+ epochMillis2, true);
}
/**
@@ -900,6 +1074,14 @@ private static OMMetadataManager initializeNewOmMetadataManager(
.setBucketLayout(getBucketLayout())
.build();
+ OmBucketInfo bucketInfo5 = OmBucketInfo.newBuilder()
+ .setVolumeName(VOL_TWO)
+ .setBucketName(BUCKET_FIVE)
+ .setObjectID(BUCKET_FIVE_OBJECT_ID)
+ .setQuotaInBytes(BUCKET_FIVE_QUOTA)
+ .setBucketLayout(getBucketLayout())
+ .build();
+
String bucketKey = omMetadataManager.getBucketKey(
bucketInfo.getVolumeName(), bucketInfo.getBucketName());
String bucketKey2 = omMetadataManager.getBucketKey(
@@ -908,11 +1090,14 @@ private static OMMetadataManager initializeNewOmMetadataManager(
bucketInfo3.getVolumeName(), bucketInfo3.getBucketName());
String bucketKey4 = omMetadataManager.getBucketKey(
bucketInfo4.getVolumeName(), bucketInfo4.getBucketName());
+ String bucketKey5 = omMetadataManager.getBucketKey(
+ bucketInfo5.getVolumeName(), bucketInfo5.getBucketName());
omMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3);
omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4);
+ omMetadataManager.getBucketTable().put(bucketKey5, bucketInfo5);
return omMetadataManager;
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
index ce8aa7296350..a318a30d04c2 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
@@ -19,13 +19,16 @@
package org.apache.hadoop.ozone.recon.api;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize;
import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -45,6 +48,7 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.recon.ReconConstants;
import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler;
import org.apache.hadoop.ozone.recon.api.types.BucketObjectDBInfo;
@@ -81,6 +85,7 @@
import java.util.ArrayList;
import java.util.Set;
import java.util.HashSet;
+import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
@@ -89,6 +94,7 @@
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static org.junit.jupiter.api.Assertions.assertNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -113,15 +119,21 @@
* │ ├── file2
* │ └── file3
* └── bucket2 (OBS)
- * ├── file4
- * └── file5
+ * │ ├── file4
+ * │ └── file5
+ * └── bucket5 (OBS)
+ * ├── file6
+ * └── file7
* └── vol2
* ├── bucket3 (Legacy)
* │ ├── file8
* │ ├── file9
* │ └── file10
* └── bucket4 (Legacy)
- * └── file11
+ * │ └── file11
+ * └── bucket6 (Legacy)
+ *         ├── file12
+ *         └── file13
*/
public class TestNSSummaryEndpointWithOBSAndLegacy {
@TempDir
@@ -146,16 +158,22 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
private static final String BUCKET_TWO = "bucket2";
private static final String BUCKET_THREE = "bucket3";
private static final String BUCKET_FOUR = "bucket4";
+ private static final String BUCKET_FIVE = "bucket5";
+ private static final String BUCKET_SIX = "bucket6";
private static final String KEY_ONE = "file1";
private static final String KEY_TWO = "////file2";
private static final String KEY_THREE = "file3///";
private static final String KEY_FOUR = "file4";
private static final String KEY_FIVE = "_//////";
+ private static final String KEY_SIX = "file6";
+ private static final String KEY_SEVEN = "file7";
private static final String KEY_EIGHT = "file8";
private static final String KEY_NINE = "//////";
private static final String KEY_TEN = "///__file10";
private static final String KEY_ELEVEN = "////file11";
private static final String MULTI_BLOCK_FILE = KEY_THREE;
+ private static final String KEY_TWELVE = "file12";
+ private static final String KEY_THIRTEEN = "file13";
private static final long PARENT_OBJECT_ID_ZERO = 0L;
private static final long VOL_OBJECT_ID = 0L;
@@ -164,15 +182,21 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
private static final long BUCKET_TWO_OBJECT_ID = 2L;
private static final long BUCKET_THREE_OBJECT_ID = 15L;
private static final long BUCKET_FOUR_OBJECT_ID = 16L;
+ private static final long BUCKET_FIVE_OBJECT_ID = 7L;
+ private static final long BUCKET_SIX_OBJECT_ID = 12L;
private static final long KEY_ONE_OBJECT_ID = 3L;
private static final long KEY_TWO_OBJECT_ID = 5L;
private static final long KEY_THREE_OBJECT_ID = 8L;
private static final long KEY_FOUR_OBJECT_ID = 6L;
private static final long KEY_FIVE_OBJECT_ID = 9L;
+ private static final long KEY_SIX_OBJECT_ID = 10L;
+ private static final long KEY_SEVEN_OBJECT_ID = 11L;
private static final long KEY_EIGHT_OBJECT_ID = 17L;
private static final long KEY_NINE_OBJECT_ID = 19L;
private static final long KEY_TEN_OBJECT_ID = 20L;
private static final long KEY_ELEVEN_OBJECT_ID = 21L;
+ private static final long KEY_TWELVE_OBJECT_ID = 22L;
+ private static final long KEY_THIRTEEN_OBJECT_ID = 23L;
private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L;
// container IDs
@@ -205,10 +229,14 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
private static final long FILE_THREE_SIZE = 4 * OzoneConsts.KB + 1; // bin 3
private static final long FILE_FOUR_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
private static final long FILE_FIVE_SIZE = 100L; // bin 0
+ private static final long FILE_SIX_SIZE = 100L; // bin 0
+ private static final long FILE_SEVEN_SIZE = 100L; // bin 0
private static final long FILE_EIGHT_SIZE = OzoneConsts.KB + 1; // bin 1
private static final long FILE_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
private static final long FILE_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
private static final long FILE_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1
+ private static final long FILE_TWELVE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
+ private static final long FILE_THIRTEEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2
private static final long FILE1_SIZE_WITH_REPLICA =
getReplicatedSize(FILE_ONE_SIZE,
@@ -226,6 +254,13 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
getReplicatedSize(FILE_FIVE_SIZE,
StandaloneReplicationConfig.getInstance(ONE));
+ private static final long FILE6_SIZE_WITH_REPLICA =
+ getReplicatedSize(FILE_SIX_SIZE,
+ StandaloneReplicationConfig.getInstance(THREE));
+ private static final long FILE7_SIZE_WITH_REPLICA =
+ getReplicatedSize(FILE_SEVEN_SIZE,
+ StandaloneReplicationConfig.getInstance(THREE));
+
private static final long FILE8_SIZE_WITH_REPLICA =
getReplicatedSize(FILE_EIGHT_SIZE,
StandaloneReplicationConfig.getInstance(ONE));
@@ -238,6 +273,12 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
private static final long FILE11_SIZE_WITH_REPLICA =
getReplicatedSize(FILE_ELEVEN_SIZE,
StandaloneReplicationConfig.getInstance(ONE));
+ private static final long FILE12_SIZE_WITH_REPLICA =
+ getReplicatedSize(FILE_TWELVE_SIZE,
+ StandaloneReplicationConfig.getInstance(ONE));
+ private static final long FILE13_SIZE_WITH_REPLICA =
+ getReplicatedSize(FILE_THIRTEEN_SIZE,
+ StandaloneReplicationConfig.getInstance(ONE));
private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA
= FILE3_SIZE_WITH_REPLICA;
@@ -248,10 +289,14 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
+ FILE3_SIZE_WITH_REPLICA
+ FILE4_SIZE_WITH_REPLICA
+ FILE5_SIZE_WITH_REPLICA
+ + FILE6_SIZE_WITH_REPLICA
+ + FILE7_SIZE_WITH_REPLICA
+ FILE8_SIZE_WITH_REPLICA
+ FILE9_SIZE_WITH_REPLICA
+ FILE10_SIZE_WITH_REPLICA
- + FILE11_SIZE_WITH_REPLICA;
+ + FILE11_SIZE_WITH_REPLICA
+ + FILE12_SIZE_WITH_REPLICA
+ + FILE13_SIZE_WITH_REPLICA;
private static final long
MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL
@@ -259,7 +304,9 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
+ FILE2_SIZE_WITH_REPLICA
+ FILE3_SIZE_WITH_REPLICA
+ FILE4_SIZE_WITH_REPLICA
- + FILE5_SIZE_WITH_REPLICA;
+ + FILE5_SIZE_WITH_REPLICA
+ + FILE6_SIZE_WITH_REPLICA
+ + FILE7_SIZE_WITH_REPLICA;
private static final long
MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1
@@ -286,6 +333,8 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB;
private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB;
private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB;
+ private static final long BUCKET_FIVE_QUOTA = OzoneConsts.MB;
+ private static final long BUCKET_SIX_QUOTA = OzoneConsts.MB;
// mock client's path requests
private static final String TEST_USER = "TestUser";
@@ -300,6 +349,10 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE;
private static final String BUCKET_FOUR_PATH =
ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR;
+ private static final String BUCKET_FIVE_PATH =
+ ROOT_PATH + VOL + ROOT_PATH + BUCKET_FIVE;
+ private static final String BUCKET_SIX_PATH =
+ ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_SIX;
private static final String KEY_ONE_PATH =
ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_ONE;
private static final String KEY_TWO_PATH =
@@ -310,6 +363,10 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR;
private static final String KEY_FIVE_PATH =
ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FIVE;
+ private static final String KEY_SIX_PATH =
+ ROOT_PATH + VOL + ROOT_PATH + BUCKET_FIVE + ROOT_PATH + KEY_SIX;
+ private static final String KEY_SEVEN_PATH =
+ ROOT_PATH + VOL + ROOT_PATH + BUCKET_FIVE + ROOT_PATH + KEY_SEVEN;
private static final String KEY_EIGHT_PATH =
ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_EIGHT;
private static final String KEY_NINE_PATH =
@@ -318,6 +375,10 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_TEN;
private static final String KEY_ELEVEN_PATH =
ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR + ROOT_PATH + KEY_ELEVEN;
+ private static final String KEY_TWELVE_PATH =
+ ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_SIX + ROOT_PATH + KEY_TWELVE;
+ private static final String KEY_THIRTEEN_PATH =
+ ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_SIX + ROOT_PATH + KEY_THIRTEEN;
private static final String KEY4_PATH =
ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR;
private static final String MULTI_BLOCK_KEY_PATH =
@@ -326,14 +387,14 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
// some expected answers
private static final long ROOT_DATA_SIZE =
- FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE +
- FILE_FIVE_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE +
- FILE_ELEVEN_SIZE;
+ FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE + FILE_SIX_SIZE +
+ FILE_SEVEN_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE + FILE_TWELVE_SIZE +
+ FILE_THIRTEEN_SIZE;
private static final long VOL_DATA_SIZE = FILE_ONE_SIZE + FILE_TWO_SIZE +
- FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE;
+ FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE + FILE_SIX_SIZE + FILE_SEVEN_SIZE;
private static final long VOL_TWO_DATA_SIZE =
- FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE;
+ FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE + FILE_TWELVE_SIZE + FILE_THIRTEEN_SIZE;
private static final long BUCKET_ONE_DATA_SIZE = FILE_ONE_SIZE +
FILE_TWO_SIZE +
@@ -342,11 +403,21 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
private static final long BUCKET_TWO_DATA_SIZE =
FILE_FOUR_SIZE + FILE_FIVE_SIZE;
+ private static final long BUCKET_FIVE_DATA_SIZE =
+ FILE_SIX_SIZE + FILE_SEVEN_SIZE;
+
private static final long BUCKET_THREE_DATA_SIZE =
FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE;
private static final long BUCKET_FOUR_DATA_SIZE = FILE_ELEVEN_SIZE;
+ private static int chunkSize = 1024 * 1024;
+
+ private ReplicationConfig ratisOne = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, ONE);
+ private long epochMillis1 =
+ ReconUtils.convertToEpochMillis("04-04-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
+ private long epochMillis2 =
+ ReconUtils.convertToEpochMillis("04-05-2024 12:30:00", "MM-dd-yyyy HH:mm:ss", TimeZone.getDefault());
@BeforeEach
public void setUp() throws Exception {
@@ -409,8 +480,8 @@ public void testGetBasicInfoRoot() throws Exception {
(NamespaceSummaryResponse) rootResponse.getEntity();
assertEquals(EntityType.ROOT, rootResponseObj.getEntityType());
assertEquals(2, rootResponseObj.getCountStats().getNumVolume());
- assertEquals(4, rootResponseObj.getCountStats().getNumBucket());
- assertEquals(9, rootResponseObj.getCountStats().getNumTotalKey());
+ assertEquals(6, rootResponseObj.getCountStats().getNumBucket());
+ assertEquals(13, rootResponseObj.getCountStats().getNumTotalKey());
}
@Test
@@ -421,8 +492,8 @@ public void testGetBasicInfoVol() throws Exception {
(NamespaceSummaryResponse) volResponse.getEntity();
assertEquals(EntityType.VOLUME,
volResponseObj.getEntityType());
- assertEquals(2, volResponseObj.getCountStats().getNumBucket());
- assertEquals(5, volResponseObj.getCountStats().getNumTotalKey());
+ assertEquals(3, volResponseObj.getCountStats().getNumBucket());
+ assertEquals(7, volResponseObj.getCountStats().getNumTotalKey());
assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj.
getObjectDBInfo()).getAdmin());
assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj.
@@ -440,8 +511,8 @@ public void testGetBasicInfoVolTwo() throws Exception {
(NamespaceSummaryResponse) volTwoResponse.getEntity();
assertEquals(EntityType.VOLUME,
volTwoResponseObj.getEntityType());
- assertEquals(2, volTwoResponseObj.getCountStats().getNumBucket());
- assertEquals(4, volTwoResponseObj.getCountStats().getNumTotalKey());
+ assertEquals(3, volTwoResponseObj.getCountStats().getNumBucket());
+ assertEquals(6, volTwoResponseObj.getCountStats().getNumTotalKey());
assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj.
getObjectDBInfo()).getAdmin());
assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj.
@@ -573,7 +644,7 @@ public void testDiskUsageVolume() throws Exception {
Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH,
false, false, false);
DUResponse duVolRes = (DUResponse) volResponse.getEntity();
- assertEquals(2, duVolRes.getCount());
+ assertEquals(3, duVolRes.getCount());
List duData = duVolRes.getDuData();
// sort based on subpath
Collections.sort(duData,
@@ -592,7 +663,7 @@ public void testDiskUsageVolTwo() throws Exception {
Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_TWO_PATH,
false, false, false);
DUResponse duVolRes = (DUResponse) volResponse.getEntity();
- assertEquals(2, duVolRes.getCount());
+ assertEquals(3, duVolRes.getCount());
List duData = duVolRes.getDuData();
// sort based on subpath
Collections.sort(duData,
@@ -684,7 +755,7 @@ public void testDiskUsageKey4() throws Exception {
Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH,
true, false, false);
DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
- assertEquals(0, duKeyResponse.getCount());
+ assertEquals(1, duKeyResponse.getCount());
assertEquals(FILE_FOUR_SIZE, duKeyResponse.getSize());
}
@@ -857,8 +928,8 @@ public void testQuotaUsage() throws Exception {
@Test
public void testFileSizeDist() throws Exception {
- checkFileSizeDist(ROOT_PATH, 2, 3, 3, 1);
- checkFileSizeDist(VOL_PATH, 2, 1, 1, 1);
+ checkFileSizeDist(ROOT_PATH, 4, 3, 5, 1);
+ checkFileSizeDist(VOL_PATH, 4, 1, 1, 1);
checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 0, 1);
}
@@ -904,6 +975,101 @@ public void testNormalizePathUptoBucket() {
OmUtils.normalizePathUptoBucket("volume/bucket/key$%#1/./////////key$%#2"));
}
+ @Test
+ public void testListKeysBucketFive() throws Exception {
+ // filter list keys under bucketFive based on RATIS ReplicationConfig and key creation date
+    // creationDate filter is set 1 minute after KEY6's creation time, so the listKeys API returns
+    // only one key (KEY7), the single RATIS key created after the creationDate filter value.
+ Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS",
+ "04-04-2024 12:31:00", 0, BUCKET_FIVE_PATH, 10, false);
+ DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ // There are no sub-paths under this OBS bucket.
+ assertEquals(1, duBucketResponse.getCount());
+
+    // creationDate and keySize filters are both empty, so the listKeys API should return both KEY6 and KEY7.
+ bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS",
+ null, 0, BUCKET_FIVE_PATH, 10, false);
+ duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ // There are no sub-paths under this OBS bucket.
+ assertEquals(2, duBucketResponse.getCount());
+ assertEquals(KEY_SIX, duBucketResponse.getDuData().get(0).getSubpath());
+
+    // creationDate filter equals KEY6's creation time, so the listKeys API returns both KEY6 and KEY7,
+    // the two RATIS keys created at or after "04-04-2024 12:30:00".
+ bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS",
+ "04-04-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false);
+ duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ // There are no sub-paths under this OBS bucket.
+ assertEquals(2, duBucketResponse.getCount());
+ assertEquals(KEY_SIX, duBucketResponse.getDuData().get(0).getSubpath());
+
+ // The creationDate filter equals KEY6's creation time, but the replicationType filter is EC, so the
+ // listKeys API returns ZERO keys, because no EC key was created at or after the creationDate filter value.
+ bucketResponse = nsSummaryEndpoint.listKeysWithDu("EC",
+ "04-04-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false);
+ duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ // There are no sub-paths under this OBS bucket.
+ assertEquals(0, duBucketResponse.getCount());
+
+ // The creationDate filter equals KEY7's creation time and the replicationType filter is RATIS, so the
+ // listKeys API returns ONE key, as only one RATIS key was created at or after the creationDate filter value.
+ bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS",
+ "04-05-2024 12:30:00", 0, BUCKET_FIVE_PATH, 10, false);
+ duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ // There are no sub-paths under this OBS bucket.
+ assertEquals(1, duBucketResponse.getCount());
+
+ // The creationDate filter equals KEY6's creation time and the replicationType filter is RATIS, so the
+ // date and type filters match the RATIS keys, but since the keySize filter is 110 bytes and all RATIS
+ // keys created here are 100 bytes, they are filtered out and the API returns ZERO keys.
+ bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS",
+ "04-04-2024 12:30:00", 110, BUCKET_FIVE_PATH, 10, false);
+ duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ // There are no sub-paths under this OBS bucket.
+ assertEquals(0, duBucketResponse.getCount());
+
+ // The creationDate filter again equals KEY6's creation time, the replicationType filter is EC, and the
+ // keySize filter is 110 bytes; no key passes all of these filters, so the API returns ZERO keys.
+ bucketResponse = nsSummaryEndpoint.listKeysWithDu("EC",
+ "04-04-2024 12:30:00", 110, BUCKET_FIVE_PATH, 10, false);
+ duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ // There are no sub-paths under this OBS bucket.
+ assertEquals(0, duBucketResponse.getCount());
+
+ assertEquals(BUCKET_FIVE_DATA_SIZE, duBucketResponse.getSize());
+ }
+
+ @Test
+ public void testListKeysBucketSix() throws Exception {
+ // Filter keys under bucketSix by RATIS ReplicationConfig and key creation date.
+ Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("RATIS",
+ "04-04-2024 12:20:00", 0, BUCKET_SIX_PATH, 10, false);
+ DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ // There will be two keys (sub-paths) under this LEGACY bucket.
+ assertEquals(2, duBucketResponse.getCount());
+ }
+
+ @Test
+ public void testListKeysOnPageTwoForBucketSix() throws Exception {
+ // List keys under bucketSix filtered only by key creation date (no replication type filter), using a page size of 1.
+ Response bucketResponse = nsSummaryEndpoint.listKeysWithDu("",
+ "04-04-2024 12:20:00", 0, BUCKET_SIX_PATH, 1, false);
+ DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
+ // First page of keys under this LEGACY bucket.
+ assertEquals(1, duBucketResponse.getCount());
+ assertEquals(2, duBucketResponse.getTotalCount());
+
+ // Second page of keys under this LEGACY bucket, starting from the lastKey returned by the first page.
+ Response keyResponse = nsSummaryEndpoint.listKeysWithDu("",
+ "04-04-2024 12:20:00", 0, duBucketResponse.getLastKey(), 1, false);
+ DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
+ assertEquals(1, duKeyResponse.getCount());
+ assertEquals(1, duKeyResponse.getTotalCount());
+ assertNull(duKeyResponse.getLastKey());
+ }
/**
* Testing the following case.
@@ -913,15 +1079,21 @@ public void testNormalizePathUptoBucket() {
* │ ├── file2
* │ └── file3
* └── bucket2 (OBS)
- * ├── file4
- * └── file5
+ * │ ├── file4
+ * │ └── file5
+ * └── bucket5 (OBS)
+ * ├── file6
+ * └── file7
* └── vol2
* ├── bucket3 (Legacy)
* │ ├── file8
* │ ├── file9
* │ └── file10
* └── bucket4 (Legacy)
- * └── file11
+ * │ └── file11
+ * └── bucket6 (Legacy)
+ * ├── file12
+ * └── file13
*
* Write these keys to OM and
* replicate them.
@@ -929,7 +1101,6 @@ public void testNormalizePathUptoBucket() {
*/
@SuppressWarnings("checkstyle:MethodLength")
private void populateOMDB() throws Exception {
-
// write all keys
writeKeyToOm(reconOMMetadataManager,
KEY_ONE,
@@ -986,6 +1157,59 @@ private void populateOMDB() throws Exception {
VOL_OBJECT_ID,
FILE_FIVE_SIZE,
getOBSBucketLayout());
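+
+ // Keys for bucketFive (OBS, under vol): KEY6 and KEY7 share BUCKET_FIVE_OBJECT_ID as their parent and
+ // bucket IDs but carry different creation epochs (epochMillis1 vs epochMillis2), which the
+ // listKeysWithDu creationDate-filter tests above rely on.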
+ writeKeyToOm(reconOMMetadataManager,
+ KEY_SIX,
+ BUCKET_FIVE,
+ VOL,
+ KEY_SIX,
+ KEY_SIX_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ VOL_OBJECT_ID,
+ FILE_SIX_SIZE,
+ getOBSBucketLayout(),
+ ratisOne,
+ epochMillis1, true);
+ writeKeyToOm(reconOMMetadataManager,
+ KEY_SEVEN,
+ BUCKET_FIVE,
+ VOL,
+ KEY_SEVEN,
+ KEY_SEVEN_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ BUCKET_FIVE_OBJECT_ID,
+ VOL_OBJECT_ID,
+ FILE_SEVEN_SIZE,
+ getOBSBucketLayout(),
+ ratisOne,
+ epochMillis2, true);
+
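+ // Keys for bucketSix (LEGACY, under volTwo): KEY12 and KEY13, again written with different creation
+ // epochs so the bucketSix listKeys tests can exercise the creationDate filter and pagination.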
+ writeKeyToOm(reconOMMetadataManager,
+ KEY_TWELVE,
+ BUCKET_SIX,
+ VOL_TWO,
+ KEY_TWELVE,
+ KEY_TWELVE_OBJECT_ID,
+ BUCKET_SIX_OBJECT_ID,
+ BUCKET_SIX_OBJECT_ID,
+ VOL_TWO_OBJECT_ID,
+ FILE_TWELVE_SIZE,
+ getLegacyBucketLayout(),
+ ratisOne,
+ epochMillis1, true);
+ writeKeyToOm(reconOMMetadataManager,
+ KEY_THIRTEEN,
+ BUCKET_SIX,
+ VOL_TWO,
+ KEY_THIRTEEN,
+ KEY_THIRTEEN_OBJECT_ID,
+ BUCKET_SIX_OBJECT_ID,
+ BUCKET_SIX_OBJECT_ID,
+ VOL_TWO_OBJECT_ID,
+ FILE_THIRTEEN_SIZE,
+ getLegacyBucketLayout(),
+ ratisOne,
+ epochMillis2, true);
writeKeyToOm(reconOMMetadataManager,
KEY_EIGHT,
@@ -1104,6 +1328,22 @@ private static OMMetadataManager initializeNewOmMetadataManager(
.setBucketLayout(getLegacyBucketLayout())
.build();
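+ // The two new buckets backing the listKeys filter tests: bucketFive (OBS, under vol) and
+ // bucketSix (LEGACY, under volTwo).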
+ OmBucketInfo bucketInfo5 = OmBucketInfo.newBuilder()
+ .setVolumeName(VOL)
+ .setBucketName(BUCKET_FIVE)
+ .setObjectID(BUCKET_FIVE_OBJECT_ID)
+ .setQuotaInBytes(BUCKET_FIVE_QUOTA)
+ .setBucketLayout(getOBSBucketLayout())
+ .build();
+
+ OmBucketInfo bucketInfo6 = OmBucketInfo.newBuilder()
+ .setVolumeName(VOL_TWO)
+ .setBucketName(BUCKET_SIX)
+ .setObjectID(BUCKET_SIX_OBJECT_ID)
+ .setQuotaInBytes(BUCKET_SIX_QUOTA)
+ .setBucketLayout(getLegacyBucketLayout())
+ .build();
+
String bucketKey = omMetadataManager.getBucketKey(
bucketInfo.getVolumeName(), bucketInfo.getBucketName());
String bucketKey2 = omMetadataManager.getBucketKey(
@@ -1112,11 +1352,17 @@ private static OMMetadataManager initializeNewOmMetadataManager(
bucketInfo3.getVolumeName(), bucketInfo3.getBucketName());
String bucketKey4 = omMetadataManager.getBucketKey(
bucketInfo4.getVolumeName(), bucketInfo4.getBucketName());
+ String bucketKey5 = omMetadataManager.getBucketKey(
+ bucketInfo5.getVolumeName(), bucketInfo5.getBucketName());
+ String bucketKey6 = omMetadataManager.getBucketKey(
+ bucketInfo6.getVolumeName(), bucketInfo6.getBucketName());
omMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2);
omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3);
omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4);
+ omMetadataManager.getBucketTable().put(bucketKey5, bucketInfo5);
+ omMetadataManager.getBucketTable().put(bucketKey6, bucketInfo6);
return omMetadataManager;
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java
index 8b35bfdd4d2a..dc2e26861c3f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java
@@ -91,9 +91,9 @@ public void testNSSummaryBasicInfoRoot(
(NamespaceSummaryResponse) rootResponse.getEntity();
assertEquals(EntityType.ROOT, rootResponseObj.getEntityType());
assertEquals(2, rootResponseObj.getCountStats().getNumVolume());
- assertEquals(4, rootResponseObj.getCountStats().getNumBucket());
- assertEquals(5, rootResponseObj.getCountStats().getNumTotalDir());
- assertEquals(10, rootResponseObj.getCountStats().getNumTotalKey());
+ assertEquals(5, rootResponseObj.getCountStats().getNumBucket());
+ assertEquals(8, rootResponseObj.getCountStats().getNumTotalDir());
+ assertEquals(16, rootResponseObj.getCountStats().getNumTotalKey());
assertEquals("USER",
rootResponseObj.getObjectDBInfo().getAcls().get(0).getType());
assertEquals("WRITE", rootResponseObj.getObjectDBInfo().getAcls().get(0)
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
index fbddd50ee4cb..f0af066c46f3 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java
@@ -114,9 +114,9 @@ public void testInitNSSummaryTable() throws IOException {
private void putThreeNSMetadata() throws IOException {
HashMap hmap = new HashMap<>();
- hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1"));
- hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2"));
- hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3"));
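+ // NSSummary's constructor takes one more trailing argument after this change; these test entries pass -1 for it.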
+ hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1));
+ hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1));
+ hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1));
RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
for (Map.Entry entry: hmap.entrySet()) {
reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation,