diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index f3e45c47eefb..c67d3b1d5761 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -4556,4 +4556,12 @@
+ <property>
+   <name>ozone.om.server.list.max.size</name>
+   <value>1000</value>
+   <tag>OZONE, OM</tag>
+   <description>
+     Maximum number of entries the OM returns in a single response to list calls.
+   </description>
+ </property>
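
For reference, a minimal sketch of how this limit is resolved on the OM side, mirroring the constructor change to OzoneManagerRequestHandler further down in this patch. The class and method names here are illustrative only; the constants are the ones this patch adds to OMConfigKeys.

    // Sketch: resolve the server-side list cap from configuration, falling
    // back to the default when the value is missing or non-positive.
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
    import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT;

    public final class ListLimitSketch {
      private ListLimitSketch() { }

      static long effectiveListLimit(OzoneConfiguration conf) {
        long limit = conf.getLong(OZONE_OM_SERVER_LIST_MAX_SIZE,
            OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT);
        return limit > 0 ? limit : OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT;
      }

      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2);
        System.out.println(effectiveListLimit(conf));   // prints 2
      }
    }
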
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 289e8f48c3e1..1a40b536909d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -1808,7 +1808,6 @@ private boolean getChildrenKeys(String keyPrefix, String startKey,
// 1. Get immediate children of keyPrefix, starting with startKey
List<OzoneFileStatusLight> statuses = proxy.listStatusLight(volumeName,
name, keyPrefix, false, startKey, listCacheSize, true);
- boolean reachedLimitCacheSize = statuses.size() == listCacheSize;
// 2. Special case: ListKey expects keyPrefix element should present in
// the resultList, only if startKey is blank. If startKey is not blank
@@ -1840,7 +1839,7 @@ private boolean getChildrenKeys(String keyPrefix, String startKey,
// Return it so that the next iteration will be
// started using the stacked items.
return true;
- } else if (reachedLimitCacheSize && indx == statuses.size() - 1) {
- // The last element is a FILE and reaches the listCacheSize.
- // Now, sets next seek key to this element
+ } else if (indx == statuses.size() - 1) {
+ // The last element is a FILE. Set it as the next seek key so the
+ // next batch resumes from this element.
stack.push(new ImmutablePair<>(keyPrefix, keyInfo.getKeyName()));
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
index c5985f820933..b58e1021d98e 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OFSPath.java
@@ -301,6 +301,19 @@ public boolean isSnapshotPath() {
return false;
}
+ /**
+ * Returns the snapshot name parsed from the key name if the path is a snapshot path, otherwise null.
+ */
+ public String getSnapshotName() {
+ if (keyName.startsWith(OM_SNAPSHOT_INDICATOR)) {
+ if (!bucketName.isEmpty() && !volumeName.isEmpty()) {
+ String[] keyNames = keyName.split(OZONE_URI_DELIMITER);
+ return keyNames.length > 1 ? keyNames[1] : null;
+ }
+ }
+ return null;
+ }
+
/**
* If key name is not empty, the given path is a key.
* e.g. /volume1/bucket2/key3 is a key.
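
For reference, a standalone sketch of the parsing rule getSnapshotName() implements, written against plain strings rather than OFSPath itself. It assumes OM_SNAPSHOT_INDICATOR resolves to ".snapshot" and OZONE_URI_DELIMITER to "/"; the class and method names are illustrative.

    public final class SnapshotNameSketch {
      private SnapshotNameSketch() { }

      // For a key such as ".snapshot/snap1/key1" the snapshot name is the
      // second path element; anything else yields null.
      static String snapshotNameOf(String keyName) {
        if (!keyName.startsWith(".snapshot")) {
          return null;
        }
        String[] parts = keyName.split("/");
        return parts.length > 1 ? parts[1] : null;
      }

      public static void main(String[] args) {
        System.out.println(snapshotNameOf(".snapshot/snap1/key1")); // snap1
        System.out.println(snapshotNameOf(".snapshot"));            // null
        System.out.println(snapshotNameOf("dir1/key1"));            // null
      }
    }
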
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index a77bc4f53048..2cf5e4ced3e0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -625,4 +625,9 @@ private OMConfigKeys() {
public static final String OZONE_OM_MAX_BUCKET =
"ozone.om.max.buckets";
public static final int OZONE_OM_MAX_BUCKET_DEFAULT = 100000;
+ /**
+ * Maximum number of entries the OM returns in a single response for list calls.
+ */
+ public static final String OZONE_OM_SERVER_LIST_MAX_SIZE = "ozone.om.server.list.max.size";
+ public static final int OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT = 1000;
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index 5dd8f7db8628..973064751880 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -117,6 +117,7 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;
import static org.assertj.core.api.Assertions.assertThat;
@@ -184,7 +185,7 @@ void init() throws Exception {
conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL);
conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL);
conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2);
-
+ conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2);
conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);
conf.setBoolean(OZONE_ACL_ENABLED, true);
conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
@@ -2093,8 +2094,8 @@ void testListStatus2() throws IOException {
final long initialListStatusCount = omMetrics.getNumListStatus();
FileStatus[] statusList = fs.listStatus(createPath("/"));
assertEquals(1, statusList.length);
- assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 1);
- assertEquals(initialListStatusCount + 1, omMetrics.getNumListStatus());
+ assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2);
+ assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus());
assertEquals(fs.getFileStatus(path), statusList[0]);
dirPath = RandomStringUtils.randomAlphanumeric(5);
@@ -2105,8 +2106,8 @@ void testListStatus2() throws IOException {
statusList = fs.listStatus(createPath("/"));
assertEquals(2, statusList.length);
- assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 2);
- assertEquals(initialListStatusCount + 2, omMetrics.getNumListStatus());
+ assertChange(initialStats, statistics, Statistic.OBJECTS_LIST.getSymbol(), 4);
+ assertEquals(initialListStatusCount + 4, omMetrics.getNumListStatus());
for (Path p : paths) {
assertThat(Arrays.asList(statusList)).contains(fs.getFileStatus(p));
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
index 8e0bd1ac7deb..ccfa0625800a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java
@@ -24,6 +24,8 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import java.util.concurrent.atomic.AtomicInteger;
@@ -49,10 +51,13 @@
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.assertj.core.api.Assertions.assertThat;
@@ -91,6 +96,8 @@ static void initClass() throws Exception {
conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true);
conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS);
conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE);
+ conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 20);
+ conf.setInt(OZONE_FS_LISTING_PAGE_SIZE, 30);
// Start the cluster
cluster = MiniOzoneCluster.newHABuilder(conf)
@@ -289,6 +296,13 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception {
String snapshotPath2 = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH +
OM_KEY_PREFIX + snapshotName2;
String snapshotKeyPath2 = snapshotPath2 + OM_KEY_PREFIX + key2;
+ List<String> snapshotNames = new ArrayList<>();
+ for (int i = 0; i < cluster.getConf().getInt(OZONE_FS_LISTING_PAGE_SIZE,
+ OZONE_FS_LISTING_PAGE_SIZE_DEFAULT) * 2; i++) {
+ snapshotNames.add(createSnapshot());
+ }
+ String snapshotName3 = createSnapshot();
+
int res = ToolRunner.run(shell,
new String[]{"-deleteSnapshot", BUCKET_PATH, snapshotName1});
@@ -313,6 +327,10 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception {
assertThat(listSnapOut).doesNotContain(snapshotName1);
assertThat(listSnapOut).contains(snapshotName2);
+ assertThat(listSnapOut).contains(snapshotName3);
+ for (String snapshotName : snapshotNames) {
+ assertThat(listSnapOut).contains(snapshotName);
+ }
// Check for snapshot keys with "ozone fs -ls"
String listSnapKeyOut = execShellCommandAndGetOutput(1,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
index e00a7c966d4c..d5551ce67379 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGenerator.java
@@ -46,6 +46,9 @@
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -150,7 +153,7 @@ private void verifyDirTree(String volumeName, String bucketName, int depth,
FileStatus[] fileStatuses = fileSystem.listStatus(rootDir);
// verify the num of peer directories, expected span count is 1
// as it has only one dir at root.
- verifyActualSpan(1, fileStatuses);
+ verifyActualSpan(1, Arrays.asList(fileStatuses));
for (FileStatus fileStatus : fileStatuses) {
int actualDepth =
traverseToLeaf(fileSystem, fileStatus.getPath(), 1, depth, span,
@@ -164,14 +167,16 @@ private int traverseToLeaf(FileSystem fs, Path dirPath, int depth,
int expectedFileCnt, StorageSize perFileSize)
throws IOException {
FileStatus[] fileStatuses = fs.listStatus(dirPath);
+ List<FileStatus> fileStatusList = new ArrayList<>();
+ Collections.addAll(fileStatusList, fileStatuses);
// check the num of peer directories except root and leaf as both
// has less dirs.
if (depth < expectedDepth - 1) {
- verifyActualSpan(expectedSpanCnt, fileStatuses);
+ verifyActualSpan(expectedSpanCnt, fileStatusList);
}
int actualNumFiles = 0;
ArrayList files = new ArrayList<>();
- for (FileStatus fileStatus : fileStatuses) {
+ for (FileStatus fileStatus : fileStatusList) {
if (fileStatus.isDirectory()) {
++depth;
return traverseToLeaf(fs, fileStatus.getPath(), depth, expectedDepth,
@@ -192,7 +197,7 @@ private int traverseToLeaf(FileSystem fs, Path dirPath, int depth,
}
private int verifyActualSpan(int expectedSpanCnt,
- FileStatus[] fileStatuses) {
+ List<FileStatus> fileStatuses) {
int actualSpan = 0;
for (FileStatus fileStatus : fileStatuses) {
if (fileStatus.isDirectory()) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
index 204c0ee66818..7e9744d01238 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java
@@ -48,6 +48,7 @@
import static com.google.common.collect.Lists.newLinkedList;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
import static org.junit.jupiter.params.provider.Arguments.of;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -80,6 +81,7 @@ public static void init() throws Exception {
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3);
conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3);
+ conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
index 11594f3ef11c..0829c8fc19a8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java
@@ -47,6 +47,7 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
@@ -81,6 +82,7 @@ public static void init() throws Exception {
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 3);
conf.setInt(OZONE_CLIENT_LIST_CACHE_SIZE, 3);
+ conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, 2);
cluster = MiniOzoneCluster.newBuilder(conf).build();
cluster.waitForClusterToBeReady();
client = cluster.newClient();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index 594b862bd1f0..ab1f68d9928a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -146,6 +146,8 @@
import com.google.common.collect.Lists;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.HBASE_SUPPORT;
import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA;
import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT;
@@ -181,9 +183,16 @@ public class OzoneManagerRequestHandler implements RequestHandler {
LoggerFactory.getLogger(OzoneManagerRequestHandler.class);
private final OzoneManager impl;
private FaultInjector injector;
+ private long maxKeyListSize;
+
public OzoneManagerRequestHandler(OzoneManager om) {
this.impl = om;
+ this.maxKeyListSize = om.getConfiguration().getLong(OZONE_OM_SERVER_LIST_MAX_SIZE,
+ OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT);
+ if (this.maxKeyListSize <= 0) {
+ this.maxKeyListSize = OZONE_OM_SERVER_LIST_MAX_SIZE_DEFAULT;
+ }
}
//TODO simplify it to make it shorter
@@ -745,7 +754,7 @@ private ListKeysResponse listKeys(ListKeysRequest request, int clientVersion)
request.getBucketName(),
request.getStartKey(),
request.getPrefix(),
- request.getCount());
+ (int)Math.min(this.maxKeyListSize, request.getCount()));
for (OmKeyInfo key : listKeysResult.getKeys()) {
resp.addKeyInfo(key.getProtobuf(true, clientVersion));
}
@@ -763,7 +772,7 @@ private ListKeysLightResponse listKeysLight(ListKeysRequest request)
request.getBucketName(),
request.getStartKey(),
request.getPrefix(),
- request.getCount());
+ (int)Math.min(this.maxKeyListSize, request.getCount()));
for (BasicOmKeyInfo key : listKeysLightResult.getKeys()) {
resp.addBasicKeyInfo(key.getProtobuf());
}
@@ -1234,7 +1243,7 @@ private ListStatusResponse listStatus(
request.hasAllowPartialPrefix() && request.getAllowPartialPrefix();
List<OzoneFileStatus> statuses =
impl.listStatus(omKeyArgs, request.getRecursive(),
- request.getStartKey(), request.getNumEntries(),
+ request.getStartKey(), Math.min(this.maxKeyListSize, request.getNumEntries()),
allowPartialPrefixes);
ListStatusResponse.Builder
listStatusResponseBuilder =
@@ -1260,7 +1269,7 @@ private ListStatusLightResponse listStatusLight(
request.hasAllowPartialPrefix() && request.getAllowPartialPrefix();
List<OzoneFileStatusLight> statuses =
impl.listStatusLight(omKeyArgs, request.getRecursive(),
- request.getStartKey(), request.getNumEntries(),
+ request.getStartKey(), Math.min(this.maxKeyListSize, request.getNumEntries()),
allowPartialPrefixes);
ListStatusLightResponse.Builder
listStatusLightResponseBuilder =
@@ -1488,7 +1497,7 @@ private OzoneManagerProtocolProtos.ListSnapshotResponse getSnapshots(
throws IOException {
ListSnapshotResponse implResponse = impl.listSnapshot(
request.getVolumeName(), request.getBucketName(), request.getPrefix(),
- request.getPrevSnapshot(), request.getMaxListResult());
+ request.getPrevSnapshot(), (int)Math.min(request.getMaxListResult(), maxKeyListSize));
List<OzoneManagerProtocolProtos.SnapshotInfo> snapshotInfoList = implResponse.getSnapshotInfos()
.stream().map(SnapshotInfo::getProtobuf).collect(Collectors.toList());
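
In short, every list path above now asks the OM for min(client-requested count, configured server cap) entries. A tiny sketch of that clamp with the default cap of 1000; the method name is illustrative and mirrors the Math.min calls in the handler.

    public final class ListCountClampSketch {
      private ListCountClampSketch() { }

      // Effective page size: whatever the client asked for, capped by the
      // server-side limit.
      static int effectiveCount(long serverMax, int requestedCount) {
        return (int) Math.min(serverMax, requestedCount);
      }

      public static void main(String[] args) {
        System.out.println(effectiveCount(1000L, 50));                // 50
        System.out.println(effectiveCount(1000L, Integer.MAX_VALUE)); // 1000
      }
    }
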
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java
index a4ced424522b..57ac3f29078f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java
@@ -131,7 +131,8 @@ public void testMultiTenancyRequestsWhenDisabled() throws IOException {
final OzoneManager ozoneManager = mock(OzoneManager.class);
doCallRealMethod().when(ozoneManager).checkS3MultiTenancyEnabled();
-
+ final OzoneConfiguration conf = new OzoneConfiguration();
+ when(ozoneManager.getConfiguration()).thenReturn(conf);
when(ozoneManager.isS3MultiTenancyEnabled()).thenReturn(false);
final String tenantId = "test-tenant";
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/protocolPB/TestOzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/protocolPB/TestOzoneManagerRequestHandler.java
new file mode 100644
index 000000000000..996cab082775
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/protocolPB/TestOzoneManagerRequestHandler.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.protocolPB;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.ListKeysLightResult;
+import org.apache.hadoop.ozone.om.helpers.ListKeysResult;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVER_LIST_MAX_SIZE;
+
+/**
+ * Test class to test out OzoneManagerRequestHandler.
+ */
+public class TestOzoneManagerRequestHandler {
+
+
+ private OzoneManagerRequestHandler getRequestHandler(int limitListKeySize) {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setInt(OZONE_OM_SERVER_LIST_MAX_SIZE, limitListKeySize);
+ OzoneManager ozoneManager = Mockito.mock(OzoneManager.class);
+ Mockito.when(ozoneManager.getConfiguration()).thenReturn(conf);
+ return new OzoneManagerRequestHandler(ozoneManager);
+ }
+
+ private OmKeyInfo getMockedOmKeyInfo() {
+ OmKeyInfo keyInfo = Mockito.mock(OmKeyInfo.class);
+ OzoneManagerProtocolProtos.KeyInfo info =
+ OzoneManagerProtocolProtos.KeyInfo.newBuilder().setBucketName("bucket").setKeyName("key").setVolumeName(
+ "volume").setDataSize(0).setType(HddsProtos.ReplicationType.RATIS).setCreationTime(0)
+ .setModificationTime(0).build();
+ Mockito.when(keyInfo.getProtobuf(Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(info);
+ Mockito.when(keyInfo.getProtobuf(Mockito.anyInt())).thenReturn(info);
+ return keyInfo;
+ }
+
+ private BasicOmKeyInfo getMockedBasicOmKeyInfo() {
+ BasicOmKeyInfo keyInfo = Mockito.mock(BasicOmKeyInfo.class);
+ Mockito.when(keyInfo.getProtobuf()).thenReturn(
+ OzoneManagerProtocolProtos.BasicKeyInfo.newBuilder().setKeyName("key").setDataSize(0)
+ .setType(HddsProtos.ReplicationType.RATIS).setCreationTime(0).setModificationTime(0)
+ .build());
+ return keyInfo;
+ }
+
+ private OzoneFileStatus getMockedOzoneFileStatus() {
+ return new OzoneFileStatus(getMockedOmKeyInfo(), 256, false);
+ }
+
+ private void mockOmRequest(OzoneManagerProtocolProtos.OMRequest request,
+ OzoneManagerProtocolProtos.Type cmdType,
+ int requestSize) {
+ Mockito.when(request.getTraceID()).thenReturn("traceId");
+ Mockito.when(request.getCmdType()).thenReturn(cmdType);
+ switch (cmdType) {
+ case ListKeysLight:
+ case ListKeys:
+ Mockito.when(request.getListKeysRequest()).thenReturn(OzoneManagerProtocolProtos.ListKeysRequest.newBuilder()
+ .setCount(requestSize).setBucketName("bucket").setVolumeName("volume").setPrefix("").setStartKey("")
+ .build());
+ break;
+ case ListStatus:
+ Mockito.when(request.getListStatusRequest()).thenReturn(OzoneManagerProtocolProtos.ListStatusRequest.newBuilder()
+ .setNumEntries(requestSize).setKeyArgs(OzoneManagerProtocolProtos.KeyArgs.newBuilder().setBucketName(
+ "bucket").setVolumeName("volume").setKeyName("keyName")
+ .setLatestVersionLocation(true).setHeadOp(true)).setRecursive(true).setStartKey("")
+ .build());
+ break;
+ default:
+ break;
+ }
+ }
+
+ @ParameterizedTest
+ @ValueSource(ints = {0, 9, 10, 11, 50})
+ public void testListKeysResponseSize(int resultSize) throws IOException {
+ List<OmKeyInfo> keyInfos = IntStream.range(0, resultSize).mapToObj(i -> getMockedOmKeyInfo()).collect(
+ Collectors.toList());
+ OzoneManagerRequestHandler requestHandler = getRequestHandler(10);
+ OzoneManager ozoneManager = requestHandler.getOzoneManager();
+ Mockito.when(ozoneManager.listKeys(Mockito.anyString(), Mockito.anyString(), Mockito.anyString(),
+ Mockito.anyString(), Mockito.anyInt())).thenAnswer(i -> {
+ int maxSize = Math.max(Math.min(resultSize, i.getArgument(4)), 0);
+ return new ListKeysResult(keyInfos.isEmpty() ? keyInfos : keyInfos.subList(0, maxSize),
+ maxSize < resultSize);
+ });
+ OzoneManagerProtocolProtos.OMRequest request = Mockito.mock(OzoneManagerProtocolProtos.OMRequest.class);
+ for (int requestSize : Arrays.asList(0, resultSize - 1, resultSize, resultSize + 1, Integer.MAX_VALUE)) {
+ mockOmRequest(request, OzoneManagerProtocolProtos.Type.ListKeys, requestSize);
+ OzoneManagerProtocolProtos.OMResponse omResponse = requestHandler.handleReadRequest(request);
+ int expectedSize = Math.max(Math.min(Math.min(10, requestSize), resultSize), 0);
+ Assertions.assertEquals(expectedSize, omResponse.getListKeysResponse().getKeyInfoList().size());
+ Assertions.assertEquals(expectedSize < resultSize, omResponse.getListKeysResponse().getIsTruncated());
+ }
+ }
+
+ @ParameterizedTest
+ @ValueSource(ints = {0, 9, 10, 11, 50})
+ public void testListLightKeysResponseSize(int resultSize) throws IOException {
+ List<BasicOmKeyInfo> keyInfos = IntStream.range(0, resultSize).mapToObj(i -> getMockedBasicOmKeyInfo()).collect(
+ Collectors.toList());
+ OzoneManagerRequestHandler requestHandler = getRequestHandler(10);
+ OzoneManager ozoneManager = requestHandler.getOzoneManager();
+ Mockito.when(ozoneManager.listKeysLight(Mockito.anyString(), Mockito.anyString(), Mockito.anyString(),
+ Mockito.anyString(), Mockito.anyInt())).thenAnswer(i -> {
+ int maxSize = Math.max(Math.min(resultSize, i.getArgument(4)), 0);
+ return new ListKeysLightResult(keyInfos.isEmpty() ? keyInfos : keyInfos.subList(0, maxSize),
+ maxSize < resultSize);
+ });
+ OzoneManagerProtocolProtos.OMRequest request = Mockito.mock(OzoneManagerProtocolProtos.OMRequest.class);
+ for (int requestSize : Arrays.asList(0, resultSize - 1, resultSize, resultSize + 1, Integer.MAX_VALUE)) {
+ mockOmRequest(request, OzoneManagerProtocolProtos.Type.ListKeysLight, requestSize);
+ OzoneManagerProtocolProtos.OMResponse omResponse = requestHandler.handleReadRequest(request);
+ int expectedSize = Math.max(Math.min(Math.min(10, requestSize), resultSize), 0);
+ Assertions.assertEquals(expectedSize, omResponse.getListKeysLightResponse().getBasicKeyInfoList().size());
+ Assertions.assertEquals(expectedSize < resultSize,
+ omResponse.getListKeysLightResponse().getIsTruncated());
+ }
+ }
+
+ @ParameterizedTest
+ @ValueSource(ints = {0, 9, 10, 11, 50})
+ public void testListStatusResponseSize(int resultSize) throws IOException {
+ List<OzoneFileStatus> statusList = IntStream.range(0, resultSize).mapToObj(i -> getMockedOzoneFileStatus())
+ .collect(Collectors.toList());
+ OzoneManagerRequestHandler requestHandler = getRequestHandler(10);
+ OzoneManager ozoneManager = requestHandler.getOzoneManager();
+ Mockito.when(ozoneManager.listStatus(Mockito.any(OmKeyArgs.class), Mockito.anyBoolean(), Mockito.anyString(),
+ Mockito.anyLong(), Mockito.anyBoolean())).thenAnswer(i -> {
+ long maxSize = i.getArgument(3);
+ maxSize = Math.max(Math.min(resultSize, maxSize), 0);
+ return statusList.isEmpty() ? statusList : statusList.subList(0, (int) maxSize);
+ });
+ OzoneManagerProtocolProtos.OMRequest request = Mockito.mock(OzoneManagerProtocolProtos.OMRequest.class);
+ for (int requestSize : Arrays.asList(0, resultSize - 1, resultSize, resultSize + 1, Integer.MAX_VALUE)) {
+ mockOmRequest(request, OzoneManagerProtocolProtos.Type.ListStatus, requestSize);
+ OzoneManagerProtocolProtos.OMResponse omResponse = requestHandler.handleReadRequest(request);
+ int expectedSize = Math.max(Math.min(Math.min(10, requestSize), resultSize), 0);
+ Assertions.assertEquals(expectedSize, omResponse.getListStatusResponse().getStatusesList().size());
+ }
+ }
+}
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index e03a21782262..ed8d99d67fa1 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -685,7 +685,7 @@ public FileStatus[] listStatus(Path f) throws IOException {
LinkedList<FileStatus> statuses = new LinkedList<>();
List<FileStatus> tmpStatusList;
String startKey = "";
-
+ int entriesAdded;
do {
tmpStatusList =
adapter.listStatus(pathToKey(f), false, startKey, numEntries, uri,
@@ -693,20 +693,22 @@ workingDir, getUsername(), true)
.stream()
.map(this::convertFileStatus)
.collect(Collectors.toList());
-
+ entriesAdded = 0;
if (!tmpStatusList.isEmpty()) {
if (startKey.isEmpty() || !statuses.getLast().getPath().toString()
.equals(tmpStatusList.get(0).getPath().toString())) {
statuses.addAll(tmpStatusList);
+ entriesAdded += tmpStatusList.size();
} else {
statuses.addAll(tmpStatusList.subList(1, tmpStatusList.size()));
+ entriesAdded += tmpStatusList.size() - 1;
}
startKey = pathToKey(statuses.getLast().getPath());
}
- // listStatus returns entries numEntries in size if available.
- // Any lesser number of entries indicate that the required entries have
- // exhausted.
- } while (tmpStatusList.size() == numEntries);
+ // Keep fetching pages until a batch contributes no new entries, which
+ // means the listing has been exhausted.
+ } while (entriesAdded > 0);
return statuses.toArray(new FileStatus[0]);
@@ -1055,8 +1057,7 @@ private boolean hasNextNoFilter() throws IOException {
return false;
}
if (i >= thisListing.size()) {
- if (startPath != null && (thisListing.size() == listingPageSize ||
- thisListing.size() == listingPageSize - 1)) {
+ if (startPath != null && !thisListing.isEmpty()) {
// current listing is exhausted & fetch a new listing
thisListing = listFileStatus(p, startPath, lite);
if (thisListing != null && !thisListing.isEmpty()) {
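
Both OzoneFileSystem implementations in this patch switch the paging loop from "page size equals numEntries" to "the page added something new", since the server may now return fewer than numEntries entries per call. A self-contained sketch of that pattern, including the skip of the repeated inclusive start entry; the fetch function stands in for adapter.listStatus and is illustrative only.

    import java.util.LinkedList;
    import java.util.List;
    import java.util.function.BiFunction;

    public final class ListStatusPagingSketch {
      private ListStatusPagingSketch() { }

      // Collects every entry by paging with an inclusive start key. The first
      // element of each follow-up page repeats the previous last entry and is
      // skipped; the loop ends when a page contributes nothing new.
      static List<String> listAll(BiFunction<String, Integer, List<String>> fetch, int numEntries) {
        LinkedList<String> statuses = new LinkedList<>();
        String startKey = "";
        int entriesAdded;
        do {
          List<String> page = fetch.apply(startKey, numEntries);
          entriesAdded = 0;
          if (!page.isEmpty()) {
            if (startKey.isEmpty() || !statuses.getLast().equals(page.get(0))) {
              statuses.addAll(page);
              entriesAdded = page.size();
            } else {
              statuses.addAll(page.subList(1, page.size()));
              entriesAdded = page.size() - 1;
            }
            startKey = statuses.getLast();
          }
        } while (entriesAdded > 0);
        return statuses;
      }
    }
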
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 039c4ad898f3..9896ab722de6 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -835,7 +835,7 @@ private List listStatusVolume(String volumeStr,
* Helper for OFS listStatus on a bucket to get all snapshots.
*/
private List<FileStatusAdapter> listStatusBucketSnapshot(
- String volumeName, String bucketName, URI uri) throws IOException {
+ String volumeName, String bucketName, URI uri, String prevSnapshot, long numberOfEntries) throws IOException {
OzoneBucket ozoneBucket = getBucket(volumeName, bucketName, false);
UserGroupInformation ugi =
@@ -844,9 +844,9 @@ private List listStatusBucketSnapshot(
String group = getGroupName(ugi);
List<FileStatusAdapter> res = new ArrayList<>();
- Iterator<OzoneSnapshot> snapshotIter = objectStore.listSnapshot(volumeName, bucketName, null, null);
+ Iterator<OzoneSnapshot> snapshotIter = objectStore.listSnapshot(volumeName, bucketName, null, prevSnapshot);
- while (snapshotIter.hasNext()) {
+ while (snapshotIter.hasNext() && res.size() < numberOfEntries) {
OzoneSnapshot ozoneSnapshot = snapshotIter.next();
if (SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE.name().equals(ozoneSnapshot.getSnapshotStatus())) {
res.add(getFileStatusAdapterForBucketSnapshot(
@@ -915,7 +915,7 @@ public List listStatus(String pathStr, boolean recursive,
if (ofsPath.isSnapshotPath()) {
return listStatusBucketSnapshot(ofsPath.getVolumeName(),
- ofsPath.getBucketName(), uri);
+ ofsPath.getBucketName(), uri, ofsStartPath.getSnapshotName(), numEntries);
}
List<FileStatusAdapter> result = new ArrayList<>();
String keyName = ofsPath.getKeyName();
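
listStatusBucketSnapshot now resumes from the caller's previous snapshot and stops once numberOfEntries entries have been collected, so OFS listing of a bucket's ".snapshot" directory pages through snapshots instead of assuming a single call returns them all. A minimal sketch of taking one such page from an iterator; the types are simplified and the names illustrative.

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public final class SnapshotPageSketch {
      private SnapshotPageSketch() { }

      // Takes at most numberOfEntries items from the iterator; the resume
      // point (prevSnapshot) is assumed to be handled by the server side.
      static List<String> takePage(Iterator<String> snapshots, long numberOfEntries) {
        List<String> res = new ArrayList<>();
        while (snapshots.hasNext() && res.size() < numberOfEntries) {
          res.add(snapshots.next());
        }
        return res;
      }
    }
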
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index 58b59766a280..66b0037cf332 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -938,25 +938,27 @@ private List listStatusAdapter(Path f, boolean lite) throws I
LinkedList<FileStatusAdapter> statuses = new LinkedList<>();
List<FileStatusAdapter> tmpStatusList;
String startPath = "";
-
+ int entriesAdded;
do {
tmpStatusList =
adapter.listStatus(pathToKey(f), false, startPath,
numEntries, uri, workingDir, getUsername(), lite);
-
+ entriesAdded = 0;
if (!tmpStatusList.isEmpty()) {
if (startPath.isEmpty() || !statuses.getLast().getPath().toString()
.equals(tmpStatusList.get(0).getPath().toString())) {
statuses.addAll(tmpStatusList);
+ entriesAdded += tmpStatusList.size();
} else {
statuses.addAll(tmpStatusList.subList(1, tmpStatusList.size()));
+ entriesAdded += tmpStatusList.size() - 1;
}
startPath = pathToKey(statuses.getLast().getPath());
}
- // listStatus returns entries numEntries in size if available.
- // Any lesser number of entries indicate that the required entries have
- // exhausted.
- } while (tmpStatusList.size() == numEntries);
+ // Keep fetching pages until a batch contributes no new entries, which
+ // means the listing has been exhausted.
+ } while (entriesAdded > 0);
return statuses;
}
@@ -1265,8 +1267,7 @@ private boolean hasNextNoFilter() throws IOException {
return false;
}
if (i >= thisListing.size()) {
- if (startPath != null && (thisListing.size() == listingPageSize ||
- thisListing.size() == listingPageSize - 1)) {
+ if (startPath != null && !thisListing.isEmpty()) {
// current listing is exhausted & fetch a new listing
thisListing = listFileStatus(p, startPath, lite);
if (thisListing != null && !thisListing.isEmpty()) {