diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index f65e2f30cb8c..88418baffaa8 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -291,11 +291,34 @@ public void untarCheckpointFile(File tarFile, Path destPath)
    */
   public static String constructFullPath(OmKeyInfo omKeyInfo,
                                          ReconNamespaceSummaryManager reconNamespaceSummaryManager,
-                                         ReconOMMetadataManager omMetadataManager)
-      throws IOException {
+                                         ReconOMMetadataManager omMetadataManager) throws IOException {
+    return constructFullPath(omKeyInfo.getKeyName(), omKeyInfo.getParentObjectID(), omKeyInfo.getVolumeName(),
+        omKeyInfo.getBucketName(), reconNamespaceSummaryManager, omMetadataManager);
+  }
 
-    StringBuilder fullPath = new StringBuilder(omKeyInfo.getKeyName());
-    long parentId = omKeyInfo.getParentObjectID();
+  /**
+   * Constructs the full path of a key from its key name and parent ID using a bottom-up approach, starting from the
+   * leaf node.
+   *
+   * The method begins with the leaf node (the key itself) and recursively prepends parent directory names, fetched
+   * via NSSummary objects, until reaching the parent bucket (parentId is -1). It effectively builds the path from
+   * bottom to top, finally prepending the volume and bucket names to complete the full path. If the directory structure
+   * is currently being rebuilt (indicated by the rebuildTriggered flag), this method returns an empty string to signify
+   * that path construction is temporarily unavailable.
+   *
+   * @param keyName The name of the key
+   * @param initialParentId The parent ID of the key
+   * @param volumeName The name of the volume
+   * @param bucketName The name of the bucket
+   * @return The constructed full path of the key as a String, or an empty string if a rebuild is in progress and
+   * the path cannot be constructed at this time.
+   * @throws IOException if directory metadata cannot be read from the namespace summary store
+   */
+  public static String constructFullPath(String keyName, long initialParentId, String volumeName, String bucketName,
+                                         ReconNamespaceSummaryManager reconNamespaceSummaryManager,
+                                         ReconOMMetadataManager omMetadataManager) throws IOException {
+    StringBuilder fullPath = new StringBuilder(keyName);
+    long parentId = initialParentId;
     boolean isDirectoryPresent = false;
 
     while (parentId != 0) {
@@ -320,8 +343,6 @@ public static String constructFullPath(OmKeyInfo omKeyInfo,
     }
 
     // Prepend the volume and bucket to the constructed path
-    String volumeName = omKeyInfo.getVolumeName();
-    String bucketName = omKeyInfo.getBucketName();
     fullPath.insert(0, volumeName + OM_KEY_PREFIX + bucketName + OM_KEY_PREFIX);
     if (isDirectoryPresent) {
       return OmUtils.normalizeKey(fullPath.toString(), true);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index 8611abe88cda..21c9552c035a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.ozone.recon.ReconUtils;
 import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
 import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper;
 import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
 import org.apache.hadoop.ozone.recon.api.types.ListKeysResponse;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
@@ -58,7 +59,6 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -989,7 +989,7 @@ public Response listKeys(@QueryParam("replicationType") String replicationType,
       listKeysResponse = (ListKeysResponse) response.getEntity();
     }
 
-    List<KeyEntityInfo> keyInfoList = listKeysResponse.getKeys();
+    List<KeyEntityInfoProtoWrapper> keyInfoList = listKeysResponse.getKeys();
     if (!keyInfoList.isEmpty()) {
       listKeysResponse.setLastKey(keyInfoList.get(keyInfoList.size() - 1).getKey());
     }
@@ -1003,66 +1003,49 @@ private Response getListKeysResponse(ParamInfo paramInfo) {
       listKeysResponse.setPath(paramInfo.getStartPrefix());
       long replicatedTotal = 0;
       long unreplicatedTotal = 0;
-      boolean keysFound = false; // Flag to track if any keys are found
 
       // Search keys from non-FSO layout.
-      Map<String, OmKeyInfo> obsKeys;
-      Table<String, OmKeyInfo> keyTable =
-          omMetadataManager.getKeyTable(BucketLayout.LEGACY);
-      obsKeys = retrieveKeysFromTable(keyTable, paramInfo);
-      for (Map.Entry<String, OmKeyInfo> entry : obsKeys.entrySet()) {
-        keysFound = true;
-        KeyEntityInfo keyEntityInfo =
-            createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue());
-
-        listKeysResponse.getKeys().add(keyEntityInfo);
-        replicatedTotal += entry.getValue().getReplicatedSize();
-        unreplicatedTotal += entry.getValue().getDataSize();
-      }
+      Table<String, KeyEntityInfoProtoWrapper> keyTable =
+          omMetadataManager.getKeyTableLite(BucketLayout.LEGACY);
+      retrieveKeysFromTable(keyTable, paramInfo, listKeysResponse.getKeys());
+
       // Search keys from FSO layout.
-      Map<String, OmKeyInfo> fsoKeys = searchKeysInFSO(paramInfo);
-      for (Map.Entry<String, OmKeyInfo> entry : fsoKeys.entrySet()) {
-        keysFound = true;
-        KeyEntityInfo keyEntityInfo =
-            createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue());
-
-        listKeysResponse.getKeys().add(keyEntityInfo);
-        replicatedTotal += entry.getValue().getReplicatedSize();
-        unreplicatedTotal += entry.getValue().getDataSize();
-      }
+      searchKeysInFSO(paramInfo, listKeysResponse.getKeys());
 
       // If no keys were found, return a response indicating that no keys matched
-      if (!keysFound) {
+      if (listKeysResponse.getKeys().isEmpty()) {
         return ReconResponseUtils.noMatchedKeysResponse(paramInfo.getStartPrefix());
       }
 
+      for (KeyEntityInfoProtoWrapper keyEntityInfo : listKeysResponse.getKeys()) {
+        replicatedTotal += keyEntityInfo.getReplicatedSize();
+        unreplicatedTotal += keyEntityInfo.getSize();
+      }
+
       // Set the aggregated totals in the response
       listKeysResponse.setReplicatedDataSize(replicatedTotal);
       listKeysResponse.setUnReplicatedDataSize(unreplicatedTotal);
 
       return Response.ok(listKeysResponse).build();
-    } catch (IOException e) {
-      return ReconResponseUtils.createInternalServerErrorResponse(
-          "Error listing keys from OM DB: " + e.getMessage());
     } catch (RuntimeException e) {
+      LOG.error("Error generating listKeys response", e);
       return ReconResponseUtils.createInternalServerErrorResponse(
           "Unexpected runtime error while searching keys in OM DB: " + e.getMessage());
     } catch (Exception e) {
+      LOG.error("Error generating listKeys response", e);
       return ReconResponseUtils.createInternalServerErrorResponse(
           "Error listing keys from OM DB: " + e.getMessage());
     }
   }
 
-  public Map<String, OmKeyInfo> searchKeysInFSO(ParamInfo paramInfo)
+  public void searchKeysInFSO(ParamInfo paramInfo, List<KeyEntityInfoProtoWrapper> results)
       throws IOException {
-    int originalLimit = paramInfo.getLimit();
-    Map<String, OmKeyInfo> matchedKeys = new LinkedHashMap<>();
     // Convert the search prefix to an object path for FSO buckets
     String startPrefixObjectPath = convertStartPrefixPathToObjectIdPath(paramInfo.getStartPrefix());
     String[] names = parseRequestPath(startPrefixObjectPath);
-    Table<String, OmKeyInfo> fileTable =
-        omMetadataManager.getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED);
+    Table<String, KeyEntityInfoProtoWrapper> fileTable =
+        omMetadataManager.getKeyTableLite(BucketLayout.FILE_SYSTEM_OPTIMIZED);
 
     // If names.length > 2, then the search prefix is at the level above bucket level hence
     // no need to find parent or extract id's or find subpaths as the fileTable is
@@ -1075,7 +1058,7 @@ public Map<String, OmKeyInfo> searchKeysInFSO(ParamInfo paramInfo)
       NSSummary parentSummary =
           reconNamespaceSummaryManager.getNSSummary(parentId);
       if (parentSummary == null) {
-        return matchedKeys;
+        return;
       }
       List<String> subPaths = new ArrayList<>();
       // Add the initial search prefix object path because it can have both files and subdirectories with files.
@@ -1087,21 +1070,17 @@ public Map<String, OmKeyInfo> searchKeysInFSO(ParamInfo paramInfo)
       // Iterate over the subpaths and retrieve the files
       for (String subPath : subPaths) {
         paramInfo.setStartPrefix(subPath);
-        matchedKeys.putAll(
-            retrieveKeysFromTable(fileTable, paramInfo));
-        paramInfo.setLimit(originalLimit - matchedKeys.size());
-        if (matchedKeys.size() >= originalLimit) {
+        retrieveKeysFromTable(fileTable, paramInfo, results);
+        if (results.size() >= paramInfo.getLimit()) {
           break;
         }
       }
-      return matchedKeys;
+      return;
     }
 
     paramInfo.setStartPrefix(startPrefixObjectPath);
     // Iterate over for bucket and volume level search
-    matchedKeys.putAll(
-        retrieveKeysFromTable(fileTable, paramInfo));
-    return matchedKeys;
+    retrieveKeysFromTable(fileTable, paramInfo, results);
   }
 
@@ -1174,32 +1153,31 @@ public String convertStartPrefixPathToObjectIdPath(String startPrefixPath)
    * @return A map of keys and their corresponding OmKeyInfo objects.
    * @throws IOException If there are problems accessing the table.
    */
-  private Map<String, OmKeyInfo> retrieveKeysFromTable(
-      Table<String, OmKeyInfo> table, ParamInfo paramInfo)
+  private void retrieveKeysFromTable(
+      Table<String, KeyEntityInfoProtoWrapper> table, ParamInfo paramInfo, List<KeyEntityInfoProtoWrapper> results)
       throws IOException {
     boolean skipPrevKey = false;
     String seekKey = paramInfo.getPrevKey();
-    Map<String, OmKeyInfo> matchedKeys = new LinkedHashMap<>();
     try (
-        TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> keyIter = table.iterator()) {
+        TableIterator<String, ? extends Table.KeyValue<String, KeyEntityInfoProtoWrapper>> keyIter = table.iterator()) {
       if (!paramInfo.isSkipPrevKeyDone() && isNotBlank(seekKey)) {
         skipPrevKey = true;
-        Table.KeyValue<String, OmKeyInfo> seekKeyValue =
+        Table.KeyValue<String, KeyEntityInfoProtoWrapper> seekKeyValue =
             keyIter.seek(seekKey);
         // check if RocksDB was able to seek correctly to the given key prefix
         // if not, then return empty result
         // In case of an empty prevKeyPrefix, all the keys are returned
         if (seekKeyValue == null ||
             (!seekKeyValue.getKey().equals(paramInfo.getPrevKey()))) {
-          return matchedKeys;
+          return;
         }
       } else {
         keyIter.seek(paramInfo.getStartPrefix());
       }
       while (keyIter.hasNext()) {
-        Table.KeyValue<String, OmKeyInfo> entry = keyIter.next();
+        Table.KeyValue<String, KeyEntityInfoProtoWrapper> entry = keyIter.next();
         String dbKey = entry.getKey();
 
         if (!dbKey.startsWith(paramInfo.getStartPrefix())) {
           break; // Exit the loop if the key no longer matches the prefix
@@ -1209,9 +1187,14 @@ private Map<String, OmKeyInfo> retrieveKeysFromTable(
           continue;
         }
         if (applyFilters(entry, paramInfo)) {
-          matchedKeys.put(dbKey, entry.getValue());
+          KeyEntityInfoProtoWrapper keyEntityInfo = entry.getValue();
+          keyEntityInfo.setKey(dbKey);
+          keyEntityInfo.setPath(ReconUtils.constructFullPath(keyEntityInfo.getKeyName(), keyEntityInfo.getParentId(),
+              keyEntityInfo.getVolumeName(), keyEntityInfo.getBucketName(), reconNamespaceSummaryManager,
+              omMetadataManager));
+          results.add(keyEntityInfo);
           paramInfo.setLastKey(dbKey);
-          if (matchedKeys.size() >= paramInfo.getLimit()) {
+          if (results.size() >= paramInfo.getLimit()) {
             break;
           }
         }
@@ -1220,10 +1203,10 @@ private Map<String, OmKeyInfo> retrieveKeysFromTable(
       LOG.error("Error retrieving keys from table for path: {}", paramInfo.getStartPrefix(), exception);
       throw exception;
     }
-    return matchedKeys;
   }
 
-  private boolean applyFilters(Table.KeyValue<String, OmKeyInfo> entry, ParamInfo paramInfo) throws IOException {
+  private boolean applyFilters(Table.KeyValue<String, KeyEntityInfoProtoWrapper> entry, ParamInfo paramInfo)
+      throws IOException {
 
     LOG.debug("Applying filters on : {}", entry.getKey());
 
@@ -1238,7 +1221,7 @@ private boolean applyFilters(Table.KeyValue<String, OmKeyInfo> entry, ParamInfo
       return false;
     }
 
-    return entry.getValue().getDataSize() >= paramInfo.getKeySize();
+    return entry.getValue().getSize() >= paramInfo.getKeySize();
   }
 
   /**
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java
new file mode 100644
index 000000000000..b61ebf9963e3
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfoProtoWrapper.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto2Codec;
+import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+/**
+ * POJO object wrapper for metadata of a given key/file. This class wraps a KeyInfo protobuf
+ * object and delegates most accessors to it.
+ */
+public final class KeyEntityInfoProtoWrapper {
+
+  public static Codec<KeyEntityInfoProtoWrapper> getCodec() {
+    return new DelegatedCodec<>(
+        Proto2Codec.get(OzoneManagerProtocolProtos.KeyInfo.getDefaultInstance()),
+        KeyEntityInfoProtoWrapper::getFromProtobuf,
+        KeyEntityInfoProtoWrapper::toProtobuf,
+        KeyEntityInfoProtoWrapper.class);
+  }
+
+  private final OzoneManagerProtocolProtos.KeyInfo keyInfoProto;
+
+  /** This is key table key of rocksDB and will help UI to implement pagination
+   * where UI will use the last record key to send in API as preKeyPrefix. */
+  @JsonProperty("key")
+  private String key;
+
+  /** Path of a key/file. */
+  @JsonProperty("path")
+  private String path;
+
+  @JsonProperty("replicatedSize")
+  private final long replicatedSize;
+
+  @JsonProperty("replicationInfo")
+  private final ReplicationConfig replicationConfig;
+
+  private KeyEntityInfoProtoWrapper(OzoneManagerProtocolProtos.KeyInfo proto) {
+    keyInfoProto = proto;
+    replicationConfig = ReplicationConfig.fromProto(proto.getType(), proto.getFactor(),
+        proto.getEcReplicationConfig());
+    this.replicatedSize = QuotaUtil.getReplicatedSize(getSize(), getReplicationConfig());
+  }
+
+  public static KeyEntityInfoProtoWrapper getFromProtobuf(OzoneManagerProtocolProtos.KeyInfo keyInfo) {
+    return new KeyEntityInfoProtoWrapper(keyInfo);
+  }
+
+  public OzoneManagerProtocolProtos.KeyInfo toProtobuf() {
+    throw new UnsupportedOperationException("This method is not supported.");
+  }
+
+  @JsonProperty("key")
+  public String getKey() {
+    if (key == null) {
+      throw new IllegalStateException("Key must be set to correctly serialize this object.");
+    }
+    return key;
+  }
+
+  public void setKey(String key) {
+    this.key = key;
+  }
+
+  @JsonProperty("path")
+  public String getPath() {
+    if (path == null) {
+      throw new IllegalStateException("Path must be set to correctly serialize this object.");
+    }
+    return path;
+  }
+
+  public void setPath(String path) {
+    this.path = path;
+  }
+
+  @JsonProperty("size")
+  public long getSize() {
+    return keyInfoProto.getDataSize();
+  }
+
+  @JsonProperty("replicatedSize")
+  public long getReplicatedSize() {
+    return replicatedSize;
+  }
+
+  @JsonProperty("replicationInfo")
+  public ReplicationConfig getReplicationConfig() {
+    return replicationConfig;
+  }
+
+  @JsonProperty("creationTime")
+  public long getCreationTime() {
+    return keyInfoProto.getCreationTime();
+  }
+
+  @JsonProperty("modificationTime")
+  public long getModificationTime() {
+    return keyInfoProto.getModificationTime();
+  }
+
+  @JsonProperty("isKey")
+  public boolean isKey() {
+    return keyInfoProto.getIsFile();
+  }
+
+  public long getParentId() {
+    return keyInfoProto.getParentID();
+  }
+
+  public String getVolumeName() {
+    return keyInfoProto.getVolumeName();
+  }
+
+  public String getBucketName() {
+    return keyInfoProto.getBucketName();
+  }
+
+  /** Returns the key name of the key stored in the OM Key Info object. */
+  public String getKeyName() {
+    return keyInfoProto.getKeyName();
+  }
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java
index 7220060aeb02..2770e7f7f6f0 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ListKeysResponse.java
@@ -51,7 +51,7 @@ public class ListKeysResponse {
 
   /** list of keys. */
   @JsonProperty("keys")
-  private List<KeyEntityInfo> keys;
+  private List<KeyEntityInfoProtoWrapper> keys;
 
   public ListKeysResponse() {
@@ -95,11 +95,11 @@ public void setPath(String path) {
     this.path = path;
   }
 
-  public List<KeyEntityInfo> getKeys() {
+  public List<KeyEntityInfoProtoWrapper> getKeys() {
     return keys;
   }
 
-  public void setKeys(List<KeyEntityInfo> keys) {
+  public void setKeys(List<KeyEntityInfoProtoWrapper> keys) {
     this.keys = keys;
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
index 14ae997073c4..82913f453d01 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java
@@ -23,9 +23,12 @@
 import java.util.List;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper;
 
 /**
  * Interface for the OM Metadata Manager + DB store maintained by
@@ -113,4 +116,13 @@ List<OmBucketInfo> listBucketsUnderVolume(
    */
   OzoneConfiguration getOzoneConfiguration();
 
+  /**
+   * A lighter weight version of the getKeyTable method that only returns the KeyEntityInfo wrapper object. This
+   * avoids creating a full OMKeyInfo object for each key if it is not needed.
+   * @param bucketLayout The Bucket layout to use for the key table.
+   * @return A table of keys and their metadata.
+   * @throws IOException if the table cannot be retrieved from the DB store
+   */
+  Table<String, KeyEntityInfoProtoWrapper> getKeyTableLite(BucketLayout bucketLayout) throws IOException;
+
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index 91cb61369fcd..f750a0abb6a3 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -41,9 +41,11 @@
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper;
 import org.eclipse.jetty.util.StringUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -97,6 +99,7 @@ private void initializeNewRdbStore(File dbFile) throws IOException {
           .setName(dbFile.getName())
           .setPath(dbFile.toPath().getParent());
       addOMTablesAndCodecs(dbStoreBuilder);
+      dbStoreBuilder.addCodec(KeyEntityInfoProtoWrapper.class, KeyEntityInfoProtoWrapper.getCodec());
       setStore(dbStoreBuilder.build());
       LOG.info("Created OM DB handle from snapshot at {}.",
           dbFile.getAbsolutePath());
@@ -109,6 +112,12 @@ private void initializeNewRdbStore(File dbFile) throws IOException {
     }
   }
 
+  @Override
+  public Table<String, KeyEntityInfoProtoWrapper> getKeyTableLite(BucketLayout bucketLayout) throws IOException {
+    String tableName = bucketLayout.isFileSystemOptimized() ? FILE_TABLE : KEY_TABLE;
+    return getStore().getTable(tableName, String.class, KeyEntityInfoProtoWrapper.class);
+  }
+
   @Override
   public void updateOmDB(File newDbLocation) throws IOException {
     if (getStore() != null) {
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
index a1e8585401d1..398d494ea0dc 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java
@@ -37,7 +37,7 @@
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.recon.ReconTestInjector;
 import org.apache.hadoop.ozone.recon.ReconUtils;
-import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfoProtoWrapper;
 import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
 import org.apache.hadoop.ozone.recon.api.types.ListKeysResponse;
 import org.apache.hadoop.ozone.recon.api.types.NSSummary;
@@ -1579,7 +1579,7 @@ public void testListKeysFSOBucket() {
         "", 1000);
     ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity();
     assertEquals(6, listKeysResponse.getKeys().size());
-    KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+    KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0);
     assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath());
     assertEquals("/1/10/11/file1", keyEntityInfo.getKey());
     assertEquals("/1/10/13/testfile", listKeysResponse.getLastKey());
@@ -1611,7 +1611,7 @@ public void testListKeysFSOBucketWithLimitAndPagination() {
         "", 2);
     ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity();
     assertEquals(2, listKeysResponse.getKeys().size());
-    KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+    KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0);
     assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath());
     assertEquals("/1/10/11/testfile", listKeysResponse.getLastKey());
     assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1653,7 +1653,7 @@ public void testListKeysFSOBucketDirOnePathWithLimitTwoAndPagination() {
         "", 2);
     ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity();
     assertEquals(2, listKeysResponse.getKeys().size());
-    KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+    KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0);
     assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath());
     assertEquals("/1/10/11/testfile", listKeysResponse.getLastKey());
     assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1695,7 +1695,7 @@ public void testListKeysFSOBucketDirOnePathWithLimitOneAndPagination() {
         "", 1);
     ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity();
     assertEquals(1, listKeysResponse.getKeys().size());
-    KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+    KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0);
     assertEquals("volume1/fso-bucket/dir1/file1", keyEntityInfo.getPath());
     assertEquals("/1/10/11/file1", listKeysResponse.getLastKey());
     assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1746,7 +1746,7 @@ public void testListKeysFSOBucketTwoPathWithLimitAcrossDirsAtBucketLevel() {
         "", 3);
     ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity();
     assertEquals(3, listKeysResponse.getKeys().size());
-    KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+    KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0);
     assertEquals("volume1/fso-bucket2/dir8/file1", keyEntityInfo.getPath());
     assertEquals("/1/30/32/file1", listKeysResponse.getLastKey());
     assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1779,7 +1779,7 @@ public void testListKeysFSOBucketDirTwoPathWithLimitAndPagination() {
         "", 2);
     ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity();
     assertEquals(2, listKeysResponse.getKeys().size());
-    KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+    KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0);
     assertEquals("volume1/fso-bucket/dir1/dir2/file1", keyEntityInfo.getPath());
     assertEquals("/1/10/12/testfile", listKeysResponse.getLastKey());
     assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1812,7 +1812,7 @@ public void testListKeysFSOBucketDirThreePathWithLimitAndPagination() {
         "", 2);
     ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity();
     assertEquals(2, listKeysResponse.getKeys().size());
-    KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+    KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0);
     assertEquals("volume1/fso-bucket/dir1/dir2/dir3/file1", keyEntityInfo.getPath());
     assertEquals("/1/10/13/testfile", listKeysResponse.getLastKey());
     assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString());
@@ -1899,7 +1899,7 @@ public void testListKeysOBSBucketWithLimitAndPagination() throws Exception {
         "", 2);
     ListKeysResponse listKeysResponse = (ListKeysResponse) bucketResponse.getEntity();
     assertEquals(2, listKeysResponse.getKeys().size());
-    KeyEntityInfo keyEntityInfo = listKeysResponse.getKeys().get(0);
+    KeyEntityInfoProtoWrapper keyEntityInfo = listKeysResponse.getKeys().get(0);
     assertEquals("volume1/obs-bucket/key1", keyEntityInfo.getPath());
     assertEquals("/volume1/obs-bucket/key1/key2", listKeysResponse.getLastKey());
     assertEquals("RATIS", keyEntityInfo.getReplicationConfig().getReplicationType().toString());
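
Reviewer note: below is a minimal, self-contained sketch of the bottom-up walk that the new constructFullPath overload in ReconUtils performs. A plain in-memory map stands in for the NSSummary lookups done through ReconNamespaceSummaryManager, so DirNode, the dirs map, and FullPathSketch are hypothetical names for illustration only, not Recon API types.

    import java.util.HashMap;
    import java.util.Map;

    /** Illustrative stand-in for one NSSummary node: a directory name plus its parent's object ID. */
    final class DirNode {
      final String name;
      final long parentId;

      DirNode(String name, long parentId) {
        this.name = name;
        this.parentId = parentId;
      }
    }

    public final class FullPathSketch {

      /** Builds "volume/bucket/dir1/.../key" bottom-up, mirroring the loop in ReconUtils.constructFullPath. */
      static String constructFullPath(String keyName, long initialParentId, String volumeName,
          String bucketName, Map<Long, DirNode> dirs) {
        StringBuilder fullPath = new StringBuilder(keyName);
        long parentId = initialParentId;
        while (parentId != 0) {                  // 0 marks "directly under the bucket" in this sketch
          DirNode dir = dirs.get(parentId);
          if (dir == null) {
            return "";                           // loosely analogous to the rebuild-in-progress case
          }
          fullPath.insert(0, dir.name + "/");    // prepend each parent directory name
          parentId = dir.parentId;
        }
        fullPath.insert(0, volumeName + "/" + bucketName + "/");
        return fullPath.toString();
      }

      public static void main(String[] args) {
        Map<Long, DirNode> dirs = new HashMap<>();
        dirs.put(11L, new DirNode("dir1", 0L));  // dir1 hangs directly off the bucket
        dirs.put(12L, new DirNode("dir2", 11L)); // dir2 sits inside dir1
        // Prints: volume1/fso-bucket/dir1/dir2/file1
        System.out.println(constructFullPath("file1", 12L, "volume1", "fso-bucket", dirs));
      }
    }

Because each lookup only follows parent pointers upward, the cost per key is proportional to its directory depth rather than the size of the namespace, which is what makes setting the path per row in retrieveKeysFromTable affordable.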
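A rough sketch of the codec-delegation idea behind registering KeyEntityInfoProtoWrapper.getCodec() with the DB store: the stored bytes are decoded once by an inner codec and then handed to a cheap wrapping function, which is how the lite key table avoids materializing a full OmKeyInfo per row. MiniCodec, MiniDelegatedCodec, and CodecSketch are simplified stand-ins under that assumption, not the hdds-utils Codec/DelegatedCodec signatures.

    import java.nio.charset.StandardCharsets;
    import java.util.function.Function;

    /** Illustrative stand-in for a value codec; the real org.apache.hadoop.hdds.utils.db.Codec has more methods. */
    interface MiniCodec<T> {
      T fromPersistedFormat(byte[] rawBytes) throws Exception;
    }

    /** Mirrors the delegation idea: decode bytes with an inner codec, then wrap the result. */
    final class MiniDelegatedCodec<P, T> implements MiniCodec<T> {
      private final MiniCodec<P> inner;
      private final Function<P, T> wrap;

      MiniDelegatedCodec(MiniCodec<P> inner, Function<P, T> wrap) {
        this.inner = inner;
        this.wrap = wrap;
      }

      @Override
      public T fromPersistedFormat(byte[] rawBytes) throws Exception {
        // One decode of the persisted bytes, then a cheap wrapping step -- no heavyweight domain object.
        return wrap.apply(inner.fromPersistedFormat(rawBytes));
      }
    }

    public final class CodecSketch {
      public static void main(String[] args) throws Exception {
        // Pretend the persisted "protobuf" is just a UTF-8 string; the wrapper adds behavior without copying fields.
        MiniCodec<String> protoCodec = raw -> new String(raw, StandardCharsets.UTF_8);
        MiniCodec<StringBuilder> liteCodec = new MiniDelegatedCodec<>(protoCodec, StringBuilder::new);
        System.out.println(liteCodec.fromPersistedFormat("keyInfo-bytes".getBytes(StandardCharsets.UTF_8)));
      }
    }

In the PR itself the inner codec is Proto2Codec over OzoneManagerProtocolProtos.KeyInfo and the wrap step is KeyEntityInfoProtoWrapper::getFromProtobuf; toProtobuf deliberately throws, since the lite table is read-only from Recon's point of view.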