diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
index 367c235f734b..d440df174841 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
@@ -37,10 +37,11 @@ public class RDBSstFileWriter implements DumpFileWriter, Closeable {
   private SstFileWriter sstFileWriter;
   private File sstFile;
   private AtomicLong keyCounter;
+  private Options emptyOption = new Options();
 
   public RDBSstFileWriter() {
     EnvOptions envOptions = new EnvOptions();
-    this.sstFileWriter = new SstFileWriter(envOptions, new Options());
+    this.sstFileWriter = new SstFileWriter(envOptions, emptyOption);
     this.keyCounter = new AtomicLong(0);
   }
 
@@ -83,6 +84,7 @@ public void close() throws IOException {
     } finally {
       sstFileWriter.close();
       sstFileWriter = null;
+      emptyOption.close();
     }
 
     keyCounter.set(0);
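org.rocksdb.Options wraps native memory, so an instance passed inline to the SstFileWriter constructor has no Java-side owner and its native allocation is never freed; the fix is to hold the reference in a field and close it together with the writer. A minimal sketch of that ownership pattern, with an illustrative class name (closing envOptions as well generalizes the patch, which only closes the Options):

import java.io.Closeable;
import java.io.IOException;
import org.rocksdb.EnvOptions;
import org.rocksdb.Options;
import org.rocksdb.SstFileWriter;

/** Sketch: every native RocksDB object gets an owner that closes it. */
public class OwnedOptionsSstWriter implements Closeable {
  private final Options options = new Options();          // owned here
  private final EnvOptions envOptions = new EnvOptions(); // owned here
  private final SstFileWriter writer =
      new SstFileWriter(envOptions, options);

  @Override
  public void close() throws IOException {
    writer.close();      // release the writer first
    envOptions.close();
    options.close();     // frees the native Options allocation
  }
}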
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index baaa17e49ac7..e32987c17db3 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -283,9 +283,8 @@ public DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount)
       throw new IllegalArgumentException("Illegal count for getUpdatesSince.");
     }
     DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper();
-    try {
-      TransactionLogIterator transactionLogIterator =
-          db.getUpdatesSince(sequenceNumber);
+    try (TransactionLogIterator transactionLogIterator =
+        db.getUpdatesSince(sequenceNumber)) {
 
       // Only the first record needs to be checked if its seq number <
       // ( 1 + passed_in_sequence_number). For example, if seqNumber passed
@@ -298,24 +297,28 @@ public DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount)
       while (transactionLogIterator.isValid()) {
         TransactionLogIterator.BatchResult result =
             transactionLogIterator.getBatch();
-        long currSequenceNumber = result.sequenceNumber();
-        if (checkValidStartingSeqNumber &&
-            currSequenceNumber > 1 + sequenceNumber) {
-          throw new SequenceNumberNotFoundException("Unable to read data from" +
-              " RocksDB wal to get delta updates. It may have already been" +
-              "flushed to SSTs.");
-        }
-        // If the above condition was not satisfied, then it is OK to reset
-        // the flag.
-        checkValidStartingSeqNumber = false;
-        if (currSequenceNumber <= sequenceNumber) {
-          transactionLogIterator.next();
-          continue;
-        }
-        dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(),
-            result.sequenceNumber());
-        if (currSequenceNumber - sequenceNumber >= limitCount) {
-          break;
+        try {
+          long currSequenceNumber = result.sequenceNumber();
+          if (checkValidStartingSeqNumber &&
+              currSequenceNumber > 1 + sequenceNumber) {
+            throw new SequenceNumberNotFoundException("Unable to read data from" +
+                " RocksDB wal to get delta updates. It may have already been" +
+                " flushed to SSTs.");
+          }
+          // If the above condition was not satisfied, then it is OK to reset
+          // the flag.
+          checkValidStartingSeqNumber = false;
+          if (currSequenceNumber <= sequenceNumber) {
+            transactionLogIterator.next();
+            continue;
+          }
+          dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(),
+              result.sequenceNumber());
+          if (currSequenceNumber - sequenceNumber >= limitCount) {
+            break;
+          }
+        } finally {
+          result.writeBatch().close();
         }
         transactionLogIterator.next();
       }
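The getUpdatesSince() rewrite manages two native handles: the TransactionLogIterator, now scoped with try-with-resources, and the WriteBatch inside each BatchResult, which is a separate native object that must be closed once its bytes have been copied out. A condensed sketch of the same loop against the plain RocksDB JNI API (class and method names are hypothetical):

import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.TransactionLogIterator;

public final class WalTail {
  /** Prints every WAL batch newer than the given sequence number. */
  static void tailWal(RocksDB db, long sinceSequenceNumber)
      throws RocksDBException {
    try (TransactionLogIterator it = db.getUpdatesSince(sinceSequenceNumber)) {
      while (it.isValid()) {
        TransactionLogIterator.BatchResult result = it.getBatch();
        try {
          byte[] data = result.writeBatch().data(); // copy what you need
          System.out.println("seq=" + result.sequenceNumber()
              + " bytes=" + data.length);
        } finally {
          result.writeBatch().close(); // free the native WriteBatch
        }
        it.next();
      }
    }
  }
}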
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 4ac83c912234..1289260259bc 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -73,12 +73,11 @@ static IOException toIOException(Object name, String op, RocksDBException e) {
    * Read DB and return existing column families.
    *
    * @return a list of column families.
-   * @see RocksDB#listColumnFamilies(Options, String)
    */
   private static List<TableConfig> getColumnFamilies(File file)
       throws RocksDBException {
-    final List<TableConfig> columnFamilies = RocksDB.listColumnFamilies(
-        new Options(), file.getAbsolutePath())
+    final List<TableConfig> columnFamilies = listColumnFamiliesEmptyOptions(
+        file.getAbsolutePath())
         .stream()
         .map(TableConfig::newTableConfig)
         .collect(Collectors.toList());
@@ -88,6 +87,21 @@ private static List<TableConfig> getColumnFamilies(File file)
     return columnFamilies;
   }
 
+  /**
+   * Read DB column families without requiring the caller to supply an
+   * Options object; a temporary Options is created and closed internally.
+   * @param path path of the DB.
+   * @return a list of column family names.
+   * @throws RocksDBException
+   *
+   * @see RocksDB#listColumnFamilies(Options, String)
+   */
+  public static List<byte[]> listColumnFamiliesEmptyOptions(final String path)
+      throws RocksDBException {
+    try (Options emptyOptions = new Options()) {
+      return RocksDB.listColumnFamilies(emptyOptions, path);
+    }
+  }
+
   static RocksDatabase open(File dbFile, DBOptions dbOptions,
       WriteOptions writeOptions, Set<TableConfig> families, boolean readOnly)
       throws IOException {
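listColumnFamiliesEmptyOptions() gives every caller a leak-free replacement for RocksDB.listColumnFamilies(new Options(), path), which leaked one native Options per call. A hypothetical caller, mirroring what the tool classes later in this patch do:

import java.nio.charset.StandardCharsets;
import java.util.List;
import org.apache.hadoop.hdds.utils.db.RocksDatabase;
import org.rocksdb.RocksDBException;

public final class PrintColumnFamilies {
  static void print(String dbPath) throws RocksDBException {
    List<byte[]> families =
        RocksDatabase.listColumnFamiliesEmptyOptions(dbPath);
    for (byte[] name : families) {
      // column family names are raw bytes; Ozone stores them as UTF-8
      System.out.println(new String(name, StandardCharsets.UTF_8));
    }
  }
}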
+ * Say, "/a" is the dir name and its objectID is 1024, then seek + * will be doing with "1024/" to get all immediate descendants. + */ + if (fileStatus.getKeyInfo() != null) { + prefixKeyInDB = fileStatus.getKeyInfo().getObjectID(); + } else { + // list root directory. + prefixKeyInDB = bucketId; + } + seekFileInDB = metadataManager.getOzonePathKey( + volumeId, bucketId, prefixKeyInDB, ""); + seekDirInDB = metadataManager.getOzonePathKey( + volumeId, bucketId, prefixKeyInDB, ""); + + // Order of seek -> + // (1)Seek files in fileTable + // (2)Seek dirs in dirTable + + // First under lock obtain both entries from dir/file cache and generate + // entries marked for delete. + metadataManager.getLock() + .acquireReadLock(BUCKET_LOCK, volumeName, bucketName); + try { + BucketLayout bucketLayout = getBucketLayout( + metadataManager, volumeName, bucketName); + iterator = metadataManager.getKeyTable(bucketLayout).iterator(); + countEntries = + getFilesAndDirsFromCacheWithBucket(volumeName, bucketName, + cacheFileMap, tempCacheDirMap, deletedKeySet, prefixKeyInDB, + seekFileInDB, seekDirInDB, prefixPath, startKey, countEntries, + numEntries); + + } finally { + metadataManager.getLock() + .releaseReadLock(BUCKET_LOCK, volumeName, bucketName); + } + countEntries = + getFilesFromDirectory(cacheFileMap, seekFileInDB, prefixPath, + prefixKeyInDB, countEntries, numEntries, deletedKeySet, + iterator); - /* - * keyName is a directory. - * Say, "/a" is the dir name and its objectID is 1024, then seek - * will be doing with "1024/" to get all immediate descendants. - */ - if (fileStatus.getKeyInfo() != null) { - prefixKeyInDB = fileStatus.getKeyInfo().getObjectID(); } else { - // list root directory. - prefixKeyInDB = bucketId; - } - seekFileInDB = metadataManager.getOzonePathKey( - volumeId, bucketId, prefixKeyInDB, ""); - seekDirInDB = metadataManager.getOzonePathKey( - volumeId, bucketId, prefixKeyInDB, ""); - - // Order of seek -> - // (1)Seek files in fileTable - // (2)Seek dirs in dirTable - - - // First under lock obtain both entries from dir/file cache and generate - // entries marked for delete. - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - BucketLayout bucketLayout = - getBucketLayout(metadataManager, volumeName, bucketName); - iterator = metadataManager.getKeyTable(bucketLayout).iterator(); - countEntries = getFilesAndDirsFromCacheWithBucket(volumeName, - bucketName, cacheFileMap, tempCacheDirMap, deletedKeySet, - prefixKeyInDB, seekFileInDB, seekDirInDB, prefixPath, startKey, - countEntries, numEntries); - - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); - } - countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB, - prefixPath, prefixKeyInDB, countEntries, numEntries, deletedKeySet, - iterator); - - } else { - /* - * startKey will be used in iterator seek and sets the beginning point - * for key traversal. - * keyName will be used as parentID where the user has requested to - * list the keys from. - * - * When recursive flag=false, parentID won't change between two pages. - * For example: OM has a namespace like, - * /a/1...1M files and /a/b/1...1M files. - * /a/1...1M directories and /a/b/1...1M directories. - * Listing "/a", will always have the parentID as "a" irrespective of - * the startKey value. - */ - - // Check startKey is an immediate child of keyName. For example, - // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b. 
-      if (StringUtils.isNotBlank(keyName) &&
-          !OzoneFSUtils.isImmediateChild(keyName, startKey)) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("StartKey {} is not an immediate child of keyName {}. " +
-              "Returns empty list", startKey, keyName);
+        /*
+         * startKey will be used in iterator seek and sets the beginning point
+         * for key traversal.
+         * keyName will be used as parentID where the user has requested to
+         * list the keys from.
+         *
+         * When recursive flag=false, parentID won't change between two pages.
+         * For example: OM has a namespace like,
+         * /a/1...1M files and /a/b/1...1M files.
+         * /a/1...1M directories and /a/b/1...1M directories.
+         * Listing "/a", will always have the parentID as "a" irrespective of
+         * the startKey value.
+         */
+
+        // Check startKey is an immediate child of keyName. For example,
+        // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b.
+        if (StringUtils.isNotBlank(keyName) && !OzoneFSUtils
+            .isImmediateChild(keyName, startKey)) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is not an immediate child of keyName {}. " +
+                "Returns empty list", startKey, keyName);
+          }
+          return Collections.emptyList();
         }
-        return Collections.emptyList();
-      }
 
-      // assign startKeyPath if prefixPath is empty string.
-      if (StringUtils.isBlank(prefixPath)) {
-        prefixPath = OzoneFSUtils.getParentDir(startKey);
-      }
+        // assign startKeyPath if prefixPath is empty string.
+        if (StringUtils.isBlank(prefixPath)) {
+          prefixPath = OzoneFSUtils.getParentDir(startKey);
+        }
 
-      OmKeyArgs startKeyArgs = args.toBuilder()
-          .setKeyName(startKey)
-          .setSortDatanodesInPipeline(false)
-          .build();
-      OzoneFileStatus fileStatusInfo = getOzoneFileStatusFSO(startKeyArgs,
-          null, true);
+        OmKeyArgs startKeyArgs = args.toBuilder()
+            .setKeyName(startKey)
+            .setSortDatanodesInPipeline(false)
+            .build();
+        OzoneFileStatus fileStatusInfo = getOzoneFileStatusFSO(startKeyArgs,
+            null, true);
 
-      if (fileStatusInfo != null) {
-        prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
+        if (fileStatusInfo != null) {
+          prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
 
-        if (fileStatusInfo.isDirectory()) {
-          seekDirInDB = metadataManager.getOzonePathKey(
-              volumeId, bucketId, prefixKeyInDB,
-              fileStatusInfo.getKeyInfo().getFileName());
+          if (fileStatusInfo.isDirectory()) {
+            seekDirInDB = metadataManager.getOzonePathKey(
+                volumeId, bucketId, prefixKeyInDB,
+                fileStatusInfo.getKeyInfo().getFileName());
 
-          // Order of seek -> (1) Seek dirs only in dirTable. In OM, always
-          // the order of search is, first seek into fileTable and then
-          // dirTable. So, its not required to search again in the fileTable.
+            // Order of seek -> (1) Seek dirs only in dirTable. In OM, always
+            // the order of search is, first seek into fileTable and then
+            // dirTable. So, it's not required to search again in the fileTable.
 
-          // Seek the given key in dirTable.
-          metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
+            // Seek the given key in dirTable.
+            metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
                 bucketName);
-          try {
-            listStatusFindDirsInTableCache(tempCacheDirMap,
-                metadataManager.getDirectoryTable(),
-                prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
-                bucketName, countEntries, numEntries, deletedKeySet);
-          } finally {
-            metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+            try {
+              listStatusFindDirsInTableCache(tempCacheDirMap,
+                  metadataManager.getDirectoryTable(),
+                  prefixKeyInDB, seekDirInDB, prefixPath, startKey, volumeName,
+                  bucketName, countEntries, numEntries, deletedKeySet);
+            } finally {
+              metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+                  bucketName);
+            }
+
+          } else {
+            seekFileInDB = metadataManager.getOzonePathKey(
+                volumeId, bucketId, prefixKeyInDB,
+                fileStatusInfo.getKeyInfo().getFileName());
+            // begins from the first sub-dir under the parent dir
+            seekDirInDB = metadataManager.getOzonePathKey(
+                volumeId, bucketId, prefixKeyInDB, "");
+
+            // First under lock obtain both entries from dir/file cache and
+            // generate entries marked for delete.
+            metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
                 bucketName);
-          }
+            try {
+              BucketLayout bucketLayout =
+                  getBucketLayout(metadataManager, volumeName, bucketName);
+              iterator = metadataManager.getKeyTable(bucketLayout)
+                  .iterator();
+              countEntries = getFilesAndDirsFromCacheWithBucket(volumeName,
+                  bucketName, cacheFileMap, tempCacheDirMap, deletedKeySet,
+                  prefixKeyInDB, seekFileInDB, seekDirInDB, prefixPath,
+                  startKey, countEntries, numEntries);
+            } finally {
+              metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
+                  bucketName);
+            }
+            // 1. Seek the given key in key table.
+            countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
+                prefixPath, prefixKeyInDB, countEntries, numEntries,
+                deletedKeySet, iterator);
+          }
         } else {
-          seekFileInDB = metadataManager.getOzonePathKey(
-              volumeId, bucketId, prefixKeyInDB,
-              fileStatusInfo.getKeyInfo().getFileName());
-          // begins from the first sub-dir under the parent dir
-          seekDirInDB = metadataManager.getOzonePathKey(
-              volumeId, bucketId, prefixKeyInDB, "");
-
-          // First under lock obtain both entries from dir/file cache and
-          // generate entries marked for delete.
-          metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName,
-              bucketName);
-          try {
-            BucketLayout bucketLayout =
-                getBucketLayout(metadataManager, volumeName, bucketName);
-            iterator = metadataManager.getKeyTable(bucketLayout)
-                .iterator();
-            countEntries = getFilesAndDirsFromCacheWithBucket(volumeName,
-                bucketName, cacheFileMap, tempCacheDirMap, deletedKeySet,
-                prefixKeyInDB, seekFileInDB, seekDirInDB, prefixPath, startKey,
-                countEntries, numEntries);
-          } finally {
-            metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName,
-                bucketName);
+          // TODO: HDDS-4364: startKey can be a non-existent key
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("StartKey {} is a non-existent key and returning empty " +
+                "list", startKey);
           }
-
-          // 1. Seek the given key in key table.
-          countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
-              prefixPath, prefixKeyInDB, countEntries, numEntries,
-              deletedKeySet, iterator);
-        }
-      } else {
-        // TODO: HDDS-4364: startKey can be a non-existed key
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("StartKey {} is a non-existed key and returning empty " +
-              "list", startKey);
+          return Collections.emptyList();
         }
-        return Collections.emptyList();
+      }
+    } finally {
+      if (iterator != null) {
+        iterator.close();
+      }
     }
 
@@ -2000,9 +2030,25 @@ protected int getDirectories(
       throws IOException {
 
     Table<String, OmDirectoryInfo> dirTable = metadataManager.getDirectoryTable();
-    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
-        iterator = dirTable.iterator();
+    try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+        iterator = dirTable.iterator()) {
+
+      return getDirectoriesWithIterator(cacheKeyMap, seekDirInDB, prefixPath,
+          prefixKeyInDB, countEntries, numEntries, recursive, volumeName,
+          bucketName, deletedKeySet, iterator);
+    }
+  }
 
+  @SuppressWarnings("parameternumber")
+  private int getDirectoriesWithIterator(
+      TreeMap<String, OzoneFileStatus> cacheKeyMap, String seekDirInDB,
+      String prefixPath, long prefixKeyInDB, int countEntries, long numEntries,
+      boolean recursive, String volumeName, String bucketName,
+      Set<String> deletedKeySet,
+      TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+          iterator)
+      throws IOException {
     iterator.seek(seekDirInDB);
 
     while (iterator.hasNext() && numEntries - countEntries > 0) {
@@ -2381,15 +2427,27 @@ public Table.KeyValue<String, OmKeyInfo> getPendingDeletionDir()
   @Override
   public List<OmKeyInfo> getPendingDeletionSubDirs(long volumeId, long bucketId,
       OmKeyInfo parentInfo, long numEntries) throws IOException {
-    List<OmKeyInfo> directories = new ArrayList<>();
     String seekDirInDB = metadataManager.getOzonePathKey(volumeId, bucketId,
         parentInfo.getObjectID(), "");
     long countEntries = 0;
 
     Table<String, OmDirectoryInfo> dirTable = metadataManager.getDirectoryTable();
-    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
-        iterator = dirTable.iterator();
+    try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+        iterator = dirTable.iterator()) {
+      return gatherSubDirsWithIterator(parentInfo, numEntries,
+          seekDirInDB, countEntries, iterator);
+    }
+
+  }
 
+  private List<OmKeyInfo> gatherSubDirsWithIterator(OmKeyInfo parentInfo,
+      long numEntries, String seekDirInDB,
+      long countEntries,
+      TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+          iterator)
+      throws IOException {
+    List<OmKeyInfo> directories = new ArrayList<>();
     iterator.seek(seekDirInDB);
 
     while (iterator.hasNext() && numEntries - countEntries > 0) {
@@ -2421,25 +2479,26 @@ public List<OmKeyInfo> getPendingDeletionSubFiles(long volumeId,
     long countEntries = 0;
 
     Table<String, OmKeyInfo> fileTable = metadataManager.getFileTable();
-    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-        iterator = fileTable.iterator();
+    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+        iterator = fileTable.iterator()) {
 
-    iterator.seek(seekFileInDB);
+      iterator.seek(seekFileInDB);
 
-    while (iterator.hasNext() && numEntries - countEntries > 0) {
-      Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
-      OmKeyInfo fileInfo = entry.getValue();
-      if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
-          parentInfo.getObjectID())) {
-        break;
-      }
-      fileInfo.setFileName(fileInfo.getKeyName());
-      String fullKeyPath = OMFileRequest.getAbsolutePath(
-          parentInfo.getKeyName(), fileInfo.getKeyName());
-      fileInfo.setKeyName(fullKeyPath);
+      while (iterator.hasNext() && numEntries - countEntries > 0) {
+        Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
+        OmKeyInfo fileInfo = entry.getValue();
+        if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
+            parentInfo.getObjectID())) {
+          break;
+        }
+        fileInfo.setFileName(fileInfo.getKeyName());
+        String fullKeyPath = OMFileRequest.getAbsolutePath(
+            parentInfo.getKeyName(), fileInfo.getKeyName());
+        fileInfo.setKeyName(fullKeyPath);
 
-      files.add(fileInfo);
-      countEntries++;
+        files.add(fileInfo);
+        countEntries++;
+      }
     }
 
     return files;
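Every KeyManagerImpl change above follows one pattern: a TableIterator wraps a native RocksDB iterator, so its lifetime is pinned with try-with-resources (or a finally block where it is assigned on more than one path), and the traversal body is factored into a helper that merely borrows the iterator. A condensed, hypothetical pair of methods showing that shape, assuming the hdds Table/TableIterator interfaces used in the patch:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

final class IteratorScope {
  /** Owns the iterator: opens it, delegates, and always closes it. */
  static List<OmKeyInfo> collect(Table<String, OmKeyInfo> table,
      String seekKey, long limit) throws IOException {
    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
        iterator = table.iterator()) {
      return collectWithIterator(iterator, seekKey, limit);
    }
  }

  /** Borrows the iterator: traverses but never closes it. */
  static List<OmKeyInfo> collectWithIterator(
      TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
          iterator, String seekKey, long limit) throws IOException {
    List<OmKeyInfo> result = new ArrayList<>();
    iterator.seek(seekKey);
    while (iterator.hasNext() && result.size() < limit) {
      result.add(iterator.next().getValue());
    }
    return result;
  }
}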
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 8363be8d9bcf..73902099021c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -902,16 +902,19 @@ private static boolean checkSubDirectoryExists(OmKeyInfo omKeyInfo,
     // Check dirTable entries for any sub paths.
     String seekDirInDB = metaMgr.getOzonePathKey(volumeId, bucketId,
         omKeyInfo.getObjectID(), "");
-    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
-        iterator = dirTable.iterator();
+    try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+        iterator = dirTable.iterator()) {
 
-    iterator.seek(seekDirInDB);
+      iterator.seek(seekDirInDB);
+
+      if (iterator.hasNext()) {
+        Table.KeyValue<String, OmDirectoryInfo> entry = iterator.next();
+        OmDirectoryInfo dirInfo = entry.getValue();
+        return isImmediateChild(dirInfo.getParentObjectID(),
+            omKeyInfo.getObjectID());
+      }
 
-    if (iterator.hasNext()) {
-      Table.KeyValue<String, OmDirectoryInfo> entry = iterator.next();
-      OmDirectoryInfo dirInfo = entry.getValue();
-      return isImmediateChild(dirInfo.getParentObjectID(),
-          omKeyInfo.getObjectID());
     }
     return false; // no sub paths found
   }
@@ -946,16 +949,17 @@ private static boolean checkSubFileExists(OmKeyInfo omKeyInfo,
     // Check fileTable entries for any sub paths.
     String seekFileInDB = metaMgr.getOzonePathKey(volumeId, bucketId,
         omKeyInfo.getObjectID(), "");
-    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
-        iterator = fileTable.iterator();
+    try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+        iterator = fileTable.iterator()) {
 
-    iterator.seek(seekFileInDB);
+      iterator.seek(seekFileInDB);
 
-    if (iterator.hasNext()) {
-      Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
-      OmKeyInfo fileInfo = entry.getValue();
-      return isImmediateChild(fileInfo.getParentObjectID(),
-          omKeyInfo.getObjectID()); // found a sub path file
+      if (iterator.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
+        OmKeyInfo fileInfo = entry.getValue();
+        return isImmediateChild(fileInfo.getParentObjectID(),
+            omKeyInfo.getObjectID()); // found a sub path file
+      }
     }
     return false; // no sub paths found
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java
index be1cd592d7cf..494f42e5877e 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ListTables.java
@@ -24,9 +24,8 @@
 
 import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.utils.db.RocksDatabase;
 import org.kohsuke.MetaInfServices;
-import org.rocksdb.Options;
-import org.rocksdb.RocksDB;
 import picocli.CommandLine;
 
 /**
@@ -45,8 +44,8 @@ public class ListTables implements Callable<Void>, SubcommandWithParent {
 
   @Override
   public Void call() throws Exception {
-    List<byte[]> columnFamilies = RocksDB.listColumnFamilies(new Options(),
-        parent.getDbPath());
+    List<byte[]> columnFamilies = RocksDatabase.listColumnFamiliesEmptyOptions(
+        parent.getDbPath());
     for (byte[] b : columnFamilies) {
       System.out.println(new String(b, StandardCharsets.UTF_8));
     }
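checkSubDirectoryExists() and checkSubFileExists() probe for an immediate child by seeking to the parent's path prefix and inspecting at most one entry, and the try-with-resources now guarantees the iterator is released even on the early return. A hypothetical condensation of the directory variant:

import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;

final class SubPathCheck {
  /** True if the first entry at or after seekKey is an immediate child. */
  static boolean hasImmediateChild(Table<String, OmDirectoryInfo> dirTable,
      String seekKey, long parentObjectId) throws IOException {
    try (TableIterator<String,
        ? extends Table.KeyValue<String, OmDirectoryInfo>>
        iterator = dirTable.iterator()) {
      iterator.seek(seekKey);
      if (iterator.hasNext()) {
        OmDirectoryInfo dirInfo = iterator.next().getValue();
        return dirInfo.getParentObjectID() == parentObjectId;
      }
      return false; // nothing at or after the prefix: no sub paths
    }
  }
}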
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java
index 24f6e21833f1..cb780da450bb 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RocksDBUtils.java
@@ -18,9 +18,8 @@
 
 package org.apache.hadoop.ozone.debug;
 
+import org.apache.hadoop.hdds.utils.db.RocksDatabase;
 import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.Options;
-import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 
 import java.util.ArrayList;
@@ -38,7 +37,7 @@ private RocksDBUtils() {
 
   public static List<ColumnFamilyDescriptor> getColumnFamilyDescriptors(
       String dbPath) throws RocksDBException {
     List<ColumnFamilyDescriptor> cfs = new ArrayList<>();
-    List<byte[]> cfList = RocksDB.listColumnFamilies(new Options(), dbPath);
+    List<byte[]> cfList = RocksDatabase.listColumnFamiliesEmptyOptions(dbPath);
     if (cfList != null) {
       for (byte[] b : cfList) {
         cfs.add(new ColumnFamilyDescriptor(b));
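The descriptors produced by getColumnFamilyDescriptors() are typically handed straight to RocksDB.openReadOnly(), which requires every existing column family to be listed and returns a handle per family that the caller must close. A sketch of such a consumer (class name, method name, and printout are illustrative, not part of the patch):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.ozone.debug.RocksDBUtils;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public final class ReadOnlyOpen {
  static void openReadOnly(String dbPath) throws RocksDBException {
    List<ColumnFamilyDescriptor> cfds =
        RocksDBUtils.getColumnFamilyDescriptors(dbPath);
    List<ColumnFamilyHandle> handles = new ArrayList<>();
    try (DBOptions options = new DBOptions()) {
      RocksDB db = RocksDB.openReadOnly(options, dbPath, cfds, handles);
      try {
        System.out.println("opened " + handles.size() + " column families");
      } finally {
        for (ColumnFamilyHandle handle : handles) {
          handle.close(); // close handles before the DB itself
        }
        db.close();
      }
    }
  }
}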