diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 20a12716370f..b1e55cdecdc8 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -542,7 +542,21 @@ public Iterator listKeys(String keyPrefix, * @throws IOException */ public void deleteKey(String key) throws IOException { - proxy.deleteKey(volumeName, name, key); + proxy.deleteKey(volumeName, name, key, false); + } + + /** + * Ozone FS api to delete a directory. Sub directories will be deleted if + * recursive flag is true, otherwise it will be non-recursive. + * + * @param key Name of the key to be deleted. + * @param recursive recursive deletion of all sub path keys if true, + * otherwise non-recursive + * @throws IOException + */ + public void deleteDirectory(String key, boolean recursive) + throws IOException { + proxy.deleteKey(volumeName, name, key, recursive); } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index dbd47c77bece..e429d2dbc38b 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -289,9 +289,12 @@ OzoneInputStream getKey(String volumeName, String bucketName, String keyName) * @param volumeName Name of the Volume * @param bucketName Name of the Bucket * @param keyName Name of the Key + * @param recursive recursive deletion of all sub path keys if true, + * otherwise non-recursive * @throws IOException */ - void deleteKey(String volumeName, String bucketName, String keyName) + void deleteKey(String volumeName, String 
bucketName, String keyName, + boolean recursive) throws IOException; /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 8c0ed41c78a4..f70856b50aa0 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -731,7 +731,7 @@ public OzoneInputStream getKey( @Override public void deleteKey( - String volumeName, String bucketName, String keyName) + String volumeName, String bucketName, String keyName, boolean recursive) throws IOException { verifyVolumeName(volumeName); verifyBucketName(bucketName); @@ -740,6 +740,7 @@ public void deleteKey( .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) + .setRecursive(recursive) .build(); ozoneManagerClient.deleteKey(keyArgs); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index a2f4d6aef5a1..c35760dd1353 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -231,7 +231,9 @@ public enum ResultCodes { PARTIAL_RENAME, - QUOTA_EXCEEDED + QUOTA_EXCEEDED, + + DIRECTORY_NOT_EMPTY } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index c08c988fc7e3..f8c7c231a2a8 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -48,6 +48,7 @@ public final class OmKeyArgs implements Auditable { private 
boolean refreshPipeline; private boolean sortDatanodesInPipeline; private List acls; + private boolean recursive; @SuppressWarnings("parameternumber") private OmKeyArgs(String volumeName, String bucketName, String keyName, @@ -55,7 +56,7 @@ private OmKeyArgs(String volumeName, String bucketName, String keyName, List locationInfoList, boolean isMultipart, String uploadID, int partNumber, Map metadataMap, boolean refreshPipeline, - List acls, boolean sortDatanode) { + List acls, boolean sortDatanode, boolean recursive) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; @@ -70,6 +71,7 @@ private OmKeyArgs(String volumeName, String bucketName, String keyName, this.refreshPipeline = refreshPipeline; this.acls = acls; this.sortDatanodesInPipeline = sortDatanode; + this.recursive = recursive; } public boolean getIsMultipartKey() { @@ -140,6 +142,10 @@ public boolean getSortDatanodes() { return sortDatanodesInPipeline; } + public boolean isRecursive() { + return recursive; + } + @Override public Map toAuditMap() { Map auditMap = new LinkedHashMap<>(); @@ -198,6 +204,7 @@ public static class Builder { private boolean refreshPipeline; private boolean sortDatanodesInPipeline; private List acls; + private boolean recursive; public Builder setVolumeName(String volume) { this.volumeName = volume; @@ -274,11 +281,16 @@ public Builder setSortDatanodesInPipeline(boolean sort) { return this; } + public Builder setRecursive(boolean isRecursive) { + this.recursive = isRecursive; + return this; + } + public OmKeyArgs build() { return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type, factor, locationInfoList, isMultipartKey, multipartUploadID, multipartUploadPartNumber, metadata, refreshPipeline, acls, - sortDatanodesInPipeline); + sortDatanodesInPipeline, recursive); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 3435df887291..ae59fe5b8375 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -741,7 +741,8 @@ public void deleteKey(OmKeyArgs args) throws IOException { KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(args.getVolumeName()) .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()).build(); + .setKeyName(args.getKeyName()) + .setRecursive(args.isRecursive()).build(); req.setKeyArgs(keyArgs); OMRequest omRequest = createOMRequest(Type.DeleteKey) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index b81651937be2..016011078c8f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -380,7 +381,7 @@ private void testDeleteCreatesFakeParentDir() throws Exception { } - private void testRecursiveDelete() throws Exception { + protected void testRecursiveDelete() throws Exception { Path grandparent = new Path("/gdir1"); for (int i = 1; i <= 10; i++) { @@ -389,6 +390,24 @@ private void testRecursiveDelete() throws Exception { ContractTestUtils.touch(fs, child); } + // 
delete a dir with sub-file + try { + FileStatus[] parents = fs.listStatus(grandparent); + Assert.assertTrue(parents.length > 0); + fs.delete(parents[0].getPath(), false); + Assert.fail("Must throw exception as dir is not empty!"); + } catch (PathIsNotEmptyDirectoryException pde) { + // expected + } + + // delete a dir with sub-directory + try { + fs.delete(grandparent, false); + Assert.fail("Must throw exception as dir is not empty!"); + } catch (PathIsNotEmptyDirectoryException pde) { + // expected + } + // Delete the grandparent, which should delete all keys. fs.delete(grandparent, true); @@ -451,7 +470,7 @@ private void checkPath(Path path) { } } - private void testFileDelete() throws Exception { + protected void testFileDelete() throws Exception { Path grandparent = new Path("/testBatchDelete"); Path parent = new Path(grandparent, "parent"); Path childFolder = new Path(parent, "childFolder"); @@ -783,12 +802,8 @@ protected void testRenameToNewSubDirShouldNotExist() throws Exception { final Path baPath = new Path(fs.getUri().toString() + "/b/a"); fs.mkdirs(baPath); - try { - fs.rename(aSourcePath, bDestinPath); - Assert.fail("Should fail as new destination dir exists!"); - } catch (FileAlreadyExistsException faee) { - // expected as new sub-path /b/a already exists. - } + Assert.assertFalse("New destin sub-path /b/a already exists", + fs.rename(aSourcePath, bDestinPath)); // Case-5.b) Rename file from /a/b/c/file1 to /a. // Should be failed since /a/file1 exists. @@ -802,12 +817,8 @@ protected void testRenameToNewSubDirShouldNotExist() throws Exception { final Path aDestinPath = new Path(fs.getUri().toString() + "/a"); - try { - fs.rename(abcFile1, aDestinPath); - Assert.fail("Should fail as new destination file exists!"); - } catch (FileAlreadyExistsException faee) { - // expected as new sub-path /a/file1 already exists. 
- } + Assert.assertFalse("New destin sub-path /a/file1 already exists", + fs.rename(abcFile1, aDestinPath)); } /** @@ -822,12 +833,8 @@ protected void testRenameDirToFile() throws Exception { ContractTestUtils.touch(fs, file1Destin); Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c"); fs.mkdirs(abcRootPath); - try { - fs.rename(abcRootPath, file1Destin); - Assert.fail("key already exists /root_dir/file1"); - } catch (FileAlreadyExistsException faee) { - // expected - } + Assert.assertFalse("key already exists /root_dir/file1", + fs.rename(abcRootPath, file1Destin)); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java index 21e21572f159..d66ec4dbe9b4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java @@ -18,7 +18,6 @@ package org.apache.hadoop.fs.ozone; -import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.LocatedFileStatus; @@ -26,15 +25,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.jetbrains.annotations.NotNull; import 
org.junit.Assert; import org.junit.Rule; @@ -48,11 +40,11 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; -import java.util.Iterator; -import java.util.Map; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; /** * Ozone file system tests that are not covered by contract tests, @@ -308,78 +300,6 @@ protected void testRenameDirToItsOwnSubDir() throws Exception { } } - /** - * Case-5) If new destin '/dst/source' exists then throws exception. - * If destination is a directory then rename source as sub-path of it. - *

- * For example: rename /a to /b will lead to /b/a. This new path should - * not exist. - */ - protected void testRenameToNewSubDirShouldNotExist() throws Exception { - // Case-5.a) Rename directory from /a to /b. - // created /a - final Path aSourcePath = new Path(fs.getUri().toString() + "/a"); - fs.mkdirs(aSourcePath); - - // created /b - final Path bDestinPath = new Path(fs.getUri().toString() + "/b"); - fs.mkdirs(bDestinPath); - - // Add a sub-directory '/b/a' to '/b'. This is to verify that rename - // throws exception as new destin /b/a already exists. - final Path baPath = new Path(fs.getUri().toString() + "/b/a"); - fs.mkdirs(baPath); - - try { - fs.rename(aSourcePath, bDestinPath); - Assert.fail("Should fail as new destination dir exists!"); - } catch (OMException ome) { - // expected as new sub-path /b/a already exists. - assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS); - } - - // Case-5.b) Rename file from /a/b/c/file1 to /a. - // Should be failed since /a/file1 exists. - final Path abcPath = new Path(fs.getUri().toString() + "/a/b/c"); - fs.mkdirs(abcPath); - Path abcFile1 = new Path(abcPath, "/file1"); - ContractTestUtils.touch(fs, abcFile1); - - final Path aFile1 = new Path(fs.getUri().toString() + "/a/file1"); - ContractTestUtils.touch(fs, aFile1); - - final Path aDestinPath = new Path(fs.getUri().toString() + "/a"); - - try { - fs.rename(abcFile1, aDestinPath); - Assert.fail("Should fail as new destination file exists!"); - } catch (OMException ome) { - // expected as new sub-path /b/a already exists. - assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS); - } - } - - /** - * Case-6) Rename directory to an existed file, should be failed. 
- */ - protected void testRenameDirToFile() throws Exception { - final String root = "/root"; - Path rootPath = new Path(fs.getUri().toString() + root); - fs.mkdirs(rootPath); - - Path file1Destin = new Path(fs.getUri().toString() + root + "/file1"); - ContractTestUtils.touch(fs, file1Destin); - Path abcRootPath = new Path(fs.getUri().toString() + "/a/b/c"); - fs.mkdirs(abcRootPath); - try { - fs.rename(abcRootPath, file1Destin); - Assert.fail("key already exists /root_dir/file1"); - } catch (OMException ome) { - // expected - assertEquals(ome.getResult(), OMException.ResultCodes.KEY_ALREADY_EXISTS); - } - } - /** * Fails if the (a) parent of dst does not exist or (b) parent is a file. */ @@ -425,128 +345,90 @@ public void testFileSystem() throws Exception { testCreateFileShouldCheckExistenceOfDirWithSameName(); // TODO: Cleanup keyTable and dirTable explicitly as FS delete operation // is not yet implemented. This should be replaced with fs.delete() call. - tableCleanup(); + deleteRootDir(); testMakeDirsWithAnExistingDirectoryPath(); - tableCleanup(); + deleteRootDir(); testCreateWithInvalidPaths(); - tableCleanup(); + deleteRootDir(); testListStatusWithoutRecursiveSearch(); - tableCleanup(); + deleteRootDir(); testListFilesRecursive(); - tableCleanup(); + deleteRootDir(); testGetDirectoryModificationTime(); - tableCleanup(); + deleteRootDir(); testListStatusOnRoot(); - tableCleanup(); + deleteRootDir(); testListStatus(); - tableCleanup(); + deleteRootDir(); testListStatusOnSubDirs(); - tableCleanup(); + deleteRootDir(); testListStatusOnLargeDirectory(); - tableCleanup(); + deleteRootDir(); testNonExplicitlyCreatedPathExistsAfterItsLeafsWereRemoved(); - tableCleanup(); + deleteRootDir(); testRenameDir(); - tableCleanup(); + deleteRootDir(); testRenameFile(); - tableCleanup(); + deleteRootDir(); testRenameWithNonExistentSource(); - tableCleanup(); + deleteRootDir(); testRenameDirToItsOwnSubDir(); - tableCleanup(); + deleteRootDir(); 
testRenameSourceAndDestinAreSame(); - tableCleanup(); + deleteRootDir(); testRenameToExistingDir(); - tableCleanup(); + deleteRootDir(); testRenameToNewSubDirShouldNotExist(); - tableCleanup(); + deleteRootDir(); testRenameDirToFile(); - tableCleanup(); + deleteRootDir(); testRenameFileToDir(); - tableCleanup(); + deleteRootDir(); testRenameDestinationParentDoesntExist(); - tableCleanup(); + deleteRootDir(); testRenameToParentDir(); - tableCleanup(); + deleteRootDir(); testSeekOnFileLength(); - tableCleanup(); + deleteRootDir(); + + testFileDelete(); + deleteRootDir(); + + testDeleteRoot(); + deleteRootDir(); + + testRecursiveDelete(); + deleteRootDir(); } /** - * Cleanup keyTable and directoryTable explicitly as FS delete operation - * is not yet supported. + * Cleanup files and directories. * * @throws IOException DB failure */ - protected void tableCleanup() throws IOException { - OMMetadataManager metadataMgr = cluster.getOzoneManager() - .getMetadataManager(); - TableIterator> dirTableIterator = - metadataMgr.getDirectoryTable().iterator(); - dirTableIterator.seekToFirst(); - ArrayList dirList = new ArrayList<>(); - while (dirTableIterator.hasNext()) { - String key = dirTableIterator.key(); - if (StringUtils.isNotBlank(key)) { - dirList.add(key); - } - dirTableIterator.next(); - } - - Iterator, CacheValue>> - cacheIterator = metadataMgr.getDirectoryTable().cacheIterator(); - while(cacheIterator.hasNext()){ - cacheIterator.next(); - cacheIterator.remove(); - } + protected void deleteRootDir() throws IOException { + Path root = new Path("/"); + FileStatus[] fileStatuses = fs.listStatus(root); - for (String dirKey : dirList) { - metadataMgr.getDirectoryTable().delete(dirKey); - Assert.assertNull("Unexpected entry!", - metadataMgr.getDirectoryTable().get(dirKey)); + if (fileStatuses == null) { + return; } - Assert.assertTrue("DirTable is not empty", - metadataMgr.getDirectoryTable().isEmpty()); - - Assert.assertFalse(metadataMgr.getDirectoryTable().cacheIterator() 
- .hasNext()); - - TableIterator> keyTableIterator = - metadataMgr.getKeyTable().iterator(); - keyTableIterator.seekToFirst(); - ArrayList fileList = new ArrayList<>(); - while (keyTableIterator.hasNext()) { - String key = keyTableIterator.key(); - if (StringUtils.isNotBlank(key)) { - fileList.add(key); - } - keyTableIterator.next(); + for (FileStatus fStatus : fileStatuses) { + fs.delete(fStatus.getPath(), true); } - Iterator, CacheValue>> - keyCacheIterator = metadataMgr.getKeyTable().cacheIterator(); - while(keyCacheIterator.hasNext()){ - keyCacheIterator.next(); - keyCacheIterator.remove(); + fileStatuses = fs.listStatus(root); + if (fileStatuses != null) { + Assert.assertEquals("Delete root failed!", 0, fileStatuses.length); + rootItemCount = 0; + return; } - - for (String fileKey : fileList) { - metadataMgr.getKeyTable().delete(fileKey); - Assert.assertNull("Unexpected entry!", - metadataMgr.getKeyTable().get(fileKey)); - } - - Assert.assertTrue("KeyTable is not empty", - metadataMgr.getKeyTable().isEmpty()); - rootItemCount = 0; } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 32b3578971d3..9126f922906a 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -318,6 +318,8 @@ enum Status { QUOTA_EXCEEDED = 66; + DIRECTORY_NOT_EMPTY = 67; + } /** @@ -728,6 +730,9 @@ message KeyArgs { // This will be set by leader OM in HA and update the original request. optional FileEncryptionInfoProto fileEncryptionInfo = 15; + + // This will be set when user performs delete directory recursively. 
+ optional bool recursive = 16; } message KeyLocation { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 2fd6131363f3..6b108f633fa9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -2464,7 +2464,8 @@ protected int getDirectories(Set fileStatusList, while (iterator.hasNext() && numEntries - countEntries > 0) { OmDirectoryInfo dirInfo = iterator.value().getValue(); - if (!isImmediateChild(dirInfo.getParentObjectID(), prefixKeyInDB)) { + if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(), + prefixKeyInDB)) { break; } @@ -2499,7 +2500,8 @@ private int getFilesFromDirectory(Set fileStatusList, while (iterator.hasNext() && numEntries - countEntries > 0) { OmKeyInfo keyInfo = iterator.value().getValue(); - if (!isImmediateChild(keyInfo.getParentObjectID(), prefixKeyInDB)) { + if (!OMFileRequest.isImmediateChild(keyInfo.getParentObjectID(), + prefixKeyInDB)) { break; } @@ -2514,10 +2516,6 @@ private int getFilesFromDirectory(Set fileStatusList, return countEntries; } - private boolean isImmediateChild(long parentId, long ancestorId) { - return parentId == ancestorId; - } - /** * Helper function for listStatus to find key in FileTableCache. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index d2dd5c7e07b4..7b5988c388ee 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1; import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest; import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest; +import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequestV1; import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest; import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest; import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequestV1; @@ -146,6 +147,9 @@ public static OMClientRequest createClientRequest(OMRequest omRequest) { } return new OMKeyCommitRequest(omRequest); case DeleteKey: + if (omLayoutVersionV1) { + return new OMKeyDeleteRequestV1(omRequest); + } return new OMKeyDeleteRequest(omRequest); case DeleteKeys: return new OMKeysDeleteRequest(omRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index e7b43d64d686..fc9bab0f2e83 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -25,11 +25,14 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import com.google.common.base.Optional; import com.google.common.base.Strings; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; @@ -759,4 +762,93 @@ public static long getToKeyNameParentId(String volumeName, } return toKeyParentDirStatus.getKeyInfo().getObjectID(); } + + /** + * Check if there are any sub path exist for the given user key path. + * + * @param omKeyInfo om key path + * @param metaMgr OMMetadataManager + * @return true if there are any sub path, false otherwise + * @throws IOException DB exception + */ + public static boolean hasChildren(OmKeyInfo omKeyInfo, + OMMetadataManager metaMgr) throws IOException { + return checkSubDirectoryExists(omKeyInfo, metaMgr) || + checkSubFileExists(omKeyInfo, metaMgr); + } + + private static boolean checkSubDirectoryExists(OmKeyInfo omKeyInfo, + OMMetadataManager metaMgr) throws IOException { + // Check all dirTable cache for any sub paths. + Table dirTable = metaMgr.getDirectoryTable(); + Iterator, CacheValue>> + cacheIter = dirTable.cacheIterator(); + + while (cacheIter.hasNext()) { + Map.Entry, CacheValue> entry = + cacheIter.next(); + OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue(); + if (cacheOmDirInfo == null) { + continue; + } + if (isImmediateChild(cacheOmDirInfo.getParentObjectID(), + omKeyInfo.getObjectID())) { + return true; // found a sub path directory + } + } + + // Check dirTable entries for any sub paths. 
+ String seekDirInDB = metaMgr.getOzonePathKey(omKeyInfo.getObjectID(), ""); + TableIterator> + iterator = dirTable.iterator(); + + iterator.seek(seekDirInDB); + + if (iterator.hasNext()) { + OmDirectoryInfo dirInfo = iterator.value().getValue(); + return isImmediateChild(dirInfo.getParentObjectID(), + omKeyInfo.getObjectID()); + } + return false; // no sub paths found + } + + private static boolean checkSubFileExists(OmKeyInfo omKeyInfo, + OMMetadataManager metaMgr) throws IOException { + // Check all fileTable cache for any sub paths. + Table fileTable = metaMgr.getKeyTable(); + Iterator, CacheValue>> + cacheIter = fileTable.cacheIterator(); + + while (cacheIter.hasNext()) { + Map.Entry, CacheValue> entry = + cacheIter.next(); + OmKeyInfo cacheOmFileInfo = entry.getValue().getCacheValue(); + if (cacheOmFileInfo == null) { + continue; + } + if (isImmediateChild(cacheOmFileInfo.getParentObjectID(), + omKeyInfo.getObjectID())) { + return true; // found a sub path file + } + } + + // Check fileTable entries for any sub paths. 
+ String seekFileInDB = metaMgr.getOzonePathKey( + omKeyInfo.getObjectID(), ""); + TableIterator> + iterator = fileTable.iterator(); + + iterator.seek(seekFileInDB); + + if (iterator.hasNext()) { + OmKeyInfo fileInfo = iterator.value().getValue(); + return isImmediateChild(fileInfo.getParentObjectID(), + omKeyInfo.getObjectID()); // found a sub path file + } + return false; // no sub paths found + } + + public static boolean isImmediateChild(long parentId, long ancestorId) { + return parentId == ancestorId; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java new file mode 100644 index 000000000000..cbc58790c38c --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java @@ -0,0 +1,204 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_EMPTY; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles DeleteKey request layout version V1. + */ +public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeyDeleteRequestV1.class); + + public OMKeyDeleteRequestV1(OMRequest omRequest) { + super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); + + OzoneManagerProtocolProtos.KeyArgs keyArgs = + deleteKeyRequest.getKeyArgs(); + Map auditMap = buildKeyArgsAuditMap(keyArgs); + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + boolean recursive = keyArgs.getRecursive(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumKeyDeletes(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + IOException exception = null; + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + Result result = null; + OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; + try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + // 
check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, + IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); + + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + + // Validate bucket and volume exists or not. + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = + OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName, + bucketName, keyName, 0); + + if (keyStatus == null) { + throw new OMException("Key not found. Key:" + keyName, KEY_NOT_FOUND); + } + + OmKeyInfo omKeyInfo = keyStatus.getKeyInfo(); + + // Set the UpdateID to current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + String ozonePathKey = omMetadataManager.getOzonePathKey( + omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + + if (keyStatus.isDirectory()) { + // Check if there are any sub path exists under the user requested path + if (!recursive && OMFileRequest.hasChildren(omKeyInfo, + omMetadataManager)) { + throw new OMException("Directory is not empty. Key:" + keyName, + DIRECTORY_NOT_EMPTY); + } + + // Update dir cache. + omMetadataManager.getDirectoryTable().addCacheEntry( + new CacheKey<>(ozonePathKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + } else { + // Update table cache. + omMetadataManager.getKeyTable().addCacheEntry( + new CacheKey<>(ozonePathKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + } + + omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); + + long quotaReleased = sumBlockLengths(omKeyInfo); + // update usedBytes atomically. + omVolumeArgs.getUsedBytes().add(-quotaReleased); + omBucketInfo.getUsedBytes().add(-quotaReleased); + + // No need to add cache entries to delete table. 
As delete table will + // be used by DeleteKeyService only, not used for any client response + // validation, so we don't need to add to cache. + // TODO: Revisit if we need it later. + + omClientResponse = new OMKeyDeleteResponseV1(omResponse + .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(), + omKeyInfo, ozoneManager.isRatisEnabled(), omVolumeArgs, + omBucketInfo, keyStatus.isDirectory()); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = new OMKeyDeleteResponseV1( + createErrorOMResponse(omResponse, exception)); + } finally { + addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, + omDoubleBufferHelper); + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } + } + + // Performing audit logging outside of the lock. + auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap, + exception, userInfo)); + + + switch (result) { + case SUCCESS: + omMetrics.decNumKeys(); + LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName, + bucketName, keyName); + break; + case FAILURE: + omMetrics.incNumKeyDeleteFails(); + LOG.error("Key delete failed. 
Volume:{}, Bucket:{}, Key:{}.", + volumeName, bucketName, keyName, exception); + break; + default: + LOG.error("Unrecognized Result for OMKeyDeleteRequest: {}", + deleteKeyRequest); + } + + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java index 74e53fe31433..ba022c53d997 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java @@ -90,7 +90,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OMClientResponse omClientResponse = null; IOException exception = null; OmKeyInfo fromKeyValue; - String fromKey = null; Result result; try { if (toKeyName.length() == 0 || fromKeyName.length() == 0) { @@ -122,7 +121,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, // case-1) fromKeyName should exist, otw throws exception if (fromKeyFileStatus == null) { // TODO: Add support for renaming open key - throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND); + throw new OMException("Key not found " + fromKeyName, KEY_NOT_FOUND); } // source existed diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index f9c6d185f398..4700313f8028 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -82,4 +82,16 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, 
omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), omBucketInfo.getBucketName()), omBucketInfo); } + + protected OmKeyInfo getOmKeyInfo() { + return omKeyInfo; + } + + protected OmBucketInfo getOmBucketInfo() { + return omBucketInfo; + } + + protected OmVolumeArgs getOmVolumeArgs() { + return omVolumeArgs; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java new file mode 100644 index 000000000000..858532a730bd --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import javax.annotation.Nonnull; +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; + +/** + * Response for DeleteKey request. + */ +@CleanupTableInfo(cleanupTables = {FILE_TABLE, DIRECTORY_TABLE, DELETED_TABLE}) +public class OMKeyDeleteResponseV1 extends OMKeyDeleteResponse { + + private boolean isDeleteDirectory; + + public OMKeyDeleteResponseV1(@Nonnull OMResponse omResponse, + @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled, + @Nonnull OmVolumeArgs omVolumeArgs, @Nonnull OmBucketInfo omBucketInfo, + @Nonnull boolean isDeleteDirectory) { + super(omResponse, omKeyInfo, isRatisEnabled, omVolumeArgs, omBucketInfo); + this.isDeleteDirectory = isDeleteDirectory; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. 
+ */ + public OMKeyDeleteResponseV1(@Nonnull OMResponse omResponse) { + super(omResponse); + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + + // For OmResponse with failure, this should do nothing. This method is + // not called in failure scenario in OM code. + String ozoneDbKey = omMetadataManager.getOzonePathKey( + getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName()); + + if (isDeleteDirectory) { + omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation, + ozoneDbKey); + } else { + Table keyTable = omMetadataManager.getKeyTable(); + addDeletionToBatch(omMetadataManager, batchOperation, keyTable, + ozoneDbKey, getOmKeyInfo()); + } + + // update volume usedBytes. + omMetadataManager.getVolumeTable().putWithBatch(batchOperation, + omMetadataManager.getVolumeKey(getOmVolumeArgs().getVolume()), + getOmVolumeArgs()); + // update bucket usedBytes. + omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(getOmVolumeArgs().getVolume(), + getOmBucketInfo().getBucketName()), getOmBucketInfo()); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java index 776abf10cd0b..0d3bddcddc70 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java @@ -908,6 +908,9 @@ public static long addParentsToDirTable(String volumeName, String bucketName, throws Exception { long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName, omMetaMgr); + if (org.apache.commons.lang3.StringUtils.isBlank(key)) { + return bucketId; + } String[] pathComponents = StringUtils.split(key, '/'); long objectId = bucketId + 
10; long parentId = bucketId; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index b8e560308077..b5af35412d95 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -46,27 +46,23 @@ public void testPreExecute() throws Exception { @Test public void testValidateAndUpdateCache() throws Exception { - OMRequest modifiedOmRequest = - doPreExecute(createDeleteKeyRequest()); - - OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(modifiedOmRequest); - // Add volume, bucket and key entries to OM DB. TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); + String ozoneKey = addKeyToTable(); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); // As we added manually to key table. Assert.assertNotNull(omKeyInfo); + OMRequest modifiedOmRequest = + doPreExecute(createDeleteKeyRequest()); + + OMKeyDeleteRequest omKeyDeleteRequest = + getOmKeyDeleteRequest(modifiedOmRequest); + OMClientResponse omClientResponse = omKeyDeleteRequest.validateAndUpdateCache(ozoneManager, 100L, ozoneManagerDoubleBufferHelper); @@ -86,7 +82,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { doPreExecute(createDeleteKeyRequest()); OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(modifiedOmRequest); + getOmKeyDeleteRequest(modifiedOmRequest); // Add only volume and bucket entry to DB. 
// In actual implementation we don't check for bucket/volume exists @@ -108,7 +104,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { doPreExecute(createDeleteKeyRequest()); OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(modifiedOmRequest); + getOmKeyDeleteRequest(modifiedOmRequest); OMClientResponse omClientResponse = omKeyDeleteRequest.validateAndUpdateCache(ozoneManager, @@ -124,7 +120,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { doPreExecute(createDeleteKeyRequest()); OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(modifiedOmRequest); + getOmKeyDeleteRequest(modifiedOmRequest); TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); @@ -145,7 +141,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception { OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(originalOmRequest); + getOmKeyDeleteRequest(originalOmRequest); OMRequest modifiedOmRequest = omKeyDeleteRequest.preExecute(ozoneManager); @@ -170,4 +166,18 @@ private OMRequest createDeleteKeyRequest() { .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) .setClientId(UUID.randomUUID().toString()).build(); } + + protected String addKeyToTable() throws Exception { + TestOMRequestUtils.addKeyToTable(false, volumeName, + bucketName, keyName, clientID, replicationType, replicationFactor, + omMetadataManager); + + return omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + } + + protected OMKeyDeleteRequest getOmKeyDeleteRequest( + OMRequest modifiedOmRequest) { + return new OMKeyDeleteRequest(modifiedOmRequest); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java new file mode 100644 
index 000000000000..dbba1434323e --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestV1.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.util.Time; + +/** + * Tests OmKeyDelete request layout version V1. + */ +public class TestOMKeyDeleteRequestV1 extends TestOMKeyDeleteRequest { + + protected OMKeyDeleteRequest getOmKeyDeleteRequest( + OMRequest modifiedOmRequest) { + return new OMKeyDeleteRequestV1(modifiedOmRequest); + } + + protected String addKeyToTable() throws Exception { + String parentDir = "c/d/e"; + String fileName = "file1"; + String key = parentDir + "/" + fileName; + keyName = key; // updated key name + + // Create parent dirs for the path + long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName, + bucketName, parentDir, omMetadataManager); + + OmKeyInfo omKeyInfo = + TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key, + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, + parentId + 1, + parentId, 100, Time.now()); + TestOMRequestUtils.addFileToKeyTable(false, false, + fileName, omKeyInfo, -1, 50, omMetadataManager); + return omKeyInfo.getPath(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index 440fa7837ebd..469ad9eb616f 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -40,17 +40,18 @@ */ public class TestOMKeyDeleteResponse extends TestOMKeyResponse { + private OmBucketInfo omBucketInfo; + @Test public void testAddToDBBatch() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(keyName).setAdminName(keyName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); - OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() - .setVolumeName(volumeName).setBucketName(bucketName) - .setCreationTime(Time.now()).build(); + .setOwnerName(keyName).setAdminName(keyName) + .setVolume(volumeName).setCreationTime(Time.now()).build(); + omBucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName).setBucketName(bucketName) + .setCreationTime(Time.now()).build(); + + OmKeyInfo omKeyInfo = getOmKeyInfo(); OzoneManagerProtocolProtos.OMResponse omResponse = OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse( @@ -59,14 +60,10 @@ public void testAddToDBBatch() throws Exception { .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) .build(); - OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse( - omResponse, omKeyInfo, true, omVolumeArgs, omBucketInfo); + OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo, + omVolumeArgs, omResponse); - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); + String ozoneKey = addKeyToTable(); Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); 
omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); @@ -84,15 +81,14 @@ public void testAddToDBBatch() throws Exception { @Test public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(keyName).setAdminName(keyName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); - OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() - .setVolumeName(volumeName).setBucketName(bucketName) - .setCreationTime(Time.now()).build(); + .setOwnerName(keyName).setAdminName(keyName) + .setVolume(volumeName).setCreationTime(Time.now()).build(); + omBucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName).setBucketName(bucketName) + .setCreationTime(Time.now()).build(); + + OmKeyInfo omKeyInfo = getOmKeyInfo(); // Add block to key. List omKeyLocationInfoList = new ArrayList<>(); @@ -115,10 +111,7 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false); - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo); + String ozoneKey = addKeyToTable(); OzoneManagerProtocolProtos.OMResponse omResponse = OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse( @@ -127,8 +120,8 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) .build(); - OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse( - omResponse, omKeyInfo, true, omVolumeArgs, omBucketInfo); + OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo, + omVolumeArgs, omResponse); Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); omKeyDeleteResponse.addToDBBatch(omMetadataManager, 
batchOperation); @@ -146,14 +139,13 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { @Test public void testAddToDBBatchWithErrorResponse() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(keyName).setAdminName(keyName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); - OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() - .setVolumeName(volumeName).setBucketName(bucketName) - .setCreationTime(Time.now()).build(); + .setOwnerName(keyName).setAdminName(keyName) + .setVolume(volumeName).setCreationTime(Time.now()).build(); + omBucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName).setBucketName(bucketName) + .setCreationTime(Time.now()).build(); + OmKeyInfo omKeyInfo = getOmKeyInfo(); OzoneManagerProtocolProtos.OMResponse omResponse = OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse( @@ -162,14 +154,10 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) .build(); - OMKeyDeleteResponse omKeyDeleteResponse = new OMKeyDeleteResponse( - omResponse, omKeyInfo, true, omVolumeArgs, omBucketInfo); - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); + OMKeyDeleteResponse omKeyDeleteResponse = getOmKeyDeleteResponse(omKeyInfo, + omVolumeArgs, omResponse); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); + String ozoneKey = addKeyToTable(); Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); @@ -183,4 +171,24 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); } + + protected String addKeyToTable() throws Exception { + String ozoneKey = 
omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + + TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + clientID, replicationType, replicationFactor, omMetadataManager); + return ozoneKey; + } + + protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo, + OmVolumeArgs omVolumeArgs, + OzoneManagerProtocolProtos.OMResponse omResponse) { + return new OMKeyDeleteResponse(omResponse, omKeyInfo, + true, omVolumeArgs, omBucketInfo); + } + + protected OmBucketInfo getOmBucketInfo() { + return omBucketInfo; + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java new file mode 100644 index 000000000000..37be730a24da --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseV1.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.util.Time; +import org.junit.Assert; + +/** + * Tests OMKeyDeleteResponse layout version V1. + */ +public class TestOMKeyDeleteResponseV1 extends TestOMKeyDeleteResponse { + + @Override + protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo, + OmVolumeArgs omVolumeArgs, + OzoneManagerProtocolProtos.OMResponse omResponse) { + return new OMKeyDeleteResponseV1(omResponse, omKeyInfo, + true, omVolumeArgs, getOmBucketInfo(), false); + } + + @Override + protected String addKeyToTable() throws Exception { + // Add volume, bucket and key entries to OM DB. 
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + // Create parent dirs for the path + long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName, + bucketName, "", omMetadataManager); + + OmKeyInfo omKeyInfo = + TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, + parentId + 1, + parentId, 100, Time.now()); + TestOMRequestUtils.addFileToKeyTable(false, false, + keyName, omKeyInfo, -1, 50, omMetadataManager); + return omKeyInfo.getPath(); + } + + @Override + protected OmKeyInfo getOmKeyInfo() { + Assert.assertNotNull(getOmBucketInfo()); + return TestOMRequestUtils.createOmKeyInfo(volumeName, + getOmBucketInfo().getBucketName(), keyName, replicationType, + replicationFactor, + getOmBucketInfo().getObjectID() + 1, + getOmBucketInfo().getObjectID(), 100, Time.now()); + } +} diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 5ba721115344..de3b095bb413 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -255,6 +256,7 @@ public boolean createDirectory(String keyName) throws IOException { return true; } + /** * Helper method to delete an object specified by key name in bucket. 
* @@ -262,12 +264,32 @@ public boolean createDirectory(String keyName) throws IOException { * @return true if the key is deleted, false otherwise */ @Override - public boolean deleteObject(String keyName) { + public boolean deleteObject(String keyName) throws IOException { + return deleteObject(keyName, false); + } + + /** + * Helper method to delete an object specified by key name in bucket. + * + * @param keyName key name to be deleted + * @param recursive recursive deletion of all sub path keys if true, + * otherwise non-recursive + * @return true if the key is deleted, false otherwise + */ + @Override + public boolean deleteObject(String keyName, boolean recursive) + throws IOException { LOG.trace("issuing delete for key {}", keyName); try { incrementCounter(Statistic.OBJECTS_DELETED, 1); - bucket.deleteKey(keyName); + bucket.deleteDirectory(keyName, recursive); return true; + } catch (OMException ome) { + LOG.error("delete key failed {}", ome.getMessage()); + if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) { + throw new PathIsNotEmptyDirectoryException(ome.getMessage()); + } + return false; } catch (IOException ioe) { LOG.error("delete key failed {}", ioe.getMessage()); return false; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index b33144a48e89..80d099c6cfa3 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -415,7 +415,17 @@ public boolean rename(Path src, Path dst) throws IOException { } private boolean renameV1(String srcPath, String dstPath) throws IOException { - adapter.renameKey(srcPath, dstPath); + try { + adapter.renameKey(srcPath, dstPath); + } catch (OMException ome) { + LOG.error("rename key failed: {}. 
source:{}, destination:{}
* * @param path path to a key to be deleted + * @param recursive recursive deletion of all sub path keys if true, + * otherwise non-recursive * @return true if the key is deleted, false otherwise */ @Override - public boolean deleteObject(String path) { + public boolean deleteObject(String path, boolean recursive) + throws IOException { LOG.trace("issuing delete for path to key: {}", path); incrementCounter(Statistic.OBJECTS_DELETED, 1); OFSPath ofsPath = new OFSPath(path); @@ -451,14 +455,25 @@ public boolean deleteObject(String path) { } try { OzoneBucket bucket = getBucket(ofsPath, false); - bucket.deleteKey(keyName); + bucket.deleteDirectory(keyName, recursive); return true; + } catch (OMException ome) { + LOG.error("delete key failed {}", ome.getMessage()); + if (OMException.ResultCodes.DIRECTORY_NOT_EMPTY == ome.getResult()) { + throw new PathIsNotEmptyDirectoryException(ome.getMessage()); + } + return false; } catch (IOException ioe) { LOG.error("delete key failed " + ioe.getMessage()); return false; } } + @Override + public boolean deleteObject(String path) throws IOException { + return deleteObject(path, false); + } + /** * Helper function to check if the list of key paths are in the same volume * and same bucket. 
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java index be93bd6813ec..6e4446bc474d 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java @@ -51,7 +51,9 @@ OzoneFSOutputStream createFile(String key, short replication, boolean createDirectory(String keyName) throws IOException; - boolean deleteObject(String keyName); + boolean deleteObject(String keyName) throws IOException; + + boolean deleteObject(String keyName, boolean recursive) throws IOException; boolean deleteObjects(List keyName);