diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java new file mode 100644 index 0000000000000..5ca9d69d7c870 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteSkipTrashParam.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.web.resources; + +/** + * SkipTrash param to be used by DELETE query. + */ +public class DeleteSkipTrashParam extends BooleanParam { + + public static final String NAME = "skiptrash"; + public static final String DEFAULT = FALSE; + + private static final Domain DOMAIN = new Domain(NAME); + + /** + * Constructor. + * @param value the parameter value. + */ + public DeleteSkipTrashParam(final Boolean value) { + super(DOMAIN, value); + } + + /** + * Constructor. + * @param str a string representation of the parameter value. 
+ */ + public DeleteSkipTrashParam(final String str) { + this(DOMAIN.parse(str)); + } + + @Override + public String getName() { + return NAME; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index e13f44e774057..f6ccd4904f9e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -121,6 +121,7 @@ public class HttpFSFileSystem extends FileSystem public static final String ACLSPEC_PARAM = "aclspec"; public static final String DESTINATION_PARAM = "destination"; public static final String RECURSIVE_PARAM = "recursive"; + public static final String SKIP_TRASH_PARAM = "skiptrash"; public static final String SOURCES_PARAM = "sources"; public static final String OWNER_PARAM = "owner"; public static final String GROUP_PARAM = "group"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java index b2e9a8470d2c5..aa58f260466ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.Trash; import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.http.client.HttpFSFileSystem; @@ -52,6 +53,8 @@ import org.json.simple.JSONArray; import org.json.simple.JSONObject; import org.apache.hadoop.fs.permission.FsCreateModes; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.FileNotFoundException; import java.io.IOException; @@ -74,6 +77,8 @@ @InterfaceAudience.Private public final class FSOperations { + private static final Logger LOG = LoggerFactory.getLogger(FSOperations.class); + private static int bufferSize = 4096; private FSOperations() { @@ -716,18 +721,22 @@ public static long copyBytes(InputStream in, OutputStream out, long count) */ @InterfaceAudience.Private public static class FSDelete implements FileSystemAccess.FileSystemExecutor<JSONObject> { - private Path path; - private boolean recursive; + private final Path path; + private final boolean recursive; + private final boolean skipTrash; /** * Creates a Delete executor. * * @param path path to delete. * @param recursive if the delete should be recursive or not. + * @param skipTrash if the file must be deleted and not kept in trash + * regardless of fs.trash.interval config value.
*/ - public FSDelete(String path, boolean recursive) { + public FSDelete(String path, boolean recursive, boolean skipTrash) { this.path = new Path(path); this.recursive = recursive; + this.skipTrash = skipTrash; } /** @@ -742,6 +751,19 @@ public FSDelete(String path, boolean recursive) { */ @Override public JSONObject execute(FileSystem fs) throws IOException { + if (!skipTrash) { + boolean movedToTrash = Trash.moveToAppropriateTrash(fs, path, + fs.getConf()); + if (movedToTrash) { + HttpFSServerWebApp.getMetrics().incrOpsDelete(); + return toJSON( + StringUtils.toLowerCase(HttpFSFileSystem.DELETE_JSON), true); + } + // Same is the behavior with Delete shell command. + // If moveToAppropriateTrash() returns false, file deletion + // is attempted rather than throwing Error. + LOG.debug("Could not move {} to Trash, attempting removal", path); + } boolean deleted = fs.delete(path, recursive); HttpFSServerWebApp.get().getMetrics().incrOpsDelete(); return toJSON( diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java index 39180fbe4ae8a..defbdfae8e41d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java @@ -81,7 +81,8 @@ public class HttpFSParametersProvider extends ParametersProvider { new Class[]{ReplicationParam.class}); PARAMS_DEF.put(Operation.SETTIMES, new Class[]{ModifiedTimeParam.class, AccessTimeParam.class}); - PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class}); + PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class, + DeleteSkipTrashParam.class}); PARAMS_DEF.put(Operation.SETACL, new Class[]{AclPermissionParam.class}); PARAMS_DEF.put(Operation.REMOVEACL, new Class[]{}); PARAMS_DEF.put(Operation.MODIFYACLENTRIES, @@ -241,6 +242,25 @@ public RecursiveParam() { } } + /** + * Class for delete's skipTrash parameter. + */ + @InterfaceAudience.Private + public static class DeleteSkipTrashParam extends BooleanParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.SKIP_TRASH_PARAM; + + /** + * Constructor. + */ + public DeleteSkipTrashParam() { + super(NAME, false); + } + } + /** * Class for filter parameter. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index 5965f7082fa73..7243c10f96d4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AclPermissionParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam; +import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DeleteSkipTrashParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ECPolicyParam; import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam; @@ -539,9 +540,13 @@ public Response delete(@PathParam("path") String path, case DELETE: { Boolean recursive = params.get(RecursiveParam.NAME, RecursiveParam.class); - AUDIT_LOG.info("[{}] recursive [{}]", path, recursive); + Boolean skipTrashParam = params.get(DeleteSkipTrashParam.NAME, + DeleteSkipTrashParam.class); + boolean skipTrash = skipTrashParam != null && skipTrashParam; + AUDIT_LOG.info("[{}] recursive [{}] skipTrash [{}]", path, recursive, + skipTrash); FSOperations.FSDelete command = - new FSOperations.FSDelete(path, recursive); + new FSOperations.FSDelete(path, recursive, skipTrash); JSONObject json = fsExecute(user, command); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); break; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java index 2f0ef9ab23ce7..d3b5e6e159640 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.fs.http.server; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -529,6 +530,36 @@ private void createWithHttp(String filename, String perms, Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode()); } + private void deleteWithHttp(String filename, String perms, + String unmaskedPerms, Boolean skipTrash) throws Exception { + String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + // Remove leading / from filename + if (filename.charAt(0) == '/') { + filename = filename.substring(1); + } + String pathOps; + if (perms == null) { + pathOps = MessageFormat.format("/webhdfs/v1/{0}?user.name={1}&op=DELETE", + filename, user); + } else { + pathOps = MessageFormat.format( + "/webhdfs/v1/{0}?user.name={1}&permission={2}&op=DELETE", + filename, user, perms); + } + if (unmaskedPerms != null) { + pathOps = pathOps + "&unmaskedpermission=" + unmaskedPerms; + } + if (skipTrash != null) { + pathOps = pathOps + "&skiptrash=" + skipTrash; + } + URL url = new URL(TestJettyHelper.getJettyURL(), 
pathOps); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.addRequestProperty("Content-Type", "application/octet-stream"); + conn.setRequestMethod("DELETE"); + conn.connect(); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + } + /** * Talks to the http interface to create a directory. * @@ -774,6 +805,37 @@ public void testPerms() throws Exception { Assert.assertTrue("321".equals(getPerms(statusJson))); } + /** + * Validate create and delete calls. + */ + @Test + @TestDir + @TestJetty + @TestHdfs + public void testCreateDelete() throws Exception { + final String dir1 = "/testCreateDelete1"; + final String path1 = dir1 + "/file1"; + final String dir2 = "/testCreateDelete2"; + final String path2 = dir2 + "/file2"; + + createHttpFSServer(false, false); + final Configuration conf = HttpFSServerWebApp.get() + .get(FileSystemAccess.class).getFileSystemConfiguration(); + conf.setLong(FS_TRASH_INTERVAL_KEY, 5); + writeConf(conf, "hdfs-site.xml"); + + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + fs.mkdirs(new Path(dir1)); + + createWithHttp(path1, null); + deleteWithHttp(path1, null, null, null); + + fs.mkdirs(new Path(dir2)); + + createWithHttp(path2, null); + deleteWithHttp(path2, null, null, true); + } + /** * Validate XAttr get/set/remove calls. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html index 1f45f4d16c1f7..ab13e8e8f2d21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.html @@ -165,11 +165,32 @@ +
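To make the request shape used by deleteWithHttp() above concrete, here is a minimal standalone client issuing the same call outside the test harness. This is an illustration only, not part of the patch: the class name, host, port, path, and user.name are placeholders (14000 is the usual HttpFS default; the same query string also works against the NameNode's webhdfs endpoint on a security-off cluster).

```java
import java.net.HttpURLConnection;
import java.net.URL;

public class SkipTrashDeleteClient {
  public static void main(String[] args) throws Exception {
    // DELETE /webhdfs/v1/<path>?op=DELETE&recursive=true&skiptrash=true
    URL url = new URL("http://localhost:14000/webhdfs/v1/tmp/old-data"
        + "?op=DELETE&recursive=true&skiptrash=true&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("DELETE");
    conn.connect();
    // A 200 response with {"boolean": true} means the path was removed;
    // with skiptrash=false (the default) and trash enabled, the same request
    // would move the path to the caller's .Trash directory instead.
    System.out.println("HTTP " + conn.getResponseCode());
    conn.disconnect();
  }
}
```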
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js index 448e2790e30ef..829649a4f1a51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/explorer.js @@ -82,23 +82,47 @@ function delete_path(inode_name, absolute_file_path) { $('#delete-modal-title').text("Delete - " + inode_name); $('#delete-prompt').text("Are you sure you want to delete " + inode_name - + " ?"); - - $('#delete-button').click(function() { + + " ?"); + $('#delete-trash-modal-title').text("Skip Trash - " + inode_name); + $('#delete-trash-prompt').text("Skipping Trash might delete file forever." + + " Do you want to skip-trash " + inode_name + + " ? (default behaviour - No)"); + + $('#skip-trash-button').click(function () { + // DELETE /webhdfs/v1/?op=DELETE&recursive=&skiptrash=true + var url = '/webhdfs/v1' + encode_path(absolute_file_path) + + '?op=DELETE' + '&recursive=true&skiptrash=true'; + $.ajax(url, + { + type: 'DELETE' + }).done(function (data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function () { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + $('#delete-trash-modal').modal('hide'); + $('#skip-trash-button').button('reset'); + }); + }) + $('#trash-button').click(function () { // DELETE /webhdfs/v1/?op=DELETE&recursive= var url = '/webhdfs/v1' + encode_path(absolute_file_path) + - '?op=DELETE' + '&recursive=true'; - + '?op=DELETE' + '&recursive=true'; $.ajax(url, - { type: 'DELETE' - }).done(function(data) { - browse_directory(current_directory); - }).fail(network_error_handler(url) - ).always(function() { - $('#delete-modal').modal('hide'); - $('#delete-button').button('reset'); - }); + { + type: 'DELETE' + }).done(function (data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function () { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + $('#delete-trash-modal').modal('hide'); + $('#trash-button').button('reset'); + }); }) + $('#delete-modal').modal(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index c75fbe04f23d0..9a68f40f4981f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -55,8 +55,10 @@ import javax.ws.rs.core.Response.ResponseBuilder; import javax.ws.rs.core.Response.Status; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.QuotaUsage; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.Trash; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -118,6 +120,9 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.Lists; import com.sun.jersey.spi.container.ResourceFilters; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; + /** Web-hdfs NameNode implementation. 
*/ @Path("") @ResourceFilters(ParamFilter.class) @@ -1460,10 +1465,13 @@ public Response deleteRoot( @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT) final RecursiveParam recursive, @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT) - final SnapshotNameParam snapshotName + final SnapshotNameParam snapshotName, + @QueryParam(DeleteSkipTrashParam.NAME) + @DefaultValue(DeleteSkipTrashParam.DEFAULT) + final DeleteSkipTrashParam skiptrash ) throws IOException, InterruptedException { return delete(ugi, delegation, username, doAsUser, ROOT, op, recursive, - snapshotName); + snapshotName, skiptrash); } /** Handle HTTP DELETE request. */ @@ -1484,34 +1492,53 @@ public Response delete( @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT) final RecursiveParam recursive, @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT) - final SnapshotNameParam snapshotName + final SnapshotNameParam snapshotName, + @QueryParam(DeleteSkipTrashParam.NAME) + @DefaultValue(DeleteSkipTrashParam.DEFAULT) + final DeleteSkipTrashParam skiptrash ) throws IOException, InterruptedException { - init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName); + init(ugi, delegation, username, doAsUser, path, op, recursive, + snapshotName, skiptrash); - return doAs(ugi, new PrivilegedExceptionAction() { - @Override - public Response run() throws IOException { - return delete(ugi, delegation, username, doAsUser, - path.getAbsolutePath(), op, recursive, snapshotName); - } - }); + return doAs(ugi, () -> delete( + path.getAbsolutePath(), op, recursive, snapshotName, skiptrash)); } protected Response delete( - final UserGroupInformation ugi, - final DelegationParam delegation, - final UserParam username, - final DoAsParam doAsUser, final String fullpath, final DeleteOpParam op, final RecursiveParam recursive, - final SnapshotNameParam snapshotName - ) throws IOException { + final SnapshotNameParam snapshotName, + final DeleteSkipTrashParam skipTrash) throws IOException { final ClientProtocol cp = getRpcClientProtocol(); switch(op.getValue()) { case DELETE: { + Configuration conf = + (Configuration) context.getAttribute(JspHelper.CURRENT_CONF); + long trashInterval = + conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT); + if (trashInterval > 0 && !skipTrash.getValue()) { + LOG.info("{} is {} , trying to archive {} instead of removing", + FS_TRASH_INTERVAL_KEY, trashInterval, fullpath); + org.apache.hadoop.fs.Path path = + new org.apache.hadoop.fs.Path(fullpath); + Configuration clonedConf = new Configuration(conf); + // To avoid caching FS objects and prevent OOM issues + clonedConf.set("fs.hdfs.impl.disable.cache", "true"); + FileSystem fs = FileSystem.get(clonedConf); + boolean movedToTrash = Trash.moveToAppropriateTrash(fs, path, + clonedConf); + if (movedToTrash) { + final String js = JsonUtil.toJsonString("boolean", true); + return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); + } + // Same is the behavior with Delete shell command. + // If moveToAppropriateTrash() returns false, file deletion + // is attempted rather than throwing Error. 
+ LOG.debug("Could not move {} to Trash, attempting removal", fullpath); + } final boolean b = cp.delete(fullpath, recursive.getValue()); final String js = JsonUtil.toJsonString("boolean", b); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html index 7b5f355c5cc71..568419771a918 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html @@ -165,11 +165,32 @@
+
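Condensing the server-side changes: FSOperations.FSDelete and NamenodeWebHdfsMethods.delete() above follow the same trash-first pattern. The sketch below is an illustration only, not part of the patch; the class name and sample path are made up, and the Hadoop APIs used (Trash.moveToAppropriateTrash, FileSystem.delete) are the ones the patch itself calls.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class TrashFirstDeleteSketch {

  /** Trash-first delete: mirrors the decision both DELETE handlers make. */
  static boolean deletePath(FileSystem fs, Path path, boolean recursive,
      boolean skipTrash) throws IOException {
    Configuration conf = fs.getConf();
    long trashInterval = conf.getLong("fs.trash.interval", 0);
    if (!skipTrash && trashInterval > 0) {
      // true means the path was moved into the caller's .Trash directory.
      if (Trash.moveToAppropriateTrash(fs, path, conf)) {
        return true;
      }
      // Same as the delete shell command: if the move fails, fall through
      // to a plain delete rather than failing the request.
    }
    return fs.delete(path, recursive);
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.setLong("fs.trash.interval", 5); // minutes; non-zero enables trash
    FileSystem fs = FileSystem.get(conf);
    Path target = new Path("/tmp/example-to-delete"); // hypothetical path
    System.out.println("handled: " + deletePath(fs, target, true, false));
  }
}
```

One difference worth noting: the HttpFS version calls Trash.moveToAppropriateTrash() whenever skiptrash is false and relies on it returning false when trash is disabled, while the NameNode version checks fs.trash.interval first; the sketch folds the two into a single check.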
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js index 8ad7a65708f4c..c30a4bb705b84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js @@ -83,22 +83,46 @@ $('#delete-modal-title').text("Delete - " + inode_name); $('#delete-prompt').text("Are you sure you want to delete " + inode_name + " ?"); + $('#delete-trash-modal-title').text("Skip Trash - " + inode_name); + $('#delete-trash-prompt').text("Skipping Trash might delete file forever." + + " Do you want to skip-trash " + inode_name + + " ? (default behaviour - No)"); - $('#delete-button').click(function() { + $('#skip-trash-button').click(function () { + // DELETE /webhdfs/v1/?op=DELETE&recursive=&skiptrash=true + var url = '/webhdfs/v1' + encode_path(absolute_file_path) + + '?op=DELETE' + '&recursive=true&skiptrash=true'; + $.ajax(url, + { + type: 'DELETE' + }).done(function (data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function () { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + $('#delete-trash-modal').modal('hide'); + $('#skip-trash-button').button('reset'); + }); + }) + $('#trash-button').click(function () { // DELETE /webhdfs/v1/?op=DELETE&recursive= var url = '/webhdfs/v1' + encode_path(absolute_file_path) + - '?op=DELETE' + '&recursive=true'; - + '?op=DELETE' + '&recursive=true'; $.ajax(url, - { type: 'DELETE' - }).done(function(data) { - browse_directory(current_directory); - }).fail(network_error_handler(url) - ).always(function() { - $('#delete-modal').modal('hide'); - $('#delete-button').button('reset'); - }); + { + type: 'DELETE' + }).done(function (data) { + browse_directory(current_directory); + }).fail(network_error_handler(url) + ).always(function () { + $('#delete-modal').modal('hide'); + $('#delete-button').button('reset'); + $('#delete-trash-modal').modal('hide'); + $('#trash-button').button('reset'); + }); }) + $('#delete-modal').modal(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md index 203082f067a37..e575839b7d0ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md @@ -461,7 +461,11 @@ See also: [`destination`](#Destination), [FileSystem](../../api/org/apache/hadoo * Submit a HTTP DELETE request. curl -i -X DELETE "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=DELETE - [&recursive=<true |false>]" + [&recursive=<true |false>][&skiptrash=<true |false>]" + + Default values of query parameters if not provided: + 1. recursive: false + 2.
skiptrash: false The client receives a response with a [`boolean` JSON object](#Boolean_JSON_Schema): diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index 27a69855ecd4c..4ffad676b2b16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.web; +import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER; +import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; @@ -58,6 +60,8 @@ import java.util.NoSuchElementException; import java.util.Random; +import org.apache.hadoop.hdfs.web.resources.DeleteSkipTrashParam; +import org.apache.hadoop.hdfs.web.resources.RecursiveParam; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.QuotaUsage; @@ -1494,8 +1498,12 @@ private void checkResponseContainsLocation(URL url, String TYPE) HttpURLConnection.HTTP_OK, conn.getResponseCode()); JSONObject responseJson = new JSONObject(response); - Assert.assertTrue("Response didn't give us a location. " + response, - responseJson.has("Location")); + if (!TYPE.equals("DELETE")) { + Assert.assertTrue("Response didn't give us a location. 
" + response, + responseJson.has("Location")); + } else { + Assert.assertTrue(responseJson.getBoolean("boolean")); + } //Test that the DN allows CORS on Create if(TYPE.equals("CREATE")) { @@ -1507,14 +1515,15 @@ private void checkResponseContainsLocation(URL url, String TYPE) } } - @Test /** * Test that when "&noredirect=true" is added to operations CREATE, APPEND, * OPEN, and GETFILECHECKSUM the response (which is usually a 307 temporary * redirect) is a 200 with JSON that contains the redirected location */ + @Test public void testWebHdfsNoRedirect() throws Exception { final Configuration conf = WebHdfsTestUtil.createConf(); + conf.setLong(FS_TRASH_INTERVAL_KEY, 5); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); LOG.info("Started cluster"); InetSocketAddress addr = cluster.getNameNode().getHttpAddress(); @@ -1553,6 +1562,26 @@ public void testWebHdfsNoRedirect() throws Exception { + Param.toSortedString("&", new NoRedirectParam(true))); LOG.info("Sending append request " + url); checkResponseContainsLocation(url, "POST"); + + // setup some permission to allow moving file to .Trash location + cluster.getFileSystem().setPermission(new Path("/testWebHdfsNoRedirect"), + new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); + Path userDir = new Path(FileSystem.USER_HOME_PREFIX); + Path trashDir = new Path(FileSystem.USER_HOME_PREFIX, DEFAULT_HADOOP_HTTP_STATIC_USER); + Path trashPath = new Path(FileSystem.USER_HOME_PREFIX, + new Path(DEFAULT_HADOOP_HTTP_STATIC_USER, FileSystem.TRASH_PREFIX)); + cluster.getFileSystem().mkdirs(userDir, FsPermission.getDirDefault()); + cluster.getFileSystem().mkdir(trashDir, FsPermission.getDirDefault()); + cluster.getFileSystem().mkdir(trashPath, FsPermission.getDirDefault()); + cluster.getFileSystem().setOwner(trashPath, DEFAULT_HADOOP_HTTP_STATIC_USER, HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT); + cluster.getFileSystem().setPermission(new Path("/"), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); + + url = new URL("http", addr.getHostString(), addr.getPort(), + WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect" + "?op=DELETE" + + Param.toSortedString("&", new RecursiveParam(true)) + + Param.toSortedString("&", new DeleteSkipTrashParam(false))); + LOG.info("Sending append request " + url); + checkResponseContainsLocation(url, "DELETE"); } @Test