4 changes: 4 additions & 0 deletions hadoop-hdds/docs/content/design/ofs.md
@@ -155,6 +155,10 @@ This feature wouldn't degrade server performance as the loop is on the client.
Think of it as a client issuing multiple requests to the server to get all the
information.
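As an aside (not part of this change), here is a minimal sketch of the kind of client-side loop described above, assuming the Ozone client `ObjectStore` API (`OzoneClientFactory.getRpcClient`, `listVolumes`, `listBuckets`) and a reachable cluster; the aggregation happens entirely in the client, one listing request at a time:

```java
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;

/** Sketch only: enumerate every volume/bucket pair by looping on the client. */
public final class ListAllBucketsSketch {
  public static void main(String[] args) throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      ObjectStore store = client.getObjectStore();
      // One request for the volume listing, then one request per volume.
      Iterator<? extends OzoneVolume> volumes = store.listVolumes("");
      while (volumes.hasNext()) {
        OzoneVolume volume = volumes.next();
        Iterator<? extends OzoneBucket> buckets = volume.listBuckets("");
        while (buckets.hasNext()) {
          System.out.println(volume.getName() + "/" + buckets.next().getName());
        }
      }
    }
  }
}
```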

# Special note

Trash is intentionally disabled even if `fs.trash.interval` is set. (HDDS-3982)
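For illustration only (not part of this patch), here is a minimal client-side sketch of what this note means, using a hypothetical key `ofs://om/volume1/bucket1/key1`: even with `fs.trash.interval` set, moving the key to trash removes it immediately and nothing is retained under `.Trash`.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

/** Sketch only: assumes a running Ozone cluster and an existing volume/bucket. */
public final class TrashDisabledSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("fs.trash.interval", 1);  // trash looks enabled in the config
    // Depending on the client setup, fs.ofs.impl may also need to point at
    // RootedOzoneFileSystem (see the workaround in the tests below).

    // Hypothetical path; substitute a real OM address, volume, bucket and key.
    Path path = new Path("ofs://om/volume1/bucket1/key1");
    FileSystem fs = FileSystem.get(path.toUri(), conf);

    try (FSDataOutputStream out = fs.create(path)) {
      out.write(1);
    }

    // TrashPolicyDefault asks the filesystem to rename the key into .Trash;
    // with HDDS-3982 the overridden rename deletes the key instead.
    new Trash(fs, conf).moveToTrash(path);

    System.out.println("key still exists? " + fs.exists(path));  // prints false
  }
}
```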

# Link

Design doc is uploaded to the JIRA HDDS-2665:
7 changes: 6 additions & 1 deletion hadoop-hdds/docs/content/design/trash.md
@@ -22,4 +22,9 @@ author: Matthew Sharp

The design doc is uploaded to the JIRA:

https://issues.apache.org/jira/secure/attachment/12985273/Ozone_Trash_Feature.docx

## Special note

Trash is intentionally disabled for both o3fs and ofs even if `fs.trash.interval`
is set. (HDDS-3982)
3 changes: 3 additions & 0 deletions hadoop-hdds/docs/content/interface/OzoneFS.md
@@ -165,3 +165,6 @@ hdfs dfs -put /etc/hosts /volume1/bucket1/test

For more usage, see: https://issues.apache.org/jira/secure/attachment/12987636/Design%20ofs%20v1.pdf

## Special note

Trash is intentionally disabled even if `fs.trash.interval` is set. (HDDS-3982)
@@ -33,6 +33,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -47,6 +48,7 @@

import org.apache.commons.io.IOUtils;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -86,6 +88,7 @@ public class TestOzoneFileSystem {
private String volumeName;
private String bucketName;
private int rootItemCount;
private Trash trash;

@Test(timeout = 300_000)
public void testCreateFileShouldCheckExistenceOfDirWithSameName()
@@ -167,6 +170,8 @@ public void testFileSystem() throws Exception {
testOzoneFsServiceLoader();
o3fs = (OzoneFileSystem) fs;

testRenameToTrashDisabled();

testGetTrashRoots();
testGetTrashRoot();
testGetDirectoryModificationTime();
@@ -197,6 +202,7 @@ public void tearDown() {
private void setupOzoneFileSystem()
throws IOException, TimeoutException, InterruptedException {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.build();
@@ -215,6 +221,7 @@ private void setupOzoneFileSystem()
// Set the number of keys to be processed during batch operate.
conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
fs = FileSystem.get(conf);
trash = new Trash(conf);
}

private void testOzoneFsServiceLoader() throws IOException {
@@ -617,4 +624,35 @@ public void testGetTrashRoots() throws IOException {
// Clean up
o3fs.delete(trashRoot, true);
}

/**
* Check that no file is actually moved to trash, since the move to trash is
* disabled by the overridden fs.rename(src, dst, options).
*/
public void testRenameToTrashDisabled() throws IOException {
// Create a file
String testKeyName = "testKey1";
Path path = new Path(OZONE_URI_DELIMITER, testKeyName);
try (FSDataOutputStream stream = fs.create(path)) {
stream.write(1);
}

// Call moveToTrash. We can't call protected fs.rename() directly
trash.moveToTrash(path);

// Construct paths
String username = UserGroupInformation.getCurrentUser().getShortUserName();
Path trashRoot = new Path(OZONE_URI_DELIMITER, TRASH_PREFIX);
Path userTrash = new Path(trashRoot, username);
Path userTrashCurrent = new Path(userTrash, "Current");
Path trashPath = new Path(userTrashCurrent, testKeyName);

// Trash Current directory should still have been created.
Assert.assertTrue(o3fs.exists(userTrashCurrent));
// The key should not be under trash; it should have been deleted instead
Assert.assertFalse(o3fs.exists(trashPath));

// Cleanup
o3fs.delete(trashRoot, true);
}
}
@@ -26,6 +26,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -64,6 +65,7 @@
import java.util.TreeSet;
import java.util.stream.Collectors;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
@@ -87,6 +89,7 @@ public class TestRootedOzoneFileSystem {
private RootedOzoneFileSystem ofs;
private ObjectStore objectStore;
private static BasicRootedOzoneClientAdapterImpl adapter;
private Trash trash;

private String volumeName;
private Path volumePath;
@@ -98,6 +101,7 @@ public class TestRootedOzoneFileSystem {
@Before
public void init() throws Exception {
conf = new OzoneConfiguration();
conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.build();
@@ -122,6 +126,7 @@ public void init() throws Exception {
// hence this workaround.
conf.set("fs.ofs.impl", "org.apache.hadoop.fs.ozone.RootedOzoneFileSystem");
fs = FileSystem.get(conf);
trash = new Trash(conf);
ofs = (RootedOzoneFileSystem) fs;
adapter = (BasicRootedOzoneClientAdapterImpl) ofs.getAdapter();
}
@@ -999,4 +1004,36 @@ public void testGetTrashRoots() throws IOException {
Assert.assertTrue(volume1.setOwner(prevOwner));
}

/**
* Check that no file is actually moved to trash, since the move to trash is
* disabled by the overridden fs.rename(src, dst, options).
*/
@Test
public void testRenameToTrashDisabled() throws IOException {
// Create a file
String testKeyName = "testKey1";
Path path = new Path(bucketPath, testKeyName);
try (FSDataOutputStream stream = fs.create(path)) {
stream.write(1);
}

// Call moveToTrash. We can't call protected fs.rename() directly
trash.moveToTrash(path);

// Construct paths
String username = UserGroupInformation.getCurrentUser().getShortUserName();
Path trashRoot = new Path(bucketPath, TRASH_PREFIX);
Path userTrash = new Path(trashRoot, username);
Path userTrashCurrent = new Path(userTrash, "Current");
Path trashPath = new Path(userTrashCurrent, testKeyName);

// Trash Current directory should still have been created.
Assert.assertTrue(ofs.exists(userTrashCurrent));
// The key should not be under trash; it should have been deleted instead
Assert.assertFalse(ofs.exists(trashPath));

// Cleanup
ofs.delete(trashRoot, true);
}

}
@@ -38,6 +38,7 @@
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
@@ -468,6 +469,7 @@ private OzoneConfiguration getClientConfForOFS(
}

@Test
@Ignore("HDDS-3982. Disable moveToTrash in o3fs and ofs temporarily")
public void testDeleteToTrashOrSkipTrash() throws Exception {
final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId;
OzoneConfiguration clientConf = getClientConfForOFS(hostPrefix, conf);
@@ -29,6 +29,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -397,6 +398,34 @@ public boolean rename(Path src, Path dst) throws IOException {
return result;
}

/**
* Intercept rename-to-trash calls from TrashPolicyDefault and convert them
* to delete calls instead. TrashPolicyDefault moves files to trash via
* rename(src, dst, Rename.TO_TRASH), so overriding this method effectively
* disables trash for o3fs.
*/
@Deprecated
protected void rename(final Path src, final Path dst,
final Rename... options) throws IOException {
boolean hasMoveToTrash = false;
if (options != null) {
for (Rename option : options) {
if (option == Rename.TO_TRASH) {
hasMoveToTrash = true;
break;
}
}
}
if (!hasMoveToTrash) {
// if the TO_TRASH option is absent, just pass the call to super
super.rename(src, dst, options);
} else {
// intercept when TO_TRASH is found
LOG.info("Move to trash is disabled for o3fs, deleting instead: {}. "
+ "Files or directories will NOT be retained in trash. "
+ "Ignore the following TrashPolicyDefault message, if any.", src);
delete(src, true);
}
}

private class DeleteIterator extends OzoneListingIterator {
private boolean recursive;

@@ -29,6 +29,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.permission.FsPermission;
@@ -372,6 +373,34 @@ public boolean rename(Path src, Path dst) throws IOException {
return result;
}

/**
* Intercept rename-to-trash calls from TrashPolicyDefault and convert them
* to delete calls instead. TrashPolicyDefault moves files to trash via
* rename(src, dst, Options.Rename.TO_TRASH), so overriding this method
* effectively disables trash for ofs.
*/
@Deprecated
protected void rename(final Path src, final Path dst,
final Options.Rename... options) throws IOException {
boolean hasMoveToTrash = false;
if (options != null) {
for (Options.Rename option : options) {
if (option == Options.Rename.TO_TRASH) {
hasMoveToTrash = true;
break;
}
}
}
if (!hasMoveToTrash) {
// if the TO_TRASH option is absent, just pass the call to super
super.rename(src, dst, options);
} else {
// intercept when TO_TRASH is found
LOG.info("Move to trash is disabled for ofs, deleting instead: {}. "
+ "Files or directories will NOT be retained in trash. "
+ "Ignore the following TrashPolicyDefault message, if any.", src);
Reviewer comment (Contributor): One last thing: currently we delete files straight away. Users coming from the Hadoop shell may assume that files are moved to trash when no -skipTrash flag is given, but we delete them instead of trashing them.
A user may only realize this after seeing the log message, and by then it has already happened.
There should be a way to call this behavior out to users. Documenting it is one way; is there a better one?

delete(src, true);
}
}

private class DeleteIterator extends OzoneListingIterator {
final private boolean recursive;
private final OzoneBucket bucket;