DistributedFileSystem.java
@@ -2032,6 +2032,19 @@ public boolean isInSafeMode() throws IOException {
return setSafeMode(SafeModeAction.SAFEMODE_GET, true);
}

/**
* HDFS only.
*
* Returns whether the NameNode has enabled the snapshot trash root
* configuration dfs.namenode.snapshot.trashroot.enabled.
* @return true if the NameNode has enabled the snapshot trash root
* @throws IOException
* when there is an issue communicating with the NameNode
*/
public boolean isSnapshotTrashRootEnabled() throws IOException {
return dfs.isSnapshotTrashRootEnabled();
}

/** @see org.apache.hadoop.hdfs.client.HdfsAdmin#allowSnapshot(Path) */
public void allowSnapshot(final Path path) throws IOException {
statistics.incrementWriteOps(1);
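For context, a minimal client-side sketch of the new query method (illustrative only, not part of this patch; the class name SnapshotTrashRootCheck and the default-configuration setup are assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotTrashRootCheck {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS in the loaded configuration points at the target cluster.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      // Asks the NameNode whether dfs.namenode.snapshot.trashroot.enabled is on.
      boolean enabled = ((DistributedFileSystem) fs).isSnapshotTrashRootEnabled();
      System.out.println("Snapshot trash root enabled: " + enabled);
    }
  }
}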
@@ -2068,12 +2081,7 @@ public void disallowSnapshot(final Path path) throws IOException {
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p) throws IOException {
String ssTrashRoot =
new Path(p, FileSystem.TRASH_PREFIX).toUri().getPath();
if (dfs.exists(ssTrashRoot)) {
throw new IOException("Found trash root under path " + p + ". "
+ "Please remove or move the trash root and then try again.");
}
checkTrashRootAndRemoveIfEmpty(p);
dfs.disallowSnapshot(getPathName(p));
return null;
}
@@ -2083,6 +2091,7 @@ public Void next(final FileSystem fs, final Path p)
throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem)fs;
myDfs.checkTrashRootAndRemoveIfEmpty(p);
myDfs.disallowSnapshot(p);
} else {
throw new UnsupportedOperationException("Cannot perform snapshot"
@@ -2094,6 +2103,41 @@ public Void next(final FileSystem fs, final Path p)
}.resolve(this, absF);
}

/**
* Helper function to check if a trash root exists in the given directory,
* remove the trash root if it is empty, or throw an IOException if it is
* not empty.
* @param p Path to a directory.
* @throws IOException if the trash root exists and is not empty.
*/
private void checkTrashRootAndRemoveIfEmpty(final Path p) throws IOException {
Path trashRoot = new Path(p, FileSystem.TRASH_PREFIX);
try {
// listStatus has 4 possible outcomes here:
// 1) throws FileNotFoundException: the trash root doesn't exist.
// 2) returns empty array: the trash path is an empty directory.
// 3) returns non-empty array, len >= 2: the trash root is not empty.
// 4) returns non-empty array, len == 1:
//    i) if the element's path equals the trash root path, the trash root
//       itself is a file, e.g. a file named .Trash. Ignore.
//    ii) otherwise, the trash root is not empty.
FileStatus[] fileStatuses = listStatus(trashRoot);
if (fileStatuses.length == 0) {
DFSClient.LOG.debug("Removing empty trash root {}", trashRoot);
delete(trashRoot, false);
} else {
if (fileStatuses.length == 1
&& !fileStatuses[0].isDirectory()
&& fileStatuses[0].getPath().toUri().getPath().equals(
    trashRoot.toUri().getPath())) {
// Ignore the trash path because it is not a directory.
DFSClient.LOG.warn("{} is not a directory.", trashRoot);
} else {
throw new IOException("Found non-empty trash root at " +
trashRoot + ". Rename or delete it, then try again.");
}
}
} catch (FileNotFoundException ignored) {
}
}

@Override
public Path createSnapshot(final Path path, final String snapshotName)
throws IOException {
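For reviewers, a rough caller-side sketch of the new disallowSnapshot behavior (illustrative only, not part of this patch; the dfs handle and the path /snapdir are placeholders): an empty trash root is now removed automatically, while a non-empty one fails the call.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DisallowSnapshotSketch {
  // dfs is an already-opened client handle; /snapdir is a placeholder path.
  static void disallowWithTrashCleanup(DistributedFileSystem dfs) throws IOException {
    Path snapDir = new Path("/snapdir");
    // After this change an empty /snapdir/.Trash is deleted automatically here,
    // while a non-empty one fails with
    // "Found non-empty trash root at ... Rename or delete it, then try again."
    dfs.disallowSnapshot(snapDir);
  }
}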
@@ -2901,6 +2945,80 @@ private void provisionEZTrash(String path, FsPermission trashPermission)
setPermission(trashPath, trashPermission);
}

/**
* HDFS only.
*
* Provision a trash directory for the given snapshottable directory.
* @param path Path to a snapshottable directory.
* @param trashPermission Expected FsPermission of the trash root.
* @return Path of the provisioned trash root.
* @throws IOException if the trash directory cannot be created.
*/
public Path provisionSnapshotTrash(final Path path,
final FsPermission trashPermission) throws IOException {
Path absF = fixRelativePart(path);
return new FileSystemLinkResolver<Path>() {
@Override
public Path doCall(Path p) throws IOException {
return provisionSnapshotTrash(getPathName(p), trashPermission);
}

@Override
public Path next(FileSystem fs, Path p) throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem)fs;
return myDfs.provisionSnapshotTrash(p, trashPermission);
}
throw new UnsupportedOperationException(
"Cannot provisionSnapshotTrash through a symlink to" +
" a non-DistributedFileSystem: " + fs + " -> " + p);
}
}.resolve(this, absF);
}

private Path provisionSnapshotTrash(
String pathStr, FsPermission trashPermission) throws IOException {
Path path = new Path(pathStr);
// Given path must be a snapshottable directory
FileStatus fileStatus = getFileStatus(path);
if (!fileStatus.isSnapshotEnabled()) {
throw new IllegalArgumentException(
path + " is not a snapshottable directory.");
}

// Check if trash root already exists
Path trashPath = new Path(path, FileSystem.TRASH_PREFIX);
try {
FileStatus trashFileStatus = getFileStatus(trashPath);
String errMessage = "Can't provision trash for snapshottable directory " +
pathStr + " because trash path " + trashPath.toString() +
" already exists.";
if (!trashFileStatus.isDirectory()) {
errMessage += "\r\n" +
"WARNING: " + trashPath.toString() + " is not a directory.";
}
if (!trashFileStatus.getPermission().equals(trashPermission)) {
errMessage += "\r\n" +
"WARNING: Permission of " + trashPath.toString() +
" differs from provided permission " + trashPermission;
}
throw new FileAlreadyExistsException(errMessage);
} catch (FileNotFoundException ignored) {
// Trash path doesn't exist. Continue
}

// Create trash root and set the permission
mkdir(trashPath, trashPermission);
setPermission(trashPath, trashPermission);

// Print a warning if snapshot trash root feature is not enabled
if (!isSnapshotTrashRootEnabled()) {
DFSClient.LOG.warn("New trash is provisioned, but the snapshot trash root"
+ " feature is disabled. This new trash but won't be automatically"
+ " utilized unless the feature is enabled on the NameNode.");
}
return trashPath;
}

@Override
public void setXAttr(Path path, final String name, final byte[] value,
final EnumSet<XAttrSetFlag> flag) throws IOException {
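For context, a hedged sketch of calling the new provisioning API directly (illustrative only, not part of this patch; the dfs handle and the path /snapdir are placeholders). Per the method above, a non-snapshottable directory is rejected with IllegalArgumentException and an existing trash root with FileAlreadyExistsException.

import java.io.IOException;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ProvisionSnapshotTrashSketch {
  // dfs is an already-opened client handle; /snapdir is a placeholder path.
  static void provision(DistributedFileSystem dfs) throws IOException {
    // Same 1777 sticky-bit permission that HdfsAdmin.TRASH_PERMISSION uses.
    FsPermission trashPerm =
        new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
    try {
      // Throws IllegalArgumentException if /snapdir is not snapshottable and
      // FileAlreadyExistsException if /snapdir/.Trash already exists.
      Path trashRoot = dfs.provisionSnapshotTrash(new Path("/snapdir"), trashPerm);
      System.out.println("Provisioned trash root: " + trashRoot);
    } catch (FileAlreadyExistsException e) {
      System.err.println("Trash root already present: " + e.getMessage());
    }
  }
}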
@@ -3124,7 +3242,7 @@ public Void next(FileSystem fs, Path p) throws IOException {
}

/**
* Get erasure coding policy information for the specified path
* Get erasure coding policy information for the specified path.
*
* @param path The path of the file or directory
* @return Returns the policy information if file or directory on the path
ViewDistributedFileSystem.java
@@ -1677,6 +1677,20 @@ public void provisionEZTrash(final Path path,
.provisionEZTrash(mountPathInfo.getPathOnTarget(), trashPermission);
}

@Override
public Path provisionSnapshotTrash(final Path path,
final FsPermission trashPermission) throws IOException {
if (this.vfs == null) {
return super.provisionSnapshotTrash(path, trashPermission);
}
ViewFileSystemOverloadScheme.MountPathInfo<FileSystem> mountPathInfo =
this.vfs.getMountPathInfo(path, getConf());
checkDFS(mountPathInfo.getTargetFs(), "provisionSnapshotTrash");
return ((DistributedFileSystem) mountPathInfo.getTargetFs())
.provisionSnapshotTrash(mountPathInfo.getPathOnTarget(),
trashPermission);
}

@Override
public void setXAttr(Path path, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
HdfsAdmin.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
@@ -67,8 +68,8 @@
@InterfaceStability.Evolving
public class HdfsAdmin {

private DistributedFileSystem dfs;
private static final FsPermission TRASH_PERMISSION = new FsPermission(
private final DistributedFileSystem dfs;
public static final FsPermission TRASH_PERMISSION = new FsPermission(
FsAction.ALL, FsAction.ALL, FsAction.ALL, true);

/**
@@ -80,6 +81,10 @@ public class HdfsAdmin {
*/
public HdfsAdmin(URI uri, Configuration conf) throws IOException {
FileSystem fs = FileSystem.get(uri, conf);
if (fs instanceof ViewFileSystemOverloadScheme) {
fs = ((ViewFileSystemOverloadScheme) fs)
.getRawFileSystem(new Path(FileSystem.getDefaultUri(conf)), conf);
}
if (!(fs instanceof DistributedFileSystem)) {
throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
} else {
@@ -165,6 +170,20 @@ public void clearQuotaByStorageType(Path src, StorageType type) throws IOExcepti
*/
public void allowSnapshot(Path path) throws IOException {
dfs.allowSnapshot(path);
if (dfs.isSnapshotTrashRootEnabled()) {
dfs.provisionSnapshotTrash(path, TRASH_PERMISSION);
}
}

/**
* Provision a trash directory for a given snapshottable directory.
* @param path the root of the snapshottable directory
* @return Path of the provisioned trash root.
* @throws IOException if the trash directory cannot be created.
*/
public Path provisionSnapshotTrash(Path path)
throws IOException {
return dfs.provisionSnapshotTrash(path, TRASH_PERMISSION);
}

/**
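For context, a short end-to-end sketch of the admin flow after this change (illustrative only, not part of this patch; the NameNode URI and directory names are placeholders):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class SnapshotTrashAdminSketch {
  public static void main(String[] args) throws Exception {
    HdfsAdmin admin =
        new HdfsAdmin(URI.create("hdfs://nn1:8020"), new Configuration());

    // Marks /snapdir snapshottable; when dfs.namenode.snapshot.trashroot.enabled
    // is true on the NameNode, this now also provisions /snapdir/.Trash with
    // TRASH_PERMISSION.
    admin.allowSnapshot(new Path("/snapdir"));

    // For a directory that was already snapshottable before the feature was
    // turned on, the trash root can be provisioned explicitly.
    Path trashRoot = admin.provisionSnapshotTrash(new Path("/oldSnapDir"));
    System.out.println("Trash root: " + trashRoot);
  }
}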
DFSAdmin.java
@@ -40,6 +40,7 @@

import com.google.common.base.Joiner;

import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -460,6 +461,7 @@ static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOExcep
"\t[-fetchImage <local directory>]\n" +
"\t[-allowSnapshot <snapshotDir>]\n" +
"\t[-disallowSnapshot <snapshotDir>]\n" +
"\t[-provisionSnapshotTrash <snapshotDir>]\n" +
"\t[-shutdownDatanode <datanode_host:ipc_port> [upgrade]]\n" +
"\t[-evictWriters <datanode_host:ipc_port>]\n" +
"\t[-getDatanodeInfo <datanode_host:ipc_port>]\n" +
@@ -765,9 +767,9 @@ public int triggerBlockReport(String[] argv) throws IOException {
*/
public void allowSnapshot(String[] argv) throws IOException {
Path p = new Path(argv[1]);
final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
final HdfsAdmin admin = new HdfsAdmin(p.toUri(), getConf());
try {
dfs.allowSnapshot(p);
admin.allowSnapshot(p);
} catch (SnapshotException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
@@ -782,14 +784,33 @@ public void allowSnapshot(String[] argv) throws IOException {
*/
public void disallowSnapshot(String[] argv) throws IOException {
Path p = new Path(argv[1]);
final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
final HdfsAdmin admin = new HdfsAdmin(p.toUri(), getConf());
try {
dfs.disallowSnapshot(p);
admin.disallowSnapshot(p);
} catch (SnapshotException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
System.out.println("Disallowing snapshot on " + argv[1] + " succeeded");
}

/**
* Provision trash root in a snapshottable directory.
* Usage: hdfs dfsadmin -provisionSnapshotTrash snapshotDir
* @param argv List of command line parameters.
* @exception IOException
*/
public void provisionSnapshotTrash(String[] argv) throws IOException {
Path p = new Path(argv[1]);
final HdfsAdmin admin = new HdfsAdmin(p.toUri(), getConf());
Path trashRoot;
try {
trashRoot = admin.provisionSnapshotTrash(p);
} catch (SnapshotException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
System.out.println("Successfully provisioned snapshot trash at " +
trashRoot);
}

/**
* Command to ask the namenode to save the namespace.
@@ -1245,6 +1266,10 @@ private void printHelp(String cmd) {
String disallowSnapshot = "-disallowSnapshot <snapshotDir>:\n" +
"\tDo not allow snapshots to be taken on a directory any more.\n";

String provisionSnapshotTrash = "-provisionSnapshotTrash <snapshotDir>:\n" +
"\tProvision trash root in a snapshottable directory with permission"
+ "\t" + HdfsAdmin.TRASH_PERMISSION + ".\n";

String shutdownDatanode = "-shutdownDatanode <datanode_host:ipc_port> [upgrade]\n"
+ "\tSubmit a shutdown request for the given datanode. If an optional\n"
+ "\t\"upgrade\" argument is specified, clients accessing the datanode\n"
@@ -1334,6 +1359,8 @@ private void printHelp(String cmd) {
System.out.println(allowSnapshot);
} else if ("disallowSnapshot".equalsIgnoreCase(cmd)) {
System.out.println(disallowSnapshot);
} else if ("provisionSnapshotTrash".equalsIgnoreCase(cmd)) {
System.out.println(provisionSnapshotTrash);
} else if ("shutdownDatanode".equalsIgnoreCase(cmd)) {
System.out.println(shutdownDatanode);
} else if ("evictWriters".equalsIgnoreCase(cmd)) {
@@ -1376,6 +1403,7 @@ private void printHelp(String cmd) {
System.out.println(fetchImage);
System.out.println(allowSnapshot);
System.out.println(disallowSnapshot);
System.out.println(provisionSnapshotTrash);
System.out.println(shutdownDatanode);
System.out.println(evictWriters);
System.out.println(getDatanodeInfo);
@@ -2085,6 +2113,9 @@ private static void printUsage(String cmd) {
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-disallowSnapshot <snapshotDir>]");
} else if ("-provisionSnapshotTrash".equalsIgnoreCase(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-provisionSnapshotTrash <snapshotDir>]");
} else if ("-saveNamespace".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-saveNamespace [-beforeShutdown]]");
@@ -2218,6 +2249,11 @@ public int run(String[] argv) {
printUsage(cmd);
return exitCode;
}
} else if ("-provisionSnapshotTrash".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-report".equals(cmd)) {
if (argv.length > 6) {
printUsage(cmd);
@@ -2354,6 +2390,8 @@ public int run(String[] argv) {
allowSnapshot(argv);
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
disallowSnapshot(argv);
} else if ("-provisionSnapshotTrash".equalsIgnoreCase(cmd)) {
provisionSnapshotTrash(argv);
} else if ("-saveNamespace".equals(cmd)) {
exitCode = saveNamespace(argv);
} else if ("-rollEdits".equals(cmd)) {
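For completeness, the corresponding dfsadmin invocations (paths are placeholders; -allowSnapshot and -disallowSnapshot already exist, while -provisionSnapshotTrash is added by this change):

hdfs dfsadmin -allowSnapshot /snapdir
hdfs dfsadmin -provisionSnapshotTrash /snapdir
hdfs dfsadmin -disallowSnapshot /snapdir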