HDFS-16008. RBF: Tool to initialize ViewFS Mapping to Router #2981
@@ -19,6 +19,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;

@@ -34,6 +35,10 @@
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.viewfs.Constants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

@@ -131,8 +136,8 @@ private String getUsage(String cmd) {
    String[] commands =
        {"-add", "-update", "-rm", "-ls", "-getDestination", "-setQuota",
            "-setStorageTypeQuota", "-clrQuota", "-clrStorageTypeQuota",
-           "-safemode", "-nameservice", "-getDisabledNameservices",
-           "-refresh", "-refreshRouterArgs",
+           "-initViewFsToMountTable", "-safemode", "-nameservice",
+           "-getDisabledNameservices", "-refresh", "-refreshRouterArgs",
            "-refreshSuperUserGroupsConfiguration"};
    StringBuilder usage = new StringBuilder();
    usage.append("Usage: hdfs dfsrouteradmin :\n");

@@ -171,7 +176,9 @@ private String getUsage(String cmd) {
      return "\t[-clrQuota <path>]";
    } else if (cmd.equals("-clrStorageTypeQuota")) {
      return "\t[-clrStorageTypeQuota <path>]";
-   } else if (cmd.equals("-safemode")) {
+   } else if (cmd.equals("-initViewFsToMountTable")) {
+     return "\t[-initViewFsToMountTable <clusterName>]";
+   } else if (cmd.equals("-safemode")) {
      return "\t[-safemode enter | leave | get]";
    } else if (cmd.equals("-nameservice")) {
      return "\t[-nameservice enable | disable <nameservice>]";

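For reference (this invocation is not shown in the diff itself), the new option would be run with the ViewFs cluster name as its only argument, for example hdfs dfsrouteradmin -initViewFsToMountTable ClusterX, where ClusterX is the name used in the fs.viewfs.mounttable.ClusterX.* configuration keys.
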
@@ -242,6 +249,10 @@ private boolean validateMin(String[] argv) {
      if (argv.length < 2) {
        return false;
      }
+   } else if ("-initViewFsToMountTable".equals(cmd)) {
+     if (argv.length < 2) {
+       return false;
+     }
    } else if ("-getDestination".equals(cmd)) {
      if (argv.length < 2) {
        return false;

@@ -384,6 +395,13 @@ public int run(String[] argv) throws Exception {
        getDisabledNameservices();
      } else if ("-refresh".equals(cmd)) {
        refresh(address);
+     } else if ("-initViewFsToMountTable".equals(cmd)) {
+       if (initViewFsToMountTable(argv[i])) {
+         System.out.println(
+             "Successfully initialized ViewFs mapping to Router for " + argv[i]);
+       } else {
+         exitCode = -1;
+       }
      } else if ("-refreshRouterArgs".equals(cmd)) {
        exitCode = genericRefresh(argv, i);
      } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {

@@ -1036,6 +1054,74 @@ private boolean updateQuota(String mount, long nsQuota, long ssQuota)
    return updateResponse.getStatus();
  }

+  /**
+   * Initialize the ViewFs mount points of the specified cluster into the
+   * Router mount table.
+   * @param clusterName The specified cluster to initialize.
+   * @return True if the ViewFs mappings were initialized.
+   * @throws IOException If a mount point cannot be added.
+   */
+  public boolean initViewFsToMountTable(String clusterName)
+      throws IOException {
+    // fs.viewfs.mounttable.ClusterX.link./data
+    final String mountTablePrefix =
+        Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + "." +
+        Constants.CONFIG_VIEWFS_LINK + ".";
+    final String rootPath = "/";
+    Map<String, String> viewFsMap = getConf().getValByRegex(
+        mountTablePrefix + rootPath);
+    if (viewFsMap.isEmpty()) {
+      System.out.println("There is no ViewFs mapping to initialize.");
+      return true;
+    }
+    for (Entry<String, String> entry : viewFsMap.entrySet()) {
+      Path path = new Path(entry.getValue());
+      URI destUri = path.toUri();
+      String mountKey = entry.getKey();
+      DestinationOrder order = DestinationOrder.HASH;
+      String mount = mountKey.replaceAll(mountTablePrefix, "");
+      if (!"hdfs".equals(destUri.getScheme())) {
+        System.out.println("Only HDFS is supported, adding mount point "
+            + "failed: " + mountKey);
+        continue;
+      }
+      if (!mount.startsWith(rootPath) ||
+          !destUri.getPath().startsWith(rootPath)) {
+        System.out.println("Adding mount point failed: " + mountKey);
+        continue;
+      }
+      String[] nss = new String[]{destUri.getAuthority()};
+      boolean added = addMount(
+          mount, nss, destUri.getPath(), false,
+          false, order, getACLEntityFormHdfsPath(path, getConf()));

Contributor: if we specify

Author: @Hexiaoqiao I didn't find any problems here, can you tell me the details, thank you very much.

+      if (added) {
+        System.out.println("Added mount point " + mount);
+      }
+    }
+    return true;
+  }
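
To make the mapping concrete, here is a minimal sketch (not part of the patch; the cluster name ClusterX and the nameservices ns1/ns2 are assumed placeholders) of the ViewFs link properties that initViewFsToMountTable would read and convert into Router mount table entries:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ViewFsMappingSketch {
  public static void main(String[] args) {
    // Assumed placeholder names: cluster "ClusterX", nameservices "ns1"/"ns2".
    // Each fs.viewfs.mounttable.<cluster>.link.<path> entry with an hdfs://
    // target becomes one Router mount entry, e.g. /data -> ns1 /data.
    Configuration conf = new HdfsConfiguration();
    conf.set("fs.viewfs.mounttable.ClusterX.link./data", "hdfs://ns1/data");
    conf.set("fs.viewfs.mounttable.ClusterX.link./user", "hdfs://ns2/user");
  }
}

Running hdfs dfsrouteradmin -initViewFsToMountTable ClusterX against such a configuration would then create /data and /user mount points, reusing the owner, group and permission of the existing destination directories where available.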

+  /**
+   * Returns the ACLEntity for an HDFS path.
+   * @param path A path on HDFS.
+   * @param conf Configuration used to access the path's FileSystem.
+   * @return ACLEntity built from the owner, group and permission of the path,
+   *         or with default (null) values if the path cannot be read.
+   */
+  public static ACLEntity getACLEntityFormHdfsPath(
+      Path path, Configuration conf) {
+    String owner = null;
+    String group = null;
+    FsPermission mode = null;
+    try {
+      FileSystem fs = path.getFileSystem(conf);
+      if (fs.exists(path)) {
+        FileStatus fileStatus = fs.getFileStatus(path);
+        owner = fileStatus.getOwner();
+        group = fileStatus.getGroup();
+        mode = fileStatus.getPermission();
+      }
+    } catch (IOException e) {
+      System.err.println("Exception encountered " + e);

Contributor: I'd just suggest throwing the exception rather than only printing the error information.

Author: When the FileStatus cannot be obtained, I think the default ACLEntity should be used to add the mapping.

+    }
+    return new ACLEntity(owner, group, mode);
+  }
+
  /**
   * Update storage type quota of specified mount table.
   *