diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index 5d95df8f51649..557f3352075d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -23,6 +23,7 @@ import java.util.EnumSet; import java.util.List; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.type.MapType; @@ -284,6 +285,7 @@ public enum Operation { HTTP_POST), SATISFYSTORAGEPOLICY(HTTP_PUT), GETSNAPSHOTDIFFLISTING(HTTP_GET), GETFILELINKSTATUS(HTTP_GET), GETSTATUS(HTTP_GET), + GETECPOLICIES(HTTP_GET), GET_BLOCK_LOCATIONS(HTTP_GET); private String httpMethod; @@ -1773,6 +1775,17 @@ public FsStatus getStatus(final Path path) throws IOException { return JsonUtilClient.toFsStatus(json); } + public Collection<ErasureCodingPolicyInfo> getAllErasureCodingPolicies() throws IOException { + Map<String, String> params = new HashMap<>(); + params.put(OP_PARAM, Operation.GETECPOLICIES.toString()); + Path path = new Path(getUri().toString(), "/"); + HttpURLConnection conn = + getConnection(Operation.GETECPOLICIES.getMethod(), params, path, false); + HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn); + return JsonUtilClient.getAllErasureCodingPolicies(json); + } + @VisibleForTesting static BlockLocation[] toBlockLocations(JSONObject json) throws IOException { ObjectMapper mapper = new ObjectMapper(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java index d32c19ec9e161..f495a85a23e28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; @@ -2342,4 +2343,30 @@ public Map execute(FileSystem fs) throws IOException { return toJson(fsStatus); } } + + /** + * Executor that performs a FSGetErasureCodingPolicies operation. + */ + @InterfaceAudience.Private + public static class FSGetErasureCodingPolicies + implements FileSystemAccess.FileSystemExecutor<String> { + + public FSGetErasureCodingPolicies() { + } + + @Override + public String execute(FileSystem fs) throws IOException { + Collection<ErasureCodingPolicyInfo> ecPolicyInfos = null; + if (fs instanceof DistributedFileSystem) { + DistributedFileSystem dfs = (DistributedFileSystem) fs; + ecPolicyInfos = dfs.getAllErasureCodingPolicies(); + } else { + throw new UnsupportedOperationException("getErasureCodingPolicies is " + + "not supported for HttpFs on " + fs.getClass() + + ". 
Please check your fs.defaultFS configuration"); + } + HttpFSServerWebApp.get().getMetrics().incrOpsAllECPolicies(); + return JsonUtil.toJsonString(ecPolicyInfos.stream().toArray(ErasureCodingPolicyInfo[]::new)); + } + } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java index 1d319516c2ec8..3477a6fef6e0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java @@ -130,6 +130,7 @@ public class HttpFSParametersProvider extends ParametersProvider { PARAMS_DEF.put(Operation.SATISFYSTORAGEPOLICY, new Class[] {}); PARAMS_DEF.put(Operation.GETFILELINKSTATUS, new Class[]{}); PARAMS_DEF.put(Operation.GETSTATUS, new Class[]{}); + PARAMS_DEF.put(Operation.GETECPOLICIES, new Class[]{}); PARAMS_DEF.put(Operation.GET_BLOCK_LOCATIONS, new Class[] {OffsetParam.class, LenParam.class}); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index 8d5921411ee74..196dc44ec5f9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -526,6 +526,14 @@ public InputStream run() throws Exception { response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); break; } + case GETECPOLICIES: { + FSOperations.FSGetErasureCodingPolicies command = + new FSOperations.FSGetErasureCodingPolicies(); + String js = fsExecute(user, command); + 
AUDIT_LOG.info("[{}]", path); + response = Response.ok(js).type(MediaType.APPLICATION_JSON).build(); + break; + } case GET_BLOCK_LOCATIONS: { long offset = 0; long len = Long.MAX_VALUE; diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java index 6f2c484addb0d..d65208fdbb8d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java @@ -65,6 +65,7 @@ public class HttpFSServerMetrics { private @Metric MutableCounterLong opsStat; private @Metric MutableCounterLong opsCheckAccess; private @Metric MutableCounterLong opsStatus; + private @Metric MutableCounterLong opsAllECPolicies; private final MetricsRegistry registry = new MetricsRegistry("httpfsserver"); private final String name; @@ -165,4 +166,8 @@ public long getOpsStat() { public void incrOpsStatus() { opsStatus.incr(); } + + public void incrOpsAllECPolicies() { + opsAllECPolicies.incr(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java index e61431250b96c..da3faf1066192 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import 
org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; @@ -1217,7 +1218,7 @@ protected enum Operation { FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST, GET_SNAPSHOT_LIST, GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY, SATISFYSTORAGEPOLICY, GET_SNAPSHOT_DIFF_LISTING, GETFILEBLOCKLOCATIONS, - GETFILELINKSTATUS, GETSTATUS + GETFILELINKSTATUS, GETSTATUS, GETECPOLICIES } private void operation(Operation op) throws Exception { @@ -1366,8 +1367,10 @@ private void operation(Operation op) throws Exception { case GETSTATUS: testGetStatus(); break; + case GETECPOLICIES: + testGetAllErasureCodingPolicies(); + break; } - } @Parameterized.Parameters @@ -2111,6 +2114,41 @@ private void testGetStatus() throws Exception { } } + + private void testGetAllErasureCodingPolicies() throws Exception { + if (isLocalFS()) { + // do not test the getAllErasureCodingPolicies for local FS. 
+ return; + } + final Path path = new Path("/foo"); + FileSystem fs = FileSystem.get(path.toUri(), this.getProxiedFSConf()); + if (fs instanceof DistributedFileSystem) { + DistributedFileSystem dfs = + (DistributedFileSystem) FileSystem.get(path.toUri(), this.getProxiedFSConf()); + FileSystem httpFs = this.getHttpFSFileSystem(); + + Collection<ErasureCodingPolicyInfo> dfsAllErasureCodingPolicies = + dfs.getAllErasureCodingPolicies(); + Collection<ErasureCodingPolicyInfo> diffErasureCodingPolicies = null; + + if (httpFs instanceof HttpFSFileSystem) { + HttpFSFileSystem httpFS = (HttpFSFileSystem) httpFs; + diffErasureCodingPolicies = httpFS.getAllErasureCodingPolicies(); + } else if (httpFs instanceof WebHdfsFileSystem) { + WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) httpFs; + diffErasureCodingPolicies = webHdfsFileSystem.getAllErasureCodingPolicies(); + } else { + Assert.fail(fs.getClass().getSimpleName() + + " is not of type HttpFSFileSystem or WebHdfsFileSystem"); + } + + //Validate erasureCodingPolicyInfos are the same as DistributedFileSystem + assertEquals(dfsAllErasureCodingPolicies.size(), diffErasureCodingPolicies.size()); + assertTrue(dfsAllErasureCodingPolicies.containsAll(diffErasureCodingPolicies)); + } else { + Assert.fail(fs.getClass().getSimpleName() + " is not of type DistributedFileSystem."); + } + } + private void assertHttpFsReportListingWithDfsClient(SnapshotDiffReportListing diffReportListing, SnapshotDiffReportListing dfsDiffReportListing) { Assert.assertEquals(diffReportListing.getCreateList().size(),