diff --git a/docs/changelog/94299.yaml b/docs/changelog/94299.yaml new file mode 100644 index 0000000000000..b2703a15e2be9 --- /dev/null +++ b/docs/changelog/94299.yaml @@ -0,0 +1,5 @@ +pr: 94299 +summary: "HDFS plugin: allow webhdfs scheme" +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 78809a170fbbd..f1fb012025017 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -139,6 +139,10 @@ for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoo } else { miniHDFSArgs.add("-Dhdfs.config.port=" + getNonSecureNamenodePortForVersion(hadoopVer)) } + // This is required for webhdfs to work; otherwise miniHDFS will fail with: module java.base does not "opens java.lang" to unnamed module + miniHDFSArgs.add('--add-opens') + miniHDFSArgs.add('java.base/java.lang=ALL-UNNAMED') + miniHDFSArgs.add('-Dhdfs.config.http.port=' + getHttpPortForVersion(hadoopVer)) // If it's an HA fixture, set a nameservice to use in the JVM options if (name.startsWith('haHdfs') || name.startsWith('secureHaHdfs')) { miniHDFSArgs.add("-Dha-nameservice=ha-hdfs") @@ -191,7 +195,7 @@ for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoo } } - for (String integTestTaskName : ['yamlRestTest' + hadoopVersion, 'yamlRestTestSecure' + hadoopVersion]) { + for (String integTestTaskName : ['yamlRestTest' + hadoopVersion, 'yamlRestTestSecure' + hadoopVersion, 'yamlRestTestWeb' + hadoopVersion]) { tasks.register(integTestTaskName, RestIntegTestTask) { description = "Runs rest tests against an elasticsearch cluster with HDFS" + hadoopVer @@ -209,6 +213,7 @@ for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoo Map expansions = [ 'hdfs_port' : getNonSecureNamenodePortForVersion(hadoopVer), 'secure_hdfs_port': getSecureNamenodePortForVersion(hadoopVer), + 'hdfs_http_port' : getHttpPortForVersion(hadoopVer), ] inputs.properties(expansions) filter("tokens": expansions.collectEntries { k, v -> [k, v.toString()]}, ReplaceTokens.class) @@ -219,6 +224,9 @@ for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoo it.into("secure_hdfs_repository_" + hadoopVer) { from "src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository" } + it.into("webhdfs_repository_" + hadoopVer) { + from "src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository" + } } tasks.named("processYamlRestTestResources").configure { dependsOn(processHadoopTestResources) @@ -236,6 +244,11 @@ for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoo // The normal test runner only runs the standard hdfs rest tests systemProperty 'tests.rest.suite', 'hdfs_repository_' + hadoopVer } + tasks.named("yamlRestTestWeb" + hadoopVer).configure { + dependsOn "hdfs" + hadoopVer + "Fixture" + // The normal test runner only runs the standard hdfs rest tests + systemProperty 'tests.rest.suite', 'hdfs_repository_' + hadoopVer + } tasks.named("javaRestTest" + hadoopVer).configure { dependsOn "haHdfs" + hadoopVer + "Fixture" } @@ -245,19 +258,27 @@ for (int hadoopVersion = minTestedHadoopVersion; hadoopVersion <= maxTestedHadoo systemProperty 'tests.rest.suite', 'hdfs_repository_' + hadoopVer + '/10_basic' } // HA fixture is unsupported. Don't run them.
+ tasks.named("yamlRestTestSecure" + hadoopVer).configure { + enabled = false + } tasks.named("javaRestTestSecure" + hadoopVer).configure { enabled = false } } tasks.named("check").configure { - dependsOn("yamlRestTest" + hadoopVer, "yamlRestTestSecure" + hadoopVer, "javaRestTestSecure" + hadoopVer) + dependsOn("yamlRestTest" + hadoopVer, "yamlRestTestSecure" + hadoopVer, "javaRestTestSecure" + hadoopVer, "yamlRestTestWeb" + hadoopVer) } // Run just the secure hdfs rest test suite. tasks.named("yamlRestTestSecure" + hadoopVer).configure { systemProperty 'tests.rest.suite', 'secure_hdfs_repository_' + hadoopVer } + + // Run just the secure webhdfs rest test suite. + tasks.named("yamlRestTestWeb" + hadoopVer).configure { + systemProperty 'tests.rest.suite', 'webhdfs_repository_' + hadoopVer + } } @@ -269,6 +290,10 @@ def getNonSecureNamenodePortForVersion(hadoopVersion) { return 10003 - (2 * hadoopVersion) } +def getHttpPortForVersion(hadoopVersion) { + return 10004 - (2 * hadoopVersion) +} + Set disabledIntegTestTaskNames = [] tasks.withType(RestIntegTestTask).configureEach { testTask -> diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java index b85acf5d328ce..96e4b07713c9b 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java @@ -37,7 +37,11 @@ final class HdfsBlobStore implements BlobStore { this.fileContext = fileContext; // Only restrict permissions if not running with HA boolean restrictPermissions = (haEnabled == false); - this.securityContext = new HdfsSecurityContext(fileContext.getUgi(), restrictPermissions); + this.securityContext = new HdfsSecurityContext( + fileContext.getUgi(), + restrictPermissions, + fileContext.getDefaultFileSystem().getUri().getScheme() + ); this.bufferSize = bufferSize; this.replicationFactor = replicationFactor; this.root = execute(fileContext1 -> fileContext1.makeQualified(new Path(path))); diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index a855428c3252b..0690802155321 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -40,6 +40,7 @@ import java.net.UnknownHostException; import java.security.AccessController; import java.security.PrivilegedAction; +import java.util.List; import java.util.Locale; public final class HdfsRepository extends BlobStoreRepository { @@ -51,6 +52,8 @@ public final class HdfsRepository extends BlobStoreRepository { private static final String CONF_SECURITY_PRINCIPAL = "security.principal"; + private static final List allowedSchemes = List.of("hdfs", "webhdfs", "swebhdfs"); + private final Environment environment; private final ByteSizeValue chunkSize; private final URI uri; @@ -74,17 +77,18 @@ public HdfsRepository( throw new IllegalArgumentException("No 'uri' defined for hdfs snapshot/restore"); } uri = URI.create(uriSetting); - if ("hdfs".equalsIgnoreCase(uri.getScheme()) == false) { + if (uri.getScheme() == null || allowedSchemes.contains(uri.getScheme()) == false) { throw new IllegalArgumentException( String.format( 
Locale.ROOT, - "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore", + "Invalid scheme [%s] specified in uri [%s]; only one of %s uris allowed for hdfs snapshot/restore", uri.getScheme(), - uriSetting + uriSetting, + String.join(", ", allowedSchemes) ) ); } - if (Strings.hasLength(uri.getPath()) && uri.getPath().equals("/") == false) { + if (uri.getScheme().equals("hdfs") && Strings.hasLength(uri.getPath()) && uri.getPath().equals("/") == false) { throw new IllegalArgumentException( String.format( Locale.ROOT, @@ -151,6 +155,8 @@ private HdfsBlobStore createBlobstore(URI blobstoreUri, String path, Settings re // Disable FS cache hadoopConfiguration.setBoolean("fs.hdfs.impl.disable.cache", true); + hadoopConfiguration.setBoolean("fs.webhdfs.impl.disable.cache", true); + hadoopConfiguration.setBoolean("fs.swebhdfs.impl.disable.cache", true); // Create a hadoop user UserGroupInformation ugi = login(hadoopConfiguration, repositorySettings); diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java index 630afc8f18287..d440b35db34ee 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java @@ -9,6 +9,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.env.Environment; import java.io.IOException; @@ -22,6 +23,7 @@ import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.Arrays; +import java.util.Objects; import javax.security.auth.AuthPermission; import javax.security.auth.PrivateCredentialPermission; @@ -37,6 +39,8 @@ class HdfsSecurityContext { private static final Permission[] SIMPLE_AUTH_PERMISSIONS; private static final Permission[] KERBEROS_AUTH_PERMISSIONS; + private static final Permission[] WEBHDFS_PERMISSIONS; + private static final Permission[] SWEBHDFS_PERMISSIONS; static { // We can do FS ops with only a few elevated permissions: SIMPLE_AUTH_PERMISSIONS = new Permission[] { @@ -72,6 +76,16 @@ class HdfsSecurityContext { // 7) allow code to initiate kerberos connections as the logged in user // Still far and away fewer permissions than the original full plugin policy }; + WEBHDFS_PERMISSIONS = new Permission[] { + // 1) allow hadoop to act as the logged in Subject + new AuthPermission("doAs") + }; + SWEBHDFS_PERMISSIONS = new Permission[] { + // 1) allow hadoop to act as the logged in Subject + new AuthPermission("doAs"), + // 2) allow hadoop to call setSSLSocketFactory on HttpsURLConnection + new RuntimePermission("setFactory"), + }; } /** @@ -94,13 +108,13 @@ static Path locateKeytabFile(Environment environment) { private final boolean restrictPermissions; private final Permission[] restrictedExecutionPermissions; - HdfsSecurityContext(UserGroupInformation ugi, boolean restrictPermissions) { + HdfsSecurityContext(UserGroupInformation ugi, boolean restrictPermissions, String scheme) { this.ugi = ugi; this.restrictPermissions = restrictPermissions; - this.restrictedExecutionPermissions = renderPermissions(ugi); + this.restrictedExecutionPermissions = renderPermissions(ugi, scheme); } - private Permission[] renderPermissions(UserGroupInformation 
userGroupInformation) { + private Permission[] renderPermissions(UserGroupInformation userGroupInformation, String scheme) { Permission[] permissions; if (userGroupInformation.isFromKeytab()) { // KERBEROS @@ -117,6 +131,12 @@ private Permission[] renderPermissions(UserGroupInformation userGroupInformation // SIMPLE permissions = Arrays.copyOf(SIMPLE_AUTH_PERMISSIONS, SIMPLE_AUTH_PERMISSIONS.length); } + if (Objects.equals(scheme, "webhdfs")) { + permissions = ArrayUtils.concat(permissions, WEBHDFS_PERMISSIONS, Permission.class); + } else if (Objects.equals(scheme, "swebhdfs")) { + permissions = ArrayUtils.concat(permissions, SWEBHDFS_PERMISSIONS, Permission.class); + } + return permissions; } diff --git a/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy index cd9f5e8d61490..f77433af49eed 100644 --- a/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy @@ -68,4 +68,8 @@ grant { // client binds to the address returned from the host name of any principal set up as a service principal // org.apache.hadoop.ipc.Client.Connection.setupConnection permission java.net.SocketPermission "localhost:0", "listen,resolve"; + + // org.apache.hadoop.hdfs.web.SSLConnectionConfigurator + // This is used by swebhdfs connections + permission java.lang.RuntimePermission "setFactory"; }; diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index f72a5eeea90d0..e0645fd48119c 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -190,6 +190,36 @@ public void testPathSpecifiedInHdfs() { } } + public void testWebhdfsIsAllowedScheme() { + client().admin() + .cluster() + .preparePutRepository("test-repo") + .setType("hdfs") + .setSettings( + Settings.builder() + .put("uri", "webhdfs:///") + .put("conf.fs.AbstractFileSystem.webhdfs.impl", TestingFs.class.getName()) + .put("path", "foo") + .put("chunk_size", randomIntBetween(100, 1000) + "k") + .put("compress", randomBoolean()) + ) + .get(); + } + + public void testSwebhdfsIsAllowedScheme() { + client().admin() + .cluster() + .preparePutRepository("test-repo") + .setType("hdfs") + .setSettings( + Settings.builder() + .put("uri", "swebhdfs:///") + .put("conf.fs.AbstractFileSystem.swebhdfs.impl", TestingFs.class.getName()) + .put("path", "foo") + ) + .get(); + } + public void testMissingPath() { try { clusterAdmin().preparePutRepository("test-repo") diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/hdfs_repository/10_basic.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/hdfs_repository/10_basic.yml index bc419d75ba773..f2e29218e744d 100644 --- a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/hdfs_repository/10_basic.yml +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/hdfs_repository/10_basic.yml @@ -19,7 +19,7 @@ --- # # Check that we can't use file:// repositories or anything like that -# We only test this plugin against hdfs:// +# We only test this plugin against hdfs:// and webhdfs:// # "HDFS only": - do: diff --git 
a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yml index bc419d75ba773..f2e29218e744d 100644 --- a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yml +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository/10_basic.yml @@ -19,7 +19,7 @@ --- # # Check that we can't use file:// repositories or anything like that -# We only test this plugin against hdfs:// +# We only test this plugin against hdfs:// and webhdfs:// # "HDFS only": - do: diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/10_basic.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/10_basic.yml new file mode 100644 index 0000000000000..f2e29218e744d --- /dev/null +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/10_basic.yml @@ -0,0 +1,33 @@ +# Integration tests for HDFS Repository plugin +# +# Check plugin is installed +# +"Plugin loaded": + - skip: + reason: "contains is a newly added assertion" + features: contains + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + + - contains: { nodes.$master.plugins: { name: repository-hdfs } } +--- +# +# Check that we can't use file:// repositories or anything like that +# We only test this plugin against hdfs:// and webhdfs:// +# +"HDFS only": + - do: + catch: /Invalid scheme/ + snapshot.create_repository: + repository: misconfigured_repository + body: + type: hdfs + settings: + uri: "file://bogus" + path: "foo/bar" diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/20_repository_create.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/20_repository_create.yml new file mode 100644 index 0000000000000..4f8a7f4794e5a --- /dev/null +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/20_repository_create.yml @@ -0,0 +1,27 @@ +# Integration tests for HDFS Repository plugin +# +# Tests creating a repository +# +"HDFS Repository Creation": + # Create repository + - do: + snapshot.create_repository: + repository: test_repository_create + body: + type: hdfs + settings: + uri: "webhdfs://localhost:@hdfs_http_port@" + path: "test/repository_create" + + # Get repository + - do: + snapshot.get_repository: + repository: test_repository_create + + - is_true: test_repository_create + - match: {test_repository_create.settings.path : "test/repository_create"} + + # Remove our repository + - do: + snapshot.delete_repository: + repository: test_repository_create diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/20_repository_delete.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/20_repository_delete.yml new file mode 100644 index 0000000000000..0c8dd5db88910 --- /dev/null +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/20_repository_delete.yml @@ -0,0 +1,50 @@ +# Integration tests for HDFS Repository plugin +# +# Tests creating a repository, then deleting it and creating it again. 
+# +"HDFS Delete Repository": + # Create repository + - do: + snapshot.create_repository: + repository: test_repo_hdfs_1 + body: + type: hdfs + settings: + uri: "webhdfs://localhost:@hdfs_http_port@" + path: "foo/bar" + + # Get repository + - do: + snapshot.get_repository: + repository: test_repo_hdfs_1 + + - is_true: test_repo_hdfs_1 + - match: {test_repo_hdfs_1.settings.path : "foo/bar"} + + # Delete repository + - do: + snapshot.delete_repository: + repository: test_repo_hdfs_1 + + # Get repository: It should be gone + - do: + catch: /repository_missing_exception/ + snapshot.get_repository: + repository: test_repo_hdfs_1 + + # Create it again + - do: + snapshot.create_repository: + repository: test_repo_hdfs_1 + body: + type: hdfs + settings: + uri: "webhdfs://localhost:@hdfs_http_port@" + path: "foo/bar" + + # Get repository again + - do: + snapshot.get_repository: + repository: test_repo_hdfs_1 + + - is_true: test_repo_hdfs_1 diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/20_repository_verify.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/20_repository_verify.yml new file mode 100644 index 0000000000000..ec611def99193 --- /dev/null +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/20_repository_verify.yml @@ -0,0 +1,23 @@ +# Integration tests for HDFS Repository plugin +# +# Tests explicit verify +# +"HDFS Repository Verify": + - do: + snapshot.create_repository: + repository: test_repository_verify + body: + type: hdfs + settings: + uri: "webhdfs://localhost:@hdfs_http_port@" + path: "test/repository_verify" + + # Verify repository + - do: + snapshot.verify_repository: + repository: test_repository_verify + + # Remove our repository + - do: + snapshot.delete_repository: + repository: test_repository_verify diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/30_snapshot.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/30_snapshot.yml new file mode 100644 index 0000000000000..ff7ebb2ce4029 --- /dev/null +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/30_snapshot.yml @@ -0,0 +1,48 @@ +# Integration tests for HDFS Repository plugin +# +# Actually perform a snapshot to hdfs +# +--- +"take snapshot": + # Create repository + - do: + snapshot.create_repository: + repository: test_snapshot_repository + body: + type: hdfs + settings: + uri: "webhdfs://localhost:@hdfs_http_port@" + path: "test/snapshot" + + # Create index + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + + # Create snapshot + - do: + snapshot.create: + repository: test_snapshot_repository + snapshot: test_snapshot + wait_for_completion: true + + - match: { snapshot.snapshot: test_snapshot } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.successful: 1 } + - match: { snapshot.shards.failed : 0 } + + # Remove our snapshot + - do: + snapshot.delete: + repository: test_snapshot_repository + snapshot: test_snapshot + + # Remove our repository + - do: + snapshot.delete_repository: + repository: test_snapshot_repository + diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/30_snapshot_get.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/30_snapshot_get.yml new file mode 
100644 index 0000000000000..c7cb474debf06 --- /dev/null +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/30_snapshot_get.yml @@ -0,0 +1,70 @@ +# Integration tests for HDFS Repository plugin +# +# Tests retrieving information about snapshot +# +--- +"Get a snapshot": + # Create repository + - do: + snapshot.create_repository: + repository: test_snapshot_get_repository + body: + type: hdfs + settings: + uri: "webhdfs://localhost:@hdfs_http_port@" + path: "test/snapshot_get" + + # Create index + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + + # Wait for green + - do: + cluster.health: + wait_for_status: green + + # Create snapshot + - do: + snapshot.create: + repository: test_snapshot_get_repository + snapshot: test_snapshot_get + wait_for_completion: true + + - match: { snapshot.snapshot: test_snapshot_get } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.successful: 1 } + - match: { snapshot.shards.failed : 0 } + + # Get snapshot info + - do: + snapshot.get: + repository: test_snapshot_get_repository + snapshot: test_snapshot_get + + - length: { snapshots: 1 } + - match: { snapshots.0.snapshot : test_snapshot_get } + + # List snapshot info + - do: + snapshot.get: + repository: test_snapshot_get_repository + snapshot: "*" + + - length: { snapshots: 1 } + - match: { snapshots.0.snapshot : test_snapshot_get } + + # Remove our snapshot + - do: + snapshot.delete: + repository: test_snapshot_get_repository + snapshot: test_snapshot_get + + # Remove our repository + - do: + snapshot.delete_repository: + repository: test_snapshot_get_repository diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/30_snapshot_readonly.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/30_snapshot_readonly.yml new file mode 100644 index 0000000000000..23a51fa8a4670 --- /dev/null +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/30_snapshot_readonly.yml @@ -0,0 +1,29 @@ +# Integration tests for HDFS Repository plugin +# +# Tests retrieving information about snapshot +# +--- +"Get a snapshot - readonly": + # Create repository + - do: + snapshot.create_repository: + repository: test_snapshot_repository_ro + body: + type: hdfs + settings: + uri: "webhdfs://localhost:@hdfs_http_port@" + path: "/user/elasticsearch/existing/readonly-repository" + readonly: true + + # List snapshot info + - do: + snapshot.get: + repository: test_snapshot_repository_ro + snapshot: "_all" + + - length: { snapshots: 1 } + + # Remove our repository + - do: + snapshot.delete_repository: + repository: test_snapshot_repository_ro diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/40_restore.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/40_restore.yml new file mode 100644 index 0000000000000..841624e7aa1b2 --- /dev/null +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/webhdfs_repository/40_restore.yml @@ -0,0 +1,83 @@ +# Integration tests for HDFS Repository plugin +# +# Actually perform a snapshot to hdfs, then restore it +# +--- +"Create a snapshot and then restore it": + - skip: + features: ["allowed_warnings"] + + # Create repository + - do: + snapshot.create_repository: + repository: test_restore_repository + body: + type: hdfs + settings: + uri: 
"webhdfs://localhost:@hdfs_http_port@" + path: "test/restore" + + # Create index + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + + # Wait for green + - do: + cluster.health: + wait_for_status: green + + # Take snapshot + - do: + snapshot.create: + repository: test_restore_repository + snapshot: test_restore + wait_for_completion: true + + - match: { snapshot.snapshot: test_restore } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.successful: 1 } + - match: { snapshot.shards.failed : 0 } + - is_true: snapshot.version + - gt: { snapshot.version_id: 0} + + # Close index + - do: + indices.close: + index : test_index + allowed_warnings: + - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" + + # Restore index + - do: + snapshot.restore: + repository: test_restore_repository + snapshot: test_restore + wait_for_completion: true + + # Check recovery stats + - do: + indices.recovery: + index: test_index + + - match: { test_index.shards.0.type: SNAPSHOT } + - match: { test_index.shards.0.stage: DONE } + - match: { test_index.shards.0.index.files.recovered: 1} + - gt: { test_index.shards.0.index.size.recovered_in_bytes: 0} + - match: { test_index.shards.0.index.files.reused: 0} + - match: { test_index.shards.0.index.size.reused_in_bytes: 0} + + # Remove our snapshot + - do: + snapshot.delete: + repository: test_restore_repository + snapshot: test_restore + + # Remove our repository + - do: + snapshot.delete_repository: + repository: test_restore_repository diff --git a/test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java index ee993fec74eb4..ec7408737ca3a 100644 --- a/test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java +++ b/test/fixtures/hdfs2-fixture/src/main/java/hdfs/MiniHDFS.java @@ -99,6 +99,10 @@ public static void main(String[] args) throws Exception { builder.nameNodePort(9999); } } + String explicitHttpPort = System.getProperty("hdfs.config.http.port"); + if (explicitHttpPort != null) { + builder.nameNodeHttpPort(Integer.parseInt(explicitHttpPort)); + } // Configure HA mode String haNameService = System.getProperty("ha-nameservice"); diff --git a/test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java index 0a26f5d82ac17..01f6ce7ee2a66 100644 --- a/test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java +++ b/test/fixtures/hdfs3-fixture/src/main/java/hdfs/MiniHDFS.java @@ -100,6 +100,10 @@ public static void main(String[] args) throws Exception { builder.nameNodePort(9999); } } + String explicitHttpPort = System.getProperty("hdfs.config.http.port"); + if (explicitHttpPort != null) { + builder.nameNodeHttpPort(Integer.parseInt(explicitHttpPort)); + } // Configure HA mode String haNameService = System.getProperty("ha-nameservice");