diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index 6c976f19f2428..911aa8d5d4906 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -214,13 +214,6 @@ provided - - - junit - junit - test - - org.apache.hadoop hadoop-common @@ -367,11 +360,6 @@ junit-jupiter-params test - - org.junit.platform - junit-platform-launcher - test - org.junit.vintage junit-vintage-engine diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java index ad737b55acf81..d6624e8c0c178 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/AbstractWasbTestWithTimeout.java @@ -28,7 +28,7 @@ import java.util.concurrent.TimeUnit; -import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Base class for any Wasb test with timeouts & named threads. @@ -69,10 +69,10 @@ protected int getTestTimeoutMillis() { } public static void assumeNotNull(Object objects) { - assumeTrue(objects != null); + assumeThat(objects).isNotNull(); } public static void assumeNotNull(Object objects, String message) { - assumeTrue(objects != null, message); + assumeThat(objects).as(message).isNotNull(); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java index bc855a7ee6e3c..8110b6d6ae8e3 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestContainerChecks.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azure; -import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.FileNotFoundException; import java.util.EnumSet; @@ -169,7 +169,7 @@ public String call() throws Exception { @Test public void testContainerChecksWithSas() throws Exception { - assumeFalse(runningInSASMode); + assumeThat(runningInSASMode).isFalse(); testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(CreateOptions.UseSas)); assumeNotNull(testAccount); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestListPerformance.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestListPerformance.java index 99e7383e8a175..44d57d1b19f83 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestListPerformance.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestListPerformance.java @@ -48,7 +48,7 @@ import org.apache.hadoop.fs.azure.integration.AzureTestUtils; import org.apache.hadoop.fs.contract.ContractTestUtils; -import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test list performance. 
@@ -99,7 +99,7 @@ protected AzureBlobStorageTestAccount createTestAccount() throws Exception { @Test public void test_0101_CreateDirectoryWithFiles() throws Exception { - assumeFalse(fs.exists(TEST_DIR_PATH), "Test path exists; skipping"); + assumeThat(fs.exists(TEST_DIR_PATH)).as("Test path exists; skipping").isFalse(); ExecutorService executorService = Executors.newFixedThreadPool(threads); CloudBlobContainer container = testAccount.getRealContainer(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java index 4f76926de2d48..2cc7592dc1f9b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azure; -import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.assertj.core.api.Assumptions.assumeThat; import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.apache.hadoop.fs.Path; @@ -50,7 +50,9 @@ public void setUp() throws Exception { if (testAccount != null) { fs = testAccount.getFileSystem(); } - assumeTrue(fs != null); + assumeThat(fs) + .as("FileSystem must not be null for this test") + .isNotNull(); basePath = fs.makeQualified( AzureTestUtils.createTestPath( new Path("ITestNativeAzureFileSystemContractEmulator"))); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java index b8f535ed13ed5..702b9c6375ce6 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azure; -import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.assertj.core.api.Assumptions.assumeThat; import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.apache.hadoop.fs.Path; @@ -53,7 +53,7 @@ public void setUp() throws Exception { if (testAccount != null) { fs = testAccount.getFileSystem(); } - assumeTrue(fs != null); + assumeThat(fs).isNotNull(); basePath = fs.makeQualified( AzureTestUtils.createTestPath( new Path("NativeAzureFileSystemContractLive"))); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java index 171c610140d91..5f0e951142b7c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azure; -import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.assertj.core.api.Assumptions.assumeThat; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystemContractBaseTest; @@ -63,7 +63,7 @@ private AzureBlobStorageTestAccount 
createTestAccount() @BeforeEach public void setUp() throws Exception { testAccount = createTestAccount(); - assumeTrue(testAccount != null); + assumeThat(testAccount).isNotNull(); fs = testAccount.getFileSystem(); basePath = AzureTestUtils.pathForTests(fs, "filesystemcontractpageblob"); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java index c69233961fd80..f1e8fdd181399 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbRemoteCallHelper.java @@ -47,7 +47,7 @@ import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.times; -import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test class to hold all WasbRemoteCallHelper tests. @@ -72,8 +72,9 @@ public void setUp() throws Exception { boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false); boolean useAuthorization = fs.getConf() .getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false); - assumeTrue(useSecureMode && useAuthorization, - "Test valid when both SecureMode and Authorization are enabled .. skipping"); + assumeThat(useSecureMode && useAuthorization) + .as("Test valid when both SecureMode and Authorization are enabled .. skipping") + .isTrue(); } /** diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java index b78f509886c51..08122f966b905 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestWasbUriAndConfiguration.java @@ -21,7 +21,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.azure.NativeAzureFileSystem.RETURN_URI_AS_CANONICAL_SERVICE_NAME_PROPERTY_NAME; import static org.apache.hadoop.test.LambdaTestUtils.intercept; -import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.ByteArrayInputStream; import java.io.DataInputStream; @@ -132,7 +132,7 @@ public void testConnectUsingKey() throws Exception { @Test public void testConnectUsingSAS() throws Exception { - assumeFalse(runningInSASMode); + assumeThat(runningInSASMode).isFalse(); // Create the test account with SAS credentials. testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(CreateOptions.UseSas, CreateOptions.CreateContainer)); @@ -148,7 +148,7 @@ public void testConnectUsingSAS() throws Exception { @Test public void testConnectUsingSASReadonly() throws Exception { - assumeFalse(runningInSASMode); + assumeThat(runningInSASMode).isFalse(); // Create the test account with SAS credentials. 
testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of( CreateOptions.UseSas, CreateOptions.CreateContainer, @@ -378,7 +378,7 @@ public void testDefaultKeyProvider() throws Exception { public void testCredsFromCredentialProvider(@TempDir java.nio.file.Path tempDir) throws Exception { - assumeFalse(runningInSASMode); + assumeThat(runningInSASMode).isFalse(); String account = "testacct"; String key = "testkey"; // set up conf to have a cred provider diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java index 37a4711d8a48c..6cc6903d4930d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemAuthorization.java @@ -38,7 +38,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.StringUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -48,7 +47,7 @@ import static org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.KEY_USE_SECURE_MODE; import static org.apache.hadoop.fs.azure.CachingAuthorizer.KEY_AUTH_SERVICE_CACHING_ENABLE; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; -import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test class to hold all WASB authorization tests. @@ -92,8 +91,9 @@ public void setUp() throws Exception { super.setUp(); boolean useSecureMode = fs.getConf().getBoolean(KEY_USE_SECURE_MODE, false); boolean useAuthorization = fs.getConf().getBoolean(NativeAzureFileSystem.KEY_AZURE_AUTHORIZATION, false); - assumeTrue((useSecureMode && useAuthorization), - "Test valid when both SecureMode and Authorization are enabled .. skipping"); + assumeThat(useSecureMode && useAuthorization) + .as("Test valid when both SecureMode and Authorization are enabled .. 
skipping") + .isTrue(); authorizer = new MockWasbAuthorizerImpl(fs); authorizer.init(fs.getConf()); @@ -1544,8 +1544,9 @@ public void testSetOwnerSucceedsForAuthorisedUsers() throws Throwable { ContractTestUtils.assertPathExists(fs, "test path does not exist", testPath); String owner = fs.getFileStatus(testPath).getOwner(); - assumeTrue(!StringUtils.equalsIgnoreCase(owner, newOwner), - "changing owner requires original and new owner to be different"); + assumeThat(owner) + .as("changing owner requires original and new owner to be different") + .isNotEqualToIgnoringCase(newOwner); authorisedUser.doAs(new PrivilegedExceptionAction() { @Override @@ -1587,8 +1588,9 @@ public void testSetOwnerSucceedsForAnyUserWhenWildCardIsSpecified() throws Throw ContractTestUtils.assertPathExists(fs, "test path does not exist", testPath); String owner = fs.getFileStatus(testPath).getOwner(); - assumeTrue(!StringUtils.equalsIgnoreCase(owner, newOwner), - "changing owner requires original and new owner to be different"); + assumeThat(owner) + .as("changing owner requires original and new owner to be different") + .isNotEqualToIgnoringCase(newOwner); user.doAs(new PrivilegedExceptionAction() { @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java index 69dc335bd8d6a..4a71c78a9813f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java @@ -27,7 +27,6 @@ import java.util.List; import org.junit.jupiter.api.Assertions; -import org.junit.Assume; import org.opentest4j.TestAbortedException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,8 +41,6 @@ import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount; import org.apache.hadoop.fs.azure.NativeAzureFileSystem; -import static org.junit.jupiter.api.Assumptions.assumeTrue; - import static org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.WASB_ACCOUNT_NAME_DOMAIN_SUFFIX_REGEX; import static org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.WASB_TEST_ACCOUNT_NAME_WITH_DOMAIN; import static org.apache.hadoop.fs.azure.integration.AzureTestConstants.*; @@ -51,6 +48,7 @@ import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; import static org.apache.hadoop.test.MetricsAsserts.getLongGauge; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Utilities for the Azure tests. 
Based on {@code S3ATestUtils}, so @@ -394,7 +392,7 @@ public static void assume(String message, boolean condition) { if (!condition) { LOG.warn(message); } - Assume.assumeTrue(message, condition); + assumeThat(condition).as(message).isTrue(); } /** @@ -495,8 +493,10 @@ public static String verifyWasbAccountNameInConfig(Configuration conf) { if (accountName == null) { accountName = conf.get(WASB_TEST_ACCOUNT_NAME_WITH_DOMAIN); } - assumeTrue(accountName != null && !accountName.endsWith(WASB_ACCOUNT_NAME_DOMAIN_SUFFIX_REGEX), - "Account for WASB is missing or it is not in correct format"); + assumeThat(accountName) + .as("Account for WASB is missing or it is not in correct format") + .isNotNull() + .doesNotEndWith(WASB_ACCOUNT_NAME_DOMAIN_SUFFIX_REGEX); return accountName; } @@ -550,7 +550,8 @@ public static String readStringFromStream(FSDataInputStream inputStream) throws * Assume hierarchical namespace is disabled for test account. */ public static void assumeNamespaceDisabled(Configuration conf) { - Assume.assumeFalse("Hierarchical namespace is enabled for test account.", - conf.getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false)); + assumeThat(conf.getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false)) + .as("Hierarchical namespace is enabled for test account.") + .isFalse(); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java index 8939bfec66d77..d70412aca1f83 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/ITestAzureHugeFiles.java @@ -43,7 +43,7 @@ import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.*; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; -import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.assertj.core.api.Assumptions.assumeThat; /** @@ -153,8 +153,10 @@ FileStatus assumeHugeFileExists() throws IOException { assertPathExists(getFileSystem(), "huge file not created", hugefile); try { FileStatus status = getFileSystem().getFileStatus(hugefile); - assumeTrue(status.isFile(), "Not a file: " + status); - assumeTrue(status.getLen() > 0, "File " + hugefile + " is empty"); + assumeThat(status.isFile()).as("Not a file: " + status).isTrue(); + assumeThat(status.getLen()) + .as("File " + hugefile + " is empty") + .isPositive(); return status; } catch (FileNotFoundException e) { skip("huge file not created: " + hugefile); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java index 9be4998cb8217..27585f9fa5986 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsIntegrationTest.java @@ -30,9 +30,8 @@ import java.util.concurrent.Future; import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Assume; -import org.junit.Before; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,7 +75,7 @@ import static 
org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode.FILE_SYSTEM_NOT_FOUND; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.*; import static org.apache.hadoop.test.LambdaTestUtils.intercept; -import static org.junit.Assume.assumeTrue; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Base for AzureBlobFileSystem Integration tests. @@ -114,8 +113,9 @@ protected AbstractAbfsIntegrationTest() throws Exception { // check if accountName is set using different config key accountName = rawConfig.get(FS_AZURE_ABFS_ACCOUNT_NAME); } - assumeTrue("Not set: " + FS_AZURE_ABFS_ACCOUNT_NAME, - accountName != null && !accountName.isEmpty()); + assumeThat(accountName) + .as("Not set: " + FS_AZURE_ABFS_ACCOUNT_NAME) + .isNotBlank(); final String abfsUrl = this.getFileSystemName() + "@" + this.getAccountName(); URI defaultUri = null; @@ -191,7 +191,7 @@ public TracingContext getTestTracingContext(AzureBlobFileSystem fs, FSOperationType.TEST_OP, needsPrimaryReqId, format, null); } - @Before + @BeforeEach public void setup() throws Exception { //Create filesystem first to make sure getWasbFileSystem() can return an existing filesystem. createFileSystem(); @@ -225,7 +225,7 @@ public void setup() throws Exception { } } - @After + @AfterEach public void teardown() throws Exception { try { IOUtils.closeStream(wasb); @@ -569,23 +569,24 @@ protected AbfsOutputStream createAbfsOutputStreamWithFlushEnabled( */ protected long assertAbfsStatistics(AbfsStatistic statistic, long expectedValue, Map metricMap) { - assertEquals("Mismatch in " + statistic.getStatName(), expectedValue, - (long) metricMap.get(statistic.getStatName())); + assertEquals(expectedValue, (long) metricMap.get(statistic.getStatName()), + "Mismatch in " + statistic.getStatName()); return expectedValue; } protected void assumeValidTestConfigPresent(final Configuration conf, final String key) { String configuredValue = conf.get(accountProperty(key, accountName), conf.get(key, "")); - Assume.assumeTrue(String.format("Missing Required Test Config: %s.", key), - !configuredValue.isEmpty()); + assumeThat(configuredValue) + .as(String.format("Missing Required Test Config: %s.", key)) + .isNotEmpty(); } protected void assumeValidAuthConfigsPresent() { final AuthType currentAuthType = getAuthType(); - Assume.assumeFalse( - "SAS Based Authentication Not Allowed For Integration Tests", - currentAuthType == AuthType.SAS); + assumeThat(currentAuthType) + .as("SAS Based Authentication Not Allowed For Integration Tests") + .isNotEqualTo(AuthType.SAS); if (currentAuthType == AuthType.SharedKey) { assumeValidTestConfigPresent(getRawConfiguration(), FS_AZURE_ACCOUNT_KEY); } else { @@ -616,7 +617,7 @@ public AbfsServiceType getIngressServiceType() { * @param path path to create. Can be relative or absolute. */ protected void createAzCopyFolder(Path path) throws Exception { - Assume.assumeTrue(getAbfsServiceType() == AbfsServiceType.BLOB); + assumeThat(getAbfsServiceType()).isEqualTo(AbfsServiceType.BLOB); assumeValidTestConfigPresent(getRawConfiguration(), FS_AZURE_TEST_FIXED_SAS_TOKEN); String sasToken = getRawConfiguration().get(FS_AZURE_TEST_FIXED_SAS_TOKEN); AzcopyToolHelper azcopyHelper = AzcopyToolHelper.getInstance(sasToken); @@ -628,7 +629,7 @@ protected void createAzCopyFolder(Path path) throws Exception { * @param path path to create. Can be relative or absolute. 
*/ protected void createAzCopyFile(Path path) throws Exception { - Assume.assumeTrue(getAbfsServiceType() == AbfsServiceType.BLOB); + assumeThat(getAbfsServiceType()).isEqualTo(AbfsServiceType.BLOB); assumeValidTestConfigPresent(getRawConfiguration(), FS_AZURE_TEST_FIXED_SAS_TOKEN); String sasToken = getRawConfiguration().get(FS_AZURE_TEST_FIXED_SAS_TOKEN); AzcopyToolHelper azcopyHelper = AzcopyToolHelper.getInstance(sasToken); @@ -646,8 +647,9 @@ private String getAzcopyAbsolutePath(Path path) throws IOException { * Otherwise, the test will be skipped. */ protected void assumeBlobServiceType() { - Assume.assumeTrue("Blob service type is required for this test", - getAbfsServiceType() == AbfsServiceType.BLOB); + assumeThat(getAbfsServiceType()) + .as("Blob service type is required for this test") + .isEqualTo(AbfsServiceType.BLOB); } /** @@ -655,8 +657,9 @@ protected void assumeBlobServiceType() { * Otherwise, the test will be skipped. */ protected void assumeDfsServiceType() { - Assume.assumeTrue("DFS service type is required for this test", - getAbfsServiceType() == AbfsServiceType.DFS); + assumeThat(getAbfsServiceType()) + .as("DFS service type is required for this test") + .isEqualTo(AbfsServiceType.DFS); } /** @@ -674,7 +677,7 @@ protected void assumeHnsEnabled() throws IOException { * @throws IOException if an error occurs while checking the account type. */ protected void assumeHnsEnabled(String errorMessage) throws IOException { - Assume.assumeTrue(errorMessage, getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).as(errorMessage).isTrue(); } /** @@ -692,7 +695,7 @@ protected void assumeHnsDisabled() throws IOException { * @throws IOException if an error occurs while checking the account type. */ protected void assumeHnsDisabled(String message) throws IOException { - Assume.assumeFalse(message, getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).as(message).isFalse(); } /** @@ -755,19 +758,20 @@ protected void checkFuturesForExceptions(List> futures, int exceptionV protected void assumeRecoveryThroughClientTransactionID(boolean isCreate) throws IOException { // Assumes that recovery through client transaction ID is enabled. - Assume.assumeTrue("Recovery through client transaction ID is not enabled", - getConfiguration().getIsClientTransactionIdEnabled()); + assumeThat(getConfiguration().getIsClientTransactionIdEnabled()) + .as("Recovery through client transaction ID is not enabled") + .isTrue(); // Assumes that service type is DFS. assumeDfsServiceType(); // Assumes that namespace is enabled for the given AzureBlobFileSystem. assumeHnsEnabled(); if (isCreate) { // Assume that create client is DFS client. - Assume.assumeTrue("Ingress service type is not DFS", - AbfsServiceType.DFS.equals(getIngressServiceType())); + assumeThat(getIngressServiceType()) + .as("Ingress service type is not DFS") + .isEqualTo(AbfsServiceType.DFS); // Assume that append blob is not enabled in DFS client. 
- Assume.assumeFalse("Append blob is enabled in DFS client", - isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Append blob is enabled in DFS client").isFalse(); } } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsScaleTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsScaleTest.java index 14c9bff7bf8d2..d8286ecac2e34 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsScaleTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsScaleTest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.azurebfs; +import org.junit.jupiter.api.BeforeEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,6 +45,7 @@ protected int getTestTimeoutMillis() { return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsTestWithTimeout.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsTestWithTimeout.java index 0485422871ecc..2b61ce750fda2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsTestWithTimeout.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/AbstractAbfsTestWithTimeout.java @@ -18,16 +18,17 @@ package org.apache.hadoop.fs.azurebfs; import java.io.IOException; +import java.util.concurrent.TimeUnit; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.rules.TestName; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.extension.RegisterExtension; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.test.TestName; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; @@ -37,27 +38,22 @@ * Base class for any ABFS test with timeouts & named threads. * This class does not attempt to bind to Azure. */ -public class AbstractAbfsTestWithTimeout extends Assert { +@Timeout(value = TEST_TIMEOUT, unit = TimeUnit.MILLISECONDS) +public class AbstractAbfsTestWithTimeout extends Assertions { private static final Logger LOG = LoggerFactory.getLogger(AbstractAbfsTestWithTimeout.class); /** * The name of the current method. */ - @Rule + @RegisterExtension public TestName methodName = new TestName(); - /** - * Set the timeout for every test. - * This is driven by the value returned by {@link #getTestTimeoutMillis()}. - */ - @Rule - public Timeout testTimeout = new Timeout(getTestTimeoutMillis()); /** * Name the junit thread for the class. This will overridden * before the individual test methods are run. */ - @BeforeClass + @BeforeAll public static void nameTestThread() { Thread.currentThread().setName("JUnit"); } @@ -65,7 +61,7 @@ public static void nameTestThread() { /** * Name the thread to the current test method. 
*/ - @Before + @BeforeEach public void nameThread() { Thread.currentThread().setName("JUnit-" + methodName.getMethodName()); } @@ -110,15 +106,17 @@ protected boolean validateContent(AzureBlobFileSystem fs, Path path, while (valueOfContentAtPos != -1 && pos < lenOfOriginalByteArray) { if (originalByteArray[pos] != valueOfContentAtPos) { - assertEquals("Mismatch in content validation at position {}", pos, - originalByteArray[pos], valueOfContentAtPos); + assertEquals( + originalByteArray[pos], + valueOfContentAtPos, + String.format("Mismatch in content validation at position %d", pos)); return false; } valueOfContentAtPos = (byte) in.read(); pos++; } if (valueOfContentAtPos != -1) { - assertEquals("Expected end of file", -1, valueOfContentAtPos); + assertEquals(-1, valueOfContentAtPos, "Expected end of file"); return false; } return true; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestABFSJceksFiltering.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestABFSJceksFiltering.java index e1b6b39521acd..da32419f6aaab 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestABFSJceksFiltering.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestABFSJceksFiltering.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azurebfs; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.conf.Configuration; @@ -35,7 +35,7 @@ public void testIncompatibleCredentialProviderIsExcluded() throws Exception { rawConfig.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "jceks://abfs@a@b.c.d/tmp/a.jceks,jceks://file/tmp/secret.jceks"); try (AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.get(rawConfig)) { - assertNotNull("filesystem", fs); + assertNotNull(fs, "filesystem"); String providers = fs.getConf().get(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH); assertEquals("jceks://file/tmp/secret.jceks", providers); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java index c180689b267ab..c74d1bf97d5c9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsClient.java @@ -27,10 +27,9 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import org.assertj.core.api.Assertions; -import org.junit.Assert; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -43,6 +42,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ACCOUNT_KEY; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assertions.assertThat; /** * Test continuation token which has equal sign. 
@@ -55,7 +55,7 @@ public ITestAbfsClient() throws Exception { super(); } - @Ignore("HADOOP-16845: Invalid continuation tokens are ignored by the ADLS " + @Disabled("HADOOP-16845: Invalid continuation tokens are ignored by the ADLS " + "Gen2 service, so we are disabling this test until the service is fixed.") @Test public void testContinuationTokenHavingEqualSign() throws Exception { @@ -66,13 +66,13 @@ public void testContinuationTokenHavingEqualSign() throws Exception { AbfsRestOperation op = abfsClient .listPath("/", true, LIST_MAX_RESULTS, "===========", getTestTracingContext(fs, true), null).getOp(); - Assert.assertTrue(false); + Assertions.assertTrue(false); } catch (AbfsRestOperationException ex) { - Assert.assertEquals("InvalidQueryParameterValue", ex.getErrorCode().getErrorCode()); + Assertions.assertEquals("InvalidQueryParameterValue", ex.getErrorCode().getErrorCode()); } } - @Ignore("Enable this to verify the log warning message format for HostNotFoundException") + @Disabled("Enable this to verify the log warning message format for HostNotFoundException") @Test public void testUnknownHost() throws Exception { // When hitting hostName not found exception, the retry will take about 14 mins until failed. @@ -113,7 +113,7 @@ public void testListPathWithValidListMaxResultsValues() if (continuationToken == null) { // Listing is complete and number of objects should be same as expected - Assertions.assertThat(list) + assertThat(list) .describedAs("AbfsClient.listPath() should return %d items" + " when listMaxResults is %d, directory contains %d items and " + "listing is complete", @@ -121,7 +121,7 @@ public void testListPathWithValidListMaxResultsValues() .hasSize(expectedListResultsSize); } else { // Listing is incomplete and number of objects can be less than expected - Assertions.assertThat(list) + assertThat(list) .describedAs("AbfsClient.listPath() should return %d items" + " or less when listMaxResults is %d, directory contains" + " %d items and listing is incomplete", @@ -148,7 +148,7 @@ public void testListPathWithValueGreaterThanServerMaximum() if (continuationToken == null) { // Listing is complete and number of objects should be same as expected - Assertions.assertThat(list) + assertThat(list) .describedAs("AbfsClient.listPath() should return %d items" + " when listMaxResults is %d directory contains %d items and " + "listing is complete", LIST_MAX_RESULTS_SERVER, @@ -156,7 +156,7 @@ public void testListPathWithValueGreaterThanServerMaximum() .hasSize(LIST_MAX_RESULTS_SERVER); } else { // Listing is incomplete and number of objects can be less than expected - Assertions.assertThat(list) + assertThat(list) .describedAs("AbfsClient.listPath() should return %d items" + " or less when listMaxResults is %d, directory contains" + " %d items and listing is complete", LIST_MAX_RESULTS_SERVER, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsCustomEncryption.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsCustomEncryption.java index 45de7b3d2348e..5ace54d909488 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsCustomEncryption.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsCustomEncryption.java @@ -30,9 +30,8 @@ import org.assertj.core.api.Assertions; import org.assertj.core.api.Assumptions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import 
org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -63,6 +62,8 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.Lists; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.CPK_IN_NON_HNS_ACCOUNT_ERROR_MESSAGE; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.EMPTY_STRING; @@ -85,7 +86,8 @@ import static org.apache.hadoop.fs.permission.AclEntryType.USER; import static org.apache.hadoop.fs.permission.FsAction.ALL; -@RunWith(Parameterized.class) +@ParameterizedClass(name="{0} mode, {2}") +@MethodSource("params") public class ITestAbfsCustomEncryption extends AbstractAbfsIntegrationTest { public static final String SERVER_FILE_CONTENT = "123"; @@ -97,43 +99,33 @@ public class ITestAbfsCustomEncryption extends AbstractAbfsIntegrationTest { private List fileSystemsOpenedInTest = new ArrayList<>(); // Encryption type used by filesystem while creating file - @Parameterized.Parameter public EncryptionType fileEncryptionType; // Encryption type used by filesystem to call different operations - @Parameterized.Parameter(1) public EncryptionType requestEncryptionType; - @Parameterized.Parameter(2) public FSOperationType operation; - @Parameterized.Parameter(3) public boolean responseHeaderServerEnc; - @Parameterized.Parameter(4) public boolean responseHeaderReqServerEnc; - @Parameterized.Parameter(5) public boolean isExceptionCase; /** * Boolean value to indicate that the server response would have header related * to CPK and the test would need to assert its value. */ - @Parameterized.Parameter(6) public boolean isCpkResponseHdrExpected; /** * Boolean value to indicate that the server response would have fields related * to CPK and the test would need to assert its value. 
*/ - @Parameterized.Parameter(7) public Boolean isCpkResponseKeyExpected = false; - @Parameterized.Parameter(8) public Boolean fileSystemListStatusResultToBeUsedForOpeningFile = false; - @Parameterized.Parameters(name = "{0} mode, {2}") public static Iterable params() { return Arrays.asList(new Object[][] { {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.READ, true, false, false, true, false, false}, @@ -178,10 +170,26 @@ public static Iterable params() { }); } - public ITestAbfsCustomEncryption() throws Exception { + public ITestAbfsCustomEncryption(EncryptionType pFileEncryptionType, + EncryptionType pRequestEncryptionType, FSOperationType pOperation, + boolean pResponseHeaderServerEnc, boolean pResponseHeaderReqServerEnc, + boolean pIsExceptionCase, boolean pIsCpkResponseHdrExpected, + Boolean pIsCpkResponseKeyExpected, + boolean pFileSystemListStatusResultToBeUsedForOpeningFile) throws Exception { + new Random().nextBytes(cpk); cpkSHAEncoded = EncodingHelper.getBase64EncodedString( EncodingHelper.getSHA256Hash(cpk)); + fileEncryptionType = pFileEncryptionType; + requestEncryptionType = pRequestEncryptionType; + operation = pOperation; + responseHeaderServerEnc = pResponseHeaderServerEnc; + responseHeaderReqServerEnc = pResponseHeaderReqServerEnc; + isExceptionCase = pIsExceptionCase; + isCpkResponseHdrExpected = pIsCpkResponseHdrExpected; + isCpkResponseKeyExpected = pIsCpkResponseKeyExpected; + fileSystemListStatusResultToBeUsedForOpeningFile = + pFileSystemListStatusResultToBeUsedForOpeningFile; } @Test @@ -514,6 +520,7 @@ private AzureBlobFileSystem getFileSystemForFileEncryption() throws Exception { return fs; } + @AfterEach @Override public void teardown() throws Exception { super.teardown(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java index 0997b3dbd44d4..45c1161cd1814 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsDurationTrackers.java @@ -21,7 +21,7 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsHugeFiles.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsHugeFiles.java index 510e0a7596b47..0c7cc0d9a739b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsHugeFiles.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsHugeFiles.java @@ -23,10 +23,11 @@ import java.util.Collection; import java.util.Random; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -43,7 +44,8 @@ /** * Testing Huge file for AbfsOutputStream. 
*/ -@RunWith(Parameterized.class) +@ParameterizedClass(name="size [{0}] ; blockFactoryName [{1}]") +@MethodSource("sizes") public class ITestAbfsHugeFiles extends AbstractAbfsScaleTest { private static final int ONE_MB = 1024 * 1024; private static final int EIGHT_MB = 8 * ONE_MB; @@ -62,8 +64,6 @@ public class ITestAbfsHugeFiles extends AbstractAbfsScaleTest { // Block Factory to be used in this test. private String blockFactoryName; - @Parameterized.Parameters(name = "size [{0}] ; blockFactoryName " - + "[{1}]") public static Collection sizes() { return Arrays.asList(new Object[][] { { DEFAULT_WRITE_BUFFER_SIZE, DataBlocks.DATA_BLOCKS_BUFFER_DISK }, @@ -81,7 +81,7 @@ public ITestAbfsHugeFiles(int size, String blockFactoryName) this.blockFactoryName = blockFactoryName; } - @Before + @BeforeEach public void setUp() throws Exception { Configuration configuration = getRawConfiguration(); configuration.unset(DATA_BLOCKS_BUFFER); @@ -103,8 +103,8 @@ public void testHugeFileWrite() throws IOException { } // Verify correct length was uploaded. Don't want to verify contents // here, as this would increase the test time significantly. - assertEquals("Mismatch in content length of file uploaded", size, - fs.getFileStatus(filePath).getLen()); + assertEquals(size, fs.getFileStatus(filePath).getLen(), + "Mismatch in content length of file uploaded"); } /** @@ -128,7 +128,7 @@ public void testLotsOfWrites() throws IOException { } // Verify correct length was uploaded. Don't want to verify contents // here, as this would increase the test time significantly. - assertEquals("Mismatch in content length of file uploaded", size, - fs.getFileStatus(filePath).getLen()); + assertEquals(size, fs.getFileStatus(filePath).getLen(), + "Mismatch in content length of file uploaded"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java index 5868d083e12e9..ab53786c719e0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsIdentityTransformer.java @@ -25,7 +25,7 @@ import org.apache.hadoop.util.Lists; import org.apache.hadoop.fs.azurebfs.oauth2.IdentityTransformer; import org.apache.hadoop.fs.permission.AclEntry; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; @@ -48,7 +48,6 @@ /** * Test IdentityTransformer. 
*/ -//@RunWith(Parameterized.class) public class ITestAbfsIdentityTransformer extends AbstractAbfsScaleTest{ private final UserGroupInformation userGroupInfo; private final String localUser; @@ -73,8 +72,8 @@ public void testDaemonServiceSettingIdentity() throws IOException { resetIdentityConfig(config); // Default config IdentityTransformer identityTransformer = getTransformerWithDefaultIdentityConfig(config); - assertEquals("Identity should not change for default config", - DAEMON, identityTransformer.transformUserOrGroupForSetRequest(DAEMON)); + assertEquals(DAEMON, identityTransformer.transformUserOrGroupForSetRequest(DAEMON), + "Identity should not change for default config"); // Add service principal id config.set(FS_AZURE_OVERRIDE_OWNER_SP, SERVICE_PRINCIPAL_ID); @@ -82,20 +81,20 @@ public void testDaemonServiceSettingIdentity() throws IOException { // case 1: substitution list doesn't contain daemon config.set(FS_AZURE_OVERRIDE_OWNER_SP_LIST, "a,b,c,d"); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("Identity should not change when substitution list doesn't contain daemon", - DAEMON, identityTransformer.transformUserOrGroupForSetRequest(DAEMON)); + assertEquals(DAEMON, identityTransformer.transformUserOrGroupForSetRequest(DAEMON), + "Identity should not change when substitution list doesn't contain daemon"); // case 2: substitution list contains daemon name config.set(FS_AZURE_OVERRIDE_OWNER_SP_LIST, DAEMON + ",a,b,c,d"); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("Identity should be replaced to servicePrincipalId", - SERVICE_PRINCIPAL_ID, identityTransformer.transformUserOrGroupForSetRequest(DAEMON)); + assertEquals(SERVICE_PRINCIPAL_ID, identityTransformer.transformUserOrGroupForSetRequest(DAEMON), + "Identity should be replaced to servicePrincipalId"); // case 3: substitution list is * config.set(FS_AZURE_OVERRIDE_OWNER_SP_LIST, ASTERISK); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("Identity should be replaced to servicePrincipalId", - SERVICE_PRINCIPAL_ID, identityTransformer.transformUserOrGroupForSetRequest(DAEMON)); + assertEquals(SERVICE_PRINCIPAL_ID, identityTransformer.transformUserOrGroupForSetRequest(DAEMON), + "Identity should be replaced to servicePrincipalId"); } @Test @@ -103,8 +102,8 @@ public void testFullyQualifiedNameSettingIdentity() throws IOException { Configuration config = this.getRawConfiguration(); // Default config IdentityTransformer identityTransformer = getTransformerWithDefaultIdentityConfig(config); - assertEquals("short name should not be converted to full name by default", - SHORT_NAME, identityTransformer.transformUserOrGroupForSetRequest(SHORT_NAME)); + assertEquals(SHORT_NAME, identityTransformer.transformUserOrGroupForSetRequest(SHORT_NAME), + "short name should not be converted to full name by default"); resetIdentityConfig(config); @@ -112,8 +111,9 @@ public void testFullyQualifiedNameSettingIdentity() throws IOException { config.setBoolean(FS_AZURE_FILE_OWNER_ENABLE_SHORTNAME, true); config.set(FS_AZURE_FILE_OWNER_DOMAINNAME, DOMAIN); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("short name should be converted to full name", - FULLY_QUALIFIED_NAME, identityTransformer.transformUserOrGroupForSetRequest(SHORT_NAME)); + assertEquals(FULLY_QUALIFIED_NAME, + identityTransformer.transformUserOrGroupForSetRequest(SHORT_NAME), + "short name should be converted to full 
name"); } @Test @@ -128,8 +128,8 @@ public void testNoOpForSettingOidAsIdentity() throws IOException { IdentityTransformer identityTransformer = getTransformerWithCustomizedIdentityConfig(config); final String principalId = UUID.randomUUID().toString(); - assertEquals("Identity should not be changed when owner is already a principal id ", - principalId, identityTransformer.transformUserOrGroupForSetRequest(principalId)); + assertEquals(principalId, identityTransformer.transformUserOrGroupForSetRequest(principalId), + "Identity should not be changed when owner is already a principal id "); } @Test @@ -141,8 +141,8 @@ public void testNoOpWhenSettingSuperUserAsdentity() throws IOException { config.set(FS_AZURE_FILE_OWNER_DOMAINNAME, DOMAIN); // Default config IdentityTransformer identityTransformer = getTransformerWithDefaultIdentityConfig(config); - assertEquals("Identity should not be changed because it is not in substitution list", - SUPER_USER, identityTransformer.transformUserOrGroupForSetRequest(SUPER_USER)); + assertEquals(SUPER_USER, identityTransformer.transformUserOrGroupForSetRequest(SUPER_USER), + "Identity should not be changed because it is not in substitution list"); } @Test @@ -152,14 +152,16 @@ public void testIdentityReplacementForSuperUserGetRequest() throws IOException { // with default config, identityTransformer should do $superUser replacement IdentityTransformer identityTransformer = getTransformerWithDefaultIdentityConfig(config); - assertEquals("$superuser should be replaced with local user by default", - localUser, identityTransformer.transformIdentityForGetRequest(SUPER_USER, true, localUser)); + assertEquals(localUser, + identityTransformer.transformIdentityForGetRequest(SUPER_USER, true, localUser), + "$superuser should be replaced with local user by default"); // Disable $supeuser replacement config.setBoolean(FS_AZURE_SKIP_SUPER_USER_REPLACEMENT, true); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("$superuser should not be replaced", - SUPER_USER, identityTransformer.transformIdentityForGetRequest(SUPER_USER, true, localUser)); + assertEquals(SUPER_USER, + identityTransformer.transformIdentityForGetRequest(SUPER_USER, true, localUser), + "$superuser should not be replaced"); } @Test @@ -169,47 +171,53 @@ public void testIdentityReplacementForDaemonServiceGetRequest() throws IOExcepti // Default config IdentityTransformer identityTransformer = getTransformerWithDefaultIdentityConfig(config); - assertEquals("By default servicePrincipalId should not be converted for GetFileStatus(), listFileStatus(), getAcl()", - SERVICE_PRINCIPAL_ID, identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser)); + assertEquals(SERVICE_PRINCIPAL_ID, + identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser), + "By default servicePrincipalId should not be converted for GetFileStatus(), listFileStatus(), getAcl()"); resetIdentityConfig(config); // 1. 
substitution list doesn't contain currentUser config.set(FS_AZURE_OVERRIDE_OWNER_SP_LIST, "a,b,c,d"); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("servicePrincipalId should not be replaced if local daemon user is not in substitution list", - SERVICE_PRINCIPAL_ID, identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser)); + assertEquals(SERVICE_PRINCIPAL_ID, + identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser), + "servicePrincipalId should not be replaced if local daemon user is not in substitution list"); resetIdentityConfig(config); // 2. substitution list contains currentUser(daemon name) but the service principal id in config doesn't match config.set(FS_AZURE_OVERRIDE_OWNER_SP_LIST, localUser + ",a,b,c,d"); config.set(FS_AZURE_OVERRIDE_OWNER_SP, UUID.randomUUID().toString()); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("servicePrincipalId should not be replaced if it is not equal to the SPN set in config", - SERVICE_PRINCIPAL_ID, identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser)); + assertEquals(SERVICE_PRINCIPAL_ID, + identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser), + "servicePrincipalId should not be replaced if it is not equal to the SPN set in config"); resetIdentityConfig(config); // 3. substitution list contains currentUser(daemon name) and the service principal id in config matches config.set(FS_AZURE_OVERRIDE_OWNER_SP_LIST, localUser + ",a,b,c,d"); config.set(FS_AZURE_OVERRIDE_OWNER_SP, SERVICE_PRINCIPAL_ID); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("servicePrincipalId should be transformed to local use", - localUser, identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser)); + assertEquals(localUser, + identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser), + "servicePrincipalId should be transformed to local user"); resetIdentityConfig(config); // 4. substitution is "*" but the service principal id in config doesn't match the input config.set(FS_AZURE_OVERRIDE_OWNER_SP_LIST, ASTERISK); config.set(FS_AZURE_OVERRIDE_OWNER_SP, UUID.randomUUID().toString()); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("servicePrincipalId should not be replaced if it is not equal to the SPN set in config", - SERVICE_PRINCIPAL_ID, identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser)); + assertEquals(SERVICE_PRINCIPAL_ID, + identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser), + "servicePrincipalId should not be replaced if it is not equal to the SPN set in config"); resetIdentityConfig(config); // 5. 
substitution is "*" and the service principal id in config match the input config.set(FS_AZURE_OVERRIDE_OWNER_SP_LIST, ASTERISK); config.set(FS_AZURE_OVERRIDE_OWNER_SP, SERVICE_PRINCIPAL_ID); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("servicePrincipalId should be transformed to local user", - localUser, identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser)); + assertEquals(localUser, + identityTransformer.transformIdentityForGetRequest(SERVICE_PRINCIPAL_ID, true, localUser), + "servicePrincipalId should be transformed to local user"); } @Test @@ -219,17 +227,20 @@ public void testIdentityReplacementForKinitUserGetRequest() throws IOException { // Default config IdentityTransformer identityTransformer = getTransformerWithDefaultIdentityConfig(config); - assertEquals("full name should not be transformed if shortname is not enabled", - FULLY_QUALIFIED_NAME, identityTransformer.transformIdentityForGetRequest(FULLY_QUALIFIED_NAME, true, localUser)); + assertEquals(FULLY_QUALIFIED_NAME, + identityTransformer.transformIdentityForGetRequest(FULLY_QUALIFIED_NAME, true, localUser), + "full name should not be transformed if shortname is not enabled"); // add config to get short name config.setBoolean(FS_AZURE_FILE_OWNER_ENABLE_SHORTNAME, true); identityTransformer = getTransformerWithCustomizedIdentityConfig(config); - assertEquals("should convert the full owner name to shortname ", - SHORT_NAME, identityTransformer.transformIdentityForGetRequest(FULLY_QUALIFIED_NAME, true, localUser)); + assertEquals(SHORT_NAME, + identityTransformer.transformIdentityForGetRequest(FULLY_QUALIFIED_NAME, true, localUser), + "should convert the full owner name to shortname "); - assertEquals("group name should not be converted to shortname ", - FULLY_QUALIFIED_NAME, identityTransformer.transformIdentityForGetRequest(FULLY_QUALIFIED_NAME, false, localGroup)); + assertEquals(FULLY_QUALIFIED_NAME, + identityTransformer.transformIdentityForGetRequest(FULLY_QUALIFIED_NAME, false, localGroup), + "group name should not be converted to shortname "); } @Test @@ -350,9 +361,9 @@ private IdentityTransformer getTransformerWithCustomizedIdentityConfig(Configura } private void checkAclEntriesList(List aclEntries, List expected) { - assertTrue("list size not equals", aclEntries.size() == expected.size()); + assertTrue(aclEntries.size() == expected.size(), "list size not equals"); for (int i = 0; i < aclEntries.size(); i++) { - assertEquals("Identity doesn't match", expected.get(i).getName(), aclEntries.get(i).getName()); + assertEquals(expected.get(i).getName(), aclEntries.get(i).getName(), "Identity doesn't match"); } } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java index afc92c111a913..6b87f1b73ef20 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsInputStreamStatistics.java @@ -21,7 +21,7 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -160,18 +160,14 @@ public void testSeekStatistics() throws IOException { * would be equal to OPERATIONS. 
* */ - assertEquals("Mismatch in seekOps value", 2 * OPERATIONS, - stats.getSeekOperations()); - assertEquals("Mismatch in backwardSeekOps value", OPERATIONS, - stats.getBackwardSeekOperations()); - assertEquals("Mismatch in forwardSeekOps value", OPERATIONS, - stats.getForwardSeekOperations()); - assertEquals("Mismatch in bytesBackwardsOnSeek value", - OPERATIONS * ONE_MB, stats.getBytesBackwardsOnSeek()); - assertEquals("Mismatch in bytesSkippedOnSeek value", - 0, stats.getBytesSkippedOnSeek()); - assertEquals("Mismatch in seekInBuffer value", OPERATIONS, - stats.getSeekInBuffer()); + assertEquals(2 * OPERATIONS, stats.getSeekOperations(), "Mismatch in seekOps value"); + assertEquals(OPERATIONS, stats.getBackwardSeekOperations(), + "Mismatch in backwardSeekOps value"); + assertEquals(OPERATIONS, stats.getForwardSeekOperations(), "Mismatch in forwardSeekOps value"); + assertEquals(OPERATIONS * ONE_MB, stats.getBytesBackwardsOnSeek(), + "Mismatch in bytesBackwardsOnSeek value"); + assertEquals(0, stats.getBytesSkippedOnSeek(), "Mismatch in bytesSkippedOnSeek value"); + assertEquals(OPERATIONS, stats.getSeekInBuffer(), "Mismatch in seekInBuffer value"); in.close(); // Verifying whether stats are readable after stream is closed. @@ -230,12 +226,9 @@ public void testReadStatistics() throws IOException { * total remote read ops is 1. * */ - assertEquals("Mismatch in bytesRead value", OPERATIONS, - stats.getBytesRead()); - assertEquals("Mismatch in readOps value", OPERATIONS, - stats.getReadOperations()); - assertEquals("Mismatch in remoteReadOps value", 1, - stats.getRemoteReadOperations()); + assertEquals(OPERATIONS, stats.getBytesRead(), "Mismatch in bytesRead value"); + assertEquals(OPERATIONS, stats.getReadOperations(), "Mismatch in readOps value"); + assertEquals(1, stats.getRemoteReadOperations(), "Mismatch in remoteReadOps value"); in.close(); // Verifying if stats are still readable after stream is closed. @@ -288,8 +281,7 @@ public void testWithNullStreamStatistics() throws IOException { getTestTracingContext(fs, false)); // Verifying that AbfsInputStream Operations works with null statistics. - assertNotEquals("AbfsInputStream read() with null statistics should " - + "work", -1, in.read()); + assertNotEquals(-1, in.read(), "AbfsInputStream read() with null statistics should "+ "work"); in.seek(ONE_KB); // Verifying toString() with no StreamStatistics. @@ -420,6 +412,6 @@ public void testActionHttpGetRequest() throws IOException { * @param statistic the name of operation or statistic being asserted. 
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java
index 6f7fe0ce2cab2..2e63e234c97ef 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsListStatusRemoteIterator.java
@@ -29,9 +29,8 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.assertj.core.api.Assertions;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -49,6 +48,7 @@
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.nullable;
 import static org.mockito.Mockito.verify;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Test ListStatusRemoteIterator operation.
@@ -71,7 +71,7 @@ public void testAbfsIteratorWithHasNext() throws Exception {
     ListingSupport listingSupport = Mockito.spy(getFileSystem().getAbfsStore());
     RemoteIterator<FileStatus> fsItr = new AbfsListStatusRemoteIterator(testDir, listingSupport,
         getTestTracingContext(getFileSystem(), true));
-    Assertions.assertThat(fsItr)
+    assertThat(fsItr)
         .describedAs("RemoteIterator should be instance of "
             + "AbfsListStatusRemoteIterator by default")
         .isInstanceOf(AbfsListStatusRemoteIterator.class);
@@ -99,7 +99,7 @@ public void testAbfsIteratorWithoutHasNext() throws Exception {
     ListingSupport listingSupport = Mockito.spy(getFileSystem().getAbfsStore());
     RemoteIterator<FileStatus> fsItr = new AbfsListStatusRemoteIterator(testDir, listingSupport,
         getTestTracingContext(getFileSystem(), true));
-    Assertions.assertThat(fsItr)
+    assertThat(fsItr)
         .describedAs("RemoteIterator should be instance of "
             + "AbfsListStatusRemoteIterator by default")
         .isInstanceOf(AbfsListStatusRemoteIterator.class);
@@ -128,7 +128,7 @@ public void testWithAbfsIteratorDisabled() throws Exception {
     RemoteIterator<FileStatus> fsItr =
         getFileSystem().listStatusIterator(testDir);
-    Assertions.assertThat(fsItr)
+    assertThat(fsItr)
         .describedAs("RemoteIterator should not be instance of "
             + "AbfsListStatusRemoteIterator when it is disabled")
         .isNotInstanceOf(AbfsListStatusRemoteIterator.class);
@@ -150,7 +150,7 @@ public void testWithAbfsIteratorDisabledWithoutHasNext() throws Exception {
     RemoteIterator<FileStatus> fsItr = getFileSystem().listStatusIterator(
         testDir);
-    Assertions.assertThat(fsItr).describedAs(
+    assertThat(fsItr).describedAs(
         "RemoteIterator should not be instance of "
             + "AbfsListStatusRemoteIterator when it is disabled")
         .isNotInstanceOf(AbfsListStatusRemoteIterator.class);
@@ -182,7 +182,7 @@ public void testHasNextForEmptyDir() throws Exception {
     setPageSize(10);
     RemoteIterator<FileStatus> fsItr = getFileSystem()
         .listStatusIterator(testDir);
-    Assertions.assertThat(fsItr.hasNext())
+    assertThat(fsItr.hasNext())
         .describedAs("hasNext returns false for empty directory")
         .isFalse();
   }
@@ -195,9 +195,9 @@ public void testHasNextForFile() throws Exception {
     getFileSystem().create(testFile);
     setPageSize(10);
     RemoteIterator<FileStatus> fsItr = fs.listStatusIterator(testFile);
-    Assertions.assertThat(fsItr.hasNext())
+    assertThat(fsItr.hasNext())
         .describedAs("hasNext returns true for file").isTrue();
-    Assertions.assertThat(fsItr.next().getPath().toString())
+    assertThat(fsItr.next().getPath().toString())
         .describedAs("next returns the file itself")
         .endsWith(testFileName);
   }
@@ -227,16 +227,16 @@ private void verifyIteratorResultContent(FileStatus fileStatus,
       List<String> fileNames) {
     assertPathDns(fileStatus.getPath());
     String pathStr = fileStatus.getPath().toString();
-    Assert.assertTrue(
-        String.format("Could not remove path %s from filenames %s", pathStr,
-            fileNames), fileNames.remove(pathStr));
+    Assertions.assertTrue(
+        fileNames.remove(pathStr), String.format("Could not remove path %s from filenames %s", pathStr,
+            fileNames));
   }
 
   private void verifyIteratorResultCount(int itrCount, List<String> fileNames) {
-    Assertions.assertThat(itrCount).describedAs(
+    assertThat(itrCount).describedAs(
         "Number of iterations should be equal to the files created")
         .isEqualTo(TEST_FILES_NUMBER);
-    Assertions.assertThat(fileNames)
+    assertThat(fileNames)
         .describedAs("After removing every item found from the iterator, "
             + "there should be no more elements in the fileNames")
         .hasSize(0);
@@ -291,7 +291,7 @@ private List<String> createFilesUnderDirectory(Path rootPath)
       tasks.add(es.submit(() -> {
         touch(filePath);
         synchronized (fileNames) {
-          Assert.assertTrue(fileNames.add(filePath.toString()));
+          Assertions.assertTrue(fileNames.add(filePath.toString()));
         }
         return null;
       }));
@@ -303,7 +303,7 @@ private List<String> createFilesUnderDirectory(Path rootPath)
       es.shutdownNow();
     }
     LOG.debug(fileNames.toString());
-    Assertions.assertThat(fileNames)
+    assertThat(fileNames)
         .describedAs("File creation incorrect or fileNames not added to list")
         .hasSize(ITestAbfsListStatusRemoteIterator.TEST_FILES_NUMBER);
     return fileNames;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsMsiTokenProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsMsiTokenProvider.java
index d871befa43005..4880f8ec139a1 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsMsiTokenProvider.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsMsiTokenProvider.java
@@ -21,25 +21,20 @@
 import java.io.IOException;
 import java.util.Date;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider;
 import org.apache.hadoop.fs.azurebfs.oauth2.AzureADToken;
 import org.apache.hadoop.fs.azurebfs.oauth2.MsiTokenProvider;
 
-import static org.junit.Assume.assumeThat;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.Matchers.isEmptyOrNullString;
-import static org.hamcrest.Matchers.isEmptyString;
-
 import static org.apache.hadoop.fs.azurebfs.constants.AuthConfigurations.DEFAULT_FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY;
 import static org.apache.hadoop.fs.azurebfs.constants.AuthConfigurations.DEFAULT_FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_MSI_TENANT;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assumptions.assumeThat;
 
 /**
  * Test MsiTokenProvider.
 */
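One subtlety in this file: the Hamcrest-based `org.junit.Assume.assumeThat` calls being removed were assumptions, which abort (skip) the test when MSI settings are absent, while AssertJ's `Assertions.assertThat` fails it. The hunk below therefore keeps the config probes as `org.assertj.core.api.Assumptions.assumeThat` and reserves `assertThat` for the token checks. A minimal sketch of the two behaviours (class and parameter names are illustrative):

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assumptions.assumeThat;

class AssumeVsAssertSketch {
  void probe(String msiEndpoint, String accessToken) {
    // Assumption: aborts (skips) the test when the environment is not
    // configured, matching the old org.junit.Assume behaviour.
    assumeThat(msiEndpoint).isNotNull().isNotEmpty();
    // Assertion: fails the test when the behaviour under test is wrong.
    assertThat(accessToken).isNotEmpty();
  }
}
```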
@@ -54,14 +49,14 @@ public ITestAbfsMsiTokenProvider() throws Exception {
 
   @Test
   public void test() throws IOException {
     AbfsConfiguration conf = getConfiguration();
-    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT),
-        not(isEmptyOrNullString()));
-    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_MSI_TENANT),
-        not(isEmptyOrNullString()));
-    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID),
-        not(isEmptyOrNullString()));
-    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY),
-        not(isEmptyOrNullString()));
+    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_MSI_ENDPOINT))
+        .isNotNull().isNotEmpty();
+    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_MSI_TENANT))
+        .isNotNull().isNotEmpty();
+    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_CLIENT_ID))
+        .isNotNull().isNotEmpty();
+    assumeThat(conf.get(FS_AZURE_ACCOUNT_OAUTH_MSI_AUTHORITY))
+        .isNotNull().isNotEmpty();
 
     String tenantGuid = conf
         .getPasswordString(FS_AZURE_ACCOUNT_OAUTH_MSI_TENANT);
@@ -77,8 +72,8 @@ public void test() throws IOException {
 
     AzureADToken token = null;
     token = tokenProvider.getToken();
-    assertThat(token.getAccessToken(), not(isEmptyString()));
-    assertThat(token.getExpiry().after(new Date()), is(true));
+    assertThat(token.getAccessToken()).isNotEmpty();
+    assertThat(token.getExpiry().after(new Date())).isTrue();
   }
 
   private String getTrimmedPasswordString(AbfsConfiguration conf, String key,
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java
index e66afbcaa7492..e29bfc5f624b2 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsNetworkStatistics.java
@@ -23,7 +23,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java
index 8be997ce69cf3..459c615d32480 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsOutputStreamStatistics.java
@@ -21,7 +21,7 @@
 import java.io.IOException;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -67,8 +67,8 @@ public void testAbfsOutputStreamUploadingBytes() throws IOException {
         getAbfsOutputStreamStatistics(outForSomeBytes);
 
     //Test for zero bytes To upload.
- assertEquals("Mismatch in bytes to upload", 0, - abfsOutputStreamStatisticsForUploadBytes.getBytesToUpload()); + assertEquals(0, abfsOutputStreamStatisticsForUploadBytes.getBytesToUpload(), + "Mismatch in bytes to upload"); outForSomeBytes.write(testBytesToUpload.getBytes()); outForSomeBytes.flush(); @@ -76,14 +76,13 @@ public void testAbfsOutputStreamUploadingBytes() throws IOException { getAbfsOutputStreamStatistics(outForSomeBytes); //Test for bytes to upload. - assertEquals("Mismatch in bytes to upload", - testBytesToUpload.getBytes().length, - abfsOutputStreamStatisticsForUploadBytes.getBytesToUpload()); + assertEquals(testBytesToUpload.getBytes().length, + abfsOutputStreamStatisticsForUploadBytes.getBytesToUpload(), "Mismatch in bytes to upload"); //Test for successful bytes uploaded. - assertEquals("Mismatch in successful bytes uploaded", - testBytesToUpload.getBytes().length, - abfsOutputStreamStatisticsForUploadBytes.getBytesUploadSuccessful()); + assertEquals(testBytesToUpload.getBytes().length, + abfsOutputStreamStatisticsForUploadBytes.getBytesUploadSuccessful(), + "Mismatch in successful bytes uploaded"); } @@ -99,14 +98,13 @@ public void testAbfsOutputStreamUploadingBytes() throws IOException { getAbfsOutputStreamStatistics(outForLargeBytes); //Test for bytes to upload. - assertEquals("Mismatch in bytes to upload", - OPERATIONS * (testBytesToUpload.getBytes().length), - abfsOutputStreamStatistics.getBytesToUpload()); + assertEquals(OPERATIONS * (testBytesToUpload.getBytes().length), + abfsOutputStreamStatistics.getBytesToUpload(), "Mismatch in bytes to upload"); //Test for successful bytes uploaded. - assertEquals("Mismatch in successful bytes uploaded", - OPERATIONS * (testBytesToUpload.getBytes().length), - abfsOutputStreamStatistics.getBytesUploadSuccessful()); + assertEquals(OPERATIONS * (testBytesToUpload.getBytes().length), + abfsOutputStreamStatistics.getBytesUploadSuccessful(), + "Mismatch in successful bytes uploaded"); } } @@ -137,8 +135,8 @@ public void testAbfsOutputStreamQueueShrink() throws IOException { getAbfsOutputStreamStatistics(outForOneOp); //Test for shrinking queue zero time. - assertEquals("Mismatch in queue shrunk operations", 0, - abfsOutputStreamStatistics.getQueueShrunkOps()); + assertEquals(0, abfsOutputStreamStatistics.getQueueShrunkOps(), + "Mismatch in queue shrunk operations"); } @@ -168,9 +166,8 @@ public void testAbfsOutputStreamQueueShrink() throws IOException { * write operations done to get the number of queue shrinks done. * */ - assertEquals("Mismatch in queue shrunk operations", - OPERATIONS - outForLargeOps.getWriteOperationsSize(), - abfsOutputStreamStatistics.getQueueShrunkOps()); + assertEquals(OPERATIONS - outForLargeOps.getWriteOperationsSize(), + abfsOutputStreamStatistics.getQueueShrunkOps(), "Mismatch in queue shrunk operations"); } } @@ -196,8 +193,8 @@ public void testAbfsOutputStreamWriteBuffer() throws IOException { getAbfsOutputStreamStatistics(outForOneOp); //Test for zero time writing buffer to service. 
- assertEquals("Mismatch in write current buffer operations", 0, - abfsOutputStreamStatistics.getWriteCurrentBufferOperations()); + assertEquals(0, abfsOutputStreamStatistics.getWriteCurrentBufferOperations(), + "Mismatch in write current buffer operations"); outForOneOp.write(testWriteBuffer.getBytes()); outForOneOp.flush(); @@ -205,8 +202,8 @@ public void testAbfsOutputStreamWriteBuffer() throws IOException { abfsOutputStreamStatistics = getAbfsOutputStreamStatistics(outForOneOp); //Test for one time writing buffer to service. - assertEquals("Mismatch in write current buffer operations", 1, - abfsOutputStreamStatistics.getWriteCurrentBufferOperations()); + assertEquals(1, abfsOutputStreamStatistics.getWriteCurrentBufferOperations(), + "Mismatch in write current buffer operations"); } try ( @@ -225,9 +222,8 @@ public void testAbfsOutputStreamWriteBuffer() throws IOException { AbfsOutputStreamStatisticsImpl abfsOutputStreamStatistics = getAbfsOutputStreamStatistics(outForLargeOps); //Test for 10 times writing buffer to service. - assertEquals("Mismatch in write current buffer operations", - OPERATIONS, - abfsOutputStreamStatistics.getWriteCurrentBufferOperations()); + assertEquals(OPERATIONS, abfsOutputStreamStatistics.getWriteCurrentBufferOperations(), + "Mismatch in write current buffer operations"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadFooterMetrics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadFooterMetrics.java index ad4b0b1049d6d..ff5009785f4f9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadFooterMetrics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadFooterMetrics.java @@ -43,8 +43,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.azurebfs.utils.MetricFormat; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.Random; @@ -58,6 +57,8 @@ import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator; import org.apache.hadoop.fs.azurebfs.constants.FSOperationType; +import static org.assertj.core.api.Assumptions.assumeThat; + public class ITestAbfsReadFooterMetrics extends AbstractAbfsScaleTest { public ITestAbfsReadFooterMetrics() throws Exception { @@ -70,11 +71,13 @@ private void checkPrerequisites(){ checkIfConfigIsSet(FS_AZURE_METRIC_URI); } - private void checkIfConfigIsSet(String configKey){ + private void checkIfConfigIsSet(String configKey) { AbfsConfiguration conf = getConfiguration(); String value = conf.get(configKey); - Assume.assumeTrue(configKey + " config is mandatory for the test to run", - value != null && value.trim().length() > 1); + assumeThat(value) + .as(configKey + " config is mandatory for the test to run") + .isNotNull() + .matches(v -> v.trim().length() > 1, "trimmed length > 1"); } private static final String TEST_PATH = "/testfile"; @@ -127,8 +130,9 @@ private void writeDataToFile(AzureBlobFileSystem fs, Path testPath, byte[] data) */ private void assertMetricsEquality(AzureBlobFileSystem fs, String expectedMetrics) { AbfsReadFooterMetrics actualMetrics = fs.getAbfsClient().getAbfsCounters().getAbfsReadFooterMetrics(); - assertNotNull("AbfsReadFooterMetrics is null", actualMetrics); - assertEquals("The computed metrics differs from the actual metrics", expectedMetrics, actualMetrics.toString()); + assertNotNull(actualMetrics, "AbfsReadFooterMetrics is 
null"); + assertEquals(expectedMetrics, actualMetrics.toString(), + "The computed metrics differs from the actual metrics"); } /** @@ -188,7 +192,7 @@ public void testReadFooterMetrics() throws Exception { IOSTATISTICS_LOGGING_LEVEL_INFO, statisticsSource); // Ensure data is read successfully and matches the written data. - assertNotEquals("data read in final read()", -1, result); + assertNotEquals(-1, result, "data read in final read()"); assertArrayEquals(readBuffer, b); // Get non-Parquet metrics and assert metrics equality. diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java index c32c0147fe7da..4da64df6995f5 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadWriteAndSeek.java @@ -21,9 +21,9 @@ import java.util.Arrays; import java.util.Random; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -47,7 +47,8 @@ * Uses package-private methods in AbfsConfiguration, which is why it is in * this package. */ -@RunWith(Parameterized.class) +@ParameterizedClass(name="Size={0}-readahead={1}-Client={2}") +@MethodSource("sizes") public class ITestAbfsReadWriteAndSeek extends AbstractAbfsScaleTest { private static final String TEST_PATH = "/testfile"; @@ -56,7 +57,6 @@ public class ITestAbfsReadWriteAndSeek extends AbstractAbfsScaleTest { * For test performance, a full x*y test matrix is not used. 
* @return the test parameters */ - @Parameterized.Parameters(name = "Size={0}-readahead={1}-Client={2}") public static Iterable sizes() { return Arrays.asList(new Object[][]{ { @@ -172,7 +172,7 @@ private void testReadWriteAndSeek(int bufferSize) throws Exception { } logIOStatisticsAtLevel(LOG, IOSTATISTICS_LOGGING_LEVEL_INFO, statisticsSource); - assertNotEquals("data read in final read()", -1, result); + assertNotEquals(-1, result, "data read in final read()"); assertArrayEquals(readBuffer, b); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java index 0151faa5f412e..e59c385d69788 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsRestOperationException.java @@ -21,7 +21,7 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.constants.AbfsServiceType; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java index 8f692b4477ba4..40e2deb4516a0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStatistics.java @@ -21,8 +21,8 @@ import java.io.IOException; import java.util.Map; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; @@ -42,7 +42,7 @@ public class ITestAbfsStatistics extends AbstractAbfsIntegrationTest { public ITestAbfsStatistics() throws Exception { } - @Before + @BeforeEach public void setUp() throws Exception { super.setup(); // Setting IOStats to INFO level, to see the IOStats after close(). @@ -209,12 +209,12 @@ public void testOpenAppendRenameExists() throws IOException { assertAbfsStatistics(AbfsStatistic.CALL_RENAME, 1, metricMap); //Testing if file exists at path. - assertTrue(String.format("File with name %s should exist", - destCreateFilePath), - fs.exists(destCreateFilePath)); - assertFalse(String.format("File with name %s should not exist", - createFilePath), - fs.exists(createFilePath)); + assertTrue( + fs.exists(destCreateFilePath), String.format("File with name %s should exist", + destCreateFilePath)); + assertFalse( + fs.exists(createFilePath), String.format("File with name %s should not exist", + createFilePath)); metricMap = fs.getInstrumentationMap(); //Testing exists() calls. @@ -242,12 +242,12 @@ public void testOpenAppendRenameExists() throws IOException { assertTrue(fs.rename(createFilePath, destCreateFilePath)); //check if first name is existing and 2nd is not existing. 
- assertTrue(String.format("File with name %s should exist", - destCreateFilePath), - fs.exists(destCreateFilePath)); - assertFalse(String.format("File with name %s should not exist", - createFilePath), - fs.exists(createFilePath)); + assertTrue( + fs.exists(destCreateFilePath), String.format("File with name %s should exist", + destCreateFilePath)); + assertFalse( + fs.exists(createFilePath), String.format("File with name %s should not exist", + createFilePath)); } @@ -273,6 +273,6 @@ Testing exists() calls and rename calls. Since both were called 2 */ private void checkInitialValue(String statName, long statValue, long expectedInitialValue) { - assertEquals("Mismatch in " + statName, expectedInitialValue, statValue); + assertEquals(expectedInitialValue, statValue, "Mismatch in " + statName); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java index f62ced9b00ba6..f2e099aa331e7 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsStreamStatistics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azurebfs; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,9 +95,9 @@ public void testAbfsStreamOps() throws Exception { * different setups. * */ - assertTrue(String.format("The actual value of %d was not equal to the " - + "expected value of 2 or 3", statistics.getReadOps()), - statistics.getReadOps() == 2 || statistics.getReadOps() == 3); + assertTrue( + statistics.getReadOps() == 2 || statistics.getReadOps() == 3, String.format("The actual value of %d was not equal to the " + + "expected value of 2 or 3", statistics.getReadOps())); } finally { IOUtils.cleanupWithLogger(LOG, inForOneOperation, @@ -105,9 +105,9 @@ public void testAbfsStreamOps() throws Exception { } //Validating if content is being written in the smallOperationsFile - assertTrue("Mismatch in content validation", - validateContent(fs, smallOperationsFile, - testReadWriteOps.getBytes())); + assertTrue( + validateContent(fs, smallOperationsFile, + testReadWriteOps.getBytes()), "Mismatch in content validation"); FSDataOutputStream outForLargeOperations = null; FSDataInputStream inForLargeOperations = null; @@ -137,9 +137,9 @@ public void testAbfsStreamOps() throws Exception { if (fs.getAbfsStore().isAppendBlobKey(fs.makeQualified(largeOperationsFile).toString())) { // for appendblob data is already flushed, so there might be more data to read. 
- assertTrue(String.format("The actual value of %d was not equal to the " - + "expected value", statistics.getReadOps()), - statistics.getReadOps() >= largeValue || statistics.getReadOps() <= (largeValue + 4)); + assertTrue( + statistics.getReadOps() >= largeValue || statistics.getReadOps() <= (largeValue + 4), String.format("The actual value of %d was not equal to the " + + "expected value", statistics.getReadOps())); } else { //Test for 1000000 read operations assertReadWriteOps("read", largeValue, statistics.getReadOps()); @@ -150,9 +150,9 @@ public void testAbfsStreamOps() throws Exception { outForLargeOperations); } //Validating if content is being written in largeOperationsFile - assertTrue("Mismatch in content validation", - validateContent(fs, largeOperationsFile, - largeOperationsValidationString.toString().getBytes())); + assertTrue( + validateContent(fs, largeOperationsFile, + largeOperationsValidationString.toString().getBytes()), "Mismatch in content validation"); } @@ -166,7 +166,6 @@ public void testAbfsStreamOps() throws Exception { private void assertReadWriteOps(String operation, long expectedValue, long actualValue) { - assertEquals("Mismatch in " + operation + " operations", expectedValue, - actualValue); + assertEquals(expectedValue, actualValue, "Mismatch in " + operation + " operations"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java index 414f830aa2c3d..57cd8a7f28fc2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAppend.java @@ -33,9 +33,7 @@ import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; -import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; @@ -83,7 +81,9 @@ import static org.apache.hadoop.fs.store.DataBlocks.DataBlock.DestState.Closed; import static org.apache.hadoop.fs.store.DataBlocks.DataBlock.DestState.Writing; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; import static org.mockito.ArgumentMatchers.anyString; +import static org.assertj.core.api.Assertions.assertThat; /** * Test append operations. 
@@ -105,12 +105,14 @@ public ITestAzureBlobFileSystemAppend() throws Exception { super(); } - @Test(expected = FileNotFoundException.class) + @Test public void testAppendDirShouldFail() throws Exception { - final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = path(TEST_FILE_PATH); - fs.mkdirs(filePath); - fs.append(filePath, 0).close(); + assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = getFileSystem(); + final Path filePath = path(TEST_FILE_PATH); + fs.mkdirs(filePath); + fs.append(filePath, 0).close(); + }); } @Test @@ -125,22 +127,25 @@ public void testAppendWithLength0() throws Exception { } - @Test(expected = FileNotFoundException.class) + @Test public void testAppendFileAfterDelete() throws Exception { - final AzureBlobFileSystem fs = getFileSystem(); - final Path filePath = path(TEST_FILE_PATH); - ContractTestUtils.touch(fs, filePath); - fs.delete(filePath, false); - - fs.append(filePath).close(); + assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = getFileSystem(); + final Path filePath = path(TEST_FILE_PATH); + ContractTestUtils.touch(fs, filePath); + fs.delete(filePath, false); + fs.append(filePath).close(); + }); } - @Test(expected = FileNotFoundException.class) + @Test public void testAppendDirectory() throws Exception { - final AzureBlobFileSystem fs = getFileSystem(); - final Path folderPath = path(TEST_FOLDER_PATH); - fs.mkdirs(folderPath); - fs.append(folderPath).close(); + assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = getFileSystem(); + final Path folderPath = path(TEST_FOLDER_PATH); + fs.mkdirs(folderPath); + fs.append(folderPath).close(); + }); } @Test @@ -184,13 +189,13 @@ public void testCloseOfDataBlockOnAppendComplete() throws Exception { try (OutputStream os = fs.create( new Path(getMethodName() + "_" + blockBufferType))) { os.write(new byte[1]); - Assertions.assertThat(dataBlock[0].getState()) + assertThat(dataBlock[0].getState()) .describedAs( "On write of data in outputStream, state should become Writing") .isEqualTo(Writing); os.close(); Mockito.verify(dataBlock[0], Mockito.times(1)).close(); - Assertions.assertThat(dataBlock[0].getState()) + assertThat(dataBlock[0].getState()) .describedAs( "On close of outputStream, state should become Closed") .isEqualTo(Closed); @@ -207,7 +212,7 @@ public void testCloseOfDataBlockOnAppendComplete() throws Exception { */ @Test public void testCreateOverDfsAppendOverBlob() throws IOException { - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); final AzureBlobFileSystem fs = getFileSystem(); Path testPath = path(TEST_FILE_PATH); AzureBlobFileSystemStore.Permissions permissions @@ -224,7 +229,7 @@ public void testCreateOverDfsAppendOverBlob() throws IOException { AzureIngressHandler ingressHandler = ((AbfsOutputStream) outputStream.getWrappedStream()).getIngressHandler(); AbfsClient client = ingressHandler.getClient(); - Assertions.assertThat(client) + assertThat(client) .as("Blob client was not used before fallback") .isInstanceOf(AbfsBlobClient.class); outputStream.write(TEN); @@ -236,7 +241,7 @@ public void testCreateOverDfsAppendOverBlob() throws IOException { AzureIngressHandler ingressHandlerFallback = ((AbfsOutputStream) outputStream.getWrappedStream()).getIngressHandler(); AbfsClient clientFallback = ingressHandlerFallback.getClient(); - Assertions.assertThat(clientFallback) + 
assertThat(clientFallback) .as("DFS client was not used after fallback") .isInstanceOf(AbfsDfsClient.class); } @@ -246,7 +251,7 @@ public void testCreateOverDfsAppendOverBlob() throws IOException { */ @Test public void testMultipleAppendsQualifyForSwitch() throws Exception { - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); final AzureBlobFileSystem fs = getFileSystem(); Path testPath = path(TEST_FILE_PATH); AzureBlobFileSystemStore.Permissions permissions @@ -301,7 +306,7 @@ public void testMultipleAppendsQualifyForSwitch() throws Exception { AzureIngressHandler ingressHandlerFallback = ((AbfsOutputStream) out1.getWrappedStream()).getIngressHandler(); AbfsClient clientFallback = ingressHandlerFallback.getClient(); - Assertions.assertThat(clientFallback) + assertThat(clientFallback) .as("DFS client was not used after fallback") .isInstanceOf(AbfsDfsClient.class); } @@ -311,7 +316,7 @@ public void testMultipleAppendsQualifyForSwitch() throws Exception { */ @Test public void testParallelWritesOnDfsAndBlob() throws Exception { - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); final AzureBlobFileSystem fs = getFileSystem(); Path testPath = path(TEST_FILE_PATH); Path testPath1 = path(TEST_FILE_PATH1); @@ -368,7 +373,7 @@ public void testParallelWritesOnDfsAndBlob() throws Exception { */ @Test public void testCreateOverBlobAppendOverDfs() throws IOException { - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); assumeDfsServiceType(); Configuration conf = getRawConfiguration(); conf.setBoolean(FS_AZURE_ENABLE_DFSTOBLOB_FALLBACK, true); @@ -484,7 +489,7 @@ public void testCreateAppendBlobOverDfsEndpointAppendOverBlob() AzureIngressHandler ingressHandler = ((AbfsOutputStream) outputStream.getWrappedStream()).getIngressHandler(); AbfsClient client = ingressHandler.getClient(); - Assertions.assertThat(client) + assertThat(client) .as("Blob client was not used before fallback") .isInstanceOf(AbfsBlobClient.class); outputStream.write(TEN); @@ -496,7 +501,7 @@ public void testCreateAppendBlobOverDfsEndpointAppendOverBlob() AzureIngressHandler ingressHandlerFallback = ((AbfsOutputStream) outputStream.getWrappedStream()).getIngressHandler(); AbfsClient clientFallback = ingressHandlerFallback.getClient(); - Assertions.assertThat(clientFallback) + assertThat(clientFallback) .as("DFS client was not used after fallback") .isInstanceOf(AbfsDfsClient.class); } @@ -526,11 +531,11 @@ public void testValidateIngressHandler() throws IOException { FSDataOutputStream outputStream = fs.append(testPath); AzureIngressHandler ingressHandler = ((AbfsOutputStream) outputStream.getWrappedStream()).getIngressHandler(); - Assertions.assertThat(ingressHandler) + assertThat(ingressHandler) .as("Blob Ingress handler instance is not correct") .isInstanceOf(AzureBlobIngressHandler.class); AbfsClient client = ingressHandler.getClient(); - Assertions.assertThat(client) + assertThat(client) .as("Blob client was not used correctly") .isInstanceOf(AbfsBlobClient.class); @@ -546,86 +551,96 @@ public void testValidateIngressHandler() throws IOException { FSDataOutputStream outputStream1 = fs.append(testPath1); AzureIngressHandler ingressHandler1 = ((AbfsOutputStream) outputStream1.getWrappedStream()).getIngressHandler(); - 
Assertions.assertThat(ingressHandler1) + assertThat(ingressHandler1) .as("DFS Ingress handler instance is not correct") .isInstanceOf(AzureDFSIngressHandler.class); AbfsClient client1 = ingressHandler1.getClient(); - Assertions.assertThat(client1) + assertThat(client1) .as("Dfs client was not used correctly") .isInstanceOf(AbfsDfsClient.class); } } - @Test(expected = FileNotFoundException.class) + @Test public void testAppendImplicitDirectory() throws Exception { - final AzureBlobFileSystem fs = getFileSystem(); - final Path folderPath = new Path(TEST_FOLDER_PATH); - fs.mkdirs(folderPath); - fs.append(folderPath.getParent()); + assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = getFileSystem(); + final Path folderPath = new Path(TEST_FOLDER_PATH); + fs.mkdirs(folderPath); + fs.append(folderPath.getParent()); + }); } - @Test(expected = FileNotFoundException.class) + @Test public void testAppendFileNotExists() throws Exception { - final AzureBlobFileSystem fs = getFileSystem(); - final Path folderPath = new Path(TEST_FOLDER_PATH); - fs.append(folderPath); + assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = getFileSystem(); + final Path folderPath = new Path(TEST_FOLDER_PATH); + fs.append(folderPath); + }); } /** * Create directory over dfs endpoint and append over blob endpoint. * Should return error as append is not supported for directory. * **/ - @Test(expected = IOException.class) + @Test public void testCreateExplicitDirectoryOverDfsAppendOverBlob() throws IOException { - final AzureBlobFileSystem fs = getFileSystem(); - final Path folderPath = path(TEST_FOLDER_PATH); - AzureBlobFileSystemStore.Permissions permissions + assertThrows(IOException.class, () -> { + final AzureBlobFileSystem fs = getFileSystem(); + final Path folderPath = path(TEST_FOLDER_PATH); + AzureBlobFileSystemStore.Permissions permissions = new AzureBlobFileSystemStore.Permissions(false, FsPermission.getDefault(), FsPermission.getUMask(fs.getConf())); - fs.getAbfsStore().getClientHandler().getDfsClient(). + fs.getAbfsStore().getClientHandler().getDfsClient(). createPath(makeQualified(folderPath).toUri().getPath(), false, false, permissions, false, null, null, getTestTracingContext(fs, true)); - FSDataOutputStream outputStream = fs.append(folderPath); - outputStream.write(TEN); - outputStream.hsync(); - } + FSDataOutputStream outputStream = fs.append(folderPath); + outputStream.write(TEN); + outputStream.hsync(); + }); + } /** * Recreate file between append and flush. Etag mismatch happens. 
 **/
-  @Test(expected = IOException.class)
+  @Test
   public void testRecreateAppendAndFlush() throws IOException {
-    Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled());
-    final AzureBlobFileSystem fs = getFileSystem();
-    final Path filePath = path(TEST_FILE_PATH);
-    fs.create(filePath);
-    Assume.assumeTrue(getIngressServiceType() == AbfsServiceType.BLOB);
-    FSDataOutputStream outputStream = fs.append(filePath);
-    outputStream.write(TEN);
-    try (AzureBlobFileSystem fs1
+    assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse();
+    assumeThat(getIngressServiceType()).isEqualTo(AbfsServiceType.BLOB);
+    assertThrows(IOException.class, () -> {
+      final AzureBlobFileSystem fs = getFileSystem();
+      final Path filePath = path(TEST_FILE_PATH);
+      fs.create(filePath);
+      FSDataOutputStream outputStream = fs.append(filePath);
+      outputStream.write(TEN);
+      try (AzureBlobFileSystem fs1
          = (AzureBlobFileSystem) FileSystem.newInstance(getRawConfiguration());
          FSDataOutputStream outputStream1 = fs1.create(filePath)) {
         outputStream.hsync();
       }
+    });
   }
 
   /**
    * Recreate directory between append and flush. Etag mismatch happens.
    **/
-  @Test(expected = IOException.class)
+  @Test
   public void testRecreateDirectoryAppendAndFlush() throws IOException {
-    final AzureBlobFileSystem fs = getFileSystem();
-    final Path filePath = path(TEST_FILE_PATH);
-    fs.create(filePath);
-    FSDataOutputStream outputStream = fs.append(filePath);
-    outputStream.write(TEN);
-    try (AzureBlobFileSystem fs1
+    assertThrows(IOException.class, () -> {
+      final AzureBlobFileSystem fs = getFileSystem();
+      final Path filePath = path(TEST_FILE_PATH);
+      fs.create(filePath);
+      FSDataOutputStream outputStream = fs.append(filePath);
+      outputStream.write(TEN);
+      try (AzureBlobFileSystem fs1
          = (AzureBlobFileSystem) FileSystem.newInstance(getRawConfiguration())) {
         fs1.mkdirs(filePath);
         outputStream.hsync();
       }
+    });
   }
 
   /**
@@ -732,14 +747,14 @@ public void testParallelWriteDifferentContentLength() throws Exception {
    **/
   @Test
   public void testParallelWriteOutputStreamClose() throws Exception {
-    Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled());
+    assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse();
     AzureBlobFileSystem fs = getFileSystem();
     final Path secondarytestfile = new Path("secondarytestfile");
     ExecutorService executorService = Executors.newFixedThreadPool(2);
     List<Future<?>> futures = new ArrayList<>();
     FSDataOutputStream out1 = fs.create(secondarytestfile);
-    Assume.assumeTrue(getIngressServiceType() == AbfsServiceType.BLOB);
+    assumeThat(getIngressServiceType()).isEqualTo(AbfsServiceType.BLOB);
     AbfsOutputStream outputStream1 = (AbfsOutputStream) out1.getWrappedStream();
     String fileETag = outputStream1.getIngressHandler().getETag();
     final byte[] b1 = new byte[8 * ONE_MB];
@@ -803,12 +818,12 @@ public void testParallelWriteOutputStreamClose() throws Exception {
    **/
   @Test
   public void testEtagMismatch() throws Exception {
-    Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled());
+    assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse();
     AzureBlobFileSystem fs = getFileSystem();
     final Path filePath = path(TEST_FILE_PATH);
     FSDataOutputStream out1 = fs.create(filePath);
     FSDataOutputStream out2 = fs.create(filePath);
-    Assume.assumeTrue(getIngressServiceType() == AbfsServiceType.BLOB);
+    assumeThat(getIngressServiceType()).isEqualTo(AbfsServiceType.BLOB);
     out2.write(TEN);
     out2.hsync();
     out1.write(TEN);
@@ -860,7 +875,7 @@ public void
testAppendImplicitDirectoryAzcopy() throws Exception { */ @Test public void testIntermittentAppendFailureToBeReported() throws Exception { - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); try (AzureBlobFileSystem fs = Mockito.spy( (AzureBlobFileSystem) FileSystem.newInstance(getRawConfiguration()))) { assumeHnsDisabled(); @@ -960,7 +975,7 @@ private FSDataOutputStream createMockedOutputStream(AzureBlobFileSystem fs, */ @Test public void testWriteAsyncOpFailedAfterCloseCalled() throws Exception { - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); try (AzureBlobFileSystem fs = Mockito.spy( (AzureBlobFileSystem) FileSystem.newInstance(getRawConfiguration()))) { AzureBlobFileSystemStore store = Mockito.spy(fs.getAbfsStore()); @@ -1050,7 +1065,7 @@ public void testWriteAsyncOpFailedAfterCloseCalled() throws Exception { */ @Test public void testFlushSuccessWithConnectionResetOnResponseValidMd5() throws Exception { - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); // Create a spy of AzureBlobFileSystem try (AzureBlobFileSystem fs = Mockito.spy( (AzureBlobFileSystem) FileSystem.newInstance(getRawConfiguration()))) { @@ -1146,7 +1161,7 @@ public void testFlushSuccessWithConnectionResetOnResponseValidMd5() throws Excep */ @Test public void testFlushSuccessWithConnectionResetOnResponseInvalidMd5() throws Exception { - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); // Create a spy of AzureBlobFileSystem try (AzureBlobFileSystem fs = Mockito.spy( (AzureBlobFileSystem) FileSystem.newInstance(getRawConfiguration()))) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAttributes.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAttributes.java index b3f99651de4f4..3b7e46b1d0315 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAttributes.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAttributes.java @@ -23,7 +23,7 @@ import java.util.EnumSet; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.XAttrSetFlag; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java index ab01b2e10c4b9..7c0d002feb843 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemAuthorization.java @@ -22,8 +22,8 @@ import java.util.Arrays; import java.util.UUID; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; @@ -44,6 +44,7 @@ import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; import static 
org.apache.hadoop.fs.permission.AclEntryType.GROUP; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test Perform Authorization Check operation @@ -58,14 +59,15 @@ public class ITestAzureBlobFileSystemAuthorization extends AbstractAbfsIntegrati public ITestAzureBlobFileSystemAuthorization() throws Exception { // The mock SAS token provider relies on the account key to generate SAS. - Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey); + assumeThat(this.getAuthType()).isEqualTo(AuthType.SharedKey); } + @BeforeEach @Override public void setup() throws Exception { boolean isHNSEnabled = getConfiguration().getBoolean( TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false); - Assume.assumeTrue(isHNSEnabled); + assumeThat(isHNSEnabled).isTrue(); loadConfiguredFileSystem(); getConfiguration().set(FS_AZURE_SAS_TOKEN_PROVIDER_TYPE, TEST_AUTHZ_CLASS); getConfiguration().set(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SAS.toString()); @@ -216,55 +218,55 @@ public void testGetFileStatusUnauthorized() throws Exception { @Test public void testSetOwnerUnauthorized() throws Exception { - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); runTest(FileSystemOperations.SetOwner, true); } @Test public void testSetPermissionUnauthorized() throws Exception { - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); runTest(FileSystemOperations.SetPermissions, true); } @Test public void testModifyAclEntriesUnauthorized() throws Exception { - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); runTest(FileSystemOperations.ModifyAclEntries, true); } @Test public void testRemoveAclEntriesUnauthorized() throws Exception { - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); runTest(FileSystemOperations.RemoveAclEntries, true); } @Test public void testRemoveDefaultAclUnauthorized() throws Exception { - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); runTest(FileSystemOperations.RemoveDefaultAcl, true); } @Test public void testRemoveAclUnauthorized() throws Exception { - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); runTest(FileSystemOperations.RemoveAcl, true); } @Test public void testSetAclUnauthorized() throws Exception { - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); runTest(FileSystemOperations.SetAcl, true); } @Test public void testGetAclStatusAuthorized() throws Exception { - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); runTest(FileSystemOperations.GetAcl, false); } @Test public void testGetAclStatusUnauthorized() throws Exception { - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); runTest(FileSystemOperations.GetAcl, true); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java 
index 2941b96fefa2e..c1cdff01bca63 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemBackCompat.java @@ -23,12 +23,13 @@ import com.microsoft.azure.storage.blob.CloudBlobContainer; import com.microsoft.azure.storage.blob.CloudBlockBlob; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import static org.assertj.core.api.Assumptions.assumeThat; + /** * Test AzureBlobFileSystem back compatibility with WASB. */ @@ -42,8 +43,9 @@ public ITestAzureBlobFileSystemBackCompat() throws Exception { @Test public void testBlobBackCompat() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeFalse("This test does not support namespace enabled account", - getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())) + .as("This test does not support namespace enabled account") + .isFalse(); String storageConnectionString = getBlobConnectionString(); CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); CloudBlobClient blobClient = storageAccount.createCloudBlobClient(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCLI.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCLI.java index 6f0d0cc6e1a3b..9d38617094040 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCLI.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCLI.java @@ -20,7 +20,7 @@ import java.util.UUID; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FsShell; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java index c1ea6f3c22817..63043f5d33f3f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCheckAccess.java @@ -24,8 +24,7 @@ import org.apache.hadoop.fs.azurebfs.enums.Trilean; import org.apache.hadoop.util.Lists; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; @@ -53,6 +52,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CLIENT_SECRET; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test cases for AzureBlobFileSystem.access() @@ -114,34 +114,37 @@ private void setTestFsConf(final String fsConfKey, conf.set(confKeyWithAccountName, confValue); } - @Test(expected = IllegalArgumentException.class) + @Test public void testCheckAccessWithNullPath() throws IOException { - superUserFs.access(null, FsAction.READ); + assertThrows(IllegalArgumentException.class, () -> { 
+      superUserFs.access(null, FsAction.READ);
+    });
   }
 
-  @Test(expected = NullPointerException.class)
+  @Test
   public void testCheckAccessForFileWithNullFsAction() throws Exception {
-    Assume.assumeTrue(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false",
-        isHNSEnabled);
-    Assume.assumeTrue(FS_AZURE_ENABLE_CHECK_ACCESS + " is false",
-        isCheckAccessEnabled);
+    assumeThat(isHNSEnabled).as(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false").isTrue();
+    assumeThat(isCheckAccessEnabled).as(FS_AZURE_ENABLE_CHECK_ACCESS + " is false").isTrue();
     // NPE when trying to convert null FsAction enum
-    superUserFs.access(new Path("test.txt"), null);
+    assertThrows(NullPointerException.class, () ->
+        superUserFs.access(new Path("test.txt"), null));
   }
 
-  @Test(expected = FileNotFoundException.class)
+  @Test
   public void testCheckAccessForNonExistentFile() throws Exception {
     checkPrerequisites();
     Path nonExistentFile = setupTestDirectoryAndUserAccess(
         "/nonExistentFile1.txt", FsAction.ALL);
     superUserFs.delete(nonExistentFile, true);
-    testUserFs.access(nonExistentFile, FsAction.READ);
+    assertThrows(FileNotFoundException.class, () ->
+        testUserFs.access(nonExistentFile, FsAction.READ));
   }
 
   @Test
   public void testWhenCheckAccessConfigIsOff() throws Exception {
-    Assume.assumeTrue(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false",
-        isHNSEnabled);
+    assumeThat(isHNSEnabled).as(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false").isTrue();
     Configuration conf = getRawConfiguration();
     conf.setBoolean(FS_AZURE_ENABLE_CHECK_ACCESS, false);
     FileSystem fs = FileSystem.newInstance(conf);
@@ -172,11 +175,13 @@ public void testWhenCheckAccessConfigIsOff() throws Exception {
 
   @Test
   public void testCheckAccessForAccountWithoutNS() throws Exception {
-    Assume.assumeFalse(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is true",
-        getConfiguration()
-            .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, true));
-    Assume.assumeTrue(FS_AZURE_ENABLE_CHECK_ACCESS + " is false",
-        isCheckAccessEnabled);
+    assumeThat(getConfiguration().getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT,
+        true))
+        .as(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is true")
+        .isFalse();
+    assumeThat(isCheckAccessEnabled)
+        .as(FS_AZURE_ENABLE_CHECK_ACCESS + " is false")
+        .isTrue();
     checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID);
     checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET);
     checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_USER_GUID);
@@ -311,10 +316,8 @@ public void testFsActionALL() throws Exception {
   }
 
   private void checkPrerequisites() throws Exception {
-    Assume.assumeTrue(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false",
-        isHNSEnabled);
-    Assume.assumeTrue(FS_AZURE_ENABLE_CHECK_ACCESS + " is false",
-        isCheckAccessEnabled);
+    assumeThat(isHNSEnabled).as(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false").isTrue();
+    assumeThat(isCheckAccessEnabled).as(FS_AZURE_ENABLE_CHECK_ACCESS + " is false").isTrue();
     setTestUserFs();
     checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_ID);
     checkIfConfigIsSet(FS_AZURE_BLOB_FS_CHECKACCESS_TEST_CLIENT_SECRET);
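Note that the precondition checks above run before, not inside, the `assertThrows` lambda. With JUnit 5 on the classpath, a failed AssertJ assumption throws `org.opentest4j.TestAbortedException`; if that happens inside the lambda, `assertThrows` reports an unexpected exception type and the test fails instead of being skipped. A minimal sketch of the placement rule (the `Fs` interface and names are illustrative):

```java
import java.io.FileNotFoundException;

import static org.assertj.core.api.Assumptions.assumeThat;
import static org.junit.jupiter.api.Assertions.assertThrows;

class AssumptionPlacementSketch {
  interface Fs {
    void access(String path) throws FileNotFoundException;
  }

  void skipCleanly(boolean hnsEnabled, Fs fs) {
    // Outside the lambda, a failed assumption aborts (skips) the test.
    // Inside the lambda it would surface as an "unexpected exception
    // type" assertion failure.
    assumeThat(hnsEnabled).isTrue();
    assertThrows(FileNotFoundException.class, () -> fs.access("/missing"));
  }
}
```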
@@ -324,22 +327,22 @@ private void checkIfConfigIsSet(String configKey){
     AbfsConfiguration conf = getConfiguration();
     String value = conf.get(configKey);
-    Assume.assumeTrue(configKey + " config is mandatory for the test to run",
-        value != null && value.trim().length() > 1);
+    assumeThat(value)
+        .as(configKey + " config is mandatory for the test to run")
+        .isNotNull()
+        .matches(v -> v.trim().length() > 1, "trimmed length > 1");
   }
 
   private void assertAccessible(Path testFilePath, FsAction fsAction)
       throws IOException {
-    assertTrue(
-        "Should have been given access " + fsAction + " on " + testFilePath,
-        isAccessible(testUserFs, testFilePath, fsAction));
+    assertTrue(isAccessible(testUserFs, testFilePath, fsAction),
+        "Should have been given access " + fsAction + " on " + testFilePath);
   }
 
   private void assertInaccessible(Path testFilePath, FsAction fsAction)
       throws IOException {
-    assertFalse(
-        "Should have been denied access " + fsAction + " on " + testFilePath,
-        isAccessible(testUserFs, testFilePath, fsAction));
+    assertFalse(isAccessible(testUserFs, testFilePath, fsAction),
+        "Should have been denied access " + fsAction + " on " + testFilePath);
   }
 
   private void setExecuteAccessForParentDirs(Path dir) throws IOException {
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChecksum.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChecksum.java
index 41aaeaf37ef30..3fd62c643d433 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChecksum.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChecksum.java
@@ -26,8 +26,7 @@
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.fs.azurebfs.contracts.services.AzureServiceErrorCode;
 import org.assertj.core.api.Assertions;
-import org.junit.Assume;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import org.apache.hadoop.conf.Configuration;
@@ -48,6 +47,7 @@
 import static org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode.APPEND_MODE;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.mockito.ArgumentMatchers.any;
+import static org.assertj.core.api.Assumptions.assumeThat;
 
 /**
  * Test For Verifying Checksum Related Operations
@@ -76,7 +76,7 @@ public void testWriteReadWithChecksum() throws Exception {
   public void testAppendWithChecksumAtDifferentOffsets() throws Exception {
     AzureBlobFileSystem fs = getConfiguredFileSystem(MB_4, MB_4, true);
     if (!getIsNamespaceEnabled(fs)) {
-      Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled());
+      assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse();
     }
     AbfsClient client = fs.getAbfsStore().getClientHandler().getIngressClient();
     Path path = path("testPath" + getMethodName());
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChooseSAS.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChooseSAS.java
index e9b1a27278f24..bd293c8723750 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChooseSAS.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemChooseSAS.java
@@ -21,8 +21,8 @@
 import java.nio.file.AccessDeniedException;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Assume;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; @@ -45,6 +45,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_TENANT_ID; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Tests to validate the choice between using a custom SASTokenProvider @@ -66,9 +67,10 @@ public ITestAzureBlobFileSystemChooseSAS() throws Exception { // SAS Token configured might not have permissions for creating file system. // Shared Key must be configured to create one. Once created, a new instance // of same file system will be used with SAS Authentication. - Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey); + assumeThat(this.getAuthType()).isEqualTo(AuthType.SharedKey); } + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java index aabaf82b622a8..0390a99c6a519 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCopy.java @@ -24,7 +24,7 @@ import java.io.InputStreamReader; import java.io.OutputStreamWriter; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java index b719a3217b299..c91b8a1f93bbb 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemCreate.java @@ -35,7 +35,7 @@ import java.util.concurrent.Future; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -917,7 +917,7 @@ public void testDeleteBeforeFlush() throws Throwable { // the exception raised in close() must be in the caught exception's // suppressed list Throwable[] suppressed = fnfe.getSuppressed(); - assertEquals("suppressed count", 1, suppressed.length); + assertEquals(1, suppressed.length, "suppressed count"); Throwable inner = suppressed[0]; if (!(inner instanceof IOException)) { throw inner; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java index 378655405da66..0879520d0837c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelegationSAS.java @@ -28,8 +28,8 @@ import java.util.UUID; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Test; +import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,6 +68,7 @@ import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; import static org.apache.hadoop.fs.permission.AclEntryType.USER; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test Perform Authorization Check operation @@ -83,16 +84,19 @@ public class ITestAzureBlobFileSystemDelegationSAS extends AbstractAbfsIntegrati public ITestAzureBlobFileSystemDelegationSAS() throws Exception { // These tests rely on specific settings in azure-auth-keys.xml: String sasProvider = getRawConfiguration().get(FS_AZURE_SAS_TOKEN_PROVIDER_TYPE); - Assume.assumeTrue(MockDelegationSASTokenProvider.class.getCanonicalName().equals(sasProvider)); - Assume.assumeNotNull(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_ID)); - Assume.assumeNotNull(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_SECRET)); - Assume.assumeNotNull(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_TENANT_ID)); - Assume.assumeNotNull(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID)); + assumeThat(MockDelegationSASTokenProvider.class.getCanonicalName()).isEqualTo(sasProvider); + assumeThat(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_ID)).isNotNull(); + assumeThat(getRawConfiguration().get(TestConfigurationKeys.FS_AZURE_TEST_APP_SECRET)).isNotNull(); + assumeThat(getRawConfiguration().get( + TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_TENANT_ID)).isNotNull(); + assumeThat(getRawConfiguration().get( + TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID)).isNotNull(); // The test uses shared key to create a random filesystem and then creates another // instance of this filesystem using SAS authorization. 
- Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey); + assumeThat(this.getAuthType()).isEqualTo(AuthType.SharedKey); } + @BeforeEach @Override public void setup() throws Exception { isHNSEnabled = this.getConfiguration().getBoolean( @@ -114,10 +118,10 @@ public void testCheckAccess() throws Exception { fs.setOwner(rootPath, MockDelegationSASTokenProvider.TEST_OWNER, null); fs.setPermission(rootPath, new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.EXECUTE)); FileStatus rootStatus = fs.getFileStatus(rootPath); - assertEquals("The directory permissions are not expected.", "rwxr-x--x", rootStatus.getPermission().toString()); - assertEquals("The directory owner is not expected.", - MockDelegationSASTokenProvider.TEST_OWNER, - rootStatus.getOwner()); + assertEquals("rwxr-x--x", rootStatus.getPermission().toString(), + "The directory permissions are not expected."); + assertEquals(MockDelegationSASTokenProvider.TEST_OWNER, rootStatus.getOwner(), + "The directory owner is not expected."); Path dirPath = new Path(UUID.randomUUID().toString()); fs.mkdirs(dirPath); @@ -129,28 +133,35 @@ public void testCheckAccess() throws Exception { FileStatus dirStatus = fs.getFileStatus(dirPath); FileStatus fileStatus = fs.getFileStatus(filePath); - assertEquals("The owner is not expected.", MockDelegationSASTokenProvider.TEST_OWNER, dirStatus.getOwner()); - assertEquals("The owner is not expected.", MockDelegationSASTokenProvider.TEST_OWNER, fileStatus.getOwner()); - assertEquals("The directory permissions are not expected.", "rwxr-xr-x", dirStatus.getPermission().toString()); - assertEquals("The file permissions are not expected.", "r--r-----", fileStatus.getPermission().toString()); + assertEquals(MockDelegationSASTokenProvider.TEST_OWNER, dirStatus.getOwner(), + "The owner is not expected."); + assertEquals(MockDelegationSASTokenProvider.TEST_OWNER, fileStatus.getOwner(), + "The owner is not expected."); + assertEquals("rwxr-xr-x", dirStatus.getPermission().toString(), + "The directory permissions are not expected."); + assertEquals("r--r-----", fileStatus.getPermission().toString(), + "The file permissions are not expected."); assertTrue(isAccessible(fs, dirPath, FsAction.READ_WRITE)); assertFalse(isAccessible(fs, filePath, FsAction.READ_WRITE)); fs.setPermission(filePath, new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.NONE)); fileStatus = fs.getFileStatus(filePath); - assertEquals("The file permissions are not expected.", "rw-r-----", fileStatus.getPermission().toString()); + assertEquals("rw-r-----", fileStatus.getPermission().toString(), + "The file permissions are not expected."); assertTrue(isAccessible(fs, filePath, FsAction.READ_WRITE)); fs.setPermission(dirPath, new FsPermission(FsAction.EXECUTE, FsAction.NONE, FsAction.NONE)); dirStatus = fs.getFileStatus(dirPath); - assertEquals("The file permissions are not expected.", "--x------", dirStatus.getPermission().toString()); + assertEquals("--x------", dirStatus.getPermission().toString(), + "The file permissions are not expected."); assertFalse(isAccessible(fs, dirPath, FsAction.READ_WRITE)); assertTrue(isAccessible(fs, dirPath, FsAction.EXECUTE)); fs.setPermission(dirPath, new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE)); dirStatus = fs.getFileStatus(dirPath); - assertEquals("The file permissions are not expected.", "---------", dirStatus.getPermission().toString()); + assertEquals("---------", dirStatus.getPermission().toString(), + "The file permissions are not expected."); 
assertFalse(isAccessible(fs, filePath, FsAction.READ_WRITE)); } @@ -588,12 +599,10 @@ public void testSetPermissionForNonOwner() throws Exception { Path rootPath = new Path("/"); FileStatus rootStatus = fs.getFileStatus(rootPath); - assertEquals("The permissions are not expected.", - "rwxr-x---", - rootStatus.getPermission().toString()); - assertNotEquals("The owner is not expected.", - MockDelegationSASTokenProvider.TEST_OWNER, - rootStatus.getOwner()); + assertEquals("rwxr-x---", rootStatus.getPermission().toString(), + "The permissions are not expected."); + assertNotEquals(MockDelegationSASTokenProvider.TEST_OWNER, rootStatus.getOwner(), + "The owner is not expected."); // Attempt to set permission without being the owner. intercept(AccessDeniedException.class, @@ -608,12 +617,10 @@ public void testSetPermissionForNonOwner() throws Exception { fs.setPermission(rootPath, new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.EXECUTE)); rootStatus = fs.getFileStatus(rootPath); - assertEquals("The permissions are not expected.", - "rwxr-x--x", - rootStatus.getPermission().toString()); - assertEquals("The directory owner is not expected.", - MockDelegationSASTokenProvider.TEST_OWNER, - rootStatus.getOwner()); + assertEquals("rwxr-x--x", rootStatus.getPermission().toString(), + "The permissions are not expected."); + assertEquals(MockDelegationSASTokenProvider.TEST_OWNER, rootStatus.getOwner(), + "The directory owner is not expected."); } @Test @@ -625,19 +632,16 @@ public void testSetPermissionWithoutAgentForNonOwner() throws Exception { fs.create(path).close(); FileStatus status = fs.getFileStatus(path); - assertEquals("The permissions are not expected.", - "rw-r--r--", - status.getPermission().toString()); - assertNotEquals("The owner is not expected.", - TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID, - status.getOwner()); + assertEquals("rw-r--r--", status.getPermission().toString(), + "The permissions are not expected."); + assertNotEquals(TestConfigurationKeys.FS_AZURE_TEST_APP_SERVICE_PRINCIPAL_OBJECT_ID, + status.getOwner(), "The owner is not expected."); fs.setPermission(path, new FsPermission(FsAction.READ, FsAction.READ, FsAction.NONE)); FileStatus fileStatus = fs.getFileStatus(path); - assertEquals("The permissions are not expected.", - "r--r-----", - fileStatus.getPermission().toString()); + assertEquals("r--r-----", fileStatus.getPermission().toString(), + "The permissions are not expected."); } @Test diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java index 10d6606d243eb..41e376a5593e4 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemDelete.java @@ -30,8 +30,7 @@ import java.util.concurrent.Future; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; @@ -72,6 +71,7 @@ import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test delete operation. 
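Reviewer note on the hunks below (and on most assertEquals/assertTrue hunks in this patch): the only change is the message argument moving from first position (JUnit 4) to last (JUnit 5). A minimal illustrative sketch, using a hypothetical `listing` array rather than anything from this patch:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

class AssertionOrderSketch {
  void check(Object[] listing) {
    // JUnit 4: assertEquals(String message, expected, actual).
    // JUnit 5 moves the message to the end; the Supplier<String> variant
    // defers building an expensive message until the assertion fails.
    assertEquals(0, listing.length, "listing size");
    assertEquals(0, listing.length, () -> "unexpected listing size: " + listing.length);
  }
}
```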
@@ -106,7 +106,7 @@ public void testDeleteRoot() throws Exception { fs.delete(root, true); ls = fs.listStatus(root); - assertEquals("listing size", 0, ls.length); + assertEquals(0, ls.length, "listing size"); } @Test() @@ -180,7 +180,8 @@ public Void call() throws Exception { @Test public void testDeleteIdempotency() throws Exception { - Assume.assumeTrue(DEFAULT_DELETE_CONSIDERED_IDEMPOTENT); + assumeThat(DEFAULT_DELETE_CONSIDERED_IDEMPOTENT).isTrue(); + // Config to reduce the retry and maxBackoff time for test run AbfsConfiguration abfsConfig = TestAbfsConfigurationFieldsValidation.updateRetryConfigs( diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java index efb488988e39f..6a0ef6c83dc94 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2E.java @@ -27,8 +27,7 @@ import org.apache.hadoop.fs.azurebfs.services.AbfsClient; import org.apache.hadoop.fs.azurebfs.services.AbfsDfsClient; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -46,6 +45,8 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assertions.assertThat; +import org.junit.jupiter.api.Assertions; /** * Test end to end between ABFS client and ABFS server. 
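The E2E hunks below replace `@Test (expected = IOException.class)` with `Assertions.assertThrows`. A minimal sketch of the pattern, with a hypothetical `read()` helper standing in for the failing read; unlike the annotation form, `assertThrows` also returns the caught exception for further checks:

```java
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

class ExpectedExceptionSketch {
  void read() throws IOException {
    // Hypothetical stand-in for the failing out-of-band read.
    throw new IOException("simulated concurrent-append failure");
  }

  void demo() {
    // JUnit 4's annotation passed if the exception escaped from anywhere in
    // the method; assertThrows pins the expectation to a single executable.
    IOException ex = assertThrows(IOException.class, this::read);
    assertTrue(ex.getMessage().contains("concurrent-append"));
  }
}
```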
@@ -83,34 +84,32 @@ public void testReadWriteBytesToFile() throws Exception {
     }
   }

-  @Test (expected = IOException.class)
+  @Test
   public void testOOBWritesAndReadFail() throws Exception {
-    Configuration conf = this.getRawConfiguration();
-    conf.setBoolean(AZURE_TOLERATE_CONCURRENT_APPEND, false);
-    final AzureBlobFileSystem fs = getFileSystem();
-    int readBufferSize = fs.getAbfsStore().getAbfsConfiguration().getReadBufferSize();
-
-    byte[] bytesToRead = new byte[readBufferSize];
-    final byte[] b = new byte[2 * readBufferSize];
-    new Random().nextBytes(b);
-
-    final Path testFilePath = path(methodName.getMethodName());
-    try(FSDataOutputStream writeStream = fs.create(testFilePath)) {
-      writeStream.write(b);
-      writeStream.flush();
-    }
-
-    try (FSDataInputStream readStream = fs.open(testFilePath)) {
-      assertEquals(readBufferSize,
-          readStream.read(bytesToRead, 0, readBufferSize));
-      try (FSDataOutputStream writeStream = fs.create(testFilePath)) {
-        writeStream.write(b);
-        writeStream.flush();
-      }
-
-      assertEquals(readBufferSize,
-          readStream.read(bytesToRead, 0, readBufferSize));
-    }
+    Assertions.assertThrows(IOException.class, () -> {
+      Configuration conf = this.getRawConfiguration();
+      conf.setBoolean(AZURE_TOLERATE_CONCURRENT_APPEND, false);
+      final AzureBlobFileSystem fs = getFileSystem();
+      int readBufferSize = fs.getAbfsStore().getAbfsConfiguration().getReadBufferSize();
+      byte[] bytesToRead = new byte[readBufferSize];
+      final byte[] b = new byte[2 * readBufferSize];
+      new Random().nextBytes(b);
+      final Path testFilePath = path(methodName.getMethodName());
+      try (FSDataOutputStream writeStream = fs.create(testFilePath)) {
+        writeStream.write(b);
+        writeStream.flush();
+      }
+
+      try (FSDataInputStream readStream = fs.open(testFilePath)) {
+        assertEquals(readBufferSize, readStream.read(bytesToRead, 0, readBufferSize));
+        try (FSDataOutputStream writeStream = fs.create(testFilePath)) {
+          writeStream.write(b);
+          writeStream.flush();
+        }
+
+        assertEquals(readBufferSize, readStream.read(bytesToRead, 0, readBufferSize));
+      }
+    });
   }

   @Test
@@ -264,12 +263,14 @@ public void testHttpConnectionTimeout() throws Exception {
         TEST_STABLE_DEFAULT_READ_TIMEOUT_MS);
   }

-  @Test(expected = InvalidAbfsRestOperationException.class)
+  @Test
   public void testHttpReadTimeout() throws Exception {
     // Small read timeout is bound to make the request fail.
-    testHttpTimeouts(TEST_STABLE_DEFAULT_CONNECTION_TIMEOUT_MS,
+    Assertions.assertThrows(InvalidAbfsRestOperationException.class, () -> {
+      testHttpTimeouts(TEST_STABLE_DEFAULT_CONNECTION_TIMEOUT_MS,
         TEST_UNSTABLE_READ_TIMEOUT_MS);
-  }
+    });
+} public void testHttpTimeouts(int connectionTimeoutMs, int readTimeoutMs) throws Exception { @@ -285,11 +286,11 @@ public void testHttpTimeouts(int connectionTimeoutMs, int readTimeoutMs) // Reduce retry count to reduce test run time conf.setInt(AZURE_MAX_IO_RETRIES, 1); final AzureBlobFileSystem fs = getFileSystem(conf); - Assertions.assertThat( + assertThat( fs.getAbfsStore().getAbfsConfiguration().getHttpConnectionTimeout()) .describedAs("HTTP connection time should be picked from config") .isEqualTo(connectionTimeoutMs); - Assertions.assertThat( + assertThat( fs.getAbfsStore().getAbfsConfiguration().getHttpReadTimeout()) .describedAs("HTTP Read time should be picked from config") .isEqualTo(readTimeoutMs); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2EScale.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2EScale.java index fccd0632375d3..737ea3a2c4ccf 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2EScale.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemE2EScale.java @@ -26,7 +26,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -109,12 +109,10 @@ public void testReadWriteHeavyBytesToFileWithStatistics() throws Exception { } String stats = abfsStatistics.toString(); - assertEquals("Bytes read in " + stats, - remoteData.length, abfsStatistics.getBytesRead()); - assertEquals("bytes written in " + stats, - sourceData.length, abfsStatistics.getBytesWritten()); - assertEquals("bytesRead from read() call", testBufferSize, bytesRead); - assertArrayEquals("round tripped data", sourceData, remoteData); + assertEquals(remoteData.length, abfsStatistics.getBytesRead(), "Bytes read in " + stats); + assertEquals(sourceData.length, abfsStatistics.getBytesWritten(), "bytes written in " + stats); + assertEquals(testBufferSize, bytesRead, "bytesRead from read() call"); + assertArrayEquals(sourceData, remoteData, "round tripped data"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java index 2075ba81b72f7..380c765135e92 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFileStatus.java @@ -23,7 +23,7 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.fs.FileStatus; @@ -63,7 +63,7 @@ public void testEnsureStatusWorksForRoot() throws Exception { Path root = new Path("/"); FileStatus[] rootls = fs.listStatus(root); - assertEquals("root listing", 0, rootls.length); + assertEquals(0, rootls.length, "root listing"); } @Test @@ -82,23 +82,21 @@ private FileStatus validateStatus(final AzureBlobFileSystem fs, final Path name, String errorInStatus = "error in " + fileStatus + " from " + fs; if (!getIsNamespaceEnabled(fs)) { - assertEquals(errorInStatus + ": owner", - fs.getOwnerUser(), fileStatus.getOwner()); - 
assertEquals(errorInStatus + ": group", - fs.getOwnerUserPrimaryGroup(), fileStatus.getGroup()); + assertEquals(fs.getOwnerUser(), fileStatus.getOwner(), errorInStatus + ": owner"); + assertEquals(fs.getOwnerUserPrimaryGroup(), fileStatus.getGroup(), errorInStatus + ": group"); assertEquals(new FsPermission(FULL_PERMISSION), fileStatus.getPermission()); } else { // When running with namespace enabled account, // the owner and group info retrieved from server will be digit ids. // hence skip the owner and group validation if (isDir) { - assertEquals(errorInStatus + ": permission", - new FsPermission(DEFAULT_DIR_PERMISSION_VALUE), fileStatus.getPermission()); - assertTrue(errorInStatus + "not a directory", fileStatus.isDirectory()); + assertEquals(new FsPermission(DEFAULT_DIR_PERMISSION_VALUE), fileStatus.getPermission(), + errorInStatus + ": permission"); + assertTrue(fileStatus.isDirectory(), errorInStatus + "not a directory"); } else { - assertEquals(errorInStatus + ": permission", - new FsPermission(DEFAULT_FILE_PERMISSION_VALUE), fileStatus.getPermission()); - assertTrue(errorInStatus + "not a file", fileStatus.isFile()); + assertEquals(new FsPermission(DEFAULT_FILE_PERMISSION_VALUE), fileStatus.getPermission(), + errorInStatus + ": permission"); + assertTrue(fileStatus.isFile(), errorInStatus + "not a file"); } } assertPathDns(fileStatus.getPath()); @@ -153,10 +151,10 @@ public void testLastModifiedTime() throws IOException { long createEndTime = System.currentTimeMillis(); FileStatus fStat = fs.getFileStatus(testFilePath); long lastModifiedTime = fStat.getModificationTime(); - assertTrue("lastModifiedTime should be after minCreateStartTime", - minCreateStartTime < lastModifiedTime); - assertTrue("lastModifiedTime should be before createEndTime", - createEndTime > lastModifiedTime); + assertTrue( + minCreateStartTime < lastModifiedTime, "lastModifiedTime should be after minCreateStartTime"); + assertTrue( + createEndTime > lastModifiedTime, "lastModifiedTime should be before createEndTime"); } /** diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFinalize.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFinalize.java index 3c21525549bfe..f1a045ade4c38 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFinalize.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFinalize.java @@ -20,8 +20,8 @@ import java.lang.ref.WeakReference; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -57,6 +57,6 @@ public void testFinalize() throws Exception { i++; } - Assert.assertTrue("testFinalizer didn't get cleaned up within maxTries", ref.get() == null); + Assertions.assertTrue(ref.get() == null, "testFinalizer didn't get cleaned up within maxTries"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java index b55032c5132a5..1e5ba3689f5da 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java +++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemFlush.java @@ -35,9 +35,7 @@ import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator; -import org.hamcrest.core.IsEqual; -import org.hamcrest.core.IsNot; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -48,6 +46,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_APPEND_BLOB_KEY; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasStreamCapabilities; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertLacksStreamCapabilities; +import static org.assertj.core.api.Assertions.assertThat; /** * Test flush operation. @@ -94,8 +93,8 @@ public void testAbfsOutputStreamAsyncFlushWithRetainUncommittedData() throws Exc while (inputStream.available() != 0) { int result = inputStream.read(r); - assertNotEquals("read returned -1", -1, result); - assertArrayEquals("buffer read from stream", r, b); + assertNotEquals(-1, result, "read returned -1"); + assertArrayEquals(r, b, "buffer read from stream"); } } } @@ -170,7 +169,7 @@ public Void call() throws Exception { es.shutdownNow(); FileStatus fileStatus = fs.getFileStatus(testFilePath); long expectedWrites = (long) TEST_BUFFER_SIZE * FLUSH_TIMES; - assertEquals("Wrong file length in " + testFilePath, expectedWrites, fileStatus.getLen()); + assertEquals(expectedWrites, fileStatus.getLen(), "Wrong file length in " + testFilePath); } @Test @@ -400,15 +399,11 @@ private void validate(InputStream stream, byte[] writeBuffer, boolean isEqual) int numBytesRead = stream.read(readBuffer, 0, readBuffer.length); if (isEqual) { - assertArrayEquals( - "Bytes read do not match bytes written.", - writeBuffer, - readBuffer); + assertArrayEquals(writeBuffer, readBuffer, "Bytes read do not match bytes written."); } else { - assertThat( - "Bytes read unexpectedly match bytes written.", - readBuffer, - IsNot.not(IsEqual.equalTo(writeBuffer))); + assertThat(readBuffer) + .as("Bytes read unexpectedly match bytes written.") + .isNotEqualTo(writeBuffer); } } finally { stream.close(); @@ -420,14 +415,14 @@ private void validate(FileSystem fs, Path path, byte[] writeBuffer, boolean isEq byte[] readBuffer = new byte[TEST_FILE_LENGTH]; int numBytesRead = inputStream.read(readBuffer, 0, readBuffer.length); if (isEqual) { - assertArrayEquals( - String.format("Bytes read do not match bytes written to %1$s", filePath), writeBuffer, readBuffer); + assertArrayEquals(writeBuffer, readBuffer, + String.format("Bytes read do not match bytes written to %1$s", filePath)); } else { - assertThat( - String.format("Bytes read unexpectedly match bytes written to %1$s", - filePath), - readBuffer, - IsNot.not(IsEqual.equalTo(writeBuffer))); + String message = String.format( + "Bytes read unexpectedly match bytes written to %s", filePath); + assertThat(readBuffer) + .as(message) + .isNotEqualTo(writeBuffer); } } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java index 153edab897a11..f6ca7042b1cb6 100644 --- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemInitAndCreate.java @@ -25,7 +25,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.fs.azurebfs.constants.AbfsServiceType; @@ -49,6 +51,7 @@ import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.INCORRECT_INGRESS_TYPE; import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.mockito.ArgumentMatchers.any; +import org.junit.jupiter.api.Assertions; /** * Test filesystem initialization and creation. @@ -60,19 +63,23 @@ public ITestAzureBlobFileSystemInitAndCreate() throws Exception { this.getConfiguration().unset(ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION); } + @BeforeEach @Override public void setup() throws Exception { super.setup(); } + @AfterEach @Override public void teardown() { } - @Test (expected = FileNotFoundException.class) + @Test public void ensureFilesystemWillNotBeCreatedIfCreationConfigIsNotSet() throws Exception { - final AzureBlobFileSystem fs = this.createFileSystem(); - FileStatus[] fileStatuses = fs.listStatus(new Path("/")); + Assertions.assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = this.createFileSystem(); + FileStatus[] fileStatuses = fs.listStatus(new Path("/")); + }); } @Test diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java index af1e9e8496dc1..967cdbd3a02c7 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemLease.java @@ -20,10 +20,10 @@ import java.io.IOException; import java.util.concurrent.Callable; import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.junit.jupiter.api.Timeout; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.INFINITE_LEASE_DURATION; import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.CONDITION_NOT_MET; @@ -60,6 +61,7 @@ import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_LEASE_EXPIRED; import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_NO_LEASE_ID_SPECIFIED; import static org.apache.hadoop.fs.azurebfs.services.AbfsErrors.ERR_NO_LEASE_THREADS; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test lease operations. 
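The lease hunks below port `@Test(timeout = ...)` to Jupiter's `@Timeout`. One pitfall worth calling out: JUnit 4 timeouts were always milliseconds, while `@Timeout` defaults to seconds, so the unit must be stated when reusing a millisecond constant; note also that the two long-running tests keep their larger `LONG_TEST_EXECUTION_TIMEOUT` bound. A minimal sketch with a hypothetical constant:

```java
import java.util.concurrent.TimeUnit;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

class TimeoutSketch {
  // Hypothetical value; the real tests use TEST_EXECUTION_TIMEOUT.
  private static final long TIMEOUT_MS = 90_000L;

  @Test
  @Timeout(value = TIMEOUT_MS, unit = TimeUnit.MILLISECONDS) // default unit is SECONDS
  void completesWithinBound() throws InterruptedException {
    Thread.sleep(10); // stand-in for the lease operation under test
  }
}
```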
@@ -87,19 +89,21 @@ private AzureBlobFileSystem getCustomFileSystem(Path infiniteLeaseDirs, int numL return getFileSystem(conf); } - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testNoInfiniteLease() throws IOException { final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); final AzureBlobFileSystem fs = getFileSystem(); fs.mkdirs(testFilePath.getParent()); try (FSDataOutputStream out = fs.create(testFilePath)) { - Assert.assertFalse("Output stream should not have lease", - ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + Assertions.assertFalse( + ((AbfsOutputStream) out.getWrappedStream()).hasLease(), "Output stream should not have lease"); } - Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were not freed"); } - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testNoLeaseThreads() throws Exception { final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 0); @@ -111,22 +115,24 @@ public void testNoLeaseThreads() throws Exception { }); } - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testOneWriter() throws Exception { final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); fs.mkdirs(testFilePath.getParent()); FSDataOutputStream out = fs.create(testFilePath); - Assert.assertTrue("Output stream should have lease", - ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + Assertions.assertTrue( + ((AbfsOutputStream) out.getWrappedStream()).hasLease(), "Output stream should have lease"); out.close(); - Assert.assertFalse("Output stream should not have lease", - ((AbfsOutputStream) out.getWrappedStream()).hasLease()); - Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + Assertions.assertFalse( + ((AbfsOutputStream) out.getWrappedStream()).hasLease(), "Output stream should not have lease"); + Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were not freed"); } - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testSubDir() throws Exception { final Path testFilePath = new Path(new Path(path(methodName.getMethodName()), "subdir"), TEST_FILE); @@ -135,15 +141,16 @@ public void testSubDir() throws Exception { fs.mkdirs(testFilePath.getParent().getParent()); FSDataOutputStream out = fs.create(testFilePath); - Assert.assertTrue("Output stream should have lease", - ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + Assertions.assertTrue( + ((AbfsOutputStream) out.getWrappedStream()).hasLease(), "Output stream should have lease"); out.close(); - Assert.assertFalse("Output stream should not have lease", - ((AbfsOutputStream) out.getWrappedStream()).hasLease()); - Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + Assertions.assertFalse( + ((AbfsOutputStream) out.getWrappedStream()).hasLease(), "Output stream should not have lease"); + Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were 
not freed"); } - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testTwoCreate() throws Exception { final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); @@ -162,7 +169,7 @@ public void testTwoCreate() throws Exception { return "Expected second create on infinite lease dir to fail"; }); } - Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were not freed"); } private void twoWriters(AzureBlobFileSystem fs, Path testFilePath, boolean expectException) throws Exception { @@ -198,30 +205,33 @@ private void twoWriters(AzureBlobFileSystem fs, Path testFilePath, boolean expec } } - Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were not freed"); } - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testTwoWritersCreateAppendNoInfiniteLease() throws Exception { final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); final AzureBlobFileSystem fs = getFileSystem(); - Assume.assumeFalse("Parallel Writes Not Allowed on Append Blobs", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Parallel Writes Not Allowed on Append Blobs").isFalse(); fs.mkdirs(testFilePath.getParent()); twoWriters(fs, testFilePath, false); } - @Test(timeout = LONG_TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testTwoWritersCreateAppendWithInfiniteLeaseEnabled() throws Exception { final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); - Assume.assumeFalse("Parallel Writes Not Allowed on Append Blobs", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Parallel Writes Not Allowed on Append Blobs").isFalse(); fs.mkdirs(testFilePath.getParent()); twoWriters(fs, testFilePath, true); } - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testLeaseFreedOnClose() throws Exception { final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); @@ -230,15 +240,16 @@ public void testLeaseFreedOnClose() throws Exception { FSDataOutputStream out; out = fs.create(testFilePath); out.write(0); - Assert.assertTrue("Output stream should have lease", - ((AbfsOutputStream) out.getWrappedStream()).hasLease()); + Assertions.assertTrue( + ((AbfsOutputStream) out.getWrappedStream()).hasLease(), "Output stream should have lease"); out.close(); - Assert.assertFalse("Output stream should not have lease after close", - ((AbfsOutputStream) out.getWrappedStream()).hasLease()); - Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + Assertions.assertFalse( + ((AbfsOutputStream) out.getWrappedStream()).hasLease(), "Output stream should not have lease after close"); + Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were not freed"); } - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = 
   public void testWriteAfterBreakLease() throws Exception {
     final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE);
     final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1);
@@ -271,18 +282,19 @@ public void testWriteAfterBreakLease() throws Exception {
       return "Expected exception on close after lease break but got " + out;
     });

-    Assert.assertTrue("Output stream lease should be freed",
-        ((AbfsOutputStream) out.getWrappedStream()).isLeaseFreed());
+    Assertions.assertTrue(
+        ((AbfsOutputStream) out.getWrappedStream()).isLeaseFreed(), "Output stream lease should be freed");

     try (FSDataOutputStream out2 = fs.append(testFilePath)) {
       out2.write(2);
       out2.hsync();
     }

-    Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed());
+    Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were not freed");
   }

-  @Test(timeout = LONG_TEST_EXECUTION_TIMEOUT)
+  @Test
+  @Timeout(value = LONG_TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS)
   public void testLeaseFreedAfterBreak() throws Exception {
     final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE);
     final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1);
@@ -299,34 +311,36 @@ public void testLeaseFreedAfterBreak() throws Exception {
       return "Expected exception on close after lease break but got " + out;
     });

-    Assert.assertTrue("Output stream lease should be freed",
-        ((AbfsOutputStream) out.getWrappedStream()).isLeaseFreed());
+    Assertions.assertTrue(
+        ((AbfsOutputStream) out.getWrappedStream()).isLeaseFreed(), "Output stream lease should be freed");

-    Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed());
+    Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were not freed");
   }

-  @Test(timeout = TEST_EXECUTION_TIMEOUT)
+  @Test
+  @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS)
   public void testInfiniteLease() throws Exception {
     final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE);
     final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1);
     fs.mkdirs(testFilePath.getParent());
     try (FSDataOutputStream out = fs.create(testFilePath)) {
-      Assert.assertTrue("Output stream should have lease",
-          ((AbfsOutputStream) out.getWrappedStream()).hasLease());
+      Assertions.assertTrue(
+          ((AbfsOutputStream) out.getWrappedStream()).hasLease(), "Output stream should have lease");
       out.write(0);
     }
-    Assert.assertTrue(fs.getAbfsStore().areLeasesFreed());
+    Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed());

     try (FSDataOutputStream out = fs.append(testFilePath)) {
-      Assert.assertTrue("Output stream should have lease",
-          ((AbfsOutputStream) out.getWrappedStream()).hasLease());
+      Assertions.assertTrue(
+          ((AbfsOutputStream) out.getWrappedStream()).hasLease(), "Output stream should have lease");
       out.write(1);
     }
-    Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed());
+    Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were not freed");
   }

-  @Test(timeout = TEST_EXECUTION_TIMEOUT)
+  @Test
+  @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS)
   public void testFileSystemClose() throws Exception {
     final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE);
     final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1);
@@ -334,11 +348,11 @@ public void
testFileSystemClose() throws Exception { try (FSDataOutputStream out = fs.create(testFilePath)) { out.write(0); - Assert.assertFalse("Store leases should exist", - fs.getAbfsStore().areLeasesFreed()); + Assertions.assertFalse( + fs.getAbfsStore().areLeasesFreed(), "Store leases should exist"); } fs.close(); - Assert.assertTrue("Store leases were not freed", fs.getAbfsStore().areLeasesFreed()); + Assertions.assertTrue(fs.getAbfsStore().areLeasesFreed(), "Store leases were not freed"); Callable exceptionRaisingCallable = () -> { try (FSDataOutputStream out2 = fs.append(testFilePath)) { @@ -365,7 +379,8 @@ public void testFileSystemClose() throws Exception { } } - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testAcquireRetry() throws Exception { final Path testFilePath = new Path(path(methodName.getMethodName()), TEST_FILE); final AzureBlobFileSystem fs = getCustomFileSystem(testFilePath.getParent(), 1); @@ -380,11 +395,11 @@ public void testAcquireRetry() throws Exception { AbfsLease lease = new AbfsLease(fs.getAbfsClient(), testFilePath.toUri().getPath(), true, INFINITE_LEASE_DURATION, null, tracingContext); - Assert.assertNotNull("Did not successfully lease file", lease.getLeaseID()); + Assertions.assertNotNull(lease.getLeaseID(), "Did not successfully lease file"); listener.setOperation(FSOperationType.RELEASE_LEASE); lease.free(); lease.getTracingContext().setListener(null); - Assert.assertEquals("Unexpected acquire retry count", 0, lease.getAcquireRetryCount()); + Assertions.assertEquals(0, lease.getAcquireRetryCount(), "Unexpected acquire retry count"); AbfsClient mockClient = spy(fs.getAbfsClient()); @@ -395,9 +410,9 @@ public void testAcquireRetry() throws Exception { lease = new AbfsLease(mockClient, testFilePath.toUri().getPath(), true, 5, 1, INFINITE_LEASE_DURATION, null, tracingContext); - Assert.assertNotNull("Acquire lease should have retried", lease.getLeaseID()); + Assertions.assertNotNull(lease.getLeaseID(), "Acquire lease should have retried"); lease.free(); - Assert.assertEquals("Unexpected acquire retry count", 2, lease.getAcquireRetryCount()); + Assertions.assertEquals(2, lease.getAcquireRetryCount(), "Unexpected acquire retry count"); doThrow(new AbfsLease.LeaseException("failed to acquire")).when(mockClient) .acquireLease(anyString(), anyInt(), any(), any(TracingContext.class)); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java index e491362df5c90..dd9c329df4949 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemListStatus.java @@ -32,8 +32,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; -import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.mockito.stubbing.Stubber; @@ -87,6 +86,8 @@ import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; +import static org.assertj.core.api.Assertions.assertThat; +import org.junit.jupiter.api.Assertions; /** * Test listStatus operation. 
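The listStatus hunks switch from the qualified `Assertions.assertThat` to a static import; `describedAs` carries the failure message that the JUnit message argument used to. A small sketch with a hypothetical `statuses` array:

```java
import static org.assertj.core.api.Assertions.assertThat;

class DescribedAsSketch {
  void demo(Object[] statuses) {
    // describedAs (alias of as) sets the text reported when any assertion
    // chained after it fails.
    assertThat(statuses.length)
        .describedAs("List size is not expected")
        .isEqualTo(1);
  }
}
```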
@@ -211,10 +212,10 @@ public void testListPathParsingFailure() throws Exception { spiedStore.listStatus(new Path("/"), "", fileStatuses, true, null, getTestTracingContext(spiedFs, true)); }); - Assertions.assertThat(ex.getStatusCode()) + assertThat(ex.getStatusCode()) .describedAs("Expecting Network Error status code") .isEqualTo(-1); - Assertions.assertThat(ex.getErrorMessage()) + assertThat(ex.getErrorMessage()) .describedAs("Expecting COPY_ABORTED error code") .contains(ERR_BLOB_LIST_PARSING); } @@ -229,7 +230,7 @@ public void testListFileVsListDir() throws Exception { Path path = path("/testFile"); try(FSDataOutputStream ignored = fs.create(path)) { FileStatus[] testFiles = fs.listStatus(path); - assertEquals("length of test files", 1, testFiles.length); + assertEquals(1, testFiles.length, "length of test files"); FileStatus status = testFiles[0]; assertIsFileReference(status); } @@ -247,18 +248,19 @@ public void testListFileVsListDir2() throws Exception { ContractTestUtils.touch(fs, testFile0Path); FileStatus[] testFiles = fs.listStatus(testFile0Path); - assertEquals("Wrong listing size of file " + testFile0Path, - 1, testFiles.length); + assertEquals(1, testFiles.length, "Wrong listing size of file " + testFile0Path); FileStatus file0 = testFiles[0]; - assertEquals("Wrong path for " + file0, new Path(getTestUrl(), - testFolder + "/testFolder2/testFolder3/testFile"), file0.getPath()); + assertEquals(new Path(getTestUrl(), testFolder + "/testFolder2/testFolder3/testFile"), + file0.getPath(), "Wrong path for " + file0); assertIsFileReference(file0); } - @Test(expected = FileNotFoundException.class) + @Test public void testListNonExistentDir() throws Exception { - final AzureBlobFileSystem fs = getFileSystem(); - fs.listStatus(new Path("/testFile/")); + Assertions.assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = getFileSystem(); + fs.listStatus(new Path("/testFile/")); + }); } @Test @@ -296,23 +298,20 @@ public void testListFiles() throws Exception { () -> fs.listFiles(childF, false).next()); // do some final checks on the status (failing due to version checks) - assertEquals("Path mismatch of " + locatedChildStatus, - childF, locatedChildStatus.getPath()); - assertEquals("locatedstatus.equals(status)", - locatedChildStatus, childStatus); - assertEquals("status.equals(locatedstatus)", - childStatus, locatedChildStatus); + assertEquals(childF, locatedChildStatus.getPath(), "Path mismatch of " + locatedChildStatus); + assertEquals(locatedChildStatus, childStatus, "locatedstatus.equals(status)"); + assertEquals(childStatus, locatedChildStatus, "status.equals(locatedstatus)"); } private void assertIsDirectoryReference(FileStatus status) { - assertTrue("Not a directory: " + status, status.isDirectory()); - assertFalse("Not a directory: " + status, status.isFile()); + assertTrue(status.isDirectory(), "Not a directory: " + status); + assertFalse(status.isFile(), "Not a directory: " + status); assertEquals(0, status.getLen()); } private void assertIsFileReference(FileStatus status) { - assertFalse("Not a file: " + status, status.isDirectory()); - assertTrue("Not a file: " + status, status.isFile()); + assertFalse(status.isDirectory(), "Not a file: " + status); + assertTrue(status.isFile(), "Not a file: " + status); } @Test @@ -331,8 +330,8 @@ public void testMkdirTrailingPeriodDirName() throws IOException { catch(IllegalArgumentException e) { exceptionThrown = true; } - assertTrue("Attempt to create file that ended with a dot should" - + " throw 
IllegalArgumentException", exceptionThrown); + assertTrue(exceptionThrown, "Attempt to create file that ended with a dot should" + + " throw IllegalArgumentException"); } @Test @@ -353,8 +352,8 @@ public void testCreateTrailingPeriodFileName() throws IOException { catch(IllegalArgumentException e) { exceptionThrown = true; } - assertTrue("Attempt to create file that ended with a dot should" - + " throw IllegalArgumentException", exceptionThrown); + assertTrue(exceptionThrown, "Attempt to create file that ended with a dot should" + + " throw IllegalArgumentException"); } @Test @@ -372,8 +371,8 @@ public void testRenameTrailingPeriodFile() throws IOException { catch(IllegalArgumentException e) { exceptionThrown = true; } - assertTrue("Attempt to create file that ended with a dot should" - + " throw IllegalArgumentException", exceptionThrown); + assertTrue(exceptionThrown, "Attempt to create file that ended with a dot should" + + " throw IllegalArgumentException"); } @Test @@ -478,7 +477,7 @@ private void testEmptyListingInSubsequentCallInternal(String firstCT, any(), any(TracingContext.class), any()); Mockito.verify(spiedClient, times(1)) .postListProcessing(eq("/testPath"), any(), any(), any()); - Assertions.assertThat(list).hasSize(expectedSize); + assertThat(list).hasSize(expectedSize); if (expectedSize == 0) { Mockito.verify(spiedClient, times(1)) @@ -488,18 +487,18 @@ private void testEmptyListingInSubsequentCallInternal(String firstCT, .getPathStatus(eq("/testPath"), any(), eq(null), eq(false)); } - Assertions.assertThat(continuationTokenUsed[0]) + assertThat(continuationTokenUsed[0]) .describedAs("First continuation token used is not as expected") .isNull(); if (expectedInvocations > 1) { - Assertions.assertThat(continuationTokenUsed[1]) + assertThat(continuationTokenUsed[1]) .describedAs("Second continuation token used is not as expected") .isEqualTo(firstCT); } if (expectedInvocations > 2) { - Assertions.assertThat(continuationTokenUsed[2]) + assertThat(continuationTokenUsed[2]) .describedAs("Third continuation token used is not as expected") .isEqualTo(secondCT); } @@ -523,7 +522,7 @@ public void testListStatusWithImplicitExplicitChildren() throws Exception { // Assert that implicit directory is returned FileStatus[] fileStatuses = fs.listStatus(root); - Assertions.assertThat(fileStatuses.length) + assertThat(fileStatuses.length) .describedAs("List size is not expected").isEqualTo(1); assertImplicitDirectoryFileStatus(fileStatuses[0], fs.makeQualified(dir)); @@ -532,7 +531,7 @@ public void testListStatusWithImplicitExplicitChildren() throws Exception { // Assert that only one entry of explicit directory is returned fileStatuses = fs.listStatus(root); - Assertions.assertThat(fileStatuses.length) + assertThat(fileStatuses.length) .describedAs("List size is not expected").isEqualTo(1); assertExplicitDirectoryFileStatus(fileStatuses[0], fs.makeQualified(dir)); @@ -542,7 +541,7 @@ public void testListStatusWithImplicitExplicitChildren() throws Exception { // Assert that two entries are returned in alphabetic order. fileStatuses = fs.listStatus(root); - Assertions.assertThat(fileStatuses.length) + assertThat(fileStatuses.length) .describedAs("List size is not expected").isEqualTo(2); assertExplicitDirectoryFileStatus(fileStatuses[0], fs.makeQualified(dir)); assertFilePathFileStatus(fileStatuses[1], fs.makeQualified(file1)); @@ -553,7 +552,7 @@ public void testListStatusWithImplicitExplicitChildren() throws Exception { // Assert that three entries are returned in alphabetic order. 
fileStatuses = fs.listStatus(root); - Assertions.assertThat(fileStatuses.length) + assertThat(fileStatuses.length) .describedAs("List size is not expected").isEqualTo(3); assertExplicitDirectoryFileStatus(fileStatuses[0], fs.makeQualified(dir)); assertFilePathFileStatus(fileStatuses[1], fs.makeQualified(file1)); @@ -571,12 +570,12 @@ public void testListStatusOnImplicitDirectoryPath() throws Exception { createAzCopyFolder(implicitPath); FileStatus[] statuses = fs.listStatus(implicitPath); - Assertions.assertThat(statuses.length) + assertThat(statuses.length) .describedAs("List size is not expected").isGreaterThanOrEqualTo(1); assertImplicitDirectoryFileStatus(statuses[0], fs.makeQualified(statuses[0].getPath())); FileStatus[] statuses1 = fs.listStatus(new Path(statuses[0].getPath().toString())); - Assertions.assertThat(statuses1.length) + assertThat(statuses1.length) .describedAs("List size is not expected").isGreaterThanOrEqualTo(1); assertFilePathFileStatus(statuses1[0], fs.makeQualified(statuses1[0].getPath())); } @@ -588,7 +587,7 @@ public void testListStatusOnEmptyDirectory() throws Exception { fs.mkdirs(emptyDir); FileStatus[] statuses = fs.listStatus(emptyDir); - Assertions.assertThat(statuses.length) + assertThat(statuses.length) .describedAs("List size is not expected").isEqualTo(0); } @@ -599,7 +598,7 @@ public void testListStatusOnRenamePendingJsonFile() throws Exception { fs.create(renamePendingJsonPath); FileStatus[] statuses = fs.listStatus(renamePendingJsonPath); - Assertions.assertThat(statuses.length) + assertThat(statuses.length) .describedAs("List size is not expected").isEqualTo(1); assertFilePathFileStatus(statuses[0], fs.makeQualified(statuses[0].getPath())); } @@ -618,18 +617,18 @@ public void testContinuationTokenAcrossListStatus() throws Exception { "/testContinuationToken", false, 1, null, getTestTracingContext(fs, true), fs.getAbfsStore().getUri()); - Assertions.assertThat(listResponseData.getContinuationToken()) + assertThat(listResponseData.getContinuationToken()) .describedAs("Continuation Token Should not be null").isNotNull(); - Assertions.assertThat(listResponseData.getFileStatusList()) + assertThat(listResponseData.getFileStatusList()) .describedAs("Listing Size Not as expected").hasSize(1); ListResponseData listResponseData1 = fs.getAbfsStore().getClient().listPath( "/testContinuationToken", false, 1, listResponseData.getContinuationToken(), getTestTracingContext(fs, true), fs.getAbfsStore().getUri()); - Assertions.assertThat(listResponseData1.getContinuationToken()) + assertThat(listResponseData1.getContinuationToken()) .describedAs("Continuation Token Should be null").isNull(); - Assertions.assertThat(listResponseData1.getFileStatusList()) + assertThat(listResponseData1.getFileStatusList()) .describedAs("Listing Size Not as expected").hasSize(1); } @@ -660,9 +659,9 @@ public void testEmptyContinuationToken() throws Exception { "/testInvalidContinuationToken", false, 1, "", getTestTracingContext(fs, true), fs.getAbfsStore().getUri()); - Assertions.assertThat(listResponseData.getContinuationToken()) + assertThat(listResponseData.getContinuationToken()) .describedAs("Continuation Token Should Not be null").isNotNull(); - Assertions.assertThat(listResponseData.getFileStatusList()) + assertThat(listResponseData.getFileStatusList()) .describedAs("Listing Size Not as expected").hasSize(1); } @@ -726,7 +725,7 @@ public void testDuplicateEntriesAcrossListBlobIterations() throws Exception { .listPath(eq(ROOT_PATH), eq(false), eq(1), any(), any(), any()); // 
Assert that after duplicate removal, only 7 unique entries are returned. - Assertions.assertThat(fileStatuses.length) + assertThat(fileStatuses.length) .describedAs("List size is not expected").isEqualTo(NUMBER_OF_UNIQUE_PATHS); // Assert that for duplicates, entry corresponding to marker blob is returned. @@ -741,7 +740,7 @@ public void testDuplicateEntriesAcrossListBlobIterations() throws Exception { // Assert that there are no duplicates in the returned file statuses. Set uniquePaths = new HashSet<>(); for (FileStatus fileStatus : fileStatuses) { - Assertions.assertThat(uniquePaths.add(fileStatus.getPath())) + assertThat(uniquePaths.add(fileStatus.getPath())) .describedAs("Duplicate Entries found") .isTrue(); } @@ -749,13 +748,13 @@ public void testDuplicateEntriesAcrossListBlobIterations() throws Exception { private void assertFilePathFileStatus(final FileStatus fileStatus, final Path qualifiedPath) { - Assertions.assertThat(fileStatus.getPath()) + assertThat(fileStatus.getPath()) .describedAs("Path Not as expected").isEqualTo(qualifiedPath); - Assertions.assertThat(fileStatus.isFile()) + assertThat(fileStatus.isFile()) .describedAs("Expecting a File Path").isEqualTo(true); - Assertions.assertThat(fileStatus.isDirectory()) + assertThat(fileStatus.isDirectory()) .describedAs("Expecting a File Path").isEqualTo(false); - Assertions.assertThat(fileStatus.getModificationTime()).isNotEqualTo(0); + assertThat(fileStatus.getModificationTime()).isNotEqualTo(0); } private void assertImplicitDirectoryFileStatus(final FileStatus fileStatus, @@ -763,7 +762,7 @@ private void assertImplicitDirectoryFileStatus(final FileStatus fileStatus, assertDirectoryFileStatus(fileStatus, qualifiedPath); DirectoryStateHelper.isImplicitDirectory(qualifiedPath, getFileSystem(), getTestTracingContext(getFileSystem(), true)); - Assertions.assertThat(fileStatus.getModificationTime()) + assertThat(fileStatus.getModificationTime()) .describedAs("Last Modified Time Not as Expected").isEqualTo(0); } @@ -772,19 +771,19 @@ private void assertExplicitDirectoryFileStatus(final FileStatus fileStatus, assertDirectoryFileStatus(fileStatus, qualifiedPath); DirectoryStateHelper.isExplicitDirectory(qualifiedPath, getFileSystem(), getTestTracingContext(getFileSystem(), true)); - Assertions.assertThat(fileStatus.getModificationTime()) + assertThat(fileStatus.getModificationTime()) .describedAs("Last Modified Time Not as Expected").isNotEqualTo(0); } private void assertDirectoryFileStatus(final FileStatus fileStatus, final Path qualifiedPath) { - Assertions.assertThat(fileStatus.getPath()) + assertThat(fileStatus.getPath()) .describedAs("Path Not as Expected").isEqualTo(qualifiedPath); - Assertions.assertThat(fileStatus.isDirectory()) + assertThat(fileStatus.isDirectory()) .describedAs("Expecting a Directory Path").isEqualTo(true); - Assertions.assertThat(fileStatus.isFile()) + assertThat(fileStatus.isFile()) .describedAs("Expecting a Directory Path").isEqualTo(false); - Assertions.assertThat(fileStatus.getLen()) + assertThat(fileStatus.getLen()) .describedAs("Content Length Not as Expected").isEqualTo(0); } @@ -851,17 +850,17 @@ private void testIsDirectory(boolean expected, String... 
configName) throws Exce true, getTestTracingContext(fs, true), null).getResult(); - Assertions.assertThat(abfsBlobClient.checkIsDir(op)) + assertThat(abfsBlobClient.checkIsDir(op)) .describedAs("Directory should be marked as " + expected) .isEqualTo(expected); // Verify the header and directory state - Assertions.assertThat(fileStatus.length) + assertThat(fileStatus.length) .describedAs("Expected directory state: " + expected) .isEqualTo(1); // Verify the header and directory state - Assertions.assertThat(fileStatus[0].isDirectory()) + assertThat(fileStatus[0].isDirectory()) .describedAs("Expected directory state: " + expected) .isEqualTo(expected); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java index e54b98e0b7a6e..a4d5ad5d6bac2 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemMkDir.java @@ -20,8 +20,7 @@ import java.util.UUID; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; @@ -36,6 +35,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test mkdir operation. @@ -48,9 +48,8 @@ public ITestAzureBlobFileSystemMkDir() throws Exception { @Test public void testCreateDirWithExistingDir() throws Exception { - Assume.assumeTrue( - DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE || !getIsNamespaceEnabled( - getFileSystem())); + assumeThat(DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE || !getIsNamespaceEnabled(getFileSystem())) + .isTrue(); final AzureBlobFileSystem fs = getFileSystem(); Path path = path("testFolder"); assertMkdirs(fs, path); @@ -59,10 +58,11 @@ public void testCreateDirWithExistingDir() throws Exception { @Test public void testMkdirExistingDirOverwriteFalse() throws Exception { - Assume.assumeFalse("Ignore test until default overwrite is set to false", - DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE); - Assume.assumeTrue("Ignore test for Non-HNS accounts", - getIsNamespaceEnabled(getFileSystem())); + assumeThat(DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE) + .as("Ignore test until default overwrite is set to false") + .isFalse(); + assumeThat(getIsNamespaceEnabled(getFileSystem())) + .as("Ignore test for Non-HNS accounts").isTrue(); //execute test only for HNS account with default overwrite=false Configuration config = new Configuration(this.getRawConfiguration()); config.set(FS_AZURE_ENABLE_MKDIR_OVERWRITE, Boolean.toString(false)); @@ -71,15 +71,16 @@ public void testMkdirExistingDirOverwriteFalse() throws Exception { assertMkdirs(fs, path); //checks that mkdirs returns true long timeCreated = fs.getFileStatus(path).getModificationTime(); assertMkdirs(fs, path); //call to existing dir should return success - assertEquals("LMT should not be updated for existing dir", timeCreated, - fs.getFileStatus(path).getModificationTime()); + assertEquals(timeCreated, fs.getFileStatus(path).getModificationTime(), + "LMT should not be updated for existing dir"); } @Test public void 
createDirWithExistingFilename() throws Exception { - Assume.assumeFalse("Ignore test until default overwrite is set to false", - DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE && getIsNamespaceEnabled( - getFileSystem())); + assumeThat(DEFAULT_FS_AZURE_ENABLE_MKDIR_OVERWRITE + && getIsNamespaceEnabled(getFileSystem())) + .as("Ignore test until default overwrite is set to false") + .isFalse(); final AzureBlobFileSystem fs = getFileSystem(); Path path = path("testFilePath"); fs.create(path).close(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java index f81afa24c2f49..c719fb37d71c5 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemOauth.java @@ -22,8 +22,7 @@ import java.io.InputStream; import java.util.Map; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,6 +50,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathDoesNotExist; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; import static org.assertj.core.api.AssertionsForClassTypes.assertThatCode; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test Azure Oauth with Blob Data contributor role and Blob Data Reader role. @@ -66,7 +66,7 @@ public class ITestAzureBlobFileSystemOauth extends AbstractAbfsIntegrationTest{ LoggerFactory.getLogger(ITestAbfsStreamStatistics.class); public ITestAzureBlobFileSystemOauth() throws Exception { - Assume.assumeTrue(this.getAuthType() == AuthType.OAuth); + assumeThat(this.getAuthType()).isEqualTo(AuthType.OAuth); } /* * BLOB DATA CONTRIBUTOR should have full access to the container and blobs in the container. 
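Reviewer note: the Assume-to-AssertJ conversions in the two files above all reduce to one idiom. A minimal, self-contained sketch of that idiom follows; the class name and the config lookup are illustrative stand-ins, not part of this patch.

import static org.assertj.core.api.Assumptions.assumeThat;

import org.junit.jupiter.api.Test;

// Illustrative sketch only: how an AssertJ assumption aborts (rather than
// fails) a JUnit 5 test when a precondition is not met.
public class AssumptionIdiomSketch {
  @Test
  public void skipsWhenConfigMissing() {
    // Stand-in for a test-account config lookup.
    String clientId = System.getProperty("client.id");
    // JUnit 4 equivalent: Assume.assumeTrue("client id not provided", clientId != null);
    // On a null value this throws an aborted-test exception, so the runner
    // reports the test as skipped instead of failed.
    assumeThat(clientId).as("client id not provided").isNotNull();
  }
}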
@@ -74,9 +74,9 @@ public ITestAzureBlobFileSystemOauth() throws Exception { @Test public void testBlobDataContributor() throws Exception { String clientId = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_ID); - Assume.assumeTrue("Contributor client id not provided", clientId != null); + assumeThat(clientId).as("Contributor client id not provided").isNotNull(); String secret = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_CONTRIBUTOR_CLIENT_SECRET); - Assume.assumeTrue("Contributor client secret not provided", secret != null); + assumeThat(secret).as("Contributor client secret not provided").isNotNull(); Path existedFilePath = path(EXISTED_FILE_PATH); Path existedFolderPath = path(EXISTED_FOLDER_PATH); @@ -129,9 +129,9 @@ public void testBlobDataContributor() throws Exception { @Test public void testBlobDataReader() throws Exception { String clientId = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_ID); - Assume.assumeTrue("Reader client id not provided", clientId != null); + assumeThat(clientId).as("Reader client id not provided").isNotNull(); String secret = this.getConfiguration().get(TestConfigurationKeys.FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET); - Assume.assumeTrue("Reader client secret not provided", secret != null); + assumeThat(secret).as("Reader client secret not provided").isNotNull(); Path existedFilePath = path(EXISTED_FILE_PATH); Path existedFolderPath = path(EXISTED_FOLDER_PATH); @@ -177,9 +177,9 @@ public void testBlobDataReader() throws Exception { @Test public void testGetPathStatusWithReader() throws Exception { String clientId = this.getConfiguration().get(FS_AZURE_BLOB_DATA_READER_CLIENT_ID); - Assume.assumeTrue("Reader client id not provided", clientId != null); + assumeThat(clientId).as("Reader client id not provided").isNotNull(); String secret = this.getConfiguration().get(FS_AZURE_BLOB_DATA_READER_CLIENT_SECRET); - Assume.assumeTrue("Reader client secret not provided", secret != null); + assumeThat(secret).as("Reader client secret not provided").isNotNull(); Path existedFolderPath = path(EXISTED_FOLDER_PATH); createAzCopyFolder(existedFolderPath); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java index 00f5af02622a6..e1b79c2d1f27c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemPermission.java @@ -23,23 +23,24 @@ import java.util.UUID; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.constants.AbfsServiceType; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.azurebfs.utils.Parallelized; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test permission operations. 
*/ -@RunWith(Parallelized.class) +@ParameterizedClass(name="{0}") +@MethodSource("abfsCreateNonRecursiveTestData") public class ITestAzureBlobFileSystemPermission extends AbstractAbfsIntegrationTest{ private static Path testRoot = new Path("/test"); @@ -55,7 +56,6 @@ public ITestAzureBlobFileSystemPermission(FsPermission testPermission) throws Ex permission = testPermission; } - @Parameterized.Parameters(name = "{0}") public static Collection abfsCreateNonRecursiveTestData() throws Exception { /* @@ -77,8 +77,8 @@ public static Collection abfsCreateNonRecursiveTestData() public void testFilePermission() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(getIsNamespaceEnabled(fs)); - Assume.assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); + assumeThat(getIsNamespaceEnabled(fs)).isTrue(); + assumeThat(getIngressServiceType()).isEqualTo(AbfsServiceType.DFS); fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, DEFAULT_UMASK_VALUE); path = new Path(testRoot, UUID.randomUUID().toString()); @@ -89,14 +89,14 @@ public void testFilePermission() throws Exception { fs.create(path, permission, true, KILOBYTE, (short) 1, KILOBYTE - 1, null).close(); FileStatus status = fs.getFileStatus(path); - Assert.assertEquals(permission.applyUMask(DEFAULT_UMASK_PERMISSION), status.getPermission()); + Assertions.assertEquals(permission.applyUMask(DEFAULT_UMASK_PERMISSION), status.getPermission()); } @Test public void testFolderPermission() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(getIsNamespaceEnabled(fs)); - Assume.assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); + assumeThat(getIsNamespaceEnabled(fs)).isTrue(); + assumeThat(getIngressServiceType()).isEqualTo(AbfsServiceType.DFS); fs.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "027"); path = new Path(testRoot, UUID.randomUUID().toString()); @@ -107,6 +107,6 @@ public void testFolderPermission() throws Exception { fs.mkdirs(path, permission); FileStatus status = fs.getFileStatus(path); - Assert.assertEquals(permission.applyUMask(DEFAULT_UMASK_PERMISSION), status.getPermission()); + Assertions.assertEquals(permission.applyUMask(DEFAULT_UMASK_PERMISSION), status.getPermission()); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java index 9c762866ca299..1887fa3537884 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRandomRead.java @@ -23,9 +23,8 @@ import java.util.concurrent.Callable; import java.util.UUID; -import org.junit.Assume; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,6 +45,7 @@ import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.BYTES_RECEIVED; import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.GET_RESPONSES; import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.ETAG; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test random read operation. 
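Reviewer note: @ParameterizedClass is the JUnit Jupiter replacement for JUnit 4's @RunWith(Parameterized.class) used above. A hedged sketch of the shape this patch adopts, assuming JUnit Jupiter 5.13+ and using illustrative names:

import java.util.Arrays;
import java.util.Collection;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedClass;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertTrue;

// Each @Test below runs once per row returned by params().
@ParameterizedClass(name = "{0}")
@MethodSource("params")
class ParameterizedClassSketch {
  private final int size;

  // Constructor injection replaces JUnit 4's public @Parameterized.Parameter fields.
  ParameterizedClassSketch(int size) {
    this.size = size;
  }

  static Collection<Object[]> params() {
    return Arrays.asList(new Object[][]{{1}, {2}, {3}});
  }

  @Test
  void sizeIsPositive() {
    assertTrue(size > 0, "parameter should be positive");
  }
}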
@@ -95,7 +95,7 @@ public void testBasicRead() throws Exception { // forward seek and read a kilobyte into first kilobyte of bufferV2 inputStream.seek(5 * MEGABYTE); int numBytesRead = inputStream.read(buffer, 0, KILOBYTE); - assertEquals("Wrong number of bytes read", KILOBYTE, numBytesRead); + assertEquals(KILOBYTE, numBytesRead, "Wrong number of bytes read"); int len = MEGABYTE; int offset = buffer.length - len; @@ -103,7 +103,7 @@ public void testBasicRead() throws Exception { // reverse seek and read a megabyte into last megabyte of bufferV1 inputStream.seek(3 * MEGABYTE); numBytesRead = inputStream.read(buffer, offset, len); - assertEquals("Wrong number of bytes read after seek", len, numBytesRead); + assertEquals(len, numBytesRead, "Wrong number of bytes read after seek"); } } @@ -113,9 +113,10 @@ public void testBasicRead() throws Exception { */ @Test public void testRandomRead() throws Exception { - Assume.assumeFalse("This test does not support namespace enabled account", - getIsNamespaceEnabled(getFileSystem())); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(getFileSystem())) + .as("This test does not support namespace enabled account") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); Path testPath = path(TEST_FILE_PREFIX + "_testRandomRead"); assumeHugeFileExists(testPath); @@ -216,11 +217,9 @@ public Long call() throws Exception { } ); long elapsedTimeMs = timer.elapsedTimeMs(); - assertTrue( - String.format( - "There should not be any network I/O (elapsedTimeMs=%1$d).", - elapsedTimeMs), - elapsedTimeMs < MAX_ELAPSEDTIMEMS); + assertTrue(elapsedTimeMs < MAX_ELAPSEDTIMEMS, String.format( + "There should not be any network I/O (elapsedTimeMs=%1$d).", + elapsedTimeMs)); } } @@ -251,7 +250,7 @@ public FSDataInputStream call() throws Exception { } ); - assertTrue("Test file length only " + testFileLength, testFileLength > 0); + assertTrue(testFileLength > 0, "Test file length only " + testFileLength); inputStream.seek(testFileLength); assertEquals(testFileLength, inputStream.getPos()); @@ -267,11 +266,9 @@ public FSDataInputStream call() throws Exception { ); long elapsedTimeMs = timer.elapsedTimeMs(); - assertTrue( - String.format( - "There should not be any network I/O (elapsedTimeMs=%1$d).", - elapsedTimeMs), - elapsedTimeMs < MAX_ELAPSEDTIMEMS); + assertTrue(elapsedTimeMs < MAX_ELAPSEDTIMEMS, String.format( + "There should not be any network I/O (elapsedTimeMs=%1$d).", + elapsedTimeMs)); } } @@ -296,15 +293,13 @@ public void testSeekAndAvailableAndPosition() throws Exception { assertEquals(buffer.length, bytesRead); assertArrayEquals(expected1, buffer); assertEquals(buffer.length, inputStream.getPos()); - assertEquals(testFileLength - inputStream.getPos(), - inputStream.available()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); bytesRead = inputStream.read(buffer); assertEquals(buffer.length, bytesRead); assertArrayEquals(expected2, buffer); assertEquals(2 * buffer.length, inputStream.getPos()); - assertEquals(testFileLength - inputStream.getPos(), - inputStream.available()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); // reverse seek int seekPos = 0; @@ -314,8 +309,7 @@ public void testSeekAndAvailableAndPosition() throws Exception { assertEquals(buffer.length, bytesRead); assertArrayEquals(expected1, buffer); assertEquals(buffer.length + seekPos, inputStream.getPos()); - assertEquals(testFileLength - 
inputStream.getPos(), - inputStream.available()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); // reverse seek seekPos = 1; @@ -325,8 +319,7 @@ public void testSeekAndAvailableAndPosition() throws Exception { assertEquals(buffer.length, bytesRead); assertArrayEquals(expected3, buffer); assertEquals(buffer.length + seekPos, inputStream.getPos()); - assertEquals(testFileLength - inputStream.getPos(), - inputStream.available()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); // forward seek seekPos = 6; @@ -336,8 +329,7 @@ public void testSeekAndAvailableAndPosition() throws Exception { assertEquals(buffer.length, bytesRead); assertArrayEquals(expected4, buffer); assertEquals(buffer.length + seekPos, inputStream.getPos()); - assertEquals(testFileLength - inputStream.getPos(), - inputStream.available()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); } } @@ -364,8 +356,7 @@ public void testSkipAndAvailableAndPosition() throws Exception { long skipped = inputStream.skip(n); assertEquals(skipped, inputStream.getPos()); - assertEquals(testFileLength - inputStream.getPos(), - inputStream.available()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); assertEquals(skipped, n); byte[] buffer = new byte[3]; @@ -373,8 +364,7 @@ public void testSkipAndAvailableAndPosition() throws Exception { assertEquals(buffer.length, bytesRead); assertArrayEquals(expected2, buffer); assertEquals(buffer.length + skipped, inputStream.getPos()); - assertEquals(testFileLength - inputStream.getPos(), - inputStream.available()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); // does skip still work after seek? int seekPos = 1; @@ -384,25 +374,21 @@ public void testSkipAndAvailableAndPosition() throws Exception { assertEquals(buffer.length, bytesRead); assertArrayEquals(expected3, buffer); assertEquals(buffer.length + seekPos, inputStream.getPos()); - assertEquals(testFileLength - inputStream.getPos(), - inputStream.available()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); long currentPosition = inputStream.getPos(); n = 2; skipped = inputStream.skip(n); assertEquals(currentPosition + skipped, inputStream.getPos()); - assertEquals(testFileLength - inputStream.getPos(), - inputStream.available()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); assertEquals(skipped, n); bytesRead = inputStream.read(buffer); assertEquals(buffer.length, bytesRead); assertArrayEquals(expected4, buffer); - assertEquals(buffer.length + skipped + currentPosition, - inputStream.getPos()); - assertEquals(testFileLength - inputStream.getPos(), - inputStream.available()); + assertEquals(buffer.length + skipped + currentPosition, inputStream.getPos()); + assertEquals(testFileLength - inputStream.getPos(), inputStream.available()); } } @@ -433,21 +419,22 @@ public void testSequentialReadAfterReverseSeekPerformance() (long) afterSeekElapsedMs, ratio))); } - assertTrue(String.format( + assertTrue( + ratio < maxAcceptableRatio, String.format( "Performance of ABFS stream after reverse seek is not acceptable:" + " beforeSeekElapsedMs=%1$d, afterSeekElapsedMs=%2$d," + " ratio=%3$.2f", (long) beforeSeekElapsedMs, (long) afterSeekElapsedMs, - ratio), - ratio < maxAcceptableRatio); + ratio)); } @Test - @Ignore("HADOOP-16915") + @Disabled("HADOOP-16915") public void testRandomReadPerformance() throws Exception { - 
Assume.assumeFalse("This test does not support namespace enabled account", - getIsNamespaceEnabled(getFileSystem())); + assumeThat(getIsNamespaceEnabled(getFileSystem())) + .as("This test does not support namespace enabled account") + .isFalse(); Path testPath = path(TEST_FILE_PREFIX + "_testRandomReadPerformance"); assumeHugeFileExists(testPath); @@ -470,13 +457,13 @@ public void testRandomReadPerformance() throws Exception { (long) v2ElapsedMs, ratio)); } - assertTrue(String.format( + assertTrue( + ratio < maxAcceptableRatio, String.format( "Performance of version 2 is not acceptable: v1ElapsedMs=%1$d," + " v2ElapsedMs=%2$d, ratio=%3$.2f", (long) v1ElapsedMs, (long) v2ElapsedMs, - ratio), - ratio < maxAcceptableRatio); + ratio)); } /** @@ -716,7 +703,7 @@ private long assumeHugeFileExists(Path testPath) throws Exception{ ContractTestUtils.assertPathExists(this.getFileSystem(), "huge file not created", testPath); FileStatus status = fs.getFileStatus(testPath); ContractTestUtils.assertIsFile(testPath, status); - assertTrue("File " + testPath + " is not of expected size " + fileSize + ":actual=" + status.getLen(), status.getLen() == fileSize); + assertTrue(status.getLen() == fileSize, "File " + testPath + " is not of expected size " + fileSize + ":actual=" + status.getLen()); return fileSize; } @@ -726,12 +713,12 @@ private void verifyConsistentReads(FSDataInputStream inputStreamV1, byte[] bufferV2) throws IOException { int size = bufferV1.length; final int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, size); - assertEquals("Bytes read from wasb stream", size, numBytesReadV1); + assertEquals(size, numBytesReadV1, "Bytes read from wasb stream"); final int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, size); - assertEquals("Bytes read from abfs stream", size, numBytesReadV2); + assertEquals(size, numBytesReadV2, "Bytes read from abfs stream"); - assertArrayEquals("Mismatch in read data", bufferV1, bufferV2); + assertArrayEquals(bufferV1, bufferV2, "Mismatch in read data"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java index 14f2a14b2b64d..e063f71e8c2da 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRename.java @@ -33,7 +33,7 @@ import java.util.concurrent.atomic.AtomicInteger; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.Mockito; import org.mockito.stubbing.Answer; @@ -160,10 +160,9 @@ public void testRenameFileUnderDir() throws Exception { Path destDir = path("/testDst"); assertRenameOutcome(fs, sourceDir, destDir, true); FileStatus[] fileStatus = fs.listStatus(destDir); - assertNotNull("Null file status", fileStatus); + assertNotNull(fileStatus, "Null file status"); FileStatus status = fileStatus[0]; - assertEquals("Wrong filename in " + status, - filename, status.getPath().getName()); + assertEquals(filename, status.getPath().getName(), "Wrong filename in " + status); } @Test @@ -210,7 +209,7 @@ public Void call() throws Exception { assertRenameOutcome(fs, source, dest, true); FileStatus[] files = fs.listStatus(dest); - assertEquals("Wrong number of files in listing", 1000, files.length); + assertEquals(1000, 
files.length, "Wrong number of files in listing"); assertPathDoesNotExist(fs, "rename source dir", source); } @@ -260,8 +259,8 @@ public void testRenameWithNoDestinationParentDir() throws Exception { // Verify that renaming on a destination with no parent dir wasn't // successful. - assertFalse("Rename result expected to be false with no Parent dir", - fs.rename(sourcePath, destPath)); + assertFalse( + fs.rename(sourcePath, destPath), "Rename result expected to be false with no Parent dir"); // Verify that metadata was in an incomplete state after the rename // failure, and we retired the rename once more. @@ -513,9 +512,7 @@ public void testRenamePendingJsonIsRemovedPostSuccessfulRename() Mockito.any(TracingContext.class)); assertTrue(fs.rename(new Path("hbase/test1/test2/test3"), new Path("hbase/test4"))); - assertEquals("RenamePendingJson should be deleted", - 1, - (int) correctDeletePathCount[0]); + assertEquals(1, (int) correctDeletePathCount[0], "RenamePendingJson should be deleted"); } /** diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameRecovery.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameRecovery.java index e4d9f826000c4..49a4da19182b0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameRecovery.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameRecovery.java @@ -23,7 +23,7 @@ import java.util.concurrent.atomic.AtomicInteger; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java index 589ca5415fc80..167bb808fbd1d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemRenameUnicode.java @@ -20,12 +20,12 @@ import java.util.Arrays; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile; @@ -37,20 +37,17 @@ /** * Parameterized test of rename operations of unicode paths. 
*/ -@RunWith(Parameterized.class) +@ParameterizedClass +@MethodSource("params") public class ITestAzureBlobFileSystemRenameUnicode extends AbstractAbfsIntegrationTest { - @Parameterized.Parameter public String srcDir; - @Parameterized.Parameter(1) public String destDir; - @Parameterized.Parameter(2) public String filename; - @Parameterized.Parameters public static Iterable params() { return Arrays.asList( new Object[][]{ @@ -66,7 +63,11 @@ public static Iterable params() { }); } - public ITestAzureBlobFileSystemRenameUnicode() throws Exception { + public ITestAzureBlobFileSystemRenameUnicode(String pSrcDir, + String pDestDir, String pFilename) throws Exception { + this.srcDir = pSrcDir; + this.destDir = pDestDir; + this.filename = pFilename; } /** @@ -92,9 +93,8 @@ public void testRenameFileUsingUnicode() throws Exception { FileStatus[] fileStatus = fs.listStatus(folderPath2); assertNotNull(fileStatus); - assertTrue( - "Empty listing returned from listStatus(\"" + folderPath2 + "\")", - fileStatus.length > 0); + assertTrue(fileStatus.length > 0, + "Empty listing returned from listStatus(\"" + folderPath2 + "\")"); assertEquals(fileStatus[0].getPath().getName(), filename); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemStoreListStatusWithRange.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemStoreListStatusWithRange.java index ef7f1565df73f..05a1a06f0f428 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemStoreListStatusWithRange.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFileSystemStoreListStatusWithRange.java @@ -20,19 +20,20 @@ import java.io.IOException; import java.util.Arrays; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; /** * Test AzureBlobFileSystemStore listStatus with startFrom. 
* */ -@RunWith(Parameterized.class) +@ParameterizedClass(name="Testing path {0}, startFrom: {1}, Expecting result : {3}") +@MethodSource("params") public class ITestAzureBlobFileSystemStoreListStatusWithRange extends AbstractAbfsIntegrationTest { private static final boolean SUCCEED = true; @@ -42,22 +43,17 @@ public class ITestAzureBlobFileSystemStoreListStatusWithRange extends private AzureBlobFileSystemStore store; private AzureBlobFileSystem fs; - @Parameterized.Parameter public String path; /** * A valid startFrom for listFileStatus with range is a non-fully qualified dir/file name * */ - @Parameterized.Parameter(1) public String startFrom; - @Parameterized.Parameter(2) public int expectedStartIndexInArray; - @Parameterized.Parameter(3) public boolean expectedResult; - @Parameterized.Parameters(name = "Testing path \"{0}\", startFrom: \"{1}\", Expecting result : {3}") // Test path public static Iterable params() { return Arrays.asList( new Object[][]{ @@ -92,8 +88,15 @@ public static Iterable params() { }); } - public ITestAzureBlobFileSystemStoreListStatusWithRange() throws Exception { + public ITestAzureBlobFileSystemStoreListStatusWithRange(String pPath, String pStartFrom, + int pExpectedStartIndexInArray, boolean pExpectedResult) throws Exception { super(); + + this.path = pPath; + this.startFrom = pStartFrom; + this.expectedStartIndexInArray = pExpectedStartIndexInArray; + this.expectedResult = pExpectedResult; + if (this.getFileSystem() == null) { super.createFileSystem(); } @@ -110,12 +113,12 @@ public void testListWithRange() throws IOException { FileStatus[] listResult = store.listStatus(new Path(path), startFrom, getTestTracingContext(fs, true)); if (!expectedResult) { - Assert.fail("Excepting failure with IllegalArgumentException"); + Assertions.fail("Expecting failure with IllegalArgumentException"); } verifyFileStatus(listResult, new Path(path), expectedStartIndexInArray); } catch (IllegalArgumentException ex) { if (expectedResult) { - Assert.fail("Excepting success"); + Assertions.fail("Expecting success"); } } } @@ -123,16 +126,17 @@ public void testListWithRange() throws IOException { // compare the file status private void verifyFileStatus(FileStatus[] listResult, Path parentPath, int startIndexInSortedName) throws IOException { if (startIndexInSortedName == -1) { - Assert.assertEquals("Expected empty FileStatus array", 0, listResult.length); + Assertions.assertEquals(0, listResult.length, "Expected empty FileStatus array"); return; } FileStatus[] allFileStatuses = fs.listStatus(parentPath); - Assert.assertEquals("number of dir/file doesn't match", - SORTED_ENTRY_NAMES.length, allFileStatuses.length); + Assertions.assertEquals(SORTED_ENTRY_NAMES.length, allFileStatuses.length, + "number of dir/file doesn't match"); int indexInResult = 0; for (int index = startIndexInSortedName; index < SORTED_ENTRY_NAMES.length; index++) { - Assert.assertEquals("fileStatus doesn't match", allFileStatuses[index], listResult[indexInResult++]); + Assertions.assertEquals(allFileStatuses[index], listResult[indexInResult++], + "fileStatus doesn't match"); } } @@ -141,7 +145,7 @@ private void prepareTestFiles() throws IOException { // created 2 level file structures for (String levelOneFolder : SORTED_ENTRY_NAMES) { Path levelOnePath = new Path("/" + levelOneFolder); - Assert.assertTrue(fs.mkdirs(levelOnePath)); + Assertions.assertTrue(fs.mkdirs(levelOnePath)); for (String fileName : SORTED_ENTRY_NAMES) { Path filePath = new Path(levelOnePath, fileName); ContractTestUtils.touch(fs,
filePath); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java index 5c07bbd13132c..1abb697962930 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAzureBlobFilesystemAcl.java @@ -25,9 +25,8 @@ import java.util.List; import java.util.UUID; -import org.junit.Assume; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -42,7 +41,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; -import static org.junit.Assume.assumeTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT; @@ -51,6 +50,7 @@ import static org.apache.hadoop.fs.permission.AclEntryType.OTHER; import static org.apache.hadoop.fs.permission.AclEntryType.MASK; import static org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers.aclEntry; +import org.junit.jupiter.api.Assertions; /** * Test acl operations. @@ -111,14 +111,11 @@ public void testModifyAclEntries() throws Exception { AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, READ_EXECUTE), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, READ_EXECUTE), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) RWX_RX); } @@ -140,9 +137,9 @@ public void testModifyAclEntriesOnlyAccess() throws Exception { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, READ_EXECUTE), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{ + aclEntry(ACCESS, USER, FOO, READ_EXECUTE), aclEntry(ACCESS, GROUP, READ_EXECUTE)}, + returned); assertPermission(fs, (short) RWX_RX); } @@ -160,12 +157,10 @@ public void testModifyAclEntriesOnlyDefault() throws Exception { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, READ_EXECUTE), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, 
NONE)}, + returned); assertPermission(fs, (short) RWX_RX); } @@ -181,9 +176,9 @@ public void testModifyAclEntriesMinimal() throws Exception { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, READ_WRITE), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals(new AclEntry[]{ + aclEntry(ACCESS, USER, FOO, READ_WRITE), aclEntry(ACCESS, GROUP, READ)}, + returned); assertPermission(fs, (short) RW_RW); } @@ -200,10 +195,9 @@ public void testModifyAclEntriesMinimalDefault() throws Exception { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) RWX_RX); } @@ -220,9 +214,8 @@ public void testModifyAclEntriesCustomMask() throws Exception { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ)}, returned); assertPermission(fs, (short) RW); } @@ -245,41 +238,41 @@ public void testModifyAclEntriesStickyBit() throws Exception { fs.modifyAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, READ_EXECUTE), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, READ_EXECUTE), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, READ_EXECUTE), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) 01750); } - @Test(expected=FileNotFoundException.class) + @Test public void testModifyAclEntriesPathNotFound() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - // Path has not been created. 
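Reviewer note: every @Test(expected=...) in this file is converted to the same explicit idiom. A minimal sketch follows; the thrown exception here is a placeholder for the operation under test.

import java.io.FileNotFoundException;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

class ExpectedExceptionSketch {
  @Test
  void missingPathIsRejected() {
    // JUnit 4 put the expectation on the annotation:
    //   @Test(expected = FileNotFoundException.class)
    // JUnit 5 asserts it explicitly, and only around the statement expected
    // to throw, so unrelated setup failures are not silently accepted.
    Assertions.assertThrows(FileNotFoundException.class, () -> {
      throw new FileNotFoundException("placeholder for the call under test");
    });
  }
}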
- List aclSpec = Lists.newArrayList( + Assertions.assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + List aclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, FOO, ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, OTHER, NONE)); - fs.modifyAclEntries(path, aclSpec); + fs.modifyAclEntries(path, aclSpec); + }); } - @Test (expected=Exception.class) + @Test public void testModifyAclEntriesDefaultOnFile() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - fs.create(path).close(); - fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); - List aclSpec = Lists.newArrayList( + Assertions.assertThrows(Exception.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); + List aclSpec = Lists.newArrayList( aclEntry(DEFAULT, USER, FOO, ALL)); - fs.modifyAclEntries(path, aclSpec); + fs.modifyAclEntries(path, aclSpec); + }); } @Test @@ -298,10 +291,8 @@ public void testModifyAclEntriesWithDefaultMask() throws Exception { AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, READ_WRITE), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, EXECUTE), + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, READ_WRITE), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, returned); assertPermission(fs, (short) RWX_RX); } @@ -322,25 +313,25 @@ public void testModifyAclEntriesWithAccessMask() throws Exception { AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, GROUP, READ_EXECUTE)}, returned); + assertArrayEquals(new AclEntry[] {aclEntry(ACCESS, GROUP, READ_EXECUTE)}, returned); assertPermission(fs, (short) RW_X); } - @Test(expected=PathIOException.class) + @Test public void testModifyAclEntriesWithDuplicateEntries() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); - List aclSpec = Lists.newArrayList( + Assertions.assertThrows(PathIOException.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); + List aclSpec = Lists.newArrayList( aclEntry(ACCESS, MASK, EXECUTE)); - fs.setAcl(path, aclSpec); - - List modifyAclSpec = Lists.newArrayList( + fs.setAcl(path, aclSpec); + List modifyAclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, READ_WRITE), aclEntry(ACCESS, USER, READ_WRITE)); - fs.modifyAclEntries(path, modifyAclSpec); + fs.modifyAclEntries(path, modifyAclSpec); + }); } @Test @@ -362,12 +353,10 @@ public void testRemoveAclEntries() throws Exception { fs.removeAclEntries(path, aclSpec); AclStatus s = 
fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) RWX_RX); } @@ -390,9 +379,8 @@ public void testRemoveAclEntriesOnlyAccess() throws Exception { fs.removeAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, BAR, READ_WRITE), - aclEntry(ACCESS, GROUP, READ_WRITE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, BAR, READ_WRITE), + aclEntry(ACCESS, GROUP, READ_WRITE)}, returned); assertPermission(fs, (short) RWX_RW); } @@ -414,12 +402,10 @@ public void testRemoveAclEntriesOnlyDefault() throws Exception { fs.removeAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, BAR, READ_EXECUTE), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, BAR, READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) RWX_RX); } @@ -467,10 +453,9 @@ public void testRemoveAclEntriesMinimalDefault() throws Exception { fs.removeAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) RWX_RX); } @@ -493,68 +478,70 @@ public void testRemoveAclEntriesStickyBit() throws Exception { fs.removeAclEntries(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) 01750); } - @Test(expected=FileNotFoundException.class) + @Test public void testRemoveAclEntriesPathNotFound() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - // Path has not been created. 
- List aclSpec = Lists.newArrayList( + Assertions.assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + List aclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, FOO)); - fs.removeAclEntries(path, aclSpec); + fs.removeAclEntries(path, aclSpec); + }); } - @Test(expected=PathIOException.class) + @Test public void testRemoveAclEntriesAccessMask() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); - List aclSpec = Lists.newArrayList( + Assertions.assertThrows(PathIOException.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); + List aclSpec = Lists.newArrayList( aclEntry(ACCESS, MASK, EXECUTE), aclEntry(ACCESS, USER, FOO, ALL)); - fs.setAcl(path, aclSpec); - - fs.removeAclEntries(path, Lists.newArrayList(aclEntry(ACCESS, MASK, NONE))); + fs.setAcl(path, aclSpec); + fs.removeAclEntries(path, Lists.newArrayList(aclEntry(ACCESS, MASK, NONE))); + }); } - @Test(expected=PathIOException.class) + @Test public void testRemoveAclEntriesDefaultMask() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); - List aclSpec = Lists.newArrayList( + Assertions.assertThrows(PathIOException.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); + List aclSpec = Lists.newArrayList( aclEntry(DEFAULT, MASK, EXECUTE), aclEntry(DEFAULT, USER, FOO, ALL)); - fs.setAcl(path, aclSpec); - - fs.removeAclEntries(path, Lists.newArrayList(aclEntry(DEFAULT, MASK, NONE))); + fs.setAcl(path, aclSpec); + fs.removeAclEntries(path, Lists.newArrayList(aclEntry(DEFAULT, MASK, NONE))); + }); } - @Test(expected=PathIOException.class) + @Test public void testRemoveAclEntriesWithDuplicateEntries() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); - List aclSpec = Lists.newArrayList( + Assertions.assertThrows(PathIOException.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); + List aclSpec = Lists.newArrayList( aclEntry(DEFAULT, MASK, EXECUTE)); - fs.setAcl(path, aclSpec); - - List removeAclSpec = Lists.newArrayList( + fs.setAcl(path, aclSpec); + List removeAclSpec = Lists.newArrayList( aclEntry(DEFAULT, USER, READ_WRITE), aclEntry(DEFAULT, USER, READ_WRITE)); - fs.removeAclEntries(path, removeAclSpec); + fs.removeAclEntries(path, removeAclSpec); + }); } @Test @@ -573,9 +560,8 @@ public void testRemoveDefaultAcl() throws Exception 
{ fs.removeDefaultAcl(path); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, returned); assertPermission(fs, (short) RWX_RWX); } @@ -595,9 +581,8 @@ public void testRemoveDefaultAclOnlyAccess() throws Exception { fs.removeDefaultAcl(path); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, returned); assertPermission(fs, (short) RWX_RWX); } @@ -646,20 +631,21 @@ public void testRemoveDefaultAclStickyBit() throws Exception { fs.removeDefaultAcl(path); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, returned); assertPermission(fs, (short) STICKY_RWX_RWX); } - @Test(expected=FileNotFoundException.class) + @Test public void testRemoveDefaultAclPathNotFound() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - // Path has not been created. - fs.removeDefaultAcl(path); - } + Assertions.assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + // Path has not been created. + fs.removeDefaultAcl(path); + }); + } @Test public void testRemoveAcl() throws Exception { @@ -736,14 +722,16 @@ public void testRemoveAclOnlyDefault() throws Exception { assertPermission(fs, (short) RWX_RX); } - @Test(expected=FileNotFoundException.class) + @Test public void testRemoveAclPathNotFound() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - // Path has not been created. - fs.removeAcl(path); - } + Assertions.assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + // Path has not been created. + fs.removeAcl(path); + });
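Reviewer note: in the two conversions above, assumeTrue now runs inside the assertThrows lambda. If the assumption fails there, the resulting aborted-test exception is caught by assertThrows and reported as an unexpected-exception failure rather than a skip. A sketch of a safer shape, with illustrative parameters standing in for the real test state:

import java.io.FileNotFoundException;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.function.Executable;

import static org.junit.jupiter.api.Assumptions.assumeTrue;

class AssumeOutsideAssertThrowsSketch {
  // Both parameters are hypothetical stand-ins, not part of the patch.
  void saferShape(boolean namespaceEnabled, Executable removeAcl) {
    // Evaluate the assumption before entering the lambda, so an aborted test
    // surfaces as a skip instead of an "unexpected exception type" failure.
    assumeTrue(namespaceEnabled);
    Assertions.assertThrows(FileNotFoundException.class, removeAcl);
  }
}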
+} @Test public void testSetAcl() throws Exception { @@ -760,14 +748,11 @@ public void testSetAcl() throws Exception { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) RWX_RWX); } @@ -786,9 +771,8 @@ public void testSetAclOnlyAccess() throws Exception { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, READ), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, READ), + aclEntry(ACCESS, GROUP, READ)}, returned); assertPermission(fs, (short) RW_R); } @@ -803,12 +787,9 @@ public void testSetAclOnlyDefault() throws Exception { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, returned); assertPermission(fs, (short) RWX_RX); } @@ -849,10 +830,9 @@ public void testSetAclMinimalDefault() throws Exception { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) RWX_RX); } @@ -872,9 +852,8 @@ public void testSetAclCustomMask() throws Exception { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, READ), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, READ), + aclEntry(ACCESS, GROUP, READ)}, returned); assertPermission(fs, (short) RW_RWX); } @@ -893,41 +872,41 @@ public void testSetAclStickyBit() throws Exception { fs.setAcl(path, aclSpec); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, 
OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) STICKY_RWX_RWX); } - @Test(expected=FileNotFoundException.class) + @Test public void testSetAclPathNotFound() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - // Path has not been created. - List aclSpec = Lists.newArrayList( + Assertions.assertThrows(FileNotFoundException.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + List aclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, READ_WRITE), aclEntry(ACCESS, USER, FOO, READ), aclEntry(ACCESS, GROUP, READ), aclEntry(ACCESS, OTHER, NONE)); - fs.setAcl(path, aclSpec); + fs.setAcl(path, aclSpec); + }); } - @Test(expected=Exception.class) + @Test public void testSetAclDefaultOnFile() throws Exception { - final AzureBlobFileSystem fs = this.getFileSystem(); - assumeTrue(getIsNamespaceEnabled(fs)); - path = new Path(testRoot, UUID.randomUUID().toString()); - fs.create(path).close(); - fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); - List aclSpec = Lists.newArrayList( + Assertions.assertThrows(Exception.class, () -> { + final AzureBlobFileSystem fs = this.getFileSystem(); + assumeTrue(getIsNamespaceEnabled(fs)); + path = new Path(testRoot, UUID.randomUUID().toString()); + fs.create(path).close(); + fs.setPermission(path, FsPermission.createImmutable((short) RW_R)); + List aclSpec = Lists.newArrayList( aclEntry(DEFAULT, USER, FOO, ALL)); - fs.setAcl(path, aclSpec); + fs.setAcl(path, aclSpec); + }); } @Test @@ -946,24 +925,24 @@ public void testSetAclDoesNotChangeDefaultMask() throws Exception { // get acl status and check result. 
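Reviewer note: the ACL tests below keep JUnit 5's own assumeTrue (imported earlier in this file) rather than the AssertJ assumeThat used elsewhere in the patch; both abort the test instead of failing it. A short sketch of the two styles side by side, with an illustrative flag:

import static org.assertj.core.api.Assumptions.assumeThat;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

class TwoAssumptionStylesSketch {
  void demo(boolean namespaceEnabled) {
    // JUnit 5 built-in: takes a boolean and an optional plain-string message.
    assumeTrue(namespaceEnabled, "requires a namespace-enabled account");
    // AssertJ: fluent, carries the description on the assumption itself.
    assumeThat(namespaceEnabled).as("requires a namespace-enabled account").isTrue();
  }
}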
AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, EXECUTE), + aclEntry(DEFAULT, OTHER, NONE)}, returned); assertPermission(fs, (short) RWX_RX_RX); } - @Test(expected=PathIOException.class) + @Test public void testSetAclWithDuplicateEntries() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); assumeTrue(getIsNamespaceEnabled(fs)); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List<AclEntry> aclSpec = Lists.newArrayList( aclEntry(ACCESS, MASK, EXECUTE), aclEntry(ACCESS, MASK, EXECUTE)); - fs.setAcl(path, aclSpec); + Assertions.assertThrows(PathIOException.class, + () -> fs.setAcl(path, aclSpec)); } @Test @@ -982,14 +961,11 @@ public void testSetPermission() throws Exception { fs.setPermission(path, FsPermission.createImmutable((short) RWX)); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) RWX); } @@ -1009,9 +985,8 @@ public void testSetPermissionOnlyAccess() throws Exception { fs.setPermission(path, FsPermission.createImmutable((short) RW)); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, READ), - aclEntry(ACCESS, GROUP, READ) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, READ), + aclEntry(ACCESS, GROUP, READ)}, returned); assertPermission(fs, (short) RW); } @@ -1030,12 +1005,10 @@ public void testSetPermissionOnlyDefault() throws Exception { fs.setPermission(path, FsPermission.createImmutable((short) RWX)); AclStatus s = fs.getAclStatus(path); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, (short) RWX); } @@ -1043,7 +1016,7 @@ public void
testSetPermissionOnlyDefault() throws Exception { public void testDefaultAclNewFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); assumeTrue(getIsNamespaceEnabled(fs)); - Assume.assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); + assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1053,14 +1026,13 @@ public void testDefaultAclNewFile() throws Exception { fs.create(filePath).close(); AclStatus s = fs.getAclStatus(filePath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, returned); assertPermission(fs, filePath, (short) RW_R); } @Test - @Ignore // wait umask fix to be deployed + @Disabled // wait umask fix to be deployed public void testOnlyAccessAclNewFile() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); assumeTrue(getIsNamespaceEnabled(fs)); @@ -1111,14 +1083,11 @@ public void testDefaultAclNewDir() throws Exception { AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, dirPath, (short) RWX_RWX); } @@ -1126,7 +1095,7 @@ public void testDefaultAclNewDir() throws Exception { public void testOnlyAccessAclNewDir() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); assumeTrue(getIsNamespaceEnabled(fs)); - Assume.assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); + assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX)); List aclSpec = Lists.newArrayList( @@ -1155,10 +1124,9 @@ public void testDefaultMinimalAclNewDir() throws Exception { fs.mkdirs(dirPath); AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, OTHER, NONE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)}, + returned); assertPermission(fs, dirPath, (short) RWX_RX); } @@ -1166,7 +1134,7 @@ public void testDefaultMinimalAclNewDir() throws Exception { public void testDefaultAclNewFileWithMode() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); assumeTrue(getIsNamespaceEnabled(fs)); - Assume.assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); + assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); path = new 
Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX_RX)); List aclSpec = Lists.newArrayList( @@ -1179,9 +1147,8 @@ public void testDefaultAclNewFileWithMode() throws Exception { .close(); AclStatus s = fs.getAclStatus(filePath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE)}, returned); assertPermission(fs, filePath, (short) RWX_R); } @@ -1189,7 +1156,7 @@ public void testDefaultAclNewFileWithMode() throws Exception { public void testDefaultAclNewDirWithMode() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); assumeTrue(getIsNamespaceEnabled(fs)); - Assume.assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); + assumeTrue(getIngressServiceType() == AbfsServiceType.DFS); path = new Path(testRoot, UUID.randomUUID().toString()); FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) RWX_RX_RX)); List aclSpec = Lists.newArrayList( @@ -1199,14 +1166,11 @@ public void testDefaultAclNewDirWithMode() throws Exception { fs.mkdirs(dirPath, new FsPermission((short) RWX_R)); AclStatus s = fs.getAclStatus(dirPath); AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]); - assertArrayEquals(new AclEntry[] { - aclEntry(ACCESS, USER, FOO, ALL), - aclEntry(ACCESS, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, USER, ALL), - aclEntry(DEFAULT, USER, FOO, ALL), - aclEntry(DEFAULT, GROUP, READ_EXECUTE), - aclEntry(DEFAULT, MASK, ALL), - aclEntry(DEFAULT, OTHER, READ_EXECUTE) }, returned); + assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, FOO, ALL), + aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), + aclEntry(DEFAULT, USER, FOO, ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), + aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, READ_EXECUTE)}, + returned); assertPermission(fs, dirPath, (short) RWX_R); } @@ -1299,7 +1263,7 @@ public void testEnsureAclOperationWorksForRoot() throws Exception { public void testSetOwnerForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); AbfsConfiguration conf = fs.getAbfsStore().getAbfsConfiguration(); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); @@ -1322,7 +1286,7 @@ public void testSetOwnerForNonNamespaceEnabledAccount() throws Exception { @Test public void testSetPermissionForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); @@ -1341,7 +1305,7 @@ public void testSetPermissionForNonNamespaceEnabledAccount() throws Exception { @Test public void testModifyAclEntriesForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1349,7 +1313,7 @@ public void testModifyAclEntriesForNonNamespaceEnabledAccount() throws Exception aclEntry(DEFAULT, GROUP, FOO, ALL), aclEntry(ACCESS, GROUP, 
BAR, ALL)); fs.modifyAclEntries(filePath, aclSpec); - assertFalse("UnsupportedOperationException is expected", false); + Assertions.fail("UnsupportedOperationException is expected"); } catch (UnsupportedOperationException ex) { //no-op } @@ -1358,7 +1322,7 @@ public void testModifyAclEntriesForNonNamespaceEnabledAccount() throws Exception @Test public void testRemoveAclEntriesEntriesForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1366,7 +1330,7 @@ public void testRemoveAclEntriesEntriesForNonNamespaceEnabledAccount() throws Ex aclEntry(DEFAULT, GROUP, FOO, ALL), aclEntry(ACCESS, GROUP, BAR, ALL)); fs.removeAclEntries(filePath, aclSpec); - assertFalse("UnsupportedOperationException is expected", false); + Assertions.fail("UnsupportedOperationException is expected"); } catch (UnsupportedOperationException ex) { //no-op } @@ -1375,12 +1339,12 @@ public void testRemoveAclEntriesEntriesForNonNamespaceEnabledAccount() throws Ex @Test public void testRemoveDefaultAclForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { fs.removeDefaultAcl(filePath); - assertFalse("UnsupportedOperationException is expected", false); + Assertions.fail("UnsupportedOperationException is expected"); } catch (UnsupportedOperationException ex) { //no-op } @@ -1389,12 +1353,12 @@ public void testRemoveDefaultAclForNonNamespaceEnabledAccount() throws Exception @Test public void testRemoveAclForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { fs.removeAcl(filePath); - assertFalse("UnsupportedOperationException is expected", false); + Assertions.fail("UnsupportedOperationException is expected"); } catch (UnsupportedOperationException ex) { //no-op } @@ -1403,7 +1367,7 @@ public void testRemoveAclForNonNamespaceEnabledAccount() throws Exception { @Test public void testSetAclForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath); try { @@ -1411,7 +1375,7 @@ public void testSetAclForNonNamespaceEnabledAccount() throws Exception { aclEntry(DEFAULT, GROUP, FOO, ALL), aclEntry(ACCESS, GROUP, BAR, ALL)); fs.setAcl(filePath, aclSpec); - assertFalse("UnsupportedOperationException is expected", false); + Assertions.fail("UnsupportedOperationException is expected"); } catch (UnsupportedOperationException ex) { //no-op } @@ -1420,12 +1384,12 @@ public void testSetAclForNonNamespaceEnabledAccount() throws Exception { @Test public void testGetAclStatusForNonNamespaceEnabledAccount() throws Exception { final AzureBlobFileSystem fs = this.getFileSystem(); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeTrue(!getIsNamespaceEnabled(fs)); final Path filePath = new Path(methodName.getMethodName()); fs.create(filePath);
try { AclStatus aclSpec = fs.getAclStatus(filePath); - assertFalse("UnsupportedOperationException is expected", false); + Assertions.fail("UnsupportedOperationException is expected"); } catch (UnsupportedOperationException ex) { //no-op } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestClientUrlScheme.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestClientUrlScheme.java index 44665f50c11fc..e4b6a45cca91a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestClientUrlScheme.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestClientUrlScheme.java @@ -22,11 +22,10 @@ import java.net.URL; import java.util.Arrays; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException; @@ -37,21 +36,19 @@ import org.apache.hadoop.fs.azurebfs.services.AuthType; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ALWAYS_USE_HTTPS; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Parameterized test of ABFS CLIENT URL scheme verification. */ - -@RunWith(Parameterized.class) +@ParameterizedClass +@MethodSource("params") public class ITestClientUrlScheme extends AbstractAbfsIntegrationTest{ - @Parameterized.Parameter public boolean useSecureScheme; - @Parameterized.Parameter(1) public boolean alwaysUseHttps; - @Parameterized.Parameters public static Iterable<Object[]> params() { return Arrays.asList( new Object[][]{ @@ -62,11 +59,15 @@ public static Iterable<Object[]> params() { }); } - public ITestClientUrlScheme() throws Exception { + public ITestClientUrlScheme(boolean pUseSecureScheme, + boolean pAlwaysUseHttps) throws Exception { super(); + this.useSecureScheme = pUseSecureScheme; + this.alwaysUseHttps = pAlwaysUseHttps; // authentication like OAUTH must use HTTPS - Assume.assumeTrue("ITestClientUrlScheme is skipped because auth type is not SharedKey", - getAuthType() == AuthType.SharedKey); + assumeThat(getAuthType()) + .as("ITestClientUrlScheme is skipped because auth type is not SharedKey") + .isEqualTo(AuthType.SharedKey); } @Test @@ -110,9 +111,9 @@ public void testClientUrlScheme() throws Exception { String url = ((URL) baseUrlField.get(client)).toString(); if (expectHttpConnection) { - Assert.assertTrue(url.startsWith(FileSystemUriSchemes.HTTP_SCHEME)); + Assertions.assertTrue(url.startsWith(FileSystemUriSchemes.HTTP_SCHEME)); } else { - Assert.assertTrue(url.startsWith(FileSystemUriSchemes.HTTPS_SCHEME)); + Assertions.assertTrue(url.startsWith(FileSystemUriSchemes.HTTPS_SCHEME)); } } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemInitialization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemInitialization.java index f7d4a5b7a83e7..24d4edadf400f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemInitialization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemInitialization.java @@ -21,7 +21,7 @@ import java.net.URI; import
org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -34,7 +34,6 @@ import static org.apache.hadoop.fs.CommonPathCapabilities.ETAGS_PRESERVED_IN_RENAME; import static org.apache.hadoop.fs.CommonPathCapabilities.FS_ACLS; import static org.apache.hadoop.fs.azurebfs.constants.InternalConstants.CAPABILITY_SAFE_READAHEAD; -import static org.junit.Assume.assumeTrue; /** * Test AzureBlobFileSystem initialization. @@ -52,13 +51,9 @@ public void ensureAzureBlobFileSystemIsInitialized() throws Exception { String scheme = this.getAuthType() == AuthType.SharedKey ? FileSystemUriSchemes.ABFS_SCHEME : FileSystemUriSchemes.ABFS_SECURE_SCHEME; - assertEquals(fs.getUri(), - new URI(scheme, - filesystem + "@" + accountName, - null, - null, - null)); - assertNotNull("working directory", fs.getWorkingDirectory()); + assertEquals(fs.getUri(), new URI(scheme, + filesystem + "@" + accountName, null, null, null)); + assertNotNull(fs.getWorkingDirectory(), "working directory"); } @Test @@ -75,11 +70,8 @@ public void ensureSecureAzureBlobFileSystemIsInitialized() throws Exception { try(SecureAzureBlobFileSystem fs = (SecureAzureBlobFileSystem) FileSystem.newInstance(rawConfig)) { assertEquals(fs.getUri(), new URI(FileSystemUriSchemes.ABFS_SECURE_SCHEME, - filesystem + "@" + accountName, - null, - null, - null)); - assertNotNull("working directory", fs.getWorkingDirectory()); + filesystem + "@" + accountName, null, null, null)); + assertNotNull(fs.getWorkingDirectory(), "working directory"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java index 3f2a4fe98802c..7517b0e0fbd45 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemProperties.java @@ -20,8 +20,7 @@ import java.util.Hashtable; -import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -31,6 +30,8 @@ import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB; +import static org.assertj.core.api.Assertions.assertThat; +import org.junit.jupiter.api.Assertions; /** * Test FileSystemProperties. 
@@ -99,32 +100,34 @@ public void testBase64PathProperties() throws Exception { assertEquals(properties, fetchedProperties); } - @Test (expected = Exception.class) + @Test public void testBase64InvalidFileSystemProperties() throws Exception { - final AzureBlobFileSystem fs = getFileSystem(); - final Hashtable properties = new Hashtable<>(); - properties.put("key", "{ value: valueæ­² }"); - TracingContext tracingContext = getTestTracingContext(fs, true); - fs.getAbfsStore().setFilesystemProperties(properties, tracingContext); - Hashtable fetchedProperties = fs.getAbfsStore() + Assertions.assertThrows(Exception.class, () -> { + final AzureBlobFileSystem fs = getFileSystem(); + final Hashtable properties = new Hashtable<>(); + properties.put("key", "{ value: valueæ­² }"); + TracingContext tracingContext = getTestTracingContext(fs, true); + fs.getAbfsStore().setFilesystemProperties(properties, tracingContext); + Hashtable fetchedProperties = fs.getAbfsStore() .getFilesystemProperties(tracingContext); - - assertEquals(properties, fetchedProperties); + assertEquals(properties, fetchedProperties); + }); } - @Test (expected = Exception.class) + @Test public void testBase64InvalidPathProperties() throws Exception { - final AzureBlobFileSystem fs = getFileSystem(); - final Hashtable properties = new Hashtable<>(); - properties.put("key", "{ value: valueTestå…© }"); - Path testPath = path(TEST_PATH); - touch(testPath); - TracingContext tracingContext = getTestTracingContext(fs, true); - fs.getAbfsStore().setPathProperties(testPath, properties, tracingContext); - Hashtable fetchedProperties = fs.getAbfsStore() + Assertions.assertThrows(Exception.class, () -> { + final AzureBlobFileSystem fs = getFileSystem(); + final Hashtable properties = new Hashtable<>(); + properties.put("key", "{ value: valueTestå…© }"); + Path testPath = path(TEST_PATH); + touch(testPath); + TracingContext tracingContext = getTestTracingContext(fs, true); + fs.getAbfsStore().setPathProperties(testPath, properties, tracingContext); + Hashtable fetchedProperties = fs.getAbfsStore() .getPathStatus(testPath, tracingContext); - - assertEquals(properties, fetchedProperties); + assertEquals(properties, fetchedProperties); + }); } @Test @@ -157,10 +160,10 @@ public void testBufferSizeSet() throws Exception { = (AbfsInputStream) inputStream.getWrappedStream(); int actualBufferSize = abfsInputStream.getBufferSize(); - Assertions.assertThat(actualBufferSize) + assertThat(actualBufferSize) .describedAs("Buffer size should be set to the value in the configuration") .isEqualTo(bufferSizeConfig); - Assertions.assertThat(actualBufferSize) + assertThat(actualBufferSize) .describedAs("Buffer size should not be set to the value passed as argument") .isNotEqualTo(bufferSizeArg); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemRegistration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemRegistration.java index 4393bd82b1161..8e8b16362c8fd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemRegistration.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestFileSystemRegistration.java @@ -20,7 +20,7 @@ import java.net.URI; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -43,8 +43,8 @@ public ITestFileSystemRegistration() throws Exception { private void 
assertConfigMatches(Configuration conf, String key, String expected) { String v = conf.get(key); - assertNotNull("No value for key " + key, v); - assertEquals("Wrong value for key " + key, expected, v); + assertNotNull(v, "No value for key " + key); + assertEquals(expected, v, "Wrong value for key " + key); } @Test @@ -79,14 +79,14 @@ public void testSecureAbfsFileContextRegistered() throws Throwable { public void ensureAzureBlobFileSystemIsDefaultFileSystem() throws Exception { Configuration rawConfig = getRawConfiguration(); AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.get(rawConfig); - assertNotNull("filesystem", fs); + assertNotNull(fs, "filesystem"); if (this.getAuthType() == AuthType.OAuth) { Abfss afs = (Abfss) FileContext.getFileContext(rawConfig).getDefaultFileSystem(); - assertNotNull("filecontext", afs); + assertNotNull(afs, "filecontext"); } else { Abfs afs = (Abfs) FileContext.getFileContext(rawConfig).getDefaultFileSystem(); - assertNotNull("filecontext", afs); + assertNotNull(afs, "filecontext"); } } @@ -106,8 +106,8 @@ public void ensureSecureAzureBlobFileSystemIsDefaultFileSystem() throws Exceptio defaultUri.toString()); SecureAzureBlobFileSystem fs = (SecureAzureBlobFileSystem) FileSystem.get(rawConfig); - assertNotNull("filesystem", fs); + assertNotNull(fs, "filesystem"); Abfss afs = (Abfss) FileContext.getFileContext(rawConfig).getDefaultFileSystem(); - assertNotNull("filecontext", afs); + assertNotNull(afs, "filecontext"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java index 78cd6bd9d6ac8..f02086316a731 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestGetNameSpaceEnabled.java @@ -21,8 +21,7 @@ import java.io.IOException; import java.util.UUID; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.assertj.core.api.Assertions; import org.mockito.Mockito; @@ -63,6 +62,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_IS_HNS_ENABLED; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test getIsNamespaceEnabled call. 
@@ -81,26 +81,29 @@ public ITestGetNameSpaceEnabled() throws Exception { @Test public void testXNSAccount() throws IOException { - Assume.assumeTrue("Skip this test because the account being used for test is a non XNS account", - isUsingXNSAccount); - assertTrue("Expecting getIsNamespaceEnabled() return true", - getIsNamespaceEnabled(getFileSystem())); + assumeThat(isUsingXNSAccount) + .as("Skip this test because the account being used for test is a non XNS account") + .isTrue(); + assertTrue( + getIsNamespaceEnabled(getFileSystem()), "Expecting getIsNamespaceEnabled() return true"); } @Test public void testNonXNSAccount() throws IOException { assumeValidTestConfigPresent(getRawConfiguration(), FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT); - Assume.assumeFalse("Skip this test because the account being used for test is a XNS account", - isUsingXNSAccount); - assertFalse("Expecting getIsNamespaceEnabled() return false", - getIsNamespaceEnabled(getFileSystem())); + assumeThat(isUsingXNSAccount) + .as("Skip this test because the account being used for test is a XNS account") + .isFalse(); + assertFalse( + getIsNamespaceEnabled(getFileSystem()), "Expecting getIsNamespaceEnabled() return false"); } @Test public void testGetIsNamespaceEnabledWhenConfigIsTrue() throws Exception { assumeValidTestConfigPresent(getRawConfiguration(), FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT); - Assume.assumeTrue("Blob Endpoint Does not Allow FS init on HNS Account", - getAbfsServiceType() == AbfsServiceType.DFS); + assumeThat(getAbfsServiceType()) + .as("Blob Endpoint Does not Allow FS init on HNS Account") + .isEqualTo(AbfsServiceType.DFS); AzureBlobFileSystem fs = getNewFSWithHnsConf(TRUE_STR); Assertions.assertThat(getIsNamespaceEnabled(fs)).describedAs( "getIsNamespaceEnabled should return true when the " diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestOauthOverAbfsScheme.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestOauthOverAbfsScheme.java index 2c80ce85f4e77..1b5de6260f2cd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestOauthOverAbfsScheme.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestOauthOverAbfsScheme.java @@ -20,8 +20,7 @@ import java.lang.reflect.Field; import java.net.URL; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes; @@ -29,14 +28,17 @@ import org.apache.hadoop.fs.azurebfs.services.AuthType; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import static org.assertj.core.api.Assumptions.assumeThat; + /** * Test Oauth fail fast when uri scheme is incorrect. 
*/ public class ITestOauthOverAbfsScheme extends AbstractAbfsIntegrationTest { public ITestOauthOverAbfsScheme() throws Exception { - Assume.assumeTrue("ITestOauthOverAbfsScheme is skipped because auth type is not OAuth", - getAuthType() == AuthType.OAuth); + assumeThat(getAuthType()) + .as("ITestOauthOverAbfsScheme is skipped because auth type is not OAuth") + .isEqualTo(AuthType.OAuth); } @Test @@ -56,8 +58,8 @@ public void testOauthOverSchemeAbfs() throws Exception { baseUrlField.setAccessible(true); String url = ((URL) baseUrlField.get(client)).toString(); - Assume.assumeTrue("OAuth authentication over scheme abfs must use HTTPS", - url.startsWith(FileSystemUriSchemes.HTTPS_SCHEME)); - + assumeThat(url) + .as("OAuth authentication over scheme abfs must use HTTPS") + .startsWith(FileSystemUriSchemes.HTTPS_SCHEME); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java index d7c7d655a63ba..bc81b793a230d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSharedKeyAuth.java @@ -19,8 +19,7 @@ import java.io.IOException; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -29,6 +28,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; public class ITestSharedKeyAuth extends AbstractAbfsIntegrationTest { @@ -38,7 +38,7 @@ public ITestSharedKeyAuth() throws Exception { @Test public void testWithWrongSharedKey() throws Exception { - Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey); + assumeThat(this.getAuthType()).isEqualTo(AuthType.SharedKey); Configuration config = this.getRawConfiguration(); config.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSmallWriteOptimization.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSmallWriteOptimization.java index 4b124231d8447..5c7dfc2f5cbaa 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSmallWriteOptimization.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestSmallWriteOptimization.java @@ -25,10 +25,9 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.runners.Parameterized; -import org.junit.runner.RunWith; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -44,6 +43,7 @@ import static org.apache.hadoop.fs.azurebfs.AbfsStatistic.SEND_REQUESTS; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.DEFAULT_AZURE_ENABLE_SMALL_WRITE_OPTIMIZATION; import static
org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_APPENDBLOB_ENABLED; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test combination for small writes with flush and close operations. @@ -66,7 +66,8 @@ * 4. Execute test iterations with asserts on number of store requests made and * validating file content. */ -@RunWith(Parameterized.class) +@ParameterizedClass(name="{0}") +@MethodSource("params") public class ITestSmallWriteOptimization extends AbstractAbfsScaleTest { private static final int ONE_MB = 1024 * 1024; private static final int TWO_MB = 2 * ONE_MB; @@ -75,45 +76,37 @@ public class ITestSmallWriteOptimization extends AbstractAbfsScaleTest { private static final int QUARTER_TEST_BUFFER_SIZE = TWO_MB / 4; private static final int TEST_FLUSH_ITERATION = 2; - @Parameterized.Parameter public String testScenario; - @Parameterized.Parameter(1) public boolean enableSmallWriteOptimization; /** * If true, will initiate close after appends. (That is, no explicit hflush or * hsync calls will be made from client app.) */ - @Parameterized.Parameter(2) public boolean directCloseTest; /** * If non-zero, test file should be created as pre-requisite with this size. */ - @Parameterized.Parameter(3) public Integer startingFileSize; /** * Determines the write sizes to be issued by client app. */ - @Parameterized.Parameter(4) public Integer recurringClientWriteSize; /** * Determines the number of Client writes to make. */ - @Parameterized.Parameter(5) public Integer numOfClientWrites; /** * True, if the small write optimization is supposed to be effective in * the scenario. */ - @Parameterized.Parameter(6) public boolean flushExpectedToBeMergedWithAppend; - @Parameterized.Parameters(name = "{0}") public static Iterable params() { return Arrays.asList( // Parameter Order : @@ -297,9 +290,18 @@ public static Iterable params() { }, }); } - public ITestSmallWriteOptimization() throws Exception { - super(); - } + public ITestSmallWriteOptimization(String pTestScenario, + boolean pEnableSmallWriteOptimization, boolean pDirectCloseTest, + Integer pStartingFileSize, Integer pRecurringClientWriteSize, + Integer pNumOfClientWrites, boolean pFlushExpectedToBeMergedWithAppend) throws Exception { + this.testScenario = pTestScenario; + this.enableSmallWriteOptimization = pEnableSmallWriteOptimization; + this.directCloseTest = pDirectCloseTest; + this.startingFileSize = pStartingFileSize; + this.recurringClientWriteSize = pRecurringClientWriteSize; + this.numOfClientWrites = pNumOfClientWrites; + this.flushExpectedToBeMergedWithAppend = pFlushExpectedToBeMergedWithAppend; + } @Test public void testSmallWriteOptimization() @@ -309,7 +311,7 @@ public void testSmallWriteOptimization() // default. Default settings will be turned on when server support is // available on all store prod regions. if (enableSmallWriteOptimization) { - Assume.assumeTrue(serviceDefaultOptmSettings); + assumeThat(serviceDefaultOptmSettings).isTrue(); } final AzureBlobFileSystem currentfs = this.getFileSystem(); @@ -317,7 +319,7 @@ public void testSmallWriteOptimization() boolean isAppendBlobTestSettingEnabled = (config.get(FS_AZURE_TEST_APPENDBLOB_ENABLED) == "true"); // This optimization doesnt take effect when append blob is on. 
- Assume.assumeFalse(isAppendBlobTestSettingEnabled); + assumeThat(isAppendBlobTestSettingEnabled).isFalse(); config.set(ConfigurationKeys.AZURE_WRITE_BUFFER_SIZE, Integer.toString(TEST_BUFFER_SIZE)); config.set(ConfigurationKeys.AZURE_ENABLE_SMALL_WRITE_OPTIMIZATION, Boolean.toString(enableSmallWriteOptimization)); @@ -503,8 +505,7 @@ private void validateStoreAppends(AzureBlobFileSystem fs, byte[] fileReadFromStore = new byte[totalFileSize]; fs.open(testPath).read(fileReadFromStore, 0, totalFileSize); - assertArrayEquals("Test file content incorrect", bufferWritten, - fileReadFromStore); + assertArrayEquals(bufferWritten, fileReadFromStore, "Test file content incorrect"); } private void assertOpStats(Map metricMap, diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java index 6f7d37d992951..8fe6152e2d64f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestWasbAbfsCompatibility.java @@ -25,8 +25,7 @@ import java.util.UUID; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,6 +46,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertMkdirs; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertPathExists; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test compatibility between ABFS client and WASB client. 
@@ -68,7 +68,7 @@ public class ITestWasbAbfsCompatibility extends AbstractAbfsIntegrationTest { LoggerFactory.getLogger(ITestWasbAbfsCompatibility.class); public ITestWasbAbfsCompatibility() throws Exception { - Assume.assumeFalse("Emulator is not supported", isIPAddress()); + assumeThat(isIPAddress()).as("Emulator is not supported").isFalse(); } @Test @@ -76,9 +76,10 @@ public void testListFileStatus() throws Exception { // create file using abfs AzureBlobFileSystem fs = getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse("Namespace enabled account does not support this test,", - getIsNamespaceEnabled(fs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(fs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); @@ -115,9 +116,10 @@ public void testReadFile() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); @@ -140,8 +142,7 @@ public void testReadFile() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(readFs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + readFs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + readFs); } // Remove file @@ -156,10 +157,11 @@ public void testReadFile() throws Exception { @Test public void testwriteFile() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -177,8 +179,7 @@ public void testwriteFile() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } try (FSDataOutputStream abfsOutputStream = abfs.append(path)) { abfsOutputStream.write(TEST_CONTEXT.getBytes()); @@ -197,10 +198,11 @@ public void testwriteFile1() throws Exception { @Test public void testwriteFile1() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse();
NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -237,9 +239,10 @@ public void testwriteFile1() throws Exception { @Test public void testazcopywasbcompatibility() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); Path testFile = path("/testReadFile"); Path path = new Path(testFile + "/~12/!008/testfile_" + UUID.randomUUID()); @@ -262,8 +265,9 @@ public void testDir() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); @@ -301,8 +305,9 @@ public void testSetWorkingDirectory() throws Exception { //create folders AzureBlobFileSystem abfs = getFileSystem(); // test only valid for non-namespace enabled account - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); @@ -336,8 +341,9 @@ public void testSetWorkingDirectory() throws Exception { @Test public void testScenario1() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -355,8 +361,7 @@ public void testScenario1() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // Remove file @@ -370,10 +375,11 @@ public void testScenario1() throws Exception { @Test public void testScenario2() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -391,8 +397,7 @@ public void testScenario2() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // Write @@ -413,9 +418,10 @@ public void testScenario2() throws Exception { @Test public void 
testScenario3() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -434,8 +440,7 @@ public void testScenario3() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(wasb.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + wasb, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + wasb); } // Remove file assertDeleted(abfs, path, true); @@ -448,10 +453,11 @@ public void testScenario3() throws Exception { @Test public void testScenario4() throws Exception { AzureBlobFileSystem abfs = getFileSystem(); - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -487,10 +493,11 @@ public void testScenario5() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, false); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -510,8 +517,7 @@ public void testScenario5() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // Remove file @@ -528,9 +534,10 @@ public void testScenario6() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); assumeBlobServiceType(); NativeAzureFileSystem wasb = getWasbFileSystem(); @@ -551,8 +558,7 @@ public void testScenario6() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong 
text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // Remove file @@ -569,9 +575,10 @@ public void testScenario7() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -589,8 +596,7 @@ public void testScenario7() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } abfs.create(path, true); FileStatus fileStatus = abfs.getFileStatus(path); @@ -612,9 +618,8 @@ public void testScenario8() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)).isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -632,8 +637,7 @@ public void testScenario8() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } try { abfs.create(path, false); @@ -663,10 +667,11 @@ public void testScenario9() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -684,8 +689,7 @@ public void testScenario9() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } wasb.create(path, true); FileStatus fileStatus = abfs.getFileStatus(path); @@ -708,8 +712,9 @@ public void testScenario10() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); 
AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -727,8 +732,7 @@ public void testScenario10() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } try { wasb.create(path, false); @@ -759,10 +763,11 @@ public void testScenario11() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -782,8 +787,7 @@ public void testScenario11() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } abfs.delete(path, true); } @@ -799,8 +803,9 @@ public void testScenario12() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -819,8 +824,7 @@ public void testScenario12() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } wasb.delete(path, true); } @@ -835,10 +839,11 @@ public void testScenario13() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -858,8 +863,7 @@ public void testScenario13() throws Exception { try (BufferedReader br = 
new BufferedReader( new InputStreamReader(wasb.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + wasb, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + wasb); } abfs.delete(path, true); } @@ -874,10 +878,11 @@ public void testScenario14() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -897,8 +902,7 @@ public void testScenario14() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(wasb.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + wasb, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + wasb); } wasb.delete(path, true); } @@ -913,8 +917,9 @@ public void testScenario15() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -933,8 +938,7 @@ public void testScenario15() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(wasb.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + wasb, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + wasb); } abfs.delete(path, true); } @@ -949,10 +953,11 @@ public void testScenario16() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -972,8 +977,7 @@ public void testScenario16() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(path)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } wasb.delete(path, true); } @@ -988,8 +992,9 @@ public void testScenario17() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = 
(AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); Path testFile = path("/testReadFile"); Path path = new Path(testFile + "/~12/!008/testfile_" + UUID.randomUUID()); @@ -1030,8 +1035,9 @@ public void testScenario18() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1074,8 +1080,9 @@ public void testScenario19() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1120,9 +1127,10 @@ public void testScenario20() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1172,10 +1180,11 @@ public void testScenario21() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1225,8 +1234,9 @@ public void testScenario22() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1276,8 
+1286,9 @@ public void testScenario23() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1321,9 +1332,10 @@ public void testScenario24() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1368,8 +1380,9 @@ public void testScenario25() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1414,9 +1427,10 @@ public void testScenario26() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1459,9 +1473,10 @@ public void testScenario27() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1481,8 +1496,7 @@ public void testScenario27() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + 
assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME FILE --- boolean renamed = wasb.rename(testPath1, testPath2); @@ -1509,8 +1523,9 @@ public void testScenario28() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1530,8 +1545,7 @@ public void testScenario28() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME FILE --- boolean renamed = abfs.rename(testPath1, testPath2); @@ -1558,10 +1572,11 @@ public void testScenario29() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1582,8 +1597,7 @@ public void testScenario29() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME FILE --- boolean renamed = abfs.rename(testPath1, testPath2); @@ -1610,8 +1624,9 @@ public void testScenario30() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1632,8 +1647,7 @@ public void testScenario30() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME FILE --- boolean renamed = wasb.rename(testPath1, testPath2); @@ -1666,8 +1680,9 @@ public void testScenario31() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - 
getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1687,8 +1702,7 @@ public void testScenario31() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } wasb.delete(testPath1, true); @@ -1709,8 +1723,9 @@ public void testScenario32() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1735,8 +1750,7 @@ public void testScenario32() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME DIR --- boolean renamed = abfs.rename(testFile, testFile1); @@ -1760,9 +1774,10 @@ public void testScenario33() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1787,8 +1802,7 @@ public void testScenario33() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME DIR --- boolean renamed = wasb.rename(testFile, testFile1); @@ -1812,8 +1826,9 @@ public void testScenario34() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1836,8 +1851,7 @@ public void testScenario34() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } 
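The scenario tests above repeat two mechanical rewrites: JUnit 4's Assume.assumeFalse(message, condition) becomes AssertJ's assumeThat(condition).as(message).isFalse(), and JUnit 4's assertEquals(message, expected, actual) becomes JUnit 5's assertEquals(expected, actual, message), with the message moved to the last parameter. A minimal before/after sketch of the pattern, not part of the patch itself; the class, method, and variable names here are illustrative only:

    import static org.assertj.core.api.Assumptions.assumeThat;
    import static org.junit.jupiter.api.Assertions.assertEquals;

    class MigrationPatternSketch {
      // Before (JUnit 4): the message comes first.
      //   Assume.assumeFalse("Namespace enabled account does not support this test",
      //       namespaceEnabled);
      //   Assert.assertEquals("Wrong text from " + fs, expected, actual);

      // After (JUnit 5 + AssertJ): condition first, description attached via as(),
      // and the assertion message moved to the trailing parameter.
      void checkReadBack(boolean namespaceEnabled, String expected,
          String actual, Object fs) {
        assumeThat(namespaceEnabled)
            .as("Namespace enabled account does not support this test")
            .isFalse();
        assertEquals(expected, actual, "Wrong text from " + fs);
      }
    }

AssertJ's Assumptions throws whichever skip exception matches the framework on the classpath (org.opentest4j.TestAbortedException under JUnit 5), so a failed assumption still reports the test as skipped rather than failed.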
// --- RENAME DIR --- boolean renamed = wasb.rename(testPath1, testPath2); @@ -1861,8 +1875,9 @@ public void testScenario35() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1885,8 +1900,7 @@ public void testScenario35() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME DIR --- boolean renamed = abfs.rename(testPath1, testPath2); @@ -1911,8 +1925,9 @@ public void testScenario36() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1934,8 +1949,7 @@ public void testScenario36() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME DIR --- boolean renamed = abfs.rename(testFile, testFile); @@ -1954,8 +1968,9 @@ public void testScenario37() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -1978,8 +1993,7 @@ public void testScenario37() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME NON EXISTENT FILE --- boolean renamed = wasb.rename(testPath2, testPath3); @@ -1998,8 +2012,9 @@ public void testScenario38() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); Path testFile = path("/testReadFile"); @@ -2047,8 
+2062,9 @@ public void testScenario39() throws Exception { conf.setBoolean(FS_AZURE_ABFS_ENABLE_CHECKSUM_VALIDATION, true); FileSystem fileSystem = FileSystem.newInstance(conf); AzureBlobFileSystem abfs = (AzureBlobFileSystem) fileSystem; - Assume.assumeFalse("Namespace enabled account does not support this test", - getIsNamespaceEnabled(abfs)); + assumeThat(getIsNamespaceEnabled(abfs)) + .as("Namespace enabled account does not support this test") + .isFalse(); NativeAzureFileSystem wasb = getWasbFileSystem(); String testRunId = UUID.randomUUID().toString(); @@ -2073,8 +2089,7 @@ public void testScenario39() throws Exception { try (BufferedReader br = new BufferedReader( new InputStreamReader(abfs.open(testPath1)))) { String line = br.readLine(); - assertEquals("Wrong text from " + abfs, - TEST_CONTEXT, line); + assertEquals(TEST_CONTEXT, line, "Wrong text from " + abfs); } // --- RENAME DIR --- boolean renamed = wasb.rename(testPath1, testPath2); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java index 0b7645bd243ba..6578118d93a20 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java @@ -35,11 +35,12 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_SSL_CHANNEL_MODE_KEY; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.*; +import static org.assertj.core.api.Assertions.assertThat; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.InvalidConfigurationValueException; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; -import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Assertions; /** * Test ConfigurationServiceFieldsValidation. 
@@ -108,15 +109,15 @@ public void testValidateFunctionsInConfigServiceImpl() throws Exception { for (Field field : fields) { field.setAccessible(true); if (field.isAnnotationPresent(IntegerConfigurationValidatorAnnotation.class)) { - Assertions.assertThat(abfsConfiguration.validateInt(field)).isEqualTo(TEST_INT); + assertThat(abfsConfiguration.validateInt(field)).isEqualTo(TEST_INT); } else if (field.isAnnotationPresent(LongConfigurationValidatorAnnotation.class)) { - Assertions.assertThat(abfsConfiguration.validateLong(field)).isEqualTo(DEFAULT_LONG); + assertThat(abfsConfiguration.validateLong(field)).isEqualTo(DEFAULT_LONG); } else if (field.isAnnotationPresent(StringConfigurationValidatorAnnotation.class)) { - Assertions.assertThat(abfsConfiguration.validateString(field)).isEqualTo("stringValue"); + assertThat(abfsConfiguration.validateString(field)).isEqualTo("stringValue"); } else if (field.isAnnotationPresent(Base64StringConfigurationValidatorAnnotation.class)) { - Assertions.assertThat(abfsConfiguration.validateBase64String(field)).isEqualTo(this.encodedString); + assertThat(abfsConfiguration.validateBase64String(field)).isEqualTo(this.encodedString); } else if (field.isAnnotationPresent(BooleanConfigurationValidatorAnnotation.class)) { - Assertions.assertThat(abfsConfiguration.validateBoolean(field)).isEqualTo(true); + assertThat(abfsConfiguration.validateBoolean(field)).isEqualTo(true); } } } @@ -124,37 +125,37 @@ public void testValidateFunctionsInConfigServiceImpl() throws Exception { @Test public void testConfigServiceImplAnnotatedFieldsInitialized() throws Exception { // test that all the ConfigurationServiceImpl annotated fields have been initialized in the constructor - Assertions.assertThat(abfsConfiguration.getWriteBufferSize()) + assertThat(abfsConfiguration.getWriteBufferSize()) .describedAs("Default value of write buffer size should be initialized") .isEqualTo(DEFAULT_WRITE_BUFFER_SIZE); - Assertions.assertThat(abfsConfiguration.getReadBufferSize()) + assertThat(abfsConfiguration.getReadBufferSize()) .describedAs("Default value of read buffer size should be initialized") .isEqualTo(DEFAULT_READ_BUFFER_SIZE); - Assertions.assertThat(abfsConfiguration.getMinBackoffIntervalMilliseconds()) + assertThat(abfsConfiguration.getMinBackoffIntervalMilliseconds()) .describedAs("Default value of min backoff interval should be initialized") .isEqualTo(DEFAULT_MIN_BACKOFF_INTERVAL); - Assertions.assertThat(abfsConfiguration.getMaxBackoffIntervalMilliseconds()) + assertThat(abfsConfiguration.getMaxBackoffIntervalMilliseconds()) .describedAs("Default value of max backoff interval should be initialized") .isEqualTo(DEFAULT_MAX_BACKOFF_INTERVAL); - Assertions.assertThat(abfsConfiguration.getBackoffIntervalMilliseconds()) + assertThat(abfsConfiguration.getBackoffIntervalMilliseconds()) .describedAs("Default value of backoff interval should be initialized") .isEqualTo(DEFAULT_BACKOFF_INTERVAL); - Assertions.assertThat(abfsConfiguration.getMaxIoRetries()) + assertThat(abfsConfiguration.getMaxIoRetries()) .describedAs("Default value of max number of retries should be initialized") .isEqualTo(DEFAULT_MAX_RETRY_ATTEMPTS); - Assertions.assertThat(abfsConfiguration.getAzureBlockSize()) + assertThat(abfsConfiguration.getAzureBlockSize()) .describedAs("Default value of azure block size should be initialized") .isEqualTo(MAX_AZURE_BLOCK_SIZE); - Assertions.assertThat(abfsConfiguration.getAzureBlockLocationHost()) + assertThat(abfsConfiguration.getAzureBlockLocationHost()) .describedAs("Default 
value of azure block location host should be initialized") .isEqualTo(AZURE_BLOCK_LOCATION_HOST_DEFAULT); - Assertions.assertThat(abfsConfiguration.getReadAheadRange()) + assertThat(abfsConfiguration.getReadAheadRange()) .describedAs("Default value of read ahead range should be initialized") .isEqualTo(DEFAULT_READ_AHEAD_RANGE); - Assertions.assertThat(abfsConfiguration.getHttpConnectionTimeout()) + assertThat(abfsConfiguration.getHttpConnectionTimeout()) .describedAs("Default value of http connection timeout should be initialized") .isEqualTo(DEFAULT_HTTP_CONNECTION_TIMEOUT); - Assertions.assertThat(abfsConfiguration.getHttpReadTimeout()) + assertThat(abfsConfiguration.getHttpReadTimeout()) .describedAs("Default value of http read timeout should be initialized") .isEqualTo(DEFAULT_HTTP_READ_TIMEOUT); } @@ -162,7 +163,7 @@ public void testConfigServiceImplAnnotatedFieldsInitialized() throws Exception { @Test public void testConfigBlockSizeInitialized() throws Exception { // test the block size annotated field has been initialized in the constructor - Assertions.assertThat(abfsConfiguration.getAzureBlockSize()) + assertThat(abfsConfiguration.getAzureBlockSize()) .describedAs("Default value of max azure block size should be initialized") .isEqualTo(MAX_AZURE_BLOCK_SIZE); } @@ -170,42 +171,44 @@ public void testConfigBlockSizeInitialized() throws Exception { @Test public void testGetAccountKey() throws Exception { String accountKey = abfsConfiguration.getStorageAccountKey(); - Assertions.assertThat(accountKey).describedAs("Account Key should be initialized in configs") + assertThat(accountKey).describedAs("Account Key should be initialized in configs") .isEqualTo(this.encodedAccountKey); } - @Test(expected = KeyProviderException.class) + @Test public void testGetAccountKeyWithNonExistingAccountName() throws Exception { - Configuration configuration = new Configuration(); - configuration.addResource(TestConfigurationKeys.TEST_CONFIGURATION_FILE_NAME); - configuration.unset(ConfigurationKeys.FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME); - AbfsConfiguration abfsConfig = new AbfsConfiguration(configuration, "bogusAccountName"); - abfsConfig.getStorageAccountKey(); + Assertions.assertThrows(KeyProviderException.class, () -> { + Configuration configuration = new Configuration(); + configuration.addResource(TestConfigurationKeys.TEST_CONFIGURATION_FILE_NAME); + configuration.unset(ConfigurationKeys.FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME); + AbfsConfiguration abfsConfig = new AbfsConfiguration(configuration, "bogusAccountName"); + abfsConfig.getStorageAccountKey(); + }); } @Test public void testSSLSocketFactoryConfiguration() throws InvalidConfigurationValueException, IllegalAccessException, IOException { - Assertions.assertThat(abfsConfiguration.getPreferredSSLFactoryOption()) + assertThat(abfsConfiguration.getPreferredSSLFactoryOption()) .describedAs("By default SSL Channel Mode should be Default") .isEqualTo(DelegatingSSLSocketFactory.SSLChannelMode.Default); - Assertions.assertThat(abfsConfiguration.getPreferredSSLFactoryOption()) + assertThat(abfsConfiguration.getPreferredSSLFactoryOption()) .describedAs("By default SSL Channel Mode should be Default") .isNotEqualTo(DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE); - Assertions.assertThat(abfsConfiguration.getPreferredSSLFactoryOption()) + assertThat(abfsConfiguration.getPreferredSSLFactoryOption()) .describedAs("By default SSL Channel Mode should be Default") .isNotEqualTo(DelegatingSSLSocketFactory.SSLChannelMode.OpenSSL); Configuration 
configuration = new Configuration(); configuration.setEnum(FS_AZURE_SSL_CHANNEL_MODE_KEY, DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE); AbfsConfiguration localAbfsConfiguration = new AbfsConfiguration(configuration, accountName); - Assertions.assertThat(localAbfsConfiguration.getPreferredSSLFactoryOption()) + assertThat(localAbfsConfiguration.getPreferredSSLFactoryOption()) .describedAs("SSL Channel Mode should be Default_JSSE as set") .isEqualTo(DelegatingSSLSocketFactory.SSLChannelMode.Default_JSSE); configuration = new Configuration(); configuration.setEnum(FS_AZURE_SSL_CHANNEL_MODE_KEY, DelegatingSSLSocketFactory.SSLChannelMode.OpenSSL); localAbfsConfiguration = new AbfsConfiguration(configuration, accountName); - Assertions.assertThat(localAbfsConfiguration.getPreferredSSLFactoryOption()) + assertThat(localAbfsConfiguration.getPreferredSSLFactoryOption()) .describedAs("SSL Channel Mode should be OpenSSL as set") .isEqualTo(DelegatingSSLSocketFactory.SSLChannelMode.OpenSSL); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsCrc64.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsCrc64.java index ab39750ebf9c9..0d55874948950 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsCrc64.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsCrc64.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.fs.azurebfs; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azurebfs.utils.CRC64; /** @@ -32,7 +32,7 @@ public void tesCrc64Compute() { final String[] testStr = {"#$", "dir_2_ac83abee", "dir_42_976df1f5"}; final String[] expected = {"f91f7e6a837dbfa8", "203f9fefc38ae97b", "cc0d56eafe58a855"}; for (int i = 0; i < testStr.length; i++) { - Assert.assertEquals(expected[i], Long.toHexString(crc64.compute(testStr[i].getBytes()))); + Assertions.assertEquals(expected[i], Long.toHexString(crc64.compute(testStr[i].getBytes()))); } } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java index 22c247f98af63..dd1b59c37a1e9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsInputStreamStatistics.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azurebfs; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamStatisticsImpl; @@ -48,8 +48,8 @@ public void testBytesReadFromBufferStatistic() { * Since we incremented the bytesReadFromBuffer OPERATIONS times, this * should be the expected value. 
*/ - assertEquals("Mismatch in bytesReadFromBuffer value", OPERATIONS, - abfsInputStreamStatistics.getBytesReadFromBuffer()); + assertEquals(OPERATIONS, abfsInputStreamStatistics.getBytesReadFromBuffer(), + "Mismatch in bytesReadFromBuffer value"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java index 628ad30863c9a..d1eefb1940359 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsNetworkStatistics.java @@ -22,7 +22,7 @@ import java.util.Map; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsOutputStreamStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsOutputStreamStatistics.java index 5f9404302bd2c..1752073d322eb 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsOutputStreamStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsOutputStreamStatistics.java @@ -20,7 +20,7 @@ import java.util.Random; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream; import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStreamStatisticsImpl; @@ -50,15 +50,15 @@ public void testAbfsOutputStreamBytesFailed() { new AbfsOutputStreamStatisticsImpl(); //Test for zero bytes uploaded. - assertEquals("Mismatch in number of bytes failed to upload", 0, - abfsOutputStreamStatistics.getBytesUploadFailed()); + assertEquals(0, abfsOutputStreamStatistics.getBytesUploadFailed(), + "Mismatch in number of bytes failed to upload"); //Populating small random value for bytesFailed. int randomBytesFailed = new Random().nextInt(LOW_RANGE_FOR_RANDOM_VALUE); abfsOutputStreamStatistics.uploadFailed(randomBytesFailed); //Test for bytes failed to upload. - assertEquals("Mismatch in number of bytes failed to upload", - randomBytesFailed, abfsOutputStreamStatistics.getBytesUploadFailed()); + assertEquals(randomBytesFailed, abfsOutputStreamStatistics.getBytesUploadFailed(), + "Mismatch in number of bytes failed to upload"); //Reset statistics for the next test. abfsOutputStreamStatistics = new AbfsOutputStreamStatisticsImpl(); @@ -74,8 +74,8 @@ public void testAbfsOutputStreamBytesFailed() { expectedBytesFailed += randomBytesFailed; } //Test for bytes failed to upload. - assertEquals("Mismatch in number of bytes failed to upload", - expectedBytesFailed, abfsOutputStreamStatistics.getBytesUploadFailed()); + assertEquals(expectedBytesFailed, abfsOutputStreamStatistics.getBytesUploadFailed(), + "Mismatch in number of bytes failed to upload"); } /** @@ -91,14 +91,14 @@ public void testAbfsOutputStreamTimeSpentOnWaitTask() { new AbfsOutputStreamStatisticsImpl(); //Test for initial value of timeSpentWaitTask. 
- assertEquals("Mismatch in time spent on waiting for tasks to complete", 0, - abfsOutputStreamStatistics.getTimeSpentOnTaskWait()); + assertEquals(0, abfsOutputStreamStatistics.getTimeSpentOnTaskWait(), + "Mismatch in time spent on waiting for tasks to complete"); abfsOutputStreamStatistics .timeSpentTaskWait(); //Test for one op call value of timeSpentWaitTask. - assertEquals("Mismatch in time spent on waiting for tasks to complete", - 1, abfsOutputStreamStatistics.getTimeSpentOnTaskWait()); + assertEquals(1, abfsOutputStreamStatistics.getTimeSpentOnTaskWait(), + "Mismatch in time spent on waiting for tasks to complete"); //Reset statistics for the next test. abfsOutputStreamStatistics = new AbfsOutputStreamStatisticsImpl(); @@ -115,9 +115,8 @@ public void testAbfsOutputStreamTimeSpentOnWaitTask() { * Test to check correct value of timeSpentTaskWait after OPERATIONS * number of op calls. */ - assertEquals("Mismatch in time spent on waiting for tasks to complete", - OPERATIONS, - abfsOutputStreamStatistics.getTimeSpentOnTaskWait()); + assertEquals(OPERATIONS, abfsOutputStreamStatistics.getTimeSpentOnTaskWait(), + "Mismatch in time spent on waiting for tasks to complete"); } /** @@ -133,14 +132,14 @@ public void testAbfsOutputStreamQueueShrink() { new AbfsOutputStreamStatisticsImpl(); //Test for shrinking queue zero time. - assertEquals("Mismatch in queue shrunk operations", 0, - abfsOutputStreamStatistics.getQueueShrunkOps()); + assertEquals(0, abfsOutputStreamStatistics.getQueueShrunkOps(), + "Mismatch in queue shrunk operations"); abfsOutputStreamStatistics.queueShrunk(); //Test for shrinking queue 1 time. - assertEquals("Mismatch in queue shrunk operations", 1, - abfsOutputStreamStatistics.getQueueShrunkOps()); + assertEquals(1, abfsOutputStreamStatistics.getQueueShrunkOps(), + "Mismatch in queue shrunk operations"); //Reset statistics for the next test. abfsOutputStreamStatistics = new AbfsOutputStreamStatisticsImpl(); @@ -156,8 +155,7 @@ public void testAbfsOutputStreamQueueShrink() { /* * Test for random times incrementing queue shrunk operations. 
*/ - assertEquals("Mismatch in queue shrunk operations", - randomQueueValues * OPERATIONS, - abfsOutputStreamStatistics.getQueueShrunkOps()); + assertEquals(randomQueueValues * OPERATIONS, abfsOutputStreamStatistics.getQueueShrunkOps(), + "Mismatch in queue shrunk operations"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java index f831d2d4cd26b..e559437a1bb3c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsStatistics.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.Map; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azurebfs.services.AbfsCounters; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java index 483a7e3d5d58e..8123a79438faf 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAccountConfiguration.java @@ -39,7 +39,7 @@ import org.apache.hadoop.test.LambdaTestUtils; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_CLIENT_ENDPOINT; @@ -51,8 +51,8 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_OAUTH_USER_PASSWORD; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_TOKEN_PROVIDER_TYPE_PROPERTY_NAME; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_SAS_TOKEN_PROVIDER_TYPE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; /** * Tests correct precedence of various configurations that might be returned. 
@@ -126,24 +126,24 @@ public void testStringPrecedence() conf.set(globalKey, globalValue); abfsConf = new AbfsConfiguration(conf, accountName1); - assertEquals("Wrong value returned when account-specific value was requested", - abfsConf.get(accountKey1), accountValue1); - assertEquals("Account-specific value was not returned when one existed", - abfsConf.get(globalKey), accountValue1); + assertEquals(abfsConf.get(accountKey1), accountValue1, + "Wrong value returned when account-specific value was requested"); + assertEquals(abfsConf.get(globalKey), accountValue1, + "Account-specific value was not returned when one existed"); abfsConf = new AbfsConfiguration(conf, accountName2); - assertEquals("Wrong value returned when a different account-specific value was requested", - abfsConf.get(accountKey1), accountValue1); - assertEquals("Wrong value returned when account-specific value was requested", - abfsConf.get(accountKey2), accountValue2); - assertEquals("Account-agnostic value return even though account-specific value was set", - abfsConf.get(globalKey), accountValue2); + assertEquals(abfsConf.get(accountKey1), accountValue1, + "Wrong value returned when a different account-specific value was requested"); + assertEquals(abfsConf.get(accountKey2), accountValue2, + "Wrong value returned when account-specific value was requested"); + assertEquals(abfsConf.get(globalKey), accountValue2, + "Account-agnostic value returned even though account-specific value was set"); abfsConf = new AbfsConfiguration(conf, accountName3); - assertNull("Account-specific value returned when none was set", - abfsConf.get(accountKey3)); - assertEquals("Account-agnostic value not returned when no account-specific value was set", - abfsConf.get(globalKey), globalValue); + assertNull( + abfsConf.get(accountKey3), "Account-specific value returned when none was set"); + assertEquals(abfsConf.get(globalKey), globalValue, + "Account-agnostic value not returned when no account-specific value was set"); } @Test @@ -170,24 +170,24 @@ public void testPasswordPrecedence() conf.set(globalKey, globalValue); abfsConf = new AbfsConfiguration(conf, accountName1); - assertEquals("Wrong value returned when account-specific value was requested", - abfsConf.getPasswordString(accountKey1), accountValue1); - assertEquals("Account-specific value was not returned when one existed", - abfsConf.getPasswordString(globalKey), accountValue1); + assertEquals(abfsConf.getPasswordString(accountKey1), accountValue1, + "Wrong value returned when account-specific value was requested"); + assertEquals(abfsConf.getPasswordString(globalKey), accountValue1, + "Account-specific value was not returned when one existed"); abfsConf = new AbfsConfiguration(conf, accountName2); - assertEquals("Wrong value returned when a different account-specific value was requested", - abfsConf.getPasswordString(accountKey1), accountValue1); - assertEquals("Wrong value returned when account-specific value was requested", - abfsConf.getPasswordString(accountKey2), accountValue2); - assertEquals("Account-agnostic value return even though account-specific value was set", - abfsConf.getPasswordString(globalKey), accountValue2); + assertEquals(abfsConf.getPasswordString(accountKey1), accountValue1, + "Wrong value returned when a different account-specific value was requested"); + assertEquals(abfsConf.getPasswordString(accountKey2), accountValue2, + "Wrong value returned when account-specific value was requested"); + assertEquals(abfsConf.getPasswordString(globalKey), accountValue2, +
"Account-agnostic value return even though account-specific value was set"); abfsConf = new AbfsConfiguration(conf, accountName3); - assertNull("Account-specific value returned when none was set", - abfsConf.getPasswordString(accountKey3)); - assertEquals("Account-agnostic value not returned when no account-specific value was set", - abfsConf.getPasswordString(globalKey), globalValue); + assertNull(abfsConf.getPasswordString(accountKey3), + "Account-specific value returned when none was set"); + assertEquals(abfsConf.getPasswordString(globalKey), globalValue, + "Account-agnostic value not returned when no account-specific value was set"); } @Test @@ -202,23 +202,23 @@ public void testBooleanPrecedence() final AbfsConfiguration abfsConf = new AbfsConfiguration(conf, accountName); conf.setBoolean(globalKey, false); - assertEquals("Default value returned even though account-agnostic config was set", - abfsConf.getBoolean(globalKey, true), false); + assertEquals(abfsConf.getBoolean(globalKey, true), false, + "Default value returned even though account-agnostic config was set"); conf.unset(globalKey); - assertEquals("Default value not returned even though config was unset", - abfsConf.getBoolean(globalKey, true), true); + assertEquals(abfsConf.getBoolean(globalKey, true), true, + "Default value not returned even though config was unset"); conf.setBoolean(accountKey, false); - assertEquals("Default value returned even though account-specific config was set", - abfsConf.getBoolean(globalKey, true), false); + assertEquals(abfsConf.getBoolean(globalKey, true), false, + "Default value returned even though account-specific config was set"); conf.unset(accountKey); - assertEquals("Default value not returned even though config was unset", - abfsConf.getBoolean(globalKey, true), true); + assertEquals(abfsConf.getBoolean(globalKey, true), true, + "Default value not returned even though config was unset"); conf.setBoolean(accountKey, true); conf.setBoolean(globalKey, false); - assertEquals("Account-agnostic or default value returned even though account-specific config was set", - abfsConf.getBoolean(globalKey, false), true); + assertEquals(abfsConf.getBoolean(globalKey, false), true, + "Account-agnostic or default value returned even though account-specific config was set"); } @Test @@ -233,23 +233,23 @@ public void testLongPrecedence() final AbfsConfiguration abfsConf = new AbfsConfiguration(conf, accountName); conf.setLong(globalKey, 0); - assertEquals("Default value returned even though account-agnostic config was set", - abfsConf.getLong(globalKey, 1), 0); + assertEquals(abfsConf.getLong(globalKey, 1), 0, + "Default value returned even though account-agnostic config was set"); conf.unset(globalKey); - assertEquals("Default value not returned even though config was unset", - abfsConf.getLong(globalKey, 1), 1); + assertEquals(abfsConf.getLong(globalKey, 1), 1, + "Default value not returned even though config was unset"); conf.setLong(accountKey, 0); - assertEquals("Default value returned even though account-specific config was set", - abfsConf.getLong(globalKey, 1), 0); + assertEquals(abfsConf.getLong(globalKey, 1), 0, + "Default value returned even though account-specific config was set"); conf.unset(accountKey); - assertEquals("Default value not returned even though config was unset", - abfsConf.getLong(globalKey, 1), 1); + assertEquals(abfsConf.getLong(globalKey, 1), 1, + "Default value not returned even though config was unset"); conf.setLong(accountKey, 1); conf.setLong(globalKey, 0); - 
assertEquals("Account-agnostic or default value returned even though account-specific config was set", - abfsConf.getLong(globalKey, 0), 1); + assertEquals(abfsConf.getLong(globalKey, 0), 1, + "Account-agnostic or default value returned even though account-specific config was set"); } /** @@ -271,23 +271,23 @@ public void testEnumPrecedence() final AbfsConfiguration abfsConf = new AbfsConfiguration(conf, accountName); conf.setEnum(globalKey, GetEnumType.FALSE); - assertEquals("Default value returned even though account-agnostic config was set", - abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.FALSE); + assertEquals(abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.FALSE, + "Default value returned even though account-agnostic config was set"); conf.unset(globalKey); - assertEquals("Default value not returned even though config was unset", - abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.TRUE); + assertEquals(abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.TRUE, + "Default value not returned even though config was unset"); conf.setEnum(accountKey, GetEnumType.FALSE); - assertEquals("Default value returned even though account-specific config was set", - abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.FALSE); + assertEquals(abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.FALSE, + "Default value returned even though account-specific config was set"); conf.unset(accountKey); - assertEquals("Default value not returned even though config was unset", - abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.TRUE); + assertEquals(abfsConf.getEnum(globalKey, GetEnumType.TRUE), GetEnumType.TRUE, + "Default value not returned even though config was unset"); conf.setEnum(accountKey, GetEnumType.TRUE); conf.setEnum(globalKey, GetEnumType.FALSE); - assertEquals("Account-agnostic or default value returned even though account-specific config was set", - abfsConf.getEnum(globalKey, GetEnumType.FALSE), GetEnumType.TRUE); + assertEquals(abfsConf.getEnum(globalKey, GetEnumType.FALSE), GetEnumType.TRUE, + "Account-agnostic or default value returned even though account-specific config was set"); } /** @@ -324,23 +324,23 @@ public void testClass() final Class xface = GetClassInterface.class; conf.setClass(globalKey, class0, xface); - assertEquals("Default value returned even though account-agnostic config was set", - abfsConf.getAccountAgnosticClass(globalKey, class1, xface), class0); + assertEquals(abfsConf.getAccountAgnosticClass(globalKey, class1, xface), class0, + "Default value returned even though account-agnostic config was set"); conf.unset(globalKey); - assertEquals("Default value not returned even though config was unset", - abfsConf.getAccountAgnosticClass(globalKey, class1, xface), class1); + assertEquals(abfsConf.getAccountAgnosticClass(globalKey, class1, xface), class1, + "Default value not returned even though config was unset"); conf.setClass(accountKey, class0, xface); - assertEquals("Default value returned even though account-specific config was set", - abfsConf.getAccountSpecificClass(globalKey, class1, xface), class0); + assertEquals(abfsConf.getAccountSpecificClass(globalKey, class1, xface), class0, + "Default value returned even though account-specific config was set"); conf.unset(accountKey); - assertEquals("Default value not returned even though config was unset", - abfsConf.getAccountSpecificClass(globalKey, class1, xface), class1); + assertEquals(abfsConf.getAccountSpecificClass(globalKey, class1, xface), class1, + 
"Default value not returned even though config was unset"); conf.setClass(accountKey, class1, xface); conf.setClass(globalKey, class0, xface); - assertEquals("Account-agnostic or default value returned even though account-specific config was set", - abfsConf.getAccountSpecificClass(globalKey, class0, xface), class1); + assertEquals(abfsConf.getAccountSpecificClass(globalKey, class0, xface), class1, + "Account-agnostic or default value returned even though account-specific config was set"); } @Test diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestTracingContext.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestTracingContext.java index 7e05754996f42..dd6c025d92170 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestTracingContext.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestTracingContext.java @@ -27,9 +27,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.fs.CommonPathCapabilities; @@ -60,6 +59,7 @@ import static org.apache.hadoop.fs.azurebfs.services.RetryPolicyConstants.STATIC_RETRY_POLICY_ABBREVIATION; import static org.apache.hadoop.fs.azurebfs.services.RetryReasonConstants.CONNECTION_TIMEOUT_ABBREVIATION; import static org.apache.hadoop.fs.azurebfs.services.RetryReasonConstants.READ_TIMEOUT_ABBREVIATION; +import static org.assertj.core.api.Assumptions.assumeThat; public class TestTracingContext extends AbstractAbfsIntegrationTest { private static final String[] CLIENT_CORRELATIONID_LIST = { @@ -131,7 +131,7 @@ public void checkCorrelationConfigValidation(String clientCorrelationId, } } - @Ignore + @Disabled @Test //call test methods from the respective test classes //can be ignored when running all tests as these get covered @@ -206,9 +206,9 @@ public void testExternalOps() throws Exception { fs.getAbfsStore().getAbfsConfiguration().setIsNamespaceEnabledAccountForTesting(Trilean.UNKNOWN); fs.hasPathCapability(new Path("/"), CommonPathCapabilities.FS_ACLS); - Assume.assumeTrue(getIsNamespaceEnabled(getFileSystem())); - Assume.assumeTrue(getConfiguration().isCheckAccessEnabled()); - Assume.assumeTrue(getAuthType() == AuthType.OAuth); + assumeThat(getIsNamespaceEnabled(getFileSystem())).isTrue(); + assumeThat(getConfiguration().isCheckAccessEnabled()).isTrue(); + assumeThat(getAuthType()).isEqualTo(AuthType.OAuth); fs.setListenerOperation(FSOperationType.ACCESS); fs.getAbfsStore().getAbfsConfiguration().setIsNamespaceEnabledAccountForTesting(Trilean.TRUE); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TrileanTests.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TrileanTests.java index 45467d4140132..8f835cb34d912 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TrileanTests.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TrileanTests.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.azurebfs; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TrileanConversionException; import org.apache.hadoop.fs.azurebfs.enums.Trilean; diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/AbstractAbfsClusterITest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/AbstractAbfsClusterITest.java index 00e46cc98de12..d906333b0b27b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/AbstractAbfsClusterITest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/AbstractAbfsClusterITest.java @@ -23,10 +23,7 @@ import java.time.format.DateTimeFormatter; import org.junit.jupiter.api.AfterAll; -import org.junit.Assume; -import org.junit.Rule; import org.junit.jupiter.api.BeforeEach; -import org.junit.rules.TemporaryFolder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -49,6 +46,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_OVERRIDE_OWNER_SP_LIST; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_BLOB_FS_CLIENT_SERVICE_PRINCIPAL_OBJECT_ID; import static org.apache.hadoop.io.IOUtils.closeStream; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Tests which create a yarn minicluster. @@ -205,15 +203,6 @@ protected MiniMRYarnCluster getYarn() { } - /** - * We stage work into a temporary directory rather than directly under - * the user's home directory, as that is often rejected by CI test - * runners. - */ - @Rule - public final TemporaryFolder stagingFilesDir = new TemporaryFolder(); - - /** * binding on demand rather than in a BeforeClass static method. * Subclasses can override this to change the binding options. @@ -279,7 +268,7 @@ protected void requireScaleTestsEnabled() { protected void assumeValidTestConfigPresent(final String key) { String configuredValue = getConfiguration().get(key); - Assume.assumeTrue(configuredValue != null && !configuredValue.isEmpty()); + assumeThat(configuredValue != null && !configuredValue.isEmpty()).isTrue(); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestStoreOperations.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestStoreOperations.java index 9fc06ee8250c5..737ea13eff1d8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestStoreOperations.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestStoreOperations.java @@ -40,7 +40,7 @@ import static org.apache.hadoop.fs.CommonPathCapabilities.ETAGS_PRESERVED_IN_RENAME; import static org.apache.hadoop.fs.azurebfs.commit.AbfsCommitTestHelper.prepareTestConfiguration; -import static org.junit.Assume.assumeTrue; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test {@link AbfsManifestStoreOperations}. 
@@ -68,10 +68,9 @@ public void setup() throws Exception { super.setup(); // skip tests on non-HNS stores - assumeTrue("Resilient rename not available", - getFileSystem().hasPathCapability(getContract().getTestPath(), - ETAGS_PRESERVED_IN_RENAME)); - + assumeThat(getFileSystem().hasPathCapability(getContract().getTestPath(), + ETAGS_PRESERVED_IN_RENAME)).as("Resilient rename not available") + .isTrue(); } @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTerasort.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTerasort.java index 5ccb5d7652224..0fdec2574400a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTerasort.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTerasort.java @@ -26,7 +26,6 @@ import java.util.Optional; import java.util.function.Consumer; -import org.junit.Assume; import org.junit.FixMethodOrder; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -59,6 +58,7 @@ import static org.apache.hadoop.mapreduce.lib.output.committer.manifest.ManifestCommitterTestSupport.assertNoFailureStatistics; import static org.apache.hadoop.mapreduce.lib.output.committer.manifest.ManifestCommitterTestSupport.loadSuccessFile; import static org.apache.hadoop.mapreduce.lib.output.committer.manifest.ManifestCommitterTestSupport.validateSuccessFile; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Runs Terasort against ABFS using the manifest committer. @@ -184,9 +184,9 @@ private static void completedStage(final String stage, * @param stage stage name */ private static void requireStage(final String stage) { - Assume.assumeTrue( - "Required stage was not completed: " + stage, - COMPLETED_STAGES.get(stage) != null); + assumeThat(COMPLETED_STAGES.get(stage)) + .as("Required stage was not completed: " + stage) + .isNotNull(); } /** @@ -219,9 +219,7 @@ private ManifestSuccessData executeStage( d.close(); } dumpOutputTree(dest); - assertEquals(0, result, stage - + "(" + StringUtils.join(", ", args) + ")" - + " failed"); + assertEquals(0, result, stage + "(" + StringUtils.join(", ", args) + ")" + " failed"); final ManifestSuccessData successFile = validateSuccessFile(getFileSystem(), dest, minimumFileCount, ""); final IOStatistics iostats = successFile.getIOStatistics(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ABFSContractTestBinding.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ABFSContractTestBinding.java index 79e295a99fd2d..42476b4a3ee72 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ABFSContractTestBinding.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ABFSContractTestBinding.java @@ -26,7 +26,8 @@ import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes; import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys; import org.apache.hadoop.fs.azurebfs.services.AuthType; -import org.junit.Assume; + +import static org.assertj.core.api.Assumptions.assumeThat; /** * Bind ABFS contract tests to the Azure test setup/teardown.
@@ -43,8 +44,11 @@ public ABFSContractTestBinding( if (useExistingFileSystem) { AbfsConfiguration configuration = getConfiguration(); String testUrl = configuration.get(TestConfigurationKeys.FS_AZURE_CONTRACT_TEST_URI); - Assume.assumeTrue("Contract tests are skipped because of missing config property :" - + TestConfigurationKeys.FS_AZURE_CONTRACT_TEST_URI, testUrl != null); + + assumeThat(testUrl) + .as("Contract tests are skipped because of missing config property: " + + TestConfigurationKeys.FS_AZURE_CONTRACT_TEST_URI) + .isNotNull(); if (getAuthType() != AuthType.SharedKey) { testUrl = testUrl.replaceFirst(FileSystemUriSchemes.ABFS_SCHEME, FileSystemUriSchemes.ABFS_SECURE_SCHEME); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDistCp.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDistCp.java index 402c75ad52c87..b02c99af2b799 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDistCp.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDistCp.java @@ -24,9 +24,8 @@ import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; import org.junit.jupiter.api.BeforeEach; -import static org.junit.jupiter.api.Assumptions.assumeTrue; - import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.assumeScaleTestsEnabled; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Contract test for distCp operation. @@ -41,7 +40,7 @@ protected int getTestTimeoutMillis() { public ITestAbfsFileSystemContractDistCp() throws Exception { binding = new ABFSContractTestBinding(); - assumeTrue(binding.getAuthType() != AuthType.OAuth); + assumeThat(binding.getAuthType()).isNotEqualTo(AuthType.OAuth); } @BeforeEach diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ITestAbfsDelegationTokens.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ITestAbfsDelegationTokens.java index d2c852a70b6b6..c3e7db5e1f097 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ITestAbfsDelegationTokens.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ITestAbfsDelegationTokens.java @@ -24,9 +24,11 @@ import java.io.PrintStream; import java.net.URI; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,7 +76,7 @@ public class ITestAbfsDelegationTokens extends AbstractAbfsIntegrationTest { /*** * Set up the clusters. */ - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { resetUGI(); cluster = new KerberizedAbfsCluster(); @@ -86,7 +88,7 @@ public static void setupCluster() throws Exception { * Tear down the Cluster.
*/ @SuppressWarnings("ThrowableNotThrown") - @AfterClass + @AfterAll public static void teardownCluster() throws Exception { resetUGI(); ServiceOperations.stopQuietly(LOG, cluster); @@ -95,6 +97,7 @@ public static void teardownCluster() throws Exception { public ITestAbfsDelegationTokens() throws Exception { } + @BeforeEach @Override public void setup() throws Exception { // create the FS @@ -112,8 +115,8 @@ public void setup() throws Exception { StubDelegationTokenManager.useStubDTManager(conf); FileSystem.closeAllForUGI(UserGroupInformation.getLoginUser()); super.setup(); - assertNotNull("No StubDelegationTokenManager created in filesystem init", - getStubDTManager()); + assertNotNull( + getStubDTManager(), "No StubDelegationTokenManager created in filesystem init"); } protected StubDelegationTokenManager getStubDTManager() throws IOException { @@ -124,6 +127,7 @@ protected StubDelegationTokenManager getStubDTManager() throws IOException { * Cleanup removes cached filesystems and the last instance of the * StubDT manager. */ + @AfterEach @Override public void teardown() throws Exception { // clean up all of alice's instances. @@ -135,8 +139,8 @@ public void teardown() throws Exception { * General assertion that security is turred on for a cluster. */ public static void assertSecurityEnabled() { - assertTrue("Security is needed for this test", - UserGroupInformation.isSecurityEnabled()); + assertTrue( + UserGroupInformation.isSecurityEnabled(), "Security is needed for this test"); } /** @@ -163,10 +167,10 @@ protected static Credentials mkTokens(final FileSystem fs) public void testTokenManagerBinding() throws Throwable { StubDelegationTokenManager instance = getStubDTManager(); - assertNotNull("No StubDelegationTokenManager created in filesystem init", - instance); - assertTrue("token manager not initialized: " + instance, - instance.isInitialized()); + assertNotNull( + instance, "No StubDelegationTokenManager created in filesystem init"); + assertTrue( + instance.isInitialized(), "token manager not initialized: " + instance); } /** @@ -176,10 +180,9 @@ public void testTokenManagerBinding() throws Throwable { @Test public void testCanonicalization() throws Throwable { String service = getCanonicalServiceName(); - assertNotNull("No canonical service name from filesystem " + getFileSystem(), - service); - assertEquals("canonical URI and service name mismatch", - getFilesystemURI(), new URI(service)); + assertNotNull( + service, "No canonical service name from filesystem " + getFileSystem()); + assertEquals(getFilesystemURI(), new URI(service), "canonical URI and service name mismatch"); } protected URI getFilesystemURI() throws IOException { @@ -199,8 +202,8 @@ public void testDefaultCanonicalization() throws Throwable { FileSystem fs = getFileSystem(); clearTokenServiceName(); - assertEquals("canonicalServiceName is not the default", - getDefaultServiceName(fs), getCanonicalServiceName()); + assertEquals(getDefaultServiceName(fs), getCanonicalServiceName(), + "canonicalServiceName is not the default"); } protected String getDefaultServiceName(final FileSystem fs) { @@ -218,8 +221,7 @@ protected void clearTokenServiceName() throws IOException { public void testRequestToken() throws Throwable { AzureBlobFileSystem fs = getFileSystem(); Credentials credentials = mkTokens(fs); - assertEquals("Number of collected tokens", 1, - credentials.numberOfTokens()); + assertEquals(1, credentials.numberOfTokens(), "Number of collected tokens"); verifyCredentialsContainsToken(credentials, fs); } @@ 
-231,12 +233,11 @@ public void testRequestTokenDefault() throws Throwable { clearTokenServiceName(); AzureBlobFileSystem fs = getFileSystem(); - assertEquals("canonicalServiceName is not the default", - getDefaultServiceName(fs), fs.getCanonicalServiceName()); + assertEquals(getDefaultServiceName(fs), fs.getCanonicalServiceName(), + "canonicalServiceName is not the default"); Credentials credentials = mkTokens(fs); - assertEquals("Number of collected tokens", 1, - credentials.numberOfTokens()); + assertEquals(1, credentials.numberOfTokens(), "Number of collected tokens"); verifyCredentialsContainsToken(credentials, getDefaultServiceName(fs), getFilesystemURI().toString()); } @@ -264,16 +265,13 @@ public StubAbfsTokenIdentifier verifyCredentialsContainsToken( Token token = credentials.getToken( new Text(serviceName)); - assertEquals("Token Kind in " + token, - StubAbfsTokenIdentifier.TOKEN_KIND, token.getKind()); - assertEquals("Token Service Kind in " + token, - tokenService, token.getService().toString()); + assertEquals(StubAbfsTokenIdentifier.TOKEN_KIND, token.getKind(), "Token Kind in " + token); + assertEquals(tokenService, token.getService().toString(), "Token Service Kind in " + token); StubAbfsTokenIdentifier abfsId = (StubAbfsTokenIdentifier) token.decodeIdentifier(); LOG.info("Created token {}", abfsId); - assertEquals("token URI in " + abfsId, - tokenService, abfsId.getUri().toString()); + assertEquals(tokenService, abfsId.getUri().toString(), "token URI in " + abfsId); return abfsId; } @@ -315,9 +313,8 @@ protected String dtutil(final int expected, () -> ToolRunner.run(conf, dt, args)); String s = dtUtilContent.toString(); LOG.info("\n{}", s); - assertEquals("Exit code from command dtutil " - + StringUtils.join(" ", args) + " with output " + s, - expected, r); + assertEquals(expected, r, + "Exit code from command dtutil " + StringUtils.join(" ", args) + " with output " + s); return s; } @@ -334,18 +331,18 @@ public void testDTUtilShell() throws Throwable { "get", fsURI, "-format", "protobuf", tfs); - assertTrue("not created: " + tokenfile, - tokenfile.exists()); - assertTrue("File is empty " + tokenfile, - tokenfile.length() > 0); - assertTrue("File only contains header " + tokenfile, - tokenfile.length() > 6); + assertTrue( + tokenfile.exists(), "not created: " + tokenfile); + assertTrue( + tokenfile.length() > 0, "File is empty " + tokenfile); + assertTrue( + tokenfile.length() > 6, "File only contains header " + tokenfile); String printed = dtutil(0, getRawConfiguration(), "print", tfs); - assertTrue("no " + fsURI + " in " + printed, - printed.contains(fsURI)); - assertTrue("no " + StubAbfsTokenIdentifier.ID + " in " + printed, - printed.contains(StubAbfsTokenIdentifier.ID)); + assertTrue( + printed.contains(fsURI), "no " + fsURI + " in " + printed); + assertTrue( + printed.contains(StubAbfsTokenIdentifier.ID), "no " + StubAbfsTokenIdentifier.ID + " in " + printed); } /** @@ -360,8 +357,7 @@ public void testBaseDTLifecycle() throws Throwable { ClassicDelegationTokenManager.useClassicDTManager(conf); try (FileSystem fs = FileSystem.newInstance(getFilesystemURI(), conf)) { Credentials credentials = mkTokens(fs); - assertEquals("Number of collected tokens", 1, - credentials.numberOfTokens()); + assertEquals(1, credentials.numberOfTokens(), "Number of collected tokens"); verifyCredentialsContainsToken(credentials, fs.getCanonicalServiceName(), ClassicDelegationTokenManager.UNSET); diff --git
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/TestCustomOauthTokenProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/TestCustomOauthTokenProvider.java index 6d9dc5a98fef4..d504f717bc561 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/TestCustomOauthTokenProvider.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/TestCustomOauthTokenProvider.java @@ -21,7 +21,7 @@ import java.net.URI; import java.util.Date; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; @@ -55,23 +55,21 @@ public void testCustomProviderBinding() throws Throwable { "not-a-real-account"); CustomTokenProviderAdapter provider = (CustomTokenProviderAdapter) abfs.getTokenProvider(); - assertEquals("User agent", INITED, provider.getUserAgentSuffix()); + assertEquals(INITED, provider.getUserAgentSuffix(), "User agent"); // now mimic the bind call ExtensionHelper.bind(provider, new URI("abfs://store@user.dfs.core.windows.net"), conf); - assertEquals("User agent", BOUND, - ExtensionHelper.getUserAgentSuffix(provider, "")); + assertEquals(BOUND, ExtensionHelper.getUserAgentSuffix(provider, ""), "User agent"); AzureADToken token = provider.getToken(); - assertEquals("Access token propagation", - ACCESS_TOKEN, token.getAccessToken()); + assertEquals(ACCESS_TOKEN, token.getAccessToken(), "Access token propagation"); Date expiry = token.getExpiry(); long time = expiry.getTime(); - assertTrue("date wrong: " + expiry, - time <= System.currentTimeMillis()); + assertTrue( + time <= System.currentTimeMillis(), "date wrong: " + expiry); // once closed, the UA state changes. 
provider.close(); - assertEquals("User agent", CLOSED, provider.getUserAgentSuffix()); + assertEquals(CLOSED, provider.getUserAgentSuffix(), "User agent"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/TestDTManagerLifecycle.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/TestDTManagerLifecycle.java index 5566a4b535ed4..74aca3fa09f2a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/TestDTManagerLifecycle.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/TestDTManagerLifecycle.java @@ -20,9 +20,9 @@ import java.net.URI; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.AbstractAbfsTestWithTimeout; @@ -54,12 +54,12 @@ public class TestDTManagerLifecycle extends AbstractAbfsTestWithTimeout { public static final Text KIND2 = new Text("kind2"); - @Before + @BeforeEach public void setup() throws Exception { conf = StubDelegationTokenManager.useStubDTManager(new Configuration()); } - @After + @AfterEach public void teardown() throws Exception { } @@ -70,8 +70,7 @@ public void teardown() throws Exception { */ protected void assertTokenKind(final Text kind, final Token dt) { - assertEquals("Token Kind", - kind, dt.getKind()); + assertEquals(kind, dt.getKind(), "Token Kind"); } /** @@ -88,19 +87,19 @@ public void testClassicLifecycle() throws Throwable { StubDelegationTokenManager stub = getTokenManager(manager); // this is automatically inited - assertTrue("Not initialized: " + stub, stub.isInitialized()); + assertTrue(stub.isInitialized(), "Not initialized: " + stub); Token dt = stub.getDelegationToken(RENEWER); assertTokenKind(StubAbfsTokenIdentifier.TOKEN_KIND, dt); - assertNull("canonicalServiceName in " + stub, - manager.getCanonicalServiceName()); - assertEquals("Issued count number in " + stub, 1, stub.getIssued()); + assertNull( + manager.getCanonicalServiceName(), "canonicalServiceName in " + stub); + assertEquals(1, stub.getIssued(), "Issued count number in " + stub); StubAbfsTokenIdentifier id = decodeIdentifier(dt); - assertEquals("Sequence number in " + id, 1, id.getSequenceNumber()); + assertEquals(1, id.getSequenceNumber(), "Sequence number in " + id); stub.renewDelegationToken(dt); - assertEquals("Renewal count in " + stub, 1, stub.getRenewals()); + assertEquals(1, stub.getRenewals(), "Renewal count in " + stub); stub.cancelDelegationToken(dt); - assertEquals("Cancel count in " + stub, 1, stub.getCancellations()); + assertEquals(1, stub.getCancellations(), "Cancel count in " + stub); } protected StubDelegationTokenManager getTokenManager(final AbfsDelegationTokenManager manager) { @@ -114,15 +113,15 @@ protected StubDelegationTokenManager getTokenManager(final AbfsDelegationTokenMa public void testBindingLifecycle() throws Throwable { AbfsDelegationTokenManager manager = new AbfsDelegationTokenManager(conf); StubDelegationTokenManager stub = getTokenManager(manager); - assertTrue("Not initialized: " + stub, stub.isInitialized()); + assertTrue(stub.isInitialized(), "Not initialized: " + stub); stub.bind(FSURI, conf); - assertEquals("URI in " + stub, FSURI, stub.getFsURI()); + assertEquals(FSURI, stub.getFsURI(), "URI in " + stub); decodeIdentifier(stub.getDelegationToken(RENEWER)); 
stub.close(); - assertTrue("Not closed: " + stub, stub.isClosed()); + assertTrue(stub.isClosed(), "Not closed: " + stub); // and for resilience stub.close(); - assertTrue("Not closed: " + stub, stub.isClosed()); + assertTrue(stub.isClosed(), "Not closed: " + stub); } @Test @@ -130,14 +129,12 @@ public void testBindingThroughManager() throws Throwable { AbfsDelegationTokenManager manager = new AbfsDelegationTokenManager(conf); manager.bind(FSURI, conf); StubDelegationTokenManager stub = getTokenManager(manager); - assertEquals("Service in " + manager, - ABFS, stub.createServiceText().toString()); - assertEquals("Binding URI of " + stub, FSURI, stub.getFsURI()); + assertEquals(ABFS, stub.createServiceText().toString(), "Service in " + manager); + assertEquals(FSURI, stub.getFsURI(), "Binding URI of " + stub); Token token = manager.getDelegationToken( RENEWER); - assertEquals("Service in " + token, - ABFS, token.getService().toString()); + assertEquals(ABFS, token.getService().toString(), "Service in " + token); decodeIdentifier(token); assertTokenKind(StubAbfsTokenIdentifier.TOKEN_KIND, token); @@ -148,12 +145,12 @@ public void testBindingThroughManager() throws Throwable { assertTokenKind(KIND2, dt2); // change the token kind and, unless it is registered, it will not decode. - assertNull("Token is of unknown kind, must not decode", - dt2.decodeIdentifier()); + assertNull( + dt2.decodeIdentifier(), "Token is of unknown kind, must not decode"); // closing the manager will close the stub too. manager.close(); - assertTrue("Not closed: " + stub, stub.isClosed()); + assertTrue(stub.isClosed(), "Not closed: " + stub); } /** @@ -170,22 +167,21 @@ public void testRenewalThroughManager() throws Throwable { // create a DT manager in the renewer codepath. AbfsDelegationTokenManager manager = new AbfsDelegationTokenManager(conf); StubDelegationTokenManager stub = getTokenManager(manager); - assertNull("Stub should not bebound " + stub, stub.getFsURI()); + assertNull(stub.getFsURI(), "Stub should not be bound " + stub); StubAbfsTokenIdentifier dtId = (StubAbfsTokenIdentifier) dt.decodeIdentifier(); String idStr = dtId.toString(); - assertEquals("URI in " + idStr, FSURI, dtId.getUri()); - assertEquals("renewer in " + idStr, - RENEWER, dtId.getRenewer().toString()); + assertEquals(FSURI, dtId.getUri(), "URI in " + idStr); + assertEquals(RENEWER, dtId.getRenewer().toString(), "renewer in " + idStr); manager.renewDelegationToken(dt); - assertEquals("Renewal count in " + stub, 1, stub.getRenewals()); + assertEquals(1, stub.getRenewals(), "Renewal count in " + stub); manager.cancelDelegationToken(dt); - assertEquals("Cancel count in " + stub, 1, stub.getCancellations()); + assertEquals(1, stub.getCancellations(), "Cancel count in " + stub); // closing the manager will close the stub too.
manager.close(); - assertTrue("Not closed: " + stub, stub.isClosed()); + assertTrue(stub.isClosed(), "Not closed: " + stub); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/oauth2/TestWorkloadIdentityTokenProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/oauth2/TestWorkloadIdentityTokenProvider.java index 4c3039ba9b773..cbf24a1b0428d 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/oauth2/TestWorkloadIdentityTokenProvider.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/oauth2/TestWorkloadIdentityTokenProvider.java @@ -24,7 +24,7 @@ import java.util.Date; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.commons.io.FileUtils; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsClient.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsClient.java index c80f727abe47b..890045d8b7850 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsClient.java @@ -29,10 +29,9 @@ import java.util.regex.Pattern; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; @@ -95,12 +94,14 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_CLUSTER_NAME; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_CLUSTER_TYPE; import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.TEST_CONFIGURATION_FILE_NAME; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test useragent of abfs client. 
* */ -@RunWith(Parameterized.class) +@ParameterizedClass(name="{0}") +@MethodSource("params") public final class ITestAbfsClient extends AbstractAbfsIntegrationTest { private static final String ACCOUNT_NAME = "bogusAccountName.dfs.core.windows.net"; @@ -115,10 +116,8 @@ public final class ITestAbfsClient extends AbstractAbfsIntegrationTest { private final Pattern userAgentStringPattern; - @Parameterized.Parameter public HttpOperationType httpOperationType; - @Parameterized.Parameters(name = "{0}") public static Iterable params() { return Arrays.asList(new Object[][]{ {HttpOperationType.JDK_HTTP_URL_CONNECTION}, @@ -126,7 +125,8 @@ public static Iterable params() { }); } - public ITestAbfsClient() throws Exception { + public ITestAbfsClient(HttpOperationType pHttpOperationType) throws Exception { + this.httpOperationType = pHttpOperationType; StringBuilder regEx = new StringBuilder(); regEx.append("^"); regEx.append(APN_VERSION); @@ -183,7 +183,7 @@ private String getUserAgentString(AbfsConfiguration config, @Test public void verifyBasicInfo() throws Exception { - Assume.assumeTrue(JDK_HTTP_URL_CONNECTION == httpOperationType); + assumeThat(httpOperationType).isEqualTo(JDK_HTTP_URL_CONNECTION); final Configuration configuration = new Configuration(); configuration.addResource(TEST_CONFIGURATION_FILE_NAME); AbfsConfiguration abfsConfiguration = new AbfsConfiguration(configuration, @@ -213,7 +213,7 @@ private void verifyBasicInfo(String userAgentStr) { @Test public void verifyUserAgentPrefix() throws IOException, IllegalAccessException, URISyntaxException { - Assume.assumeTrue(JDK_HTTP_URL_CONNECTION == httpOperationType); + assumeThat(httpOperationType).isEqualTo(JDK_HTTP_URL_CONNECTION); final Configuration configuration = new Configuration(); configuration.addResource(TEST_CONFIGURATION_FILE_NAME); configuration.set(ConfigurationKeys.FS_AZURE_USER_AGENT_PREFIX_KEY, FS_AZURE_USER_AGENT_PREFIX); @@ -248,7 +248,7 @@ public void verifyUserAgentPrefix() @Test public void verifyUserAgentExpectHeader() throws IOException, IllegalAccessException, URISyntaxException { - Assume.assumeTrue(JDK_HTTP_URL_CONNECTION == httpOperationType); + assumeThat(httpOperationType).isEqualTo(JDK_HTTP_URL_CONNECTION); final Configuration configuration = new Configuration(); configuration.addResource(TEST_CONFIGURATION_FILE_NAME); configuration.set(ConfigurationKeys.FS_AZURE_USER_AGENT_PREFIX_KEY, FS_AZURE_USER_AGENT_PREFIX); @@ -275,7 +275,7 @@ public void verifyUserAgentExpectHeader() @Test public void verifyUserAgentWithoutSSLProvider() throws Exception { - Assume.assumeTrue(JDK_HTTP_URL_CONNECTION == httpOperationType); + assumeThat(httpOperationType).isEqualTo(JDK_HTTP_URL_CONNECTION); final Configuration configuration = new Configuration(); configuration.addResource(TEST_CONFIGURATION_FILE_NAME); configuration.set(ConfigurationKeys.FS_AZURE_SSL_CHANNEL_MODE_KEY, @@ -299,7 +299,7 @@ public void verifyUserAgentWithoutSSLProvider() throws Exception { @Test public void verifyUserAgentClusterName() throws Exception { - Assume.assumeTrue(JDK_HTTP_URL_CONNECTION == httpOperationType); + assumeThat(httpOperationType).isEqualTo(JDK_HTTP_URL_CONNECTION); final String clusterName = "testClusterName"; final Configuration configuration = new Configuration(); configuration.addResource(TEST_CONFIGURATION_FILE_NAME); @@ -328,7 +328,7 @@ public void verifyUserAgentClusterName() throws Exception { @Test public void verifyUserAgentClusterType() throws Exception { - Assume.assumeTrue(JDK_HTTP_URL_CONNECTION == httpOperationType);
+ assumeThat(httpOperationType).isEqualTo(JDK_HTTP_URL_CONNECTION); final String clusterType = "testClusterType"; final Configuration configuration = new Configuration(); configuration.addResource(TEST_CONFIGURATION_FILE_NAME); @@ -488,9 +488,9 @@ public static AbfsClient getMockAbfsClient(AbfsClient baseAbfsClientInstance, abfsConfig.getAccountName()); AbfsCounters abfsCounters = Mockito.spy(new AbfsCountersImpl(new URI("abcd"))); - org.junit.Assume.assumeTrue( - (currentAuthType == AuthType.SharedKey) - || (currentAuthType == AuthType.OAuth)); + assumeThat(currentAuthType) + .as("Auth type must be SharedKey or OAuth for this test") + .isIn(AuthType.SharedKey, AuthType.OAuth); AbfsClient client; if (AbfsServiceType.DFS.equals(abfsConfig.getFsConfiguredServiceType())) { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsClientHandler.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsClientHandler.java index 85e20fb274e2c..de140fb108196 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsClientHandler.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsClientHandler.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.azurebfs.services; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest; import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsHttpClientRequestExecutor.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsHttpClientRequestExecutor.java index 1fe8df82b3b52..48e6f6e36ccc6 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsHttpClientRequestExecutor.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsHttpClientRequestExecutor.java @@ -25,8 +25,7 @@ import java.net.URL; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; @@ -50,6 +49,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_NETWORKING_LIBRARY; import static org.apache.hadoop.fs.azurebfs.constants.HttpOperationType.APACHE_HTTP_CLIENT; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; public class ITestAbfsHttpClientRequestExecutor extends AbstractAbfsIntegrationTest { @@ -67,8 +67,9 @@ public void testExpect100ContinueHandling() throws Exception { AzureBlobFileSystem fs = getFileSystem(); Path path = new Path("/testExpect100ContinueHandling"); if (isAppendBlobEnabled()) { - Assume.assumeFalse("Not valid for AppendBlob with blob endpoint", - getIngressServiceType() == AbfsServiceType.BLOB); + assumeThat(getIngressServiceType()) + .as("Not valid for AppendBlob with blob endpoint") + .isNotEqualTo(AbfsServiceType.BLOB); } Configuration conf = new Configuration(fs.getConf()); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java index d14ac05d5f5aa..6bcf31f9e69dd 100644 ---
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStream.java @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.azurebfs.utils.TracingContext; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB; import static org.apache.hadoop.fs.azurebfs.services.AbfsInputStreamTestUtils.HUNDRED; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java index c7c9da94ab2ed..fbafc12490c78 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamReadFooter.java @@ -32,9 +32,9 @@ import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystemStore; import org.assertj.core.api.Assertions; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.hadoop.fs.FSDataInputStream; @@ -96,13 +96,13 @@ public ITestAbfsInputStreamReadFooter() throws Exception { this.abfsInputStreamTestUtils = new AbfsInputStreamTestUtils(this); } - @BeforeClass + @BeforeAll public static void init() { executorService = Executors.newFixedThreadPool( 2 * Runtime.getRuntime().availableProcessors()); } - @AfterClass + @AfterAll public static void close() { executorService.shutdown(); } @@ -184,11 +184,9 @@ private void validateNumBackendCalls(final AzureBlobFileSystem spiedFs, .get(CONNECTIONS_MADE.getStatName()); if (optimizeFooterRead) { - assertEquals(1, - requestsMadeAfterTest - requestsMadeBeforeTest); + assertEquals(1, requestsMadeAfterTest - requestsMadeBeforeTest); } else { - assertEquals(3, - requestsMadeAfterTest - requestsMadeBeforeTest); + assertEquals(3, requestsMadeAfterTest - requestsMadeBeforeTest); } } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java index 64fac9ca94ed8..5e3879a525cfe 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsInputStreamSmallFileReads.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.Map; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; import org.apache.hadoop.fs.FileSystem; @@ -289,8 +289,7 @@ private void partialReadWithNoData(final FileSystem fs, assertEquals(bytesRead, length); abfsInputStreamTestUtils.assertContentReadCorrectly(fileContent, seekPos, length, buffer, testFilePath); assertEquals(fileContent.length, abfsInputStream.getFCursor()); - assertEquals(fileContent.length, - abfsInputStream.getFCursorAfterLastRead()); + assertEquals(fileContent.length, abfsInputStream.getFCursorAfterLastRead()); 
assertEquals(length, abfsInputStream.getBCursor()); assertTrue(abfsInputStream.getLimit() >= length); } finally { diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java index 0b7cbb38db76e..51317f8e89562 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsOutputStream.java @@ -27,12 +27,13 @@ import java.net.URL; import java.security.MessageDigest; import java.util.Arrays; +import java.util.concurrent.TimeUnit; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; @@ -59,21 +60,21 @@ import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED; import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.EXPECT; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Test create operation. */ -@RunWith(Parameterized.class) +@ParameterizedClass(name="{0}") +@MethodSource("params") public class ITestAbfsOutputStream extends AbstractAbfsIntegrationTest { private static final int TEST_EXECUTION_TIMEOUT = 2 * 60 * 1000; private static final String TEST_FILE_PATH = "testfile"; private static final int TEN = 10; - @Parameterized.Parameter public HttpOperationType httpOperationType; - @Parameterized.Parameters(name = "{0}") public static Iterable params() { return Arrays.asList(new Object[][]{ {HttpOperationType.JDK_HTTP_URL_CONNECTION}, @@ -81,9 +82,9 @@ public static Iterable params() { }); } - - public ITestAbfsOutputStream() throws Exception { + public ITestAbfsOutputStream(final HttpOperationType pHttpOperationType) throws Exception { super(); + this.httpOperationType = pHttpOperationType; } @Override @@ -145,7 +146,8 @@ public void testMaxRequestsAndQueueCapacity() throws Exception { * Verify the passing of AzureBlobFileSystem reference to AbfsOutputStream * to make sure that the FS instance is not eligible for GC while writing. 
*/ - @Test(timeout = TEST_EXECUTION_TIMEOUT) + @Test + @Timeout(value = TEST_EXECUTION_TIMEOUT, unit = TimeUnit.MILLISECONDS) public void testAzureBlobFileSystemBackReferenceInOutputStream() throws Exception { byte[] testBytes = new byte[5 * 1024]; @@ -198,7 +200,7 @@ public void testAbfsOutputStreamClosingFsBeforeStream() @Test public void testExpect100ContinueFailureInAppend() throws Exception { if (!getIsNamespaceEnabled(getFileSystem())) { - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); } Configuration configuration = new Configuration(getRawConfiguration()); configuration.set(FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED, "true"); @@ -312,7 +314,7 @@ private AbfsOutputStream getStream() throws URISyntaxException, IOException { @Test public void testValidateGetBlockList() throws Exception { AzureBlobFileSystem fs = Mockito.spy(getFileSystem()); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeThat(getIsNamespaceEnabled(fs)).isFalse(); AzureBlobFileSystemStore store = Mockito.spy(fs.getAbfsStore()); assumeBlobServiceType(); @@ -351,7 +353,7 @@ public void testValidateGetBlockList() throws Exception { @Test public void testNoNetworkCallsForFlush() throws Exception { AzureBlobFileSystem fs = Mockito.spy(getFileSystem()); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeThat(getIsNamespaceEnabled(fs)).isFalse(); AzureBlobFileSystemStore store = Mockito.spy(fs.getAbfsStore()); assumeBlobServiceType(); @@ -394,10 +396,10 @@ private AbfsRestOperationException getMockAbfsRestOperationException(int status) @Test public void testNoNetworkCallsForSecondFlush() throws Exception { AzureBlobFileSystem fs = Mockito.spy(getFileSystem()); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeThat(getIsNamespaceEnabled(fs)).isFalse(); AzureBlobFileSystemStore store = Mockito.spy(fs.getAbfsStore()); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); // Step 2: Mock the clientHandler to return the blobClient when getBlobClient is called AbfsClientHandler clientHandler = Mockito.spy(store.getClientHandler()); @@ -437,10 +439,10 @@ public void testNoNetworkCallsForSecondFlush() throws Exception { @Test public void testResetCalledOnExceptionInRemoteFlush() throws Exception { AzureBlobFileSystem fs = Mockito.spy(getFileSystem()); - Assume.assumeTrue(!getIsNamespaceEnabled(fs)); + assumeThat(getIsNamespaceEnabled(fs)).isFalse(); AzureBlobFileSystemStore store = Mockito.spy(fs.getAbfsStore()); assumeBlobServiceType(); - Assume.assumeFalse("Not valid for APPEND BLOB", isAppendBlobEnabled()); + assumeThat(isAppendBlobEnabled()).as("Not valid for APPEND BLOB").isFalse(); // Create a file and spy on AbfsOutputStream Path path = new Path("/testFile"); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPaginatedDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPaginatedDelete.java index 392443caec3d1..b30e2c993f6a9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPaginatedDelete.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPaginatedDelete.java @@ -23,8 +23,8 @@ import java.util.UUID; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import 
org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.apache.commons.lang3.StringUtils; @@ -62,6 +62,7 @@ import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT; import static org.apache.hadoop.fs.azurebfs.services.AbfsClientUtils.getHeaderValue; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.assertj.core.api.Assumptions.assumeThat; /** * Tests to verify server side pagination feature is supported from driver. @@ -93,6 +94,7 @@ public ITestAbfsPaginatedDelete() throws Exception { * Create file system instances for both super-user and test user. * @throws Exception */ + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -143,8 +145,8 @@ private void setTestUserConf(Configuration conf, String key, String value) { */ @Test public void testRecursiveDeleteWithPagination() throws Exception { - Assume.assumeTrue( - getFileSystem().getAbfsStore().getClient() instanceof AbfsDfsClient); + assumeThat(getFileSystem().getAbfsStore().getClient()) + .isInstanceOf(AbfsDfsClient.class); testRecursiveDeleteWithPaginationInternal(false, true, AbfsHttpConstants.ApiVersion.DEC_12_2019); testRecursiveDeleteWithPaginationInternal(false, true, @@ -178,8 +180,8 @@ public void testNonRecursiveDeleteWithPagination() throws Exception { */ @Test public void testRecursiveDeleteWithInvalidCT() throws Exception { - Assume.assumeTrue( - getFileSystem().getAbfsStore().getClient() instanceof AbfsDfsClient); + assumeThat(getFileSystem().getAbfsStore().getClient()) + .isInstanceOf(AbfsDfsClient.class); testRecursiveDeleteWithInvalidCTInternal(true); testRecursiveDeleteWithInvalidCTInternal(false); } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPositionedRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPositionedRead.java index 25f33db1cae9e..16257741b1c64 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPositionedRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsPositionedRead.java @@ -21,9 +21,7 @@ import java.util.Arrays; import java.util.concurrent.ExecutionException; -import org.junit.Rule; -import org.junit.rules.TestName; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FutureDataInputStreamBuilder; @@ -39,9 +37,6 @@ public class ITestAbfsPositionedRead extends AbstractAbfsIntegrationTest { private static final int TEST_FILE_DATA_SIZE = 100; - @Rule - public TestName methodName = new TestName(); - public ITestAbfsPositionedRead() throws Exception { } @@ -55,10 +50,8 @@ public void testPositionedRead() throws IOException { TEST_FILE_DATA_SIZE, true); int bytesToRead = 10; try (FSDataInputStream inputStream = getFileSystem().open(dest)) { - assertTrue( - "unexpected stream type " - + inputStream.getWrappedStream().getClass().getSimpleName(), - inputStream.getWrappedStream() instanceof AbfsInputStream); + assertTrue(inputStream.getWrappedStream() instanceof AbfsInputStream, + "unexpected stream type " + inputStream.getWrappedStream().getClass().getSimpleName()); byte[] readBuffer = new byte[bytesToRead]; int readPos = 0; Assertions @@ -148,7 +141,7 @@ public void testPositionedReadWithBufferedReadDisabled() throws IOException { 
"Exception opening " + dest + " with FutureDataInputStreamBuilder", e); } - assertNotNull("Null InputStream over " + dest, inputStream); + assertNotNull(inputStream, "Null InputStream over " + dest); int bytesToRead = 10; try { AbfsInputStream abfsIs = (AbfsInputStream) inputStream.getWrappedStream(); @@ -167,8 +160,8 @@ public void testPositionedReadWithBufferedReadDisabled() throws IOException { // disabled, it will only read the exact bytes as requested and no data // will get read into the AbfsInputStream#buffer. Infact the buffer won't // even get initialized. - assertNull("AbfsInputStream pread caused the internal buffer creation", - abfsIs.getBuffer()); + assertNull( + abfsIs.getBuffer(), "AbfsInputStream pread caused the internal buffer creation"); // Check statistics assertStatistics(inputStream.getIOStatistics(), bytesToRead, 1, 1, bytesToRead); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsRestOperation.java index cdb40ddd521d5..ca0ed4eec5f84 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsRestOperation.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsRestOperation.java @@ -26,9 +26,9 @@ import java.util.List; import org.assertj.core.api.Assertions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Mockito; import org.apache.hadoop.conf.Configuration; @@ -68,7 +68,8 @@ import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; -@RunWith(Parameterized.class) +@ParameterizedClass(name="expect={0}-code={1}-ErrorType={3}=NetLib={4}") +@MethodSource("params") public class ITestAbfsRestOperation extends AbstractAbfsIntegrationTest { // Specifies whether getOutputStream() or write() throws IOException. @@ -84,22 +85,17 @@ public enum ErrorType {OUTPUTSTREAM, WRITE}; private static final String TEST_PATH = "/testfile"; // Specifies whether the expect header is enabled or not. - @Parameterized.Parameter public boolean expectHeaderEnabled; // Gives the http response code. - @Parameterized.Parameter(1) public int responseCode; // Gives the http response message. - @Parameterized.Parameter(2) public String responseMessage; // Gives the errorType based on the enum. - @Parameterized.Parameter(3) public ErrorType errorType; - @Parameterized.Parameter(4) public HttpOperationType httpOperationType; // The intercept. @@ -112,7 +108,6 @@ public enum ErrorType {OUTPUTSTREAM, WRITE}; HTTP_EXPECTATION_FAILED = 417, HTTP_ERROR = 0. 
*/ - @Parameterized.Parameters(name = "expect={0}-code={1}-ErrorType={3}=NetLib={4}") public static Iterable params() { return Arrays.asList(new Object[][]{ {true, HTTP_OK, "OK", ErrorType.WRITE, JDK_HTTP_URL_CONNECTION}, @@ -135,8 +130,14 @@ public static Iterable params() { }); } - public ITestAbfsRestOperation() throws Exception { - super(); + public ITestAbfsRestOperation(boolean pExpectHeaderEnabled, int pResponseCode, + String pResponseMessage, ErrorType pErrorType, + HttpOperationType phttpOperationType) throws Exception { + this.expectHeaderEnabled = pExpectHeaderEnabled; + this.responseCode = pResponseCode; + this.responseMessage = pResponseMessage; + this.errorType = pErrorType; + this.httpOperationType = phttpOperationType; } @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsUnbuffer.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsUnbuffer.java index 7c96a950e2358..1fc6dbcb67d47 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsUnbuffer.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestAbfsUnbuffer.java @@ -20,7 +20,8 @@ import java.io.IOException; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; @@ -39,6 +40,7 @@ public class ITestAbfsUnbuffer extends AbstractAbfsIntegrationTest { public ITestAbfsUnbuffer() throws Exception { } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -53,17 +55,17 @@ public void setup() throws Exception { public void testUnbuffer() throws IOException { // Open file, read half the data, and then call unbuffer try (FSDataInputStream inputStream = getFileSystem().open(dest)) { - assertTrue("unexpected stream type " - + inputStream.getWrappedStream().getClass().getSimpleName(), - inputStream.getWrappedStream() instanceof AbfsInputStream); + assertTrue( + inputStream.getWrappedStream() instanceof AbfsInputStream, "unexpected stream type " + + inputStream.getWrappedStream().getClass().getSimpleName()); readAndAssertBytesRead(inputStream, 8); - assertFalse("AbfsInputStream buffer should not be null", - isBufferNull(inputStream)); + assertFalse( + isBufferNull(inputStream), "AbfsInputStream buffer should not be null"); inputStream.unbuffer(); // Check the the underlying buffer is null - assertTrue("AbfsInputStream buffer should be null", - isBufferNull(inputStream)); + assertTrue( + isBufferNull(inputStream), "AbfsInputStream buffer should be null"); } } @@ -78,7 +80,7 @@ private boolean isBufferNull(FSDataInputStream inputStream) { */ private static void readAndAssertBytesRead(FSDataInputStream inputStream, int bytesToRead) throws IOException { - assertEquals("AbfsInputStream#read did not read the correct number of " - + "bytes", bytesToRead, inputStream.read(new byte[bytesToRead])); + assertEquals(bytesToRead, inputStream.read(new byte[bytesToRead]), + "AbfsInputStream#read did not read the correct number of " + "bytes"); } } diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestApacheClientConnectionPool.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestApacheClientConnectionPool.java index 9ff37332ad702..05313b52172b9 100644 ---
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestApacheClientConnectionPool.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestApacheClientConnectionPool.java @@ -22,7 +22,7 @@ import java.util.Map; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ClosedIOException; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestExponentialRetryPolicy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestExponentialRetryPolicy.java index 1d289eabfa9bd..c9a73ac91a106 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestExponentialRetryPolicy.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestExponentialRetryPolicy.java @@ -40,7 +40,6 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.assertj.core.api.Assertions; -import org.junit.Assume; import org.mockito.Mockito; import java.net.URI; @@ -49,7 +48,8 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import static org.assertj.core.api.Assumptions.assumeThat; import org.apache.hadoop.conf.Configuration; @@ -207,8 +206,8 @@ public void testOperationOnAccountIdle() throws Exception { AzureBlobFileSystem fs = getFileSystem(); AbfsClient client = fs.getAbfsStore().getClient(); AbfsConfiguration configuration1 = client.getAbfsConfiguration(); - Assume.assumeTrue(configuration1.isAutoThrottlingEnabled()); - Assume.assumeTrue(configuration1.accountThrottlingEnabled()); + assumeThat(configuration1.isAutoThrottlingEnabled()).isTrue(); + assumeThat(configuration1.accountThrottlingEnabled()).isTrue(); AbfsClientThrottlingIntercept accountIntercept = (AbfsClientThrottlingIntercept) client.getIntercept(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestReadBufferManager.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestReadBufferManager.java index b70f36de31867..1ead30e9fa2a9 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestReadBufferManager.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestReadBufferManager.java @@ -37,7 +37,7 @@ import org.apache.hadoop.io.IOUtils; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_READ_BUFFER_SIZE; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_READ_AHEAD_BLOCK_SIZE; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestStaticRetryPolicy.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestStaticRetryPolicy.java index 9b4467c1dbd35..14637be275016 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestStaticRetryPolicy.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/ITestStaticRetryPolicy.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.azurebfs.services; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java index a4b5cd068e941..9b388b57c3e55 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsInputStream.java @@ -29,9 +29,9 @@ import org.apache.hadoop.fs.azurebfs.AbfsConfiguration; import org.apache.hadoop.fs.azurebfs.AbfsCountersImpl; -import org.assertj.core.api.Assertions; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; @@ -79,6 +79,7 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants.FORWARD_SLASH; import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_READ_AHEAD_QUEUE_DEPTH; +import static org.assertj.core.api.Assertions.assertThat; /** * Unit test AbfsInputStream. @@ -104,6 +105,7 @@ public class TestAbfsInputStream extends private static final int OPERATION_INDEX = 6; private static final int READTYPE_INDEX = 11; + @AfterEach @Override public void teardown() throws Exception { super.teardown(); @@ -247,12 +249,10 @@ private void verifyOpenWithProvidedStatus(Path path, FileStatus fileStatus, FutureDataInputStreamBuilder builder = fs.openFile(path); builder.withFileStatus(fileStatus); FSDataInputStream in = builder.build().get(); - assertEquals(String.format( - "Open with fileStatus [from %s result]: Incorrect number of bytes read", - source), buf.length, in.read(readBuf)); - assertArrayEquals(String - .format("Open with fileStatus [from %s result]: Incorrect read data", - source), readBuf, buf); + assertEquals(buf.length, in.read(readBuf), + String.format("Open with fileStatus [from %s result]: Incorrect number of bytes read", source)); + assertArrayEquals(readBuf, buf, + String.format("Open with fileStatus [from %s result]: Incorrect read data", source)); } private void checkGetPathStatusCalls(Path testFile, FileStatus fileStatus, @@ -508,7 +508,7 @@ public void testSuccessfulReadAhead() throws Exception { // inputstream can proceed with read and not be blocked on readahead thread // availability. So the count of buffers in completedReadQueue for the stream // can be same or lesser than the requests triggered to queue readahead. - Assertions.assertThat(newAdditionsToCompletedRead) + assertThat(newAdditionsToCompletedRead) .describedAs( "New additions to completed reads should be same or less than as number of readaheads") .isLessThanOrEqualTo(3); @@ -564,28 +564,28 @@ public void testStreamPurgeDuringReadAheadCallExecuting() throws Exception { //Sleeping to give ReadBufferWorker to pick the readBuffers for processing. 
Thread.sleep(readBufferTransferToInProgressProbableTime); - Assertions.assertThat(readBufferManager.getInProgressCopiedList()) + assertThat(readBufferManager.getInProgressCopiedList()) .describedAs(String.format("InProgressList should have %d elements", readBufferQueuedCount)) .hasSize(readBufferQueuedCount); - Assertions.assertThat(readBufferManager.getFreeListCopy()) + assertThat(readBufferManager.getFreeListCopy()) .describedAs(String.format("FreeList should have %d elements", expectedFreeListBufferCount)) .hasSize(expectedFreeListBufferCount); - Assertions.assertThat(readBufferManager.getCompletedReadListCopy()) + assertThat(readBufferManager.getCompletedReadListCopy()) .describedAs("CompletedList should have 0 elements") .hasSize(0); } - Assertions.assertThat(readBufferManager.getInProgressCopiedList()) + assertThat(readBufferManager.getInProgressCopiedList()) .describedAs(String.format("InProgressList should have %d elements", readBufferQueuedCount)) .hasSize(readBufferQueuedCount); - Assertions.assertThat(readBufferManager.getFreeListCopy()) + assertThat(readBufferManager.getFreeListCopy()) .describedAs(String.format("FreeList should have %d elements", expectedFreeListBufferCount)) .hasSize(expectedFreeListBufferCount); - Assertions.assertThat(readBufferManager.getCompletedReadListCopy()) + assertThat(readBufferManager.getCompletedReadListCopy()) .describedAs("CompletedList should have 0 elements") .hasSize(0); } @@ -686,8 +686,8 @@ public void testReadAheadManagerForOlderReadAheadFailure() throws Exception { ONE_KB, ONE_KB, new byte[ONE_KB]); - Assert.assertEquals("bytesRead should be zero when previously read " - + "ahead buffer had failed", 0, bytesRead); + Assertions.assertEquals(0, bytesRead, + "bytesRead should be zero when previously read " + "ahead buffer had failed"); // Stub returns success for the 5th read request, if ReadBuffers still // persisted request would have failed for position 0. @@ -740,8 +740,8 @@ public void testReadAheadManagerForSuccessfulReadAhead() throws Exception { ONE_KB, new byte[ONE_KB]); - Assert.assertTrue("bytesRead should be non-zero from the " - + "buffer that was read-ahead", bytesRead > 0); + Assertions.assertTrue(bytesRead > 0, "bytesRead should be non-zero from the " + + "buffer that was read-ahead"); // Once created, mock will remember all interactions.
     // As the above read should not have triggered any server calls, total
@@ -792,7 +792,7 @@ public void testDefaultReadaheadQueueDepth() throws Exception {
     Path testFile = path("/testFile");
     fs.create(testFile).close();
     FSDataInputStream in = fs.open(testFile);
-    Assertions.assertThat(
+    assertThat(
         ((AbfsInputStream) in.getWrappedStream()).getReadAheadQueueDepth())
         .describedAs("readahead queue depth should be set to default value 2")
         .isEqualTo(2);
@@ -882,7 +882,7 @@ public void testReadTypeInTracingContextHeader() throws Exception {
       AbfsInputStream stream = (AbfsInputStream) iStream.getWrappedStream();
       int bytesRead = stream.read(ONE_MB/3, new byte[fileSize], 0, fileSize);
-      Assertions.assertThat(fileSize - ONE_MB/3)
+      assertThat(fileSize - ONE_MB/3)
           .describedAs("Read size should match file size")
           .isEqualTo(bytesRead);
     }
@@ -910,7 +910,7 @@ private void readFile(AzureBlobFileSystem fs, Path testPath, int fileSize) throw
     try (FSDataInputStream iStream = fs.open(testPath)) {
       int bytesRead = iStream.read(new byte[fileSize], 0, fileSize);
-      Assertions.assertThat(fileSize)
+      assertThat(fileSize)
           .describedAs("Read size should match file size")
           .isEqualTo(bytesRead);
     }
@@ -963,16 +963,16 @@ private void verifyHeaderForReadTypeInTracingContextHeader(TracingContext tracin
     doReturn(EMPTY_STRING).when(mockOp).getTracingContextSuffix();
     tracingContext.constructHeader(mockOp, null, null);
     String[] idList = tracingContext.getHeader().split(COLON, SPLIT_NO_LIMIT);
-    Assertions.assertThat(idList).describedAs("Client Request Id should have all fields").hasSize(
+    assertThat(idList).describedAs("Client Request Id should have all fields").hasSize(
         TracingHeaderVersion.getCurrentVersion().getFieldCount());
     if (expectedReadPos > 0) {
-      Assertions.assertThat(idList[POSITION_INDEX])
+      assertThat(idList[POSITION_INDEX])
           .describedAs("Read Position should match")
           .isEqualTo(Integer.toString(expectedReadPos));
     }
-    Assertions.assertThat(idList[OPERATION_INDEX]).describedAs("Operation Type Should Be Read")
+    assertThat(idList[OPERATION_INDEX]).describedAs("Operation Type Should Be Read")
         .isEqualTo(FSOperationType.READ.toString());
-    Assertions.assertThat(idList[READTYPE_INDEX]).describedAs("Read type in tracing context header should match")
+    assertThat(idList[READTYPE_INDEX]).describedAs("Read type in tracing context header should match")
         .isEqualTo(readType.toString());
   }
@@ -995,21 +995,21 @@ private void testReadAheads(AbfsInputStream inputStream,
         getExpectedBufferData(readRequestSize, readAheadRequestSize,
             expectedSecondReadAheadBufferContents);

-    Assertions.assertThat(inputStream.read(firstReadBuffer, 0, readRequestSize))
+    assertThat(inputStream.read(firstReadBuffer, 0, readRequestSize))
         .describedAs("Read should be of exact requested size")
         .isEqualTo(readRequestSize);
-    assertTrue("Data mismatch found in RAH1",
-        Arrays.equals(firstReadBuffer,
-            expectedFirstReadAheadBufferContents));
+    assertTrue(
+        Arrays.equals(firstReadBuffer, expectedFirstReadAheadBufferContents),
+        "Data mismatch found in RAH1");

-    Assertions.assertThat(inputStream.read(secondReadBuffer, 0, readAheadRequestSize))
+    assertThat(inputStream.read(secondReadBuffer, 0, readAheadRequestSize))
         .describedAs("Read should be of exact requested size")
         .isEqualTo(readAheadRequestSize);
-    assertTrue("Data mismatch found in RAH2",
-        Arrays.equals(secondReadBuffer,
-            expectedSecondReadAheadBufferContents));
+    assertTrue(
+        Arrays.equals(secondReadBuffer, expectedSecondReadAheadBufferContents),
+        "Data mismatch found in RAH2");
   }

   public AbfsInputStream testReadAheadConfigs(int readRequestSize,
@@ -1037,19 +1037,19 @@ public AbfsInputStream testReadAheadConfigs(int readRequestSize,
     AbfsInputStream inputStream = this.getAbfsStore(fs)
         .openFileForRead(testPath, null, getTestTracingContext(fs, false));

-    Assertions.assertThat(inputStream.getBufferSize())
+    assertThat(inputStream.getBufferSize())
         .describedAs("Unexpected AbfsInputStream buffer size")
         .isEqualTo(readRequestSize);

-    Assertions.assertThat(inputStream.getReadAheadQueueDepth())
+    assertThat(inputStream.getReadAheadQueueDepth())
         .describedAs("Unexpected ReadAhead queue depth")
         .isEqualTo(readAheadQueueDepth);

-    Assertions.assertThat(inputStream.shouldAlwaysReadBufferSize())
+    assertThat(inputStream.shouldAlwaysReadBufferSize())
         .describedAs("Unexpected AlwaysReadBufferSize settings")
         .isEqualTo(alwaysReadBufferSizeEnabled);

-    Assertions.assertThat(getBufferManager().getReadAheadBlockSize())
+    assertThat(getBufferManager().getReadAheadBlockSize())
         .describedAs("Unexpected readAhead block size")
         .isEqualTo(readAheadBlockSize);
@@ -1109,7 +1109,7 @@ private AzureBlobFileSystem createTestFile(Path testFilePath, long testFileSize,
       }
     }

-    Assertions.assertThat(fs.getFileStatus(testFilePath).getLen())
+    assertThat(fs.getFileStatus(testFilePath).getLen())
         .describedAs("File not created of expected size")
         .isEqualTo(testFileSize);
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsRenameRetryRecovery.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsRenameRetryRecovery.java
index 159d8e1379dbd..997818b5f9f96 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsRenameRetryRecovery.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsRenameRetryRecovery.java
@@ -25,8 +25,7 @@
 import java.time.Duration;

 import org.assertj.core.api.Assertions;
-import org.junit.Assume;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -66,6 +65,7 @@
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
+import static org.assertj.core.api.Assumptions.assumeThat;

 /**
  * Testing Abfs Rename recovery using Mockito.
  */
@@ -143,9 +143,9 @@ public void testRenameFailuresDueToIncompleteMetadata() throws Exception {
             + "being in incomplete state")
         .isSameAs(recoveredMetaDataIncompleteResult);
     // Verify Incomplete metadata state happened for our second rename call.
-    assertTrue("Metadata incomplete state should be true if a rename is "
-        + "retried after no Parent directory is found",
-        resultOfSecondRenameCall.isIncompleteMetadataState());
+    assertTrue(resultOfSecondRenameCall.isIncompleteMetadataState(),
+        "Metadata incomplete state should be true if a rename is "
+        + "retried after no Parent directory is found");

     // Verify renamePath occurred two times implying a retry was attempted.
@@ -252,8 +252,7 @@ public void testRenameRecoveryEtagMatchFsLevel() throws IOException {
     AzureBlobFileSystem fs = getFileSystem();
     AzureBlobFileSystemStore abfsStore = fs.getAbfsStore();
     TracingContext testTracingContext = getTestTracingContext(fs, false);
-
-    Assume.assumeTrue(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext));
+    assumeThat(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext)).isTrue();

     AbfsClient mockClient = getMockAbfsClient();
@@ -310,8 +309,7 @@ public void testRenameRecoveryEtagMismatchFsLevel() throws Exception {
     AzureBlobFileSystemStore abfsStore = fs.getAbfsStore();
     TracingContext testTracingContext = getTestTracingContext(fs, false);

-    Assume.assumeTrue(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext));
-
+    assumeThat(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext)).isTrue();
     AbfsClient mockClient = getMockAbfsClient();

     String base = "/" + getMethodName();
@@ -333,7 +331,7 @@ public void testRenameRecoveryFailsForDirFsLevel() throws Exception {
     AzureBlobFileSystemStore abfsStore = fs.getAbfsStore();
     TracingContext testTracingContext = getTestTracingContext(fs, false);

-    Assume.assumeTrue(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext));
+    assumeThat(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext)).isTrue();

     AbfsClient mockClient = getMockAbfsClient();
@@ -402,7 +400,7 @@ public void testDirRenameRecoveryUnsupported() throws Exception {
     AzureBlobFileSystem fs = getFileSystem();
     TracingContext testTracingContext = getTestTracingContext(fs, false);

-    Assume.assumeTrue(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext));
+    assumeThat(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext)).isTrue();

     AbfsClient spyClient = getMockAbfsClient();
@@ -432,7 +430,7 @@ public void testExistingPathCorrectlyRejected() throws Exception {
     AzureBlobFileSystem fs = getFileSystem();
     TracingContext testTracingContext = getTestTracingContext(fs, false);

-    Assume.assumeTrue(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext));
+    assumeThat(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext)).isTrue();

     AbfsClient spyClient = getMockAbfsClient();
@@ -515,7 +513,7 @@ public void testResilientCommitOperation() throws Throwable {
     TracingContext testTracingContext = getTestTracingContext(fs, false);

     final AzureBlobFileSystemStore store = fs.getAbfsStore();
-    Assume.assumeTrue(store.getIsNamespaceEnabled(testTracingContext));
+    assumeThat(store.getIsNamespaceEnabled(testTracingContext)).isTrue();

     // patch in the mock abfs client to the filesystem, for the resilient
     // commit API to pick up.
@@ -547,7 +545,7 @@ public void testResilientCommitOperationTagMismatch() throws Throwable {
     TracingContext testTracingContext = getTestTracingContext(fs, false);

     final AzureBlobFileSystemStore store = fs.getAbfsStore();
-    Assume.assumeTrue(store.getIsNamespaceEnabled(testTracingContext));
+    assumeThat(store.getIsNamespaceEnabled(testTracingContext)).isTrue();

     // patch in the mock abfs client to the filesystem, for the resilient
     // commit API to pick up.
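Reviewer note (commentary, not part of the patch): the recurring edit in TestAbfsRenameRetryRecovery.java swaps JUnit 4's Assume.assumeTrue(...) for AssertJ's assumeThat(...).isTrue(). Both abort a test as skipped rather than failed when the precondition is unmet, but the AssertJ form takes an .as() description and reads the same as the assertions these tests already use. A minimal self-contained sketch of the pattern under JUnit 5 follows; the class name, method names, and system property are hypothetical, not taken from this patch:

    import static org.assertj.core.api.Assumptions.assumeThat;

    import org.junit.jupiter.api.Test;

    public class AssumptionPatternSketch {

      // Hypothetical stand-in for an environment probe such as
      // getIsNamespaceEnabled(testTracingContext) in the real tests.
      private boolean namespaceEnabled() {
        return Boolean.getBoolean("test.namespace.enabled");
      }

      @Test
      public void runsOnlyWhenNamespaceEnabled() {
        // On failure this throws an assumption-violated exception
        // (org.opentest4j.TestAbortedException when opentest4j is on the
        // classpath), which the JUnit 5 engine reports as a skipped test.
        assumeThat(namespaceEnabled())
            .as("namespace support must be enabled")
            .isTrue();

        // The test body runs only when the assumption held.
      }
    }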
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsRestOperation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsRestOperation.java
index 1c49afb99bdce..62f6e253fb518 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsRestOperation.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAbfsRestOperation.java
@@ -82,9 +82,9 @@ public void testBackoffRetryMetrics() throws Exception {
     }

     // For retry count greater than the max configured value, the request should fail.
-    assertEquals("Number of failed requests does not match expected value.",
-        "3", String.valueOf(testClient.getAbfsCounters().getAbfsBackoffMetrics().
-        getMetricValue(NUMBER_OF_REQUESTS_FAILED)));
+    assertEquals("3", String.valueOf(testClient.getAbfsCounters().getAbfsBackoffMetrics().
+        getMetricValue(NUMBER_OF_REQUESTS_FAILED)),
+        "Number of failed requests does not match expected value.");

     // Close the AzureBlobFileSystem.
     fs.close();
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestApacheClientConnectionPool.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestApacheClientConnectionPool.java
index 1e97bbca5ed5f..4e636125142b1 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestApacheClientConnectionPool.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestApacheClientConnectionPool.java
@@ -22,7 +22,7 @@
 import java.util.concurrent.atomic.AtomicBoolean;

 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;

 import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestApacheHttpClientFallback.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestApacheHttpClientFallback.java
index 201b712b40f2c..55c5f76c71b52 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestApacheHttpClientFallback.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestApacheHttpClientFallback.java
@@ -23,7 +23,7 @@
 import java.util.ArrayList;

 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;

 import org.apache.hadoop.fs.azurebfs.AbfsConfiguration;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAzureADAuthenticator.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAzureADAuthenticator.java
index 37a7a986e1149..49d6fd256ff2b 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAzureADAuthenticator.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestAzureADAuthenticator.java
@@ -20,7 +20,7 @@
 import java.io.IOException;

 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

 import org.apache.hadoop.fs.azurebfs.AbfsConfiguration;
 import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestListActionTaker.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestListActionTaker.java
index f67ba45360c2a..81392fcb3ee9e 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestListActionTaker.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestListActionTaker.java
@@ -23,7 +23,7 @@
 import java.util.List;

 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;

 import org.apache.hadoop.fs.Path;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/AbfsTestUtils.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/AbfsTestUtils.java
index d6b11865130bd..1752dafd8d950 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/AbfsTestUtils.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/AbfsTestUtils.java
@@ -27,7 +27,6 @@
 import com.microsoft.azure.storage.CloudStorageAccount;
 import com.microsoft.azure.storage.blob.CloudBlobClient;
 import com.microsoft.azure.storage.blob.CloudBlobContainer;
-import org.junit.Assume;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,6 +41,7 @@
 import static org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes.ABFS_SCHEME;
 import static org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes.ABFS_SECURE_SCHEME;
 import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.TEST_CONTAINER_PREFIX;
+import static org.assertj.core.api.Assumptions.assumeThat;

 /**
  * Some Utils for ABFS tests.
@@ -63,7 +63,7 @@ public AbfsTestUtils() throws Exception {
    */
   public void checkContainers() throws Throwable {
-    Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey);
+    assumeThat(this.getAuthType()).isEqualTo(AuthType.SharedKey);
     int count = 0;
     CloudStorageAccount storageAccount = AzureBlobStorageTestAccount.createTestAccount();
     CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
@@ -80,7 +80,7 @@ public void checkContainers() throws Throwable {

   public void deleteContainers() throws Throwable {
-    Assume.assumeTrue(this.getAuthType() == AuthType.SharedKey);
+    assumeThat(this.getAuthType()).isEqualTo(AuthType.SharedKey);
     int count = 0;
     CloudStorageAccount storageAccount = AzureBlobStorageTestAccount.createTestAccount();
     CloudBlobClient blobClient = storageAccount.createCloudBlobClient();
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DirectoryStateHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DirectoryStateHelper.java
index 22eb8e1e68abe..5d7ab48849a85 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DirectoryStateHelper.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/DirectoryStateHelper.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.fs.azurebfs.utils;

-import org.junit.Assume;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem;
@@ -28,6 +26,7 @@
 import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;

 import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
+import static org.assertj.core.api.Assumptions.assumeThat;

 /**
  * Helper class to check the state of a directory as implicit or explicit.
@@ -54,7 +53,7 @@ private DirectoryStateHelper() {
    */
   public static boolean isImplicitDirectory(Path path, AzureBlobFileSystem fs,
       TracingContext testTracingContext) throws Exception {
-    Assume.assumeFalse(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext));
+    assumeThat(fs.getAbfsStore().getIsNamespaceEnabled(testTracingContext)).isFalse();
     path = new Path(fs.makeQualified(path).toUri().getPath());
     String relativePath = fs.getAbfsStore().getRelativePath(path);
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestAzcopyToolHelper.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestAzcopyToolHelper.java
index 8fc8998ac036b..f08b66da0750e 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestAzcopyToolHelper.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TestAzcopyToolHelper.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.fs.azurebfs.utils;

 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest;
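Reviewer note (commentary, not part of the patch): the other mechanical change applied throughout is the JUnit 4 to JUnit 5 assertion signature swap, where the failure message moves from the first parameter to the last, alongside statically importing AssertJ's assertThat instead of qualifying it as Assertions.assertThat. A small self-contained sketch for comparison; the class name and values are hypothetical:

    import static org.assertj.core.api.Assertions.assertThat;
    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    public class AssertionOrderSketch {

      @Test
      public void messageMovesToLastParameter() {
        int bytesRead = 42;

        // JUnit 4 (org.junit.Assert): message comes first.
        //   Assert.assertTrue("bytesRead should be non-zero", bytesRead > 0);
        // JUnit 5 (org.junit.jupiter.api.Assertions): message comes last.
        assertTrue(bytesRead > 0, "bytesRead should be non-zero");
        assertEquals(42, bytesRead, "bytesRead should match the stubbed length");

        // AssertJ, statically imported as in this patch, attaches the
        // description before the check instead.
        assertThat(bytesRead)
            .describedAs("bytesRead should match the stubbed length")
            .isEqualTo(42);
      }
    }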