From 29806c06d3a7f073b953d47c4a087c96002cdb10 Mon Sep 17 00:00:00 2001
From: slfan1989
Date: Wed, 7 May 2025 12:57:23 +0800
Subject: [PATCH 1/3] HADOOP-19415. [JDK17] Upgrade JUnit from 4 to 5 in
 hadoop-common Part6.

---
 .../hadoop/fs/obs/TestOBSContractAppend.java | 5 +-
 .../hadoop/fs/TestLocalFSCopyFromLocal.java | 2 +-
 .../contract/AbstractContractAppendTest.java | 4 +-
 .../AbstractContractBulkDeleteTest.java | 4 +-
 .../contract/AbstractContractConcatTest.java | 4 +-
 .../AbstractContractContentSummaryTest.java | 2 +-
 .../AbstractContractCopyFromLocalTest.java | 22 +--
 .../contract/AbstractContractCreateTest.java | 38 ++--
 .../contract/AbstractContractDeleteTest.java | 14 +-
 .../fs/contract/AbstractContractEtagTest.java | 2 +-
 .../AbstractContractGetEnclosingRoot.java | 60 +++----
 .../AbstractContractGetFileStatusTest.java | 53 +++---
 .../AbstractContractLeaseRecoveryTest.java | 2 +-
 .../contract/AbstractContractMkdirTest.java | 2 +-
 ...AbstractContractMultipartUploaderTest.java | 23 +--
 .../fs/contract/AbstractContractOpenTest.java | 46 +++--
 .../AbstractContractPathHandleTest.java | 96 +++++-----
 .../contract/AbstractContractRenameTest.java | 33 ++--
 .../AbstractContractRootDirectoryTest.java | 20 ++-
 .../AbstractContractSafeModeTest.java | 2 +-
 .../fs/contract/AbstractContractSeekTest.java | 28 +--
 .../AbstractContractSetTimesTest.java | 4 +-
 ...bstractContractStreamIOStatisticsTest.java | 8 +-
 .../AbstractContractUnbufferTest.java | 20 ++-
 .../AbstractContractVectoredReadTest.java | 164 +++++++++++-------
 .../contract/AbstractFSContractTestBase.java | 46 ++---
 .../localfs/TestLocalFSContractCreate.java | 2 +-
 .../localfs/TestLocalFSContractLoaded.java | 10 +-
 .../TestLocalFSContractVectoredRead.java | 26 ++-
 .../TestRawLocalContractVectoredRead.java | 3 +-
 .../TestRawlocalContractPathHandle.java | 6 +-
 .../rawlocal/TestRawlocalContractRename.java | 2 +-
 .../org/apache/hadoop/fs/extend/TestName.java | 39 +++++
 .../io/wrappedio/impl/TestWrappedIO.java | 8 +-
 .../router/TestRouterHDFSContractAppend.java | 8 +-
 .../TestRouterHDFSContractAppendSecure.java | 8 +-
 .../router/TestRouterHDFSContractConcat.java | 8 +-
 .../TestRouterHDFSContractConcatSecure.java | 8 +-
 .../router/TestRouterHDFSContractCreate.java | 8 +-
 .../TestRouterHDFSContractCreateSecure.java | 8 +-
 ...TestRouterHDFSContractDelegationToken.java | 20 +--
 .../router/TestRouterHDFSContractDelete.java | 8 +-
 .../TestRouterHDFSContractDeleteSecure.java | 8 +-
 .../TestRouterHDFSContractGetFileStatus.java | 8 +-
 ...RouterHDFSContractGetFileStatusSecure.java | 8 +-
 .../router/TestRouterHDFSContractMkdir.java | 8 +-
 .../TestRouterHDFSContractMkdirSecure.java | 8 +-
 .../router/TestRouterHDFSContractOpen.java | 8 +-
 .../TestRouterHDFSContractOpenSecure.java | 8 +-
 .../router/TestRouterHDFSContractRename.java | 8 +-
 .../TestRouterHDFSContractRenameSecure.java | 8 +-
 .../TestRouterHDFSContractRootDirectory.java | 8 +-
 ...RouterHDFSContractRootDirectorySecure.java | 8 +-
 .../router/TestRouterHDFSContractSeek.java | 8 +-
 .../TestRouterHDFSContractSeekSecure.java | 8 +-
 .../TestRouterHDFSContractSetTimes.java | 8 +-
 .../TestRouterHDFSContractSetTimesSecure.java | 8 +-
 .../web/TestRouterWebHDFSContractAppend.java | 8 +-
 .../web/TestRouterWebHDFSContractConcat.java | 8 +-
 .../web/TestRouterWebHDFSContractCreate.java | 8 +-
 .../web/TestRouterWebHDFSContractDelete.java | 8 +-
 .../web/TestRouterWebHDFSContractMkdir.java | 8 +-
 .../web/TestRouterWebHDFSContractOpen.java | 10 +-
 .../web/TestRouterWebHDFSContractRename.java | 8 +-
 ...estRouterWebHDFSContractRootDirectory.java | 8 +-
 .../web/TestRouterWebHDFSContractSeek.java | 8 +-
 .../fs/contract/hdfs/TestDFSWrappedIO.java | 8 +-
 .../contract/hdfs/TestHDFSContractAppend.java | 8 +-
 .../hdfs/TestHDFSContractBulkDelete.java | 8 +-
 .../contract/hdfs/TestHDFSContractConcat.java | 8 +-
 .../contract/hdfs/TestHDFSContractCreate.java | 8 +-
 .../contract/hdfs/TestHDFSContractDelete.java | 8 +-
 .../hdfs/TestHDFSContractGetFileStatus.java | 8 +-
 .../hdfs/TestHDFSContractLeaseRecovery.java | 8 +-
 .../contract/hdfs/TestHDFSContractMkdir.java | 8 +-
 .../TestHDFSContractMultipartUploader.java | 8 +-
 .../contract/hdfs/TestHDFSContractOpen.java | 8 +-
 .../hdfs/TestHDFSContractPathHandle.java | 13 +-
 .../contract/hdfs/TestHDFSContractRename.java | 8 +-
 .../hdfs/TestHDFSContractRootDirectory.java | 8 +-
 .../hdfs/TestHDFSContractSafeMode.java | 8 +-
 .../contract/hdfs/TestHDFSContractSeek.java | 8 +-
 .../hdfs/TestHDFSContractSetTimes.java | 8 +-
 .../hdfs/TestHDFSContractUnbuffer.java | 8 +-
 .../hdfs/TestHDFSContractVectoredRead.java | 11 +-
 .../AbstractManifestCommitterTest.java | 8 +-
 .../committer/manifest/TestCleanupStage.java | 4 +-
 .../manifest/TestCommitTaskStage.java | 4 +-
 .../TestCreateOutputDirectoriesStage.java | 4 +-
 .../TestJobThroughManifestCommitter.java | 10 +-
 .../manifest/TestLoadManifestsStage.java | 6 +-
 .../manifest/TestManifestCommitProtocol.java | 10 +-
 .../manifest/TestRenameStageFailure.java | 10 +-
 .../manifest/TestTaskManifestFileIO.java | 4 +-
 .../manifest/impl/TestEntryFileIO.java | 10 +-
 hadoop-project/pom.xml | 2 +-
 ...tAliyunOSSContractGetFileStatusV1List.java | 2 +
 .../contract/TestAliyunOSSContractSeek.java | 2 +-
 .../oss/yarn/TestOSSMiniYarnCluster.java | 13 +-
 ...3AContractAnalyticsStreamVectoredRead.java | 10 +-
 .../s3a/ITestS3AContractBulkDelete.java | 38 ++--
 .../s3a/ITestS3AContractContentSummary.java | 2 +-
 .../contract/s3a/ITestS3AContractCreate.java | 61 +++----
 .../contract/s3a/ITestS3AContractDistCp.java | 9 +-
 .../s3a/ITestS3AContractGetFileStatus.java | 2 +
 .../ITestS3AContractMkdirWithCreatePerf.java | 4 +-
 .../ITestS3AContractMultipartUploader.java | 2 +
 .../fs/contract/s3a/ITestS3AContractOpen.java | 2 +-
 .../contract/s3a/ITestS3AContractRename.java | 4 +-
 .../contract/s3a/ITestS3AContractRootDir.java | 6 +-
 .../fs/contract/s3a/ITestS3AContractSeek.java | 78 +++++----
 .../s3a/ITestS3AContractVectoredRead.java | 52 +++---
 .../hadoop/fs/s3a/AbstractS3ATestBase.java | 14 +-
 .../fs/s3a/AbstractTestS3AEncryption.java | 14 +-
 .../hadoop/fs/s3a/ITestDowngradeSyncable.java | 2 +-
 .../fs/s3a/ITestLocatedFileStatusFetcher.java | 4 +-
 .../fs/s3a/ITestS3ABlockOutputArray.java | 40 +++--
 .../hadoop/fs/s3a/ITestS3ABlocksize.java | 21 +--
 .../fs/s3a/ITestS3ABucketExistence.java | 21 +--
 .../hadoop/fs/s3a/ITestS3ACannedACLs.java | 2 +-
 .../fs/s3a/ITestS3AClientSideEncryption.java | 29 ++--
 .../s3a/ITestS3AClientSideEncryptionKms.java | 5 +-
 .../hadoop/fs/s3a/ITestS3AClosedFS.java | 10 +-
 .../fs/s3a/ITestS3AContentEncoding.java | 2 +-
 .../ITestS3AContractGetFileStatusV1List.java | 2 +
 .../fs/s3a/ITestS3ACopyFromLocalFile.java | 37 ++--
 ...3ADSSEEncryptionWithDefaultS3Settings.java | 4 +-
 .../hadoop/fs/s3a/ITestS3ADelayedFNF.java | 2 +-
 .../hadoop/fs/s3a/ITestS3ADeleteOnExit.java | 2 +-
 .../hadoop/fs/s3a/ITestS3AEmptyDirectory.java | 4 +-
 ...ITestS3AEncryptionAlgorithmValidation.java | 56 +++---
 .../hadoop/fs/s3a/ITestS3AEncryptionSSEC.java | 8 +-
 .../ITestS3AEncryptionSSEKMSDefaultKey.java | 10 +-
 ...estS3AEncryptionWithDefaultS3Settings.java | 10 +-
 .../hadoop/fs/s3a/ITestS3AEndpointRegion.java | 7 +-
 .../fs/s3a/ITestS3AFailureHandling.java | 6 +-
 .../fs/s3a/ITestS3AFileOperationCost.java | 8 +-
 ...ITestS3AFileSystemIsolatedClassloader.java | 2 +-
 .../fs/s3a/ITestS3AIOStatisticsContext.java | 10 +-
 .../fs/s3a/ITestS3AInputStreamLeakage.java | 4 +-
 .../apache/hadoop/fs/s3a/ITestS3AMetrics.java | 10 +-
 .../hadoop/fs/s3a/ITestS3AMiscOperations.java | 21 ++-
 .../hadoop/fs/s3a/ITestS3AMultipartUtils.java | 2 +-
 .../fs/s3a/ITestS3APrefetchingCacheFiles.java | 24 +--
 .../s3a/ITestS3APrefetchingInputStream.java | 35 ++--
 .../s3a/ITestS3APrefetchingLruEviction.java | 19 +-
 .../hadoop/fs/s3a/ITestS3ARequesterPays.java | 2 +-
 .../hadoop/fs/s3a/ITestS3AStorageClass.java | 48 +++--
 .../fs/s3a/ITestS3ATemporaryCredentials.java | 12 +-
 .../hadoop/fs/s3a/ITestS3AUnbuffer.java | 23 +--
 .../hadoop/fs/s3a/ITestS3AUrlScheme.java | 2 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java | 107 ++++++------
 .../fs/s3a/audit/ITestAuditAccessChecks.java | 4 +-
 .../fs/s3a/audit/ITestAuditManager.java | 2 +-
 .../s3a/audit/ITestAuditManagerDisabled.java | 2 +-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java | 16 +-
 .../ITestAssumedRoleCommitOperations.java | 4 +
 .../hadoop/fs/s3a/auth/ITestCustomSigner.java | 27 +--
 .../hadoop/fs/s3a/auth/ITestHttpSigner.java | 6 +-
 .../hadoop/fs/s3a/auth/ITestJceksIO.java | 10 +-
 .../s3a/auth/ITestRestrictedReadAccess.java | 6 +-
 .../auth/delegation/AbstractDelegationIT.java | 19 +-
 .../ILoadTestSessionCredentials.java | 9 +-
 .../auth/delegation/ITestDelegatedMRJob.java | 55 +++---
 .../ITestRoleDelegationInFilesystem.java | 2 +
 .../delegation/ITestRoleDelegationTokens.java | 6 +-
 .../ITestSessionDelegationInFilesystem.java | 144 +++++++--------
 .../ITestSessionDelegationTokens.java | 73 ++++----
 .../fs/s3a/commit/AbstractCommitITest.java | 26 +--
 .../s3a/commit/AbstractITCommitProtocol.java | 60 +++----
 .../s3a/commit/AbstractYarnClusterITest.java | 21 +--
 .../s3a/commit/ITestCommitOperationCost.java | 6 +-
 .../fs/s3a/commit/ITestCommitOperations.java | 14 +-
 .../s3a/commit/ITestS3ACommitterFactory.java | 57 +++---
 .../fs/s3a/commit/ITestUploadRecovery.java | 49 ++++--
 .../integration/ITestS3ACommitterMRJob.java | 95 +++++-----
 .../magic/ITestMagicCommitProtocol.java | 21 +--
 .../ITestMagicCommitProtocolFailure.java | 2 +-
 .../magic/ITestS3AHugeMagicCommits.java | 10 +-
 .../ITestDirectoryCommitProtocol.java | 10 +-
 .../ITestStagingCommitProtocol.java | 13 +-
 .../ITestStagingCommitProtocolFailure.java | 2 +-
 .../commit/terasort/ITestTerasortOnS3A.java | 88 ++++++----
 .../fs/s3a/impl/ITestAwsSdkWorkarounds.java | 2 +-
 .../fs/s3a/impl/ITestConnectionTimeouts.java | 4 +-
 .../s3a/impl/ITestPartialRenamesDeletes.java | 101 +++++++----
 .../fs/s3a/impl/ITestRenameDeleteRace.java | 2 +-
 .../fs/s3a/impl/ITestTreewalkProblems.java | 4 +-
 ...ITestUploadPurgeOnDirectoryOperations.java | 4 +-
 .../hadoop/fs/s3a/impl/ITestXAttrCost.java | 2 +-
 .../s3a/performance/AbstractS3ACostTest.java | 2 +
 .../s3a/performance/ITestCreateFileCost.java | 81 +++++----
 .../ITestCreateSessionTimeout.java | 4 +-
 .../ITestDirectoryMarkerListing.java | 20 ++-
 .../s3a/performance/ITestS3ADeleteCost.java | 4 +-
 .../ITestS3AMiscOperationCost.java | 43 +++--
 .../fs/s3a/performance/ITestS3AMkdirCost.java | 2 +-
 .../fs/s3a/performance/ITestS3AOpenCost.java | 10 +-
 .../s3a/performance/ITestS3ARenameCost.java | 2 +-
 .../performance/ITestUnbufferDraining.java | 6 +-
 .../s3guard/AbstractS3GuardToolTestBase.java | 12 +-
 .../fs/s3a/s3guard/ITestS3GuardTool.java | 14 +-
 .../s3a/scale/AbstractSTestS3AHugeFiles.java | 34 ++--
 .../ILoadTestS3ABulkDeleteThrottling.java | 68 ++++----
 ...ITestS3ABlockOutputStreamInterruption.java | 70 +++++---
 .../fs/s3a/scale/ITestS3AConcurrentOps.java | 10 +-
 .../s3a/scale/ITestS3ACreatePerformance.java | 8 +-
 .../fs/s3a/scale/ITestS3ADeleteManyFiles.java | 4 +-
 .../scale/ITestS3ADirectoryPerformance.java | 34 ++--
 .../scale/ITestS3AHugeFilesEncryption.java | 2 +
 .../ITestS3AHugeFilesSSECDiskBlocks.java | 2 +
 .../scale/ITestS3AHugeFilesStorageClass.java | 5 +-
 .../scale/ITestS3AInputStreamPerformance.java | 78 ++++-----
 .../ITestS3AMultipartUploadSizeLimits.java | 2 +-
 .../hadoop/fs/s3a/scale/S3AScaleTestBase.java | 2 +
 .../fs/s3a/select/ITestSelectUnsupported.java | 2 +-
 .../ITestAWSStatisticCollection.java | 2 +-
 .../ITestAggregateIOStatistics.java | 2 +-
 .../ITestS3AFileSystemStatistic.java | 6 +-
 .../hadoop/fs/s3a/tools/ITestBucketTool.java | 4 +-
 .../hadoop/fs/s3a/tools/ITestMarkerTool.java | 2 +-
 .../tools/ITestMarkerToolRootOperations.java | 10 +-
 .../apache/hadoop/fs/s3a/yarn/ITestS3A.java | 25 ++-
 .../fs/s3a/yarn/ITestS3AMiniYarnCluster.java | 18 +-
 .../adl/live/TestAdlContractAppendLive.java | 2 +-
 .../adl/live/TestAdlContractConcatLive.java | 2 +-
 .../commit/AbstractAbfsClusterITest.java | 8 +-
 .../commit/ITestAbfsCleanupStage.java | 2 +
 .../commit/ITestAbfsCommitTaskStage.java | 2 +
 ...ITestAbfsCreateOutputDirectoriesStage.java | 2 +
 .../ITestAbfsJobThroughManifestCommitter.java | 2 +
 .../commit/ITestAbfsLoadManifestsStage.java | 2 +
 .../ITestAbfsManifestCommitProtocol.java | 2 +
 .../ITestAbfsManifestStoreOperations.java | 4 +-
 .../commit/ITestAbfsRenameStageFailure.java | 3 +
 .../commit/ITestAbfsTaskManifestFileIO.java | 2 +
 .../fs/azurebfs/commit/ITestAbfsTerasort.java | 9 +-
 .../contract/ITestAbfsContractBulkDelete.java | 2 +
 .../contract/ITestAbfsContractUnbuffer.java | 2 +
 .../ITestAbfsFileSystemContractAppend.java | 4 +-
 .../ITestAbfsFileSystemContractConcat.java | 2 +
 .../ITestAbfsFileSystemContractCreate.java | 2 +
 .../ITestAbfsFileSystemContractDelete.java | 2 +
 .../ITestAbfsFileSystemContractEtag.java | 2 +
 ...stAbfsFileSystemContractGetFileStatus.java | 2 +
 .../ITestAbfsFileSystemContractMkdir.java | 2 +
 .../ITestAbfsFileSystemContractOpen.java | 2 +
 .../ITestAbfsFileSystemContractRename.java | 2 +
 ...stAbfsFileSystemContractRootDirectory.java | 2 +
 .../ITestAbfsFileSystemContractSeek.java | 8 +-
 .../ITestAbfsFileSystemContractSetTimes.java | 2 +
 ...estAbfsFileSystemContractVectoredRead.java | 5 +-
 .../contract/AbstractContractDistCpTest.java | 48 +++--
 .../contract/TestHDFSContractDistCp.java | 8 +-
 254 files changed, 2204 insertions(+), 1733 deletions(-)
 create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/extend/TestName.java

diff --git a/hadoop-cloud-storage-project/hadoop-huaweicloud/src/test/java/org/apache/hadoop/fs/obs/TestOBSContractAppend.java b/hadoop-cloud-storage-project/hadoop-huaweicloud/src/test/java/org/apache/hadoop/fs/obs/TestOBSContractAppend.java
index a4fb8153e7ca4..372b87eff6758 100644
--- a/hadoop-cloud-storage-project/hadoop-huaweicloud/src/test/java/org/apache/hadoop/fs/obs/TestOBSContractAppend.java
+++ b/hadoop-cloud-storage-project/hadoop-huaweicloud/src/test/java/org/apache/hadoop/fs/obs/TestOBSContractAppend.java
@@ -21,7 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.Assume;
+
+import static
org.junit.jupiter.api.Assumptions.assumeTrue; /** * Append test cases on obs file system. @@ -35,6 +36,6 @@ protected AbstractFSContract createContract(final Configuration conf) { @Override public void testRenameFileBeingAppended() { - Assume.assumeTrue("unsupport.", false); + assumeTrue(false, "unsupport."); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSCopyFromLocal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSCopyFromLocal.java index 15466af7c16fb..e4bf15209add2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSCopyFromLocal.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSCopyFromLocal.java @@ -20,7 +20,7 @@ import java.io.File; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractCopyFromLocalTest; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java index 9b92dacadd90e..e2265129481ba 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java @@ -22,7 +22,8 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,6 +42,7 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB private Path testPath; private Path target; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractBulkDeleteTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractBulkDeleteTest.java index 199790338b2df..81e136f0951f1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractBulkDeleteTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractBulkDeleteTest.java @@ -25,7 +25,8 @@ import java.util.Map; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -74,6 +75,7 @@ public abstract class AbstractContractBulkDeleteTest extends AbstractFSContractT */ private DynamicWrappedIO dynamicWrappedIO; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java index d712369de3b97..872dea4ac92fc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java @@ -20,7 +20,8 @@ import 
org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,6 +45,7 @@ public abstract class AbstractContractConcatTest extends AbstractFSContractTestB private Path zeroByteFile; private Path target; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java index 5e5c917395413..07405dc9cd784 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.Path; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.FileNotFoundException; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java index e24eb7181ec9f..61f0333428fb0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java @@ -25,7 +25,8 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; @@ -43,6 +44,7 @@ public abstract class AbstractContractCopyFromLocalTest extends private static final Charset ASCII = StandardCharsets.US_ASCII; private File file; + @AfterEach @Override public void teardown() throws Exception { super.teardown(); @@ -65,12 +67,12 @@ public void testCopyFile() throws Throwable { Path dest = copyFromLocal(file, true); assertPathExists("uploaded file not found", dest); - assertTrue("source file deleted", Files.exists(file.toPath())); + assertTrue(Files.exists(file.toPath()), "source file deleted"); FileSystem fs = getFileSystem(); FileStatus status = fs.getFileStatus(dest); - assertEquals("File length not equal " + status, - message.getBytes(ASCII).length, status.getLen()); + assertEquals(message.getBytes(ASCII).length, status.getLen(), + "File length not equal " + status); assertFileTextEquals(dest, message); } @@ -109,7 +111,7 @@ public void testSourceIsFileAndDelSrcTrue() throws Throwable { file = createTempFile("test"); copyFromLocal(file, false, true); - assertFalse("Source file not deleted", Files.exists(file.toPath())); + assertFalse(Files.exists(file.toPath()), "Source file not deleted"); } @Test @@ -215,7 +217,7 @@ public void testSrcIsDirWithDelSrcOptions() throws Throwable { copyFromLocal(source, false, true); Path dest = fileToPath(child, source.getParentFile()); - assertFalse("Directory not deleted", Files.exists(source.toPath())); + assertFalse(Files.exists(source.toPath()), "Directory not deleted"); assertFileTextEquals(dest, contents); } @@ -258,8 +260,8 @@ public void testCopyDirectoryWithDelete() throws Throwable { 
Path dst = path(srcDir.getFileName().toString()); getFileSystem().copyFromLocalFile(true, true, src, dst); - assertFalse("Source directory was not deleted", - Files.exists(srcDir)); + assertFalse(Files.exists(srcDir), + "Source directory was not deleted"); } @Test @@ -330,7 +332,7 @@ protected File createTempDirectory(File parent, String name) private void assertFileTextEquals(Path path, String expected) throws IOException { - assertEquals("Wrong data in " + path, - expected, IOUtils.toString(getFileSystem().open(path), ASCII)); + assertEquals(expected, IOUtils.toString(getFileSystem().open(path), ASCII), + "Wrong data in " + path); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java index 91d19ecad1ec6..6548f092d1217 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StreamCapabilities; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.junit.AssumptionViolatedException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -278,8 +278,8 @@ public void testFileStatusBlocksizeNonEmptyFile() throws Throwable { FileSystem fs = getFileSystem(); long rootPath = fs.getDefaultBlockSize(path("/")); - assertTrue("Root block size is invalid " + rootPath, - rootPath > 0); + assertTrue(rootPath > 0, + "Root block size is invalid " + rootPath); Path path = path("testFileStatusBlocksizeNonEmptyFile"); byte[] data = dataset(256, 'a', 'z'); @@ -303,13 +303,13 @@ private void validateBlockSize(FileSystem fs, Path path, int minValue) FileStatus status = getFileStatusEventually(fs, path, CREATE_TIMEOUT); String statusDetails = status.toString(); - assertTrue("File status block size too low: " + statusDetails - + " min value: " + minValue, - status.getBlockSize() >= minValue); + assertTrue(status.getBlockSize() >= minValue, + "File status block size too low: " + statusDetails + + " min value: " + minValue); long defaultBlockSize = fs.getDefaultBlockSize(path); - assertTrue("fs.getDefaultBlockSize(" + path + ") size " + - defaultBlockSize + " is below the minimum of " + minValue, - defaultBlockSize >= minValue); + assertTrue(defaultBlockSize >= minValue, + "fs.getDefaultBlockSize(" + path + ") size " + + defaultBlockSize + " is below the minimum of " + minValue); } @Test @@ -320,14 +320,14 @@ public void testCreateMakesParentDirs() throws Throwable { Path parent = new Path(grandparent, "parent"); Path child = new Path(parent, "child"); touch(fs, child); - assertEquals("List status of parent should include the 1 child file", - 1, fs.listStatus(parent).length); - assertTrue("Parent directory does not appear to be a directory", - fs.getFileStatus(parent).isDirectory()); - assertEquals("List status of grandparent should include the 1 parent dir", - 1, fs.listStatus(grandparent).length); - assertTrue("Grandparent directory does not appear to be a directory", - fs.getFileStatus(grandparent).isDirectory()); + assertEquals(1, fs.listStatus(parent).length, + "List status of parent should include the 1 child file"); + assertTrue(fs.getFileStatus(parent).isDirectory(), + "Parent directory does not appear to be a directory"); + 
assertEquals(1, fs.listStatus(grandparent).length, + "List status of grandparent should include the 1 parent dir"); + assertTrue(fs.getFileStatus(grandparent).isDirectory(), + "Grandparent directory does not appear to be a directory"); } @Test @@ -531,8 +531,8 @@ protected void validateSyncableSemantics(final FileSystem fs, final FileStatus st = fs.getFileStatus(path); if (metadataUpdatedOnHSync) { // not all stores reliably update it, HDFS/webHDFS in particular - assertEquals("Metadata not updated during write " + st, - 2, st.getLen()); + assertEquals(2, st.getLen(), + "Metadata not updated during write " + st); } // there's no way to verify durability, but we can diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java index 605ea45649a16..2760a2ffec191 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; @@ -49,9 +49,9 @@ public void testDeleteNonexistentPathRecursive() throws Throwable { Path path = path("testDeleteNonexistentPathRecursive"); assertPathDoesNotExist("leftover", path); ContractTestUtils.rejectRootOperation(path); - assertFalse("Returned true attempting to recursively delete" - + " a nonexistent path " + path, - getFileSystem().delete(path, true)); + assertFalse(getFileSystem().delete(path, true), + "Returned true attempting to recursively delete" + + " a nonexistent path " + path); } @Test @@ -59,9 +59,9 @@ public void testDeleteNonexistentPathNonRecursive() throws Throwable { Path path = path("testDeleteNonexistentPathNonRecursive"); assertPathDoesNotExist("leftover", path); ContractTestUtils.rejectRootOperation(path); - assertFalse("Returned true attempting to non recursively delete" - + " a nonexistent path " + path, - getFileSystem().delete(path, false)); + assertFalse(getFileSystem().delete(path, false), + "Returned true attempting to non recursively delete" + + " a nonexistent path " + path); } @Test diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java index e7a121b704677..880cdfb9106e4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java @@ -22,7 +22,7 @@ import org.assertj.core.api.Assertions; import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java index 9564c31725d06..b47e5ea08c452 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.UserGroupInformation; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,23 +36,20 @@ public void testEnclosingRootEquivalence() throws IOException { Path root = path("/"); Path foobar = path("/foo/bar"); - assertEquals("Ensure getEnclosingRoot on the root directory returns the root directory", - root, fs.getEnclosingRoot(foobar)); - assertEquals("Ensure getEnclosingRoot called on itself returns the root directory", - root, fs.getEnclosingRoot(fs.getEnclosingRoot(foobar))); - assertEquals( + assertEquals(root, fs.getEnclosingRoot(foobar), + "Ensure getEnclosingRoot on the root directory returns the root directory"); + assertEquals(root, fs.getEnclosingRoot(fs.getEnclosingRoot(foobar)), + "Ensure getEnclosingRoot called on itself returns the root directory"); + assertEquals(fs.getEnclosingRoot(root), fs.getEnclosingRoot(foobar), "Ensure getEnclosingRoot for different paths in the same enclosing root " - + "returns the same path", - fs.getEnclosingRoot(root), fs.getEnclosingRoot(foobar)); - assertEquals("Ensure getEnclosingRoot on a path returns the root directory", - root, fs.getEnclosingRoot(methodPath())); - assertEquals("Ensure getEnclosingRoot called on itself on a path returns the root directory", - root, fs.getEnclosingRoot(fs.getEnclosingRoot(methodPath()))); - assertEquals( + + "returns the same path"); + assertEquals(root, fs.getEnclosingRoot(methodPath()), + "Ensure getEnclosingRoot on a path returns the root directory"); + assertEquals(root, fs.getEnclosingRoot(fs.getEnclosingRoot(methodPath())), + "Ensure getEnclosingRoot called on itself on a path returns the root directory"); + assertEquals(fs.getEnclosingRoot(root), fs.getEnclosingRoot(methodPath()), "Ensure getEnclosingRoot for different paths in the same enclosing root " - + "returns the same path", - fs.getEnclosingRoot(root), - fs.getEnclosingRoot(methodPath())); + + "returns the same path"); } @@ -63,11 +60,12 @@ public void testEnclosingRootPathExists() throws Exception { Path foobar = methodPath(); fs.mkdirs(foobar); - assertEquals( - "Ensure getEnclosingRoot returns the root directory when the root directory exists", - root, fs.getEnclosingRoot(foobar)); - assertEquals("Ensure getEnclosingRoot returns the root directory when the directory exists", - root, fs.getEnclosingRoot(foobar)); + assertEquals(root, fs.getEnclosingRoot(foobar), + "Ensure getEnclosingRoot returns the root directory " + + "when the root directory exists"); + assertEquals(root, fs.getEnclosingRoot(foobar), + "Ensure getEnclosingRoot returns the root directory " + + "when the directory exists"); } @Test @@ -77,12 +75,12 @@ public void testEnclosingRootPathDNE() throws Exception { Path root = path("/"); // . 
- assertEquals( - "Ensure getEnclosingRoot returns the root directory even when the path does not exist", - root, fs.getEnclosingRoot(foobar)); - assertEquals( - "Ensure getEnclosingRoot returns the root directory even when the path does not exist", - root, fs.getEnclosingRoot(methodPath())); + assertEquals(root, fs.getEnclosingRoot(foobar), + "Ensure getEnclosingRoot returns the root directory " + + "even when the path does not exist"); + assertEquals(root, fs.getEnclosingRoot(methodPath()), + "Ensure getEnclosingRoot returns the root directory " + + "even when the path does not exist"); } @Test @@ -90,14 +88,16 @@ public void testEnclosingRootWrapped() throws Exception { FileSystem fs = getFileSystem(); Path root = path("/"); - assertEquals("Ensure getEnclosingRoot returns the root directory when the directory exists", - root, fs.getEnclosingRoot(new Path("/foo/bar"))); + assertEquals(root, fs.getEnclosingRoot(new Path("/foo/bar")), + "Ensure getEnclosingRoot returns the root directory " + + "when the directory exists"); UserGroupInformation ugi = UserGroupInformation.createRemoteUser("foo"); Path p = ugi.doAs((PrivilegedExceptionAction) () -> { FileSystem wFs = getContract().getTestFileSystem(); return wFs.getEnclosingRoot(new Path("/foo/bar")); }); - assertEquals("Ensure getEnclosingRoot works correctly within a wrapped FileSystem", root, p); + assertEquals(root, p, "Ensure getEnclosingRoot works correctly " + + "within a wrapped FileSystem"); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java index c0d9733bbb9a7..16df7acf59852 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java @@ -33,7 +33,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.RemoteIterator; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.contract.ContractTestUtils.*; import static org.apache.hadoop.test.LambdaTestUtils.intercept; @@ -55,6 +56,7 @@ public abstract class AbstractContractGetFileStatusTest extends private static final int TREE_FILES = 4; private static final int TREE_FILESIZE = 512; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -284,10 +286,9 @@ protected void checkListFilesComplexDirRecursive(TreeScanResults tree) treeWalk.assertFieldsEquivalent("files", listing, treeWalk.getFiles(), listing.getFiles()); - assertEquals("Size of status list through next() calls", - count, - toListThroughNextCallsAlone( - fs.listFiles(tree.getBasePath(), true)).size()); + assertEquals(count, toListThroughNextCallsAlone( + fs.listFiles(tree.getBasePath(), true)).size(), + "Size of status list through next() calls"); } @Test @@ -398,12 +399,12 @@ public void testListFilesFile() throws Throwable { Path f = touchf("listfilesfile"); List statusList = toList( getFileSystem().listFiles(f, false)); - assertEquals("size of file list returned", 1, statusList.size()); + assertEquals(1, statusList.size(), "size of file list returned"); assertIsNamedFile(f, statusList.get(0)); List statusList2 = toListThroughNextCallsAlone( getFileSystem().listFiles(f, 
false)); - assertEquals("size of file list returned through next() calls", - 1, statusList2.size()); + assertEquals(1, statusList2.size(), + "size of file list returned through next() calls"); assertIsNamedFile(f, statusList2.get(0)); } @@ -413,11 +414,11 @@ public void testListFilesFileRecursive() throws Throwable { Path f = touchf("listfilesRecursive"); List statusList = toList( getFileSystem().listFiles(f, true)); - assertEquals("size of file list returned", 1, statusList.size()); + assertEquals(1, statusList.size(), "size of file list returned"); assertIsNamedFile(f, statusList.get(0)); List statusList2 = toListThroughNextCallsAlone( getFileSystem().listFiles(f, true)); - assertEquals("size of file list returned", 1, statusList2.size()); + assertEquals(1, statusList2.size(), "size of file list returned"); } @Test @@ -426,12 +427,12 @@ public void testListLocatedStatusFile() throws Throwable { Path f = touchf("listLocatedStatus"); List statusList = toList( getFileSystem().listLocatedStatus(f)); - assertEquals("size of file list returned", 1, statusList.size()); + assertEquals(1, statusList.size(), "size of file list returned"); assertIsNamedFile(f, statusList.get(0)); List statusList2 = toListThroughNextCallsAlone( getFileSystem().listLocatedStatus(f)); - assertEquals("size of file list returned through next() calls", - 1, statusList2.size()); + assertEquals(1, statusList2.size(), + "size of file list returned through next() calls"); } /** @@ -451,8 +452,8 @@ private void verifyStatusArrayMatchesFile(Path f, FileStatus[] status) { * @param fileStatus status to validate */ private void assertIsNamedFile(Path f, FileStatus fileStatus) { - assertEquals("Wrong pathname in " + fileStatus, f, fileStatus.getPath()); - assertTrue("Not a file: " + fileStatus, fileStatus.isFile()); + assertEquals(f, fileStatus.getPath(), "Wrong pathname in " + fileStatus); + assertTrue(fileStatus.isFile(), "Not a file: " + fileStatus); } /** @@ -515,10 +516,10 @@ private int verifyFileStats(RemoteIterator results) count++; LocatedFileStatus next = results.next(); FileStatus fileStatus = getFileSystem().getFileStatus(next.getPath()); - assertEquals("isDirectory", fileStatus.isDirectory(), next.isDirectory()); - assertEquals("isFile", fileStatus.isFile(), next.isFile()); - assertEquals("getLen", fileStatus.getLen(), next.getLen()); - assertEquals("getOwner", fileStatus.getOwner(), next.getOwner()); + assertEquals(fileStatus.isDirectory(), next.isDirectory(), "isDirectory"); + assertEquals(fileStatus.isFile(), next.isFile(), "isFile"); + assertEquals(fileStatus.getLen(), next.getLen(), "getLen"); + assertEquals(fileStatus.getOwner(), next.getOwner(), "getOwner"); } return count; } @@ -604,9 +605,9 @@ private FileStatus[] verifyListStatus(int expected, Path path, PathFilter filter) throws IOException { FileStatus[] result = getFileSystem().listStatus(path, filter); - assertEquals("length of listStatus(" + path + ", " + filter + " ) " + - Arrays.toString(result), - expected, result.length); + assertEquals(expected, result.length, + "length of listStatus(" + path + ", " + filter + " ) " + + Arrays.toString(result)); return result; } @@ -626,8 +627,8 @@ private List verifyListLocatedStatus(ExtendedFilterFS xfs, PathFilter filter) throws IOException { RemoteIterator it = xfs.listLocatedStatus(path, filter); List result = toList(it); - assertEquals("length of listLocatedStatus(" + path + ", " + filter + " )", - expected, result.size()); + assertEquals(expected, result.size(), + "length of listLocatedStatus(" + path + ", " 
+ filter + " )"); return result; } @@ -650,8 +651,8 @@ private List verifyListLocatedStatusNextCalls( PathFilter filter) throws IOException { RemoteIterator it = xfs.listLocatedStatus(path, filter); List result = toListThroughNextCallsAlone(it); - assertEquals("length of listLocatedStatus(" + path + ", " + filter + " )", - expected, result.size()); + assertEquals(expected, result.size(), + "length of listLocatedStatus(" + path + ", " + filter + " )"); return result; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java index e99b62ae1e37f..d4a9c00a1dbb4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java @@ -22,7 +22,7 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LeaseRecoverable; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java index 65ca0ee218fd9..28200f11d3fc2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index 4c4514b249c28..d1ee94803b475 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -30,8 +30,8 @@ import java.util.concurrent.CompletableFuture; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,6 +55,7 @@ import static org.apache.hadoop.test.LambdaTestUtils.eventually; import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.apache.hadoop.util.functional.FutureIO.awaitFuture; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * Tests of multipart uploads. 
@@ -83,15 +84,16 @@ public abstract class AbstractContractMultipartUploaderTest extends private UploadHandle activeUpload; private Path activeUploadPath; + @BeforeEach @Override public void setup() throws Exception { super.setup(); final FileSystem fs = getFileSystem(); Path testPath = getContract().getTestPath(); - Assume.assumeTrue("Multipart uploader is not supported", - fs.hasPathCapability(testPath, - CommonPathCapabilities.FS_MULTIPART_UPLOADER)); + assumeTrue(fs.hasPathCapability(testPath, + CommonPathCapabilities.FS_MULTIPART_UPLOADER), + "Multipart uploader is not supported"); uploader0 = fs.createMultipartUploader(testPath).build(); uploader1 = fs.createMultipartUploader(testPath).build(); } @@ -264,8 +266,8 @@ public void testSingleUpload() throws Exception { } else { // otherwise, the same or other uploader can try again. PathHandle fd2 = complete(completer, uploadHandle, file, partHandles); - assertArrayEquals("Path handles differ", fd.toByteArray(), - fd2.toByteArray()); + assertArrayEquals(fd.toByteArray(), + fd2.toByteArray(), "Path handles differ"); } } @@ -791,9 +793,8 @@ public void testConcurrentUploads() throws Throwable { UploadHandle upload2; try { upload2 = startUpload(file); - Assume.assumeTrue( - "The Filesystem is unexpectedly supporting concurrent uploads", - concurrent); + assumeTrue(concurrent, + "The Filesystem is unexpectedly supporting concurrent uploads"); } catch (IOException e) { if (!concurrent) { // this is expected, so end the test @@ -805,7 +806,7 @@ public void testConcurrentUploads() throws Throwable { } Map partHandles2 = new HashMap<>(); - assertNotEquals("Upload handles match", upload1, upload2); + assertNotEquals(upload1, upload2, "Upload handles match"); // put part 1 partHandles1.put(partId1, putPart(file, upload1, partId1, false, payload1)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java index bd920fd03b08d..60d9267316f7e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java @@ -47,7 +47,8 @@ import static org.apache.hadoop.util.functional.FutureIO.awaitFuture; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; /** * Test Open operations. 
@@ -64,6 +65,7 @@ protected Configuration createConfiguration() {
     return conf;
   }
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     IOUtils.closeStream(instream);
@@ -84,13 +86,12 @@ public void testOpenReadZeroByteFile() throws Throwable {
 
   @Test
   public void testFsIsEncrypted() throws Exception {
-      describe("create an empty file and call FileStatus.isEncrypted()");
-      final Path path = path("file");
-      createFile(getFileSystem(), path, false, new byte[0]);
-      final FileStatus stat = getFileSystem().getFileStatus(path);
-      assertEquals("Result wrong for for isEncrypted() in " + stat,
-          areZeroByteFilesEncrypted(),
-          stat.isEncrypted());
+    describe("create an empty file and call FileStatus.isEncrypted()");
+    final Path path = path("file");
+    createFile(getFileSystem(), path, false, new byte[0]);
+    final FileStatus stat = getFileSystem().getFileStatus(path);
+    assertEquals(areZeroByteFilesEncrypted(),
+        stat.isEncrypted(), "Result wrong for isEncrypted() in " + stat);
   }
 
   /**
@@ -155,10 +156,10 @@ public void testOpenFileTwice() throws Throwable {
       int c = instream1.read();
       assertEquals(0,c);
       instream2 = getFileSystem().open(path);
-      assertEquals("first read of instream 2", 0, instream2.read());
-      assertEquals("second read of instream 1", 1, instream1.read());
+      assertEquals(0, instream2.read(), "first read of instream 2");
+      assertEquals(1, instream1.read(), "second read of instream 1");
       instream1.close();
-      assertEquals("second read of instream 2", 1, instream2.read());
+      assertEquals(1, instream2.read(), "second read of instream 2");
       //close instream1 again
       instream1.close();
     } finally {
@@ -241,8 +242,8 @@ public void testOpenFileFailExceptionally() throws Throwable {
     FutureDataInputStreamBuilder builder =
         getFileSystem().openFile(path("testOpenFileFailExceptionally"))
             .opt("fs.test.something", true);
-    assertNull("exceptional uprating",
-        builder.build().exceptionally(ex -> null).get());
+    assertNull(builder.build().exceptionally(ex -> null).get(),
+        "exceptional uprating");
   }
 
   @Test
@@ -306,9 +307,8 @@ public void testOpenFileApplyRead() throws Throwable {
         .withFileStatus(st)
         .build()
         .thenApply(ContractTestUtils::readStream);
-    assertEquals("Wrong number of bytes read value",
-        len,
-        (long) readAllBytes.get());
+    assertEquals(len, (long) readAllBytes.get(),
+        "Wrong number of bytes read value");
     // now reattempt with a new FileStatus and a different path
     // other than the final name element
     // implementations MUST use path in openFile() call
@@ -322,13 +322,12 @@ public void testOpenFileApplyRead() throws Throwable {
         st.getOwner(),
         st.getGroup(),
         new Path("gopher:///localhost:/" + path.getName()));
-    assertEquals("Wrong number of bytes read value",
-        len,
+    assertEquals(len,
         (long) fs.openFile(path)
-        .withFileStatus(st2)
-        .build()
-        .thenApply(ContractTestUtils::readStream)
-        .get());
+            .withFileStatus(st2)
+            .build()
+            .thenApply(ContractTestUtils::readStream)
+            .get(), "Wrong number of bytes read value");
   }
 
   @Test
@@ -347,8 +346,7 @@ public void testOpenFileApplyAsyncRead() throws Throwable {
           accepted.set(true);
           return ContractTestUtils.readStream(stream);
         }).get();
-    assertTrue("async accept operation not invoked",
-        accepted.get());
+    assertTrue(accepted.get(), "async accept operation not invoked");
     Assertions.assertThat(bytes)
         .describedAs("bytes read from stream")
         .isEqualTo(len);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
index 17043dca93e43..ee8c58033628d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
@@ -44,9 +44,8 @@
 import static org.apache.hadoop.test.LambdaTestUtils.interceptFuture;
 
 import org.apache.hadoop.fs.RawPathHandle;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 
 /**
  * Test {@link PathHandle} operations and semantics.
@@ -56,26 +55,26 @@
  * @see org.apache.hadoop.fs.FileSystem#open(PathHandle)
  * @see org.apache.hadoop.fs.FileSystem#open(PathHandle, int)
  */
-@RunWith(Parameterized.class)
 public abstract class AbstractContractPathHandleTest
     extends AbstractFSContractTestBase {
-  private final HandleOpt[] opts;
-  private final boolean serialized;
+  private HandleOpt[] opts;
+  private boolean serialized;
 
   private static final byte[] B1 = dataset(TEST_FILE_LEN, 43, 255);
   private static final byte[] B2 = dataset(TEST_FILE_LEN, 44, 255);
 
   /**
-   * Create an instance of the test from {@link #params()}.
-   * @param testname Name of the set of options under test
-   * @param opts Set of {@link HandleOpt} params under test.
-   * @param serialized Serialize the handle before using it.
+   * Initialize the test from {@link #params()}.
+   * @param pTestname Name of the set of options under test
+   * @param pOpts Set of {@link HandleOpt} params under test.
+   * @param pSerialized Serialize the handle before using it.
    */
-  public AbstractContractPathHandleTest(String testname, HandleOpt[] opts,
-      boolean serialized) {
-    this.opts = opts;
-    this.serialized = serialized;
+  public void initAbstractContractPathHandleTest(
+      String pTestname, HandleOpt[] pOpts,
+      boolean pSerialized) {
+    this.opts = pOpts;
+    this.serialized = pSerialized;
   }
 
   /**
@@ -83,7 +82,6 @@ public AbstractContractPathHandleTest(String testname, HandleOpt[] opts,
* @return */ - @Parameterized.Parameters(name="Test{0}") public static Collection params() { return Arrays.asList( Arrays.asList("Exact", HandleOpt.exact()), @@ -108,8 +106,11 @@ protected Configuration createConfiguration() { return conf; } - @Test - public void testIdent() throws IOException { + @MethodSource("params") + @ParameterizedTest + public void testIdent(String pTestname, HandleOpt[] pOpts, + boolean pSerialized) throws IOException { + initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized); describe("verify simple open, no changes"); FileStatus stat = testFile(B1); PathHandle fd = getHandleOrSkip(stat); @@ -120,8 +121,11 @@ public void testIdent() throws IOException { } } - @Test - public void testChanged() throws IOException { + @MethodSource("params") + @ParameterizedTest + public void testChanged(String pTestname, HandleOpt[] pOpts, + boolean pSerialized) throws IOException { + initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized); describe("verify open(PathHandle, changed(*))"); assumeSupportsContentCheck(); HandleOpt.Data data = HandleOpt.getOpt(HandleOpt.Data.class, opts) @@ -143,15 +147,18 @@ public void testChanged() throws IOException { PathHandle fd = getHandleOrSkip(stat); try (FSDataInputStream in = getFileSystem().open(fd)) { - assertTrue("Failed to detect content change", data.allowChange()); + assertTrue(data.allowChange(), "Failed to detect content change"); verifyRead(in, b12, 0, b12.length); } catch (InvalidPathHandleException e) { - assertFalse("Failed to allow content change", data.allowChange()); + assertFalse(data.allowChange(), "Failed to allow content change"); } } - @Test - public void testMoved() throws IOException { + @MethodSource("params") + @ParameterizedTest + public void testMoved(String pTestname, HandleOpt[] pOpts, + boolean pSerialized) throws IOException { + initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized); describe("verify open(PathHandle, moved(*))"); assumeSupportsFileReference(); HandleOpt.Location loc = HandleOpt.getOpt(HandleOpt.Location.class, opts) @@ -164,15 +171,18 @@ public void testMoved() throws IOException { PathHandle fd = getHandleOrSkip(stat); try (FSDataInputStream in = getFileSystem().open(fd)) { - assertTrue("Failed to detect location change", loc.allowChange()); + assertTrue(loc.allowChange(), "Failed to detect location change"); verifyRead(in, B1, 0, B1.length); } catch (InvalidPathHandleException e) { - assertFalse("Failed to allow location change", loc.allowChange()); + assertFalse(loc.allowChange(), "Failed to allow location change"); } } - @Test - public void testChangedAndMoved() throws IOException { + @MethodSource("params") + @ParameterizedTest + public void testChangedAndMoved(String pTestname, HandleOpt[] pOpts, + boolean pSerialized) throws IOException { + initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized); describe("verify open(PathHandle, changed(*), moved(*))"); assumeSupportsFileReference(); assumeSupportsContentCheck(); @@ -189,15 +199,15 @@ public void testChangedAndMoved() throws IOException { byte[] b12 = Arrays.copyOf(B1, B1.length + B2.length); System.arraycopy(B2, 0, b12, B1.length, B2.length); try (FSDataInputStream in = getFileSystem().open(fd)) { - assertTrue("Failed to detect location change", loc.allowChange()); - assertTrue("Failed to detect content change", data.allowChange()); + assertTrue(loc.allowChange(), "Failed to detect location change"); + assertTrue(data.allowChange(), "Failed to detect content change"); verifyRead(in, b12, 0, 
b12.length); } catch (InvalidPathHandleException e) { if (data.allowChange()) { - assertFalse("Failed to allow location change", loc.allowChange()); + assertFalse(loc.allowChange(), "Failed to allow location change"); } if (loc.allowChange()) { - assertFalse("Failed to allow content change", data.allowChange()); + assertFalse(data.allowChange(), "Failed to allow content change"); } } } @@ -255,7 +265,8 @@ protected PathHandle getHandleOrSkip(FileStatus stat) { } - @Test + @MethodSource("params") + @ParameterizedTest public void testOpenFileApplyRead() throws Throwable { describe("use the apply sequence to read a whole file"); CompletableFuture readAllBytes = getFileSystem() @@ -264,13 +275,15 @@ public void testOpenFileApplyRead() throws Throwable { testFile(B1))) .build() .thenApply(ContractTestUtils::readStream); - assertEquals("Wrong number of bytes read value", - TEST_FILE_LEN, - (long) readAllBytes.get()); + assertEquals(TEST_FILE_LEN, + (long) readAllBytes.get(), "Wrong number of bytes read value"); } - @Test - public void testOpenFileDelete() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testOpenFileDelete(String pTestname, HandleOpt[] pOpts, + boolean pSerialized) throws Throwable { + initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized); describe("use the apply sequence to read a whole file"); FileStatus testFile = testFile(B1); PathHandle handle = getHandleOrSkip(testFile); @@ -295,8 +308,10 @@ public void testOpenFileDelete() throws Throwable { } } - @Test - public void testOpenFileLazyFail() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testOpenFileLazyFail(String pTestname, HandleOpt[] pOpts, + boolean pSerialized) throws Throwable { describe("openFile fails on a misssng file in the get() and not before"); FileStatus stat = testFile(B1); CompletableFuture readAllBytes = getFileSystem() @@ -305,9 +320,8 @@ public void testOpenFileLazyFail() throws Throwable { stat)) .build() .thenApply(ContractTestUtils::readStream); - assertEquals("Wrong number of bytes read value", - TEST_FILE_LEN, - (long) readAllBytes.get()); + assertEquals(TEST_FILE_LEN, + (long) readAllBytes.get(), "Wrong number of bytes read value"); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java index e032604b5788c..19a1ac96d0872 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java @@ -21,7 +21,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.FileNotFoundException; import java.io.IOException; @@ -43,8 +43,8 @@ public void testRenameNewFileSameDir() throws Throwable { writeDataset(getFileSystem(), renameSrc, data, data.length, 1024 * 1024, false); boolean rename = rename(renameSrc, renameTarget); - assertTrue("rename("+renameSrc+", "+ renameTarget+") returned false", - rename); + assertTrue(rename, + "rename("+renameSrc+", "+ renameTarget+") returned false"); assertListStatusFinds(getFileSystem(), renameTarget.getParent(), renameTarget); verifyFileContents(getFileSystem(), renameTarget, data); @@ -70,7 
+70,7 @@ public void testRenameNonexistentFile() throws Throwable { // at least one FS only returns false here, if that is the case // warn but continue getLogger().warn("Rename returned {} renaming a nonexistent file", renamed); - assertFalse("Renaming a missing file returned true", renamed); + assertFalse(renamed, "Renaming a missing file returned true"); } } catch (FileNotFoundException e) { if (renameReturnsFalseOnFailure) { @@ -105,9 +105,9 @@ public void testRenameFileOverExistingFile() throws Throwable { boolean renameOverwritesDest = isSupported(RENAME_OVERWRITES_DEST); boolean renameReturnsFalseOnRenameDestExists = isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS); - assertFalse(RENAME_OVERWRITES_DEST + " and " + - RENAME_RETURNS_FALSE_IF_DEST_EXISTS + " cannot be both supported", - renameOverwritesDest && renameReturnsFalseOnRenameDestExists); + assertFalse(renameOverwritesDest && renameReturnsFalseOnRenameDestExists, + RENAME_OVERWRITES_DEST + " and " + + RENAME_RETURNS_FALSE_IF_DEST_EXISTS + " cannot be both supported"); String expectedTo = "expected rename(" + srcFile + ", " + destFile + ") to "; boolean destUnchanged = true; @@ -117,11 +117,10 @@ public void testRenameFileOverExistingFile() throws Throwable { destUnchanged = !renamed; if (renameOverwritesDest) { - assertTrue(expectedTo + "overwrite destination, but got false", - renamed); + assertTrue(renamed, expectedTo + "overwrite destination, but got false"); } else if (renameReturnsFalseOnRenameDestExists) { - assertFalse(expectedTo + "be rejected with false, but destination " + - "was overwritten", renamed); + assertFalse(renamed, expectedTo + "be rejected with false, but destination " + + "was overwritten"); } else if (renamed) { String destDirLS = generateAndLogErrorListing(srcFile, destFile); getLogger().error("dest dir {}", destDirLS); @@ -133,10 +132,10 @@ public void testRenameFileOverExistingFile() throws Throwable { } catch (FileAlreadyExistsException e) { // rename(file, file2) should throw exception iff // it neither overwrites nor returns false - assertFalse(expectedTo + "overwrite destination, but got exception", - renameOverwritesDest); - assertFalse(expectedTo + "be rejected with false, but got exception", - renameReturnsFalseOnRenameDestExists); + assertFalse(renameOverwritesDest, + expectedTo + "overwrite destination, but got exception"); + assertFalse(renameReturnsFalseOnRenameDestExists, + expectedTo + "be rejected with false, but got exception"); handleExpectedException(e); } @@ -170,7 +169,7 @@ public void testRenameDirIntoExistingDir() throws Throwable { assertIsFile(destFilePath); assertIsDirectory(renamedSrc); verifyFileContents(fs, destFilePath, destData); - assertTrue("rename returned false though the contents were copied", rename); + assertTrue(rename, "rename returned false though the contents were copied"); } @Test @@ -348,7 +347,7 @@ protected void expectRenameUnderFileFails(String action, outcome = "rename raised an exception: " + e; } assertPathDoesNotExist("after " + outcome, renameTarget); - assertFalse(outcome, renamed); + assertFalse(renamed, outcome); assertPathExists(action, renameSrc); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java index 2988ebd215b8e..0c2fe326b0023 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java @@ -21,7 +21,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.assertj.core.api.Assertions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,6 +57,7 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class); public static final int OBJECTSTORE_RETRY_TIMEOUT = 30000; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -230,16 +232,16 @@ public void testSimpleRootListing() throws IOException { fs.listLocatedStatus(root)); String locatedStatusResult = join(locatedStatusList, "\n"); - assertEquals("listStatus(/) vs listLocatedStatus(/) with \n" - + "listStatus =" + listStatusResult - +" listLocatedStatus = " + locatedStatusResult, - statuses.length, locatedStatusList.size()); + assertEquals(statuses.length, + locatedStatusList.size(), "listStatus(/) vs listLocatedStatus(/) with \n" + + "listStatus =" + listStatusResult + +" listLocatedStatus = " + locatedStatusResult); List fileList = toList(fs.listFiles(root, false)); String listFilesResult = join(fileList, "\n"); - assertTrue("listStatus(/) vs listFiles(/, false) with \n" - + "listStatus = " + listStatusResult - + "listFiles = " + listFilesResult, - fileList.size() <= statuses.length); + assertTrue(fileList.size() <= statuses.length, + "listStatus(/) vs listFiles(/, false) with \n" + + "listStatus = " + listStatusResult + + "listFiles = " + listFilesResult); List statusList = (List) iteratorToList( fs.listStatusIterator(root)); Assertions.assertThat(statusList) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java index 72d0dce9ff9e7..88666ee8a95ba 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.contract; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.SafeMode; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java index d34178489c81d..801a49adba7f2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java @@ -24,7 +24,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,6 +53,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas private Path zeroByteFile; private FSDataInputStream instream; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -72,6 +75,7 @@ protected Configuration createConfiguration() { return conf; } + @AfterEach @Override public void teardown() throws Exception { IOUtils.closeStream(instream); @@ -225,8 +229,8 @@ public void testSeekAndReadPastEndOfFile() throws Throwable { //expect that seek to 0 works //go just before the end instream.seek(TEST_FILE_LEN - 2); - assertTrue("Premature EOF", instream.read() != -1); - assertTrue("Premature EOF", instream.read() != -1); + assertTrue(instream.read() != -1, "Premature EOF"); + assertTrue(instream.read() != -1, "Premature EOF"); assertMinusOne("read past end of file", instream.read()); } @@ -260,7 +264,7 @@ public void testSeekPastEndOfFileThenReseekAndRead() throws Throwable { } //now go back and try to read from a valid point in the file instream.seek(1); - assertTrue("Premature EOF", instream.read() != -1); + assertTrue(instream.read() != -1, "Premature EOF"); } /** @@ -284,13 +288,13 @@ public void testSeekBigFile() throws Throwable { //do seek 32KB ahead instream.seek(32768); - assertEquals("@32768", block[32768], (byte) instream.read()); + assertEquals(block[32768], (byte) instream.read(), "@32768"); instream.seek(40000); - assertEquals("@40000", block[40000], (byte) instream.read()); + assertEquals(block[40000], (byte) instream.read(), "@40000"); instream.seek(8191); - assertEquals("@8191", block[8191], (byte) instream.read()); + assertEquals(block[8191], (byte) instream.read(), "@8191"); instream.seek(0); - assertEquals("@0", 0, (byte) instream.read()); + assertEquals(0, (byte) instream.read(), "@0"); // try read & readFully instream.seek(0); @@ -321,10 +325,10 @@ public void testPositionedBulkReadDoesntChangePosition() throws Throwable { //have gone back assertEquals(40000, instream.getPos()); //content is the same too - assertEquals("@40000", block[40000], (byte) instream.read()); + assertEquals(block[40000], (byte) instream.read(), "@40000"); //now verify the picked up data for (int i = 0; i < 256; i++) { - assertEquals("@" + i, block[i + 128], readBuffer[i]); + assertEquals(block[i + 128], readBuffer[i], "@" + i); } } @@ -585,7 +589,7 @@ public void testReadAtExactEOF() throws Throwable { describe("read at the end of the file"); instream = getFileSystem().open(smallSeekFile); instream.seek(TEST_FILE_LEN -1); - assertTrue("read at last byte", instream.read() > 0); - assertEquals("read just past EOF", -1, instream.read()); + assertTrue(instream.read() > 0, "read at last byte"); + assertEquals(-1, instream.read(), "read just past EOF"); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java index 2cb23487fbe92..1b4d615851703 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java @@ -21,7 +21,8 @@ import java.io.FileNotFoundException; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; 
import org.slf4j.LoggerFactory; @@ -36,6 +37,7 @@ public abstract class AbstractContractSetTimesTest extends private Path testPath; private Path target; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractStreamIOStatisticsTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractStreamIOStatisticsTest.java index 89b21c497083b..a2ac82d515603 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractStreamIOStatisticsTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractStreamIOStatisticsTest.java @@ -22,8 +22,9 @@ import java.util.List; import org.assertj.core.api.Assertions; -import org.junit.AfterClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,6 +66,7 @@ public abstract class AbstractContractStreamIOStatisticsTest protected static final IOStatisticsSnapshot FILESYSTEM_IOSTATS = snapshotIOStatistics(); + @AfterEach @Override public void teardown() throws Exception { final FileSystem fs = getFileSystem(); @@ -77,7 +79,7 @@ public void teardown() throws Exception { /** * Dump the filesystem statistics after the class if contains any values. */ - @AfterClass + @AfterAll public static void dumpFileSystemIOStatistics() { if (!FILESYSTEM_IOSTATS.counters().isEmpty()) { // if there is at least one counter diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java index adaf0a910c620..ab1e78c5308e5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java @@ -18,7 +18,8 @@ package org.apache.hadoop.fs.contract; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.Arrays; @@ -37,6 +38,7 @@ public abstract class AbstractContractUnbufferTest extends AbstractFSContractTes private Path file; private byte[] fileBytes; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -115,16 +117,16 @@ public void testUnbufferMultipleReads() throws IOException { unbuffer(stream); validateFileContents(stream, TEST_FILE_LEN / 2, TEST_FILE_LEN / 2); unbuffer(stream); - assertEquals("stream should be at end of file", TEST_FILE_LEN, - stream.getPos()); + assertEquals(TEST_FILE_LEN, + stream.getPos(), "stream should be at end of file"); } } private void unbuffer(FSDataInputStream stream) throws IOException { long pos = stream.getPos(); stream.unbuffer(); - assertEquals("unbuffer unexpectedly changed the stream position", pos, - stream.getPos()); + assertEquals(pos, + stream.getPos(), "unbuffer unexpectedly changed the stream position"); } protected void validateFullFileContents(FSDataInputStream stream) @@ -136,9 +138,9 @@ protected void validateFileContents(FSDataInputStream stream, int length, int startIndex) throws IOException { byte[] streamData = new byte[length]; - 
assertEquals("failed to read expected number of bytes from " - + "stream. This may be transient", - length, stream.read(streamData)); + assertEquals(length, stream.read(streamData), + "failed to read expected number of bytes from " + + "stream. This may be transient"); byte[] validateFileBytes; if (startIndex == 0 && length == fileBytes.length) { validateFileBytes = fileBytes; @@ -146,7 +148,7 @@ protected void validateFileContents(FSDataInputStream stream, int length, validateFileBytes = Arrays.copyOfRange(fileBytes, startIndex, startIndex + length); } - assertArrayEquals("invalid file contents", validateFileBytes, streamData); + assertArrayEquals(validateFileBytes, streamData, "invalid file contents"); } protected Path getFile() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java index e32107be656b7..0de26a6376695 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; @@ -34,8 +35,9 @@ import org.assertj.core.api.Assertions; import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,8 +49,8 @@ import org.apache.hadoop.io.ElasticByteBufferPool; import org.apache.hadoop.io.WeakReferencedElasticByteBufferPool; import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.util.functional.FutureIO; -import static java.util.Arrays.asList; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_LENGTH; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY_VECTOR; @@ -58,7 +60,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.range; import static org.apache.hadoop.fs.contract.ContractTestUtils.returnBuffersToPoolPostRead; import static org.apache.hadoop.fs.contract.ContractTestUtils.validateVectoredReadResult; - import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.apache.hadoop.test.LambdaTestUtils.interceptFuture; import static org.apache.hadoop.util.functional.FutureIO.awaitFuture; @@ -68,7 +70,6 @@ * Both the original readVectored(allocator) and the readVectored(allocator, release) * operations are tested. */ -@RunWith(Parameterized.class) public abstract class AbstractContractVectoredReadTest extends AbstractFSContractTestBase { private static final Logger LOG = @@ -81,15 +82,15 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac /** * Buffer allocator for vector IO. */ - private final IntFunction allocate; + protected IntFunction allocate; /** * Buffer pool for vector IO. 
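+ * Buffers handed out by {@code allocate} are drawn from this pool and are
+ * returned to it after each read via {@code returnBuffersToPoolPostRead()}.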
*/ - private final ElasticByteBufferPool pool = + protected final ElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool(); - private final String bufferType; + protected String bufferType; /** * Path to the vector file. @@ -103,13 +104,12 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac */ private final AtomicInteger bufferReleases = new AtomicInteger(); - @Parameterized.Parameters(name = "Buffer type : {0}") public static List params() { - return asList("direct", "array"); + return Arrays.asList("direct", "array"); } - protected AbstractContractVectoredReadTest(String bufferType) { - this.bufferType = bufferType; + public void initAbstractContractVectoredReadTest(String pBufferType) { + this.bufferType = pBufferType; final boolean isDirect = !"array".equals(bufferType); this.allocate = size -> pool.getBuffer(isDirect, size); } @@ -140,6 +140,7 @@ protected ElasticByteBufferPool getPool() { return pool; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -178,8 +179,10 @@ protected FSDataInputStream openVectorFile(final FileSystem fs) throws IOExcepti .build()); } - @Test - public void testVectoredReadMultipleRanges() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testVectoredReadMultipleRanges(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = new ArrayList<>(); for (int i = 0; i < 10; i++) { FileRange fileRange = FileRange.createFileRange(i * 100, 100); @@ -200,8 +203,10 @@ public void testVectoredReadMultipleRanges() throws Exception { } } - @Test - public void testVectoredReadAndReadFully() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testVectoredReadAndReadFully(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = new ArrayList<>(); range(fileRanges, 100, 100); try (FSDataInputStream in = openVectorFile()) { @@ -216,8 +221,10 @@ public void testVectoredReadAndReadFully() throws Exception { } } - @Test - public void testVectoredReadWholeFile() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testVectoredReadWholeFile(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); describe("Read the whole file in one single vectored read"); List fileRanges = new ArrayList<>(); range(fileRanges, 0, DATASET_LEN); @@ -235,8 +242,10 @@ public void testVectoredReadWholeFile() throws Exception { * As the minimum seek value is 4*1024,none of the below ranges * will get merged. */ - @Test - public void testDisjointRanges() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testDisjointRanges(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = new ArrayList<>(); range(fileRanges, 0, 100); range(fileRanges, 4_000 + 101, 100); @@ -252,8 +261,10 @@ public void testDisjointRanges() throws Exception { * As the minimum seek value is 4*1024, all the below ranges * will get merged into one. 
*/ - @Test - public void testAllRangesMergedIntoOne() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testAllRangesMergedIntoOne(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = new ArrayList<>(); final int length = 100; range(fileRanges, 0, length); @@ -270,8 +281,10 @@ public void testAllRangesMergedIntoOne() throws Exception { * As the minimum seek value is 4*1024, the first three ranges will be * merged into and other two will remain as it is. */ - @Test - public void testSomeRangesMergedSomeUnmerged() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testSomeRangesMergedSomeUnmerged(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); FileSystem fs = getFileSystem(); List fileRanges = new ArrayList<>(); range(fileRanges, 8 * 1024, 100); @@ -295,8 +308,10 @@ public void testSomeRangesMergedSomeUnmerged() throws Exception { * Most file systems won't support overlapping ranges. * Currently, only Raw Local supports it. */ - @Test - public void testOverlappingRanges() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testOverlappingRanges(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); if (!isSupported(VECTOR_IO_OVERLAPPING_RANGES)) { verifyExceptionalVectoredRead( getSampleOverlappingRanges(), @@ -314,8 +329,10 @@ public void testOverlappingRanges() throws Exception { /** * Same ranges are special case of overlapping. */ - @Test - public void testSameRanges() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testSameRanges(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); if (!isSupported(VECTOR_IO_OVERLAPPING_RANGES)) { verifyExceptionalVectoredRead( getSampleSameRanges(), @@ -333,8 +350,10 @@ public void testSameRanges() throws Exception { /** * A null range is not permitted. */ - @Test - public void testNullRange() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testNullRange(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = new ArrayList<>(); range(fileRanges, 500, 100); fileRanges.add(null); @@ -345,15 +364,19 @@ public void testNullRange() throws Exception { /** * A null range is not permitted. 
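+ * Here it is the whole range list that is {@code null}, not just one
+ * element of it.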
*/ - @Test - public void testNullRangeList() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testNullRangeList(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); verifyExceptionalVectoredRead( null, NullPointerException.class); } - @Test - public void testSomeRandomNonOverlappingRanges() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testSomeRandomNonOverlappingRanges(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = new ArrayList<>(); range(fileRanges, 500, 100); range(fileRanges, 1000, 200); @@ -366,8 +389,10 @@ public void testSomeRandomNonOverlappingRanges() throws Exception { } } - @Test - public void testConsecutiveRanges() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testConsecutiveRanges(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = new ArrayList<>(); final int offset = 500; final int length = 2011; @@ -380,8 +405,10 @@ public void testConsecutiveRanges() throws Exception { } } - @Test - public void testEmptyRanges() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testEmptyRanges(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = new ArrayList<>(); try (FSDataInputStream in = openVectorFile()) { in.readVectored(fileRanges, allocate); @@ -400,8 +427,10 @@ public void testEmptyRanges() throws Exception { * The contract option {@link ContractOptions#VECTOR_IO_EARLY_EOF_CHECK} is used * to determine which check to perform. 
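+ * Stores with the early check fail when the read is submitted; elsewhere the
+ * error is expected to surface as the per-range futures complete (see
+ * {@code expectEOFinRead}).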
*/ - @Test - public void testEOFRanges() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testEOFRanges(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); describe("Testing reading with an offset past the end of the file"); List fileRanges = range(DATASET_LEN + 1, 100); @@ -414,8 +443,10 @@ public void testEOFRanges() throws Exception { } - @Test - public void testVectoredReadWholeFilePlusOne() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testVectoredReadWholeFilePlusOne(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); describe("Try to read whole file plus 1 byte"); List fileRanges = range(0, DATASET_LEN + 1); @@ -442,30 +473,35 @@ private void expectEOFinRead(final List fileRanges) throws Exception } } - @Test - public void testNegativeLengthRange() throws Exception { - + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testNegativeLengthRange(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); verifyExceptionalVectoredRead(range(0, -50), IllegalArgumentException.class); } - @Test - public void testNegativeOffsetRange() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testNegativeOffsetRange(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); verifyExceptionalVectoredRead(range(-1, 50), EOFException.class); } - @Test - public void testNullReleaseOperation() throws Exception { - + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testNullReleaseOperation(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); final List range = range(0, 10); - try (FSDataInputStream in = openVectorFile()) { - intercept(NullPointerException.class, () -> - in.readVectored(range, allocate, null)); + intercept(NullPointerException.class, () -> + in.readVectored(range, allocate, null)); } } - @Test - public void testNormalReadAfterVectoredRead() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testNormalReadAfterVectoredRead(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()) { in.readVectored(fileRanges, allocate); @@ -480,8 +516,10 @@ public void testNormalReadAfterVectoredRead() throws Exception { } } - @Test - public void testVectoredReadAfterNormalRead() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testVectoredReadAfterNormalRead(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()) { // read starting 200 bytes @@ -496,8 +534,10 @@ public void testVectoredReadAfterNormalRead() throws Exception { } } - @Test - public void testMultipleVectoredReads() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testMultipleVectoredReads(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges1 = createSampleNonOverlappingRanges(); List fileRanges2 = 
createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()) { @@ -515,8 +555,10 @@ public void testMultipleVectoredReads() throws Exception { * operation and then uses a separate thread pool to process the * results asynchronously. */ - @Test - public void testVectoredIOEndToEnd() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testVectoredIOEndToEnd(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List fileRanges = new ArrayList<>(); range(fileRanges, 8 * 1024, 100); range(fileRanges, 14 * 1024, 100); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java index 7b32f28507cb7..baaf3c1426ffa 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java @@ -22,20 +22,19 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; +import org.apache.hadoop.fs.extend.TestName; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Timeout; import org.junit.AssumptionViolatedException; -import org.junit.rules.TestName; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.extension.RegisterExtension; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.net.URI; -import java.util.concurrent.TimeUnit; import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; @@ -43,7 +42,8 @@ /** * This is the base class for all the contract tests. */ -public abstract class AbstractFSContractTestBase extends Assert +@Timeout(180) +public abstract class AbstractFSContractTestBase extends Assertions implements ContractOptions { private static final Logger LOG = @@ -74,16 +74,15 @@ public abstract class AbstractFSContractTestBase extends Assert */ private Path testPath; - @Rule + @RegisterExtension public TestName methodName = new TestName(); - - @BeforeClass + @BeforeAll public static void nameTestThread() { Thread.currentThread().setName("JUnit"); } - @Before + @BeforeEach public void nameThread() { Thread.currentThread().setName("JUnit-" + getMethodName()); } @@ -161,13 +160,6 @@ protected Configuration createConfiguration() { return new Configuration(); } - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = - new Timeout(getTestTimeoutMillis(), TimeUnit.MILLISECONDS); - /** * Option for tests to override the default timeout value. * @return the current test timeout @@ -181,7 +173,7 @@ protected int getTestTimeoutMillis() { * Setup: create the contract then init it. 
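+ * Note: subclasses overriding this method must re-declare {@code @BeforeEach};
+ * JUnit 5 does not run a lifecycle method whose override omits the annotation.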
* @throws Exception on any failure */ - @Before + @BeforeEach public void setup() throws Exception { Thread.currentThread().setName("setup"); LOG.debug("== Setup =="); @@ -191,15 +183,15 @@ public void setup() throws Exception { assumeEnabled(); //extract the test FS fileSystem = contract.getTestFileSystem(); - assertNotNull("null filesystem", fileSystem); + assertNotNull(fileSystem, "null filesystem"); URI fsURI = fileSystem.getUri(); LOG.info("Test filesystem = {} implemented by {}", fsURI, fileSystem); //sanity check to make sure that the test FS picked up really matches //the scheme chosen. This is to avoid defaulting back to the localFS //which would be drastic for root FS tests - assertEquals("wrong filesystem of " + fsURI, - contract.getScheme(), fsURI.getScheme()); + assertEquals(contract.getScheme(), fsURI.getScheme(), + "wrong filesystem of " + fsURI); //create the test path testPath = getContract().getTestPath(); mkdirs(testPath); @@ -210,7 +202,7 @@ public void setup() throws Exception { * Teardown. * @throws Exception on any failure */ - @After + @AfterEach public void teardown() throws Exception { Thread.currentThread().setName("teardown"); LOG.debug("== Teardown =="); @@ -360,7 +352,7 @@ protected void assertIsDirectory(Path path) throws IOException { * @throws IOException IO problems during file operations */ protected void mkdirs(Path path) throws IOException { - assertTrue("Failed to mkdir " + path, fileSystem.mkdirs(path)); + assertTrue(fileSystem.mkdirs(path), "Failed to mkdir " + path); } /** @@ -381,7 +373,7 @@ protected void assertDeleted(Path path, boolean recursive) throws * @param result read result to validate */ protected void assertMinusOne(String text, int result) { - assertEquals(text + " wrong read result " + result, -1, result); + assertEquals(-1, result, text + " wrong read result " + result); } protected boolean rename(Path src, Path dst) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java index 3cea68c221000..da82e03d112dd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.contract.localfs; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.LocalFileSystem; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractLoaded.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractLoaded.java index 3b9ea4c4a15ec..60e14e7760552 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractLoaded.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractLoaded.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.AbstractFSContractTestBase; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.net.URL; @@ -38,9 +38,9 @@ protected AbstractFSContract 
createContract(Configuration conf) { @Test public void testContractWorks() throws Throwable { String key = getContract().getConfKey(SUPPORTS_ATOMIC_RENAME); - assertNotNull("not set: " + key, getContract().getConf().get(key)); - assertTrue("not true: " + key, - getContract().isSupported(SUPPORTS_ATOMIC_RENAME, false)); + assertNotNull(getContract().getConf().get(key), "not set: " + key); + assertTrue(getContract().isSupported(SUPPORTS_ATOMIC_RENAME, false), + "not true: " + key); } @Test @@ -48,6 +48,6 @@ public void testContractResourceOnClasspath() throws Throwable { URL url = this.getClass() .getClassLoader() .getResource(LocalFSContract.CONTRACT_XML); - assertNotNull("could not find contract resource", url); + assertNotNull(url, "could not find contract resource"); } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java index 23cfcce75a2c9..dcb474fa6f050 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java @@ -23,7 +23,6 @@ import java.util.concurrent.CompletableFuture; import org.assertj.core.api.Assertions; -import org.junit.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; @@ -35,14 +34,15 @@ import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.contract.ContractTestUtils.validateVectoredReadResult; import static org.apache.hadoop.test.LambdaTestUtils.intercept; public class TestLocalFSContractVectoredRead extends AbstractContractVectoredReadTest { - public TestLocalFSContractVectoredRead(String bufferType) { - super(bufferType); + public TestLocalFSContractVectoredRead() { } @Override @@ -50,8 +50,11 @@ protected AbstractFSContract createContract(Configuration conf) { return new LocalFSContract(conf); } - @Test - public void testChecksumValidationDuringVectoredRead() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testChecksumValidationDuringVectoredRead( + String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); Path testPath = path("big_range_checksum_file"); List someRandomRanges = new ArrayList<>(); someRandomRanges.add(FileRange.createFileRange(10, 1024)); @@ -64,8 +67,11 @@ public void testChecksumValidationDuringVectoredRead() throws Exception { * Test for file size less than checksum chunk size. * {@code ChecksumFileSystem#bytesPerChecksum}. 
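+ * With the default {@code io.bytes.per.checksum} of 512 bytes, the 471-byte
+ * file below fits inside a single, partial checksum chunk.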
 */
-  @Test
-  public void testChecksumValidationDuringVectoredReadSmallFile() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testChecksumValidationDuringVectoredReadSmallFile(
+      String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     Path testPath = path("big_range_checksum_file");
     final int length = 471;
     List<FileRange> smallFileRanges = new ArrayList<>();
@@ -104,8 +110,10 @@ private void validateCheckReadException(Path testPath,
           () -> validateVectoredReadResult(ranges, datasetCorrupted, 0));
     }
   }
-  @Test
-  public void tesChecksumVectoredReadBoundaries() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testChecksumVectoredReadBoundaries(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     Path testPath = path("boundary_range_checksum_file");
     final int length = 1071;
     LocalFileSystem localFs = (LocalFileSystem) getFileSystem();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java
index cbb31ffe27a59..7ec7403e181f6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractRename.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestRawlocalContractRename extends AbstractContractRenameTest {
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/extend/TestName.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/extend/TestName.java
new file mode 100644
index 0000000000000..440726f3de424
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/extend/TestName.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.extend;
+
+import org.junit.jupiter.api.extension.BeforeEachCallback;
+import org.junit.jupiter.api.extension.ExtensionContext;
+
+/**
+ * A custom JUnit 5 extension, registered via {@code @RegisterExtension},
+ * used to obtain the method name of the currently executing test.
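+ *
+ * A sketch of typical usage, mirroring how AbstractFSContractTestBase
+ * registers the extension:
+ * <pre>{@code
+ * @RegisterExtension
+ * public TestName methodName = new TestName();
+ *
+ * @Test
+ * public void testSomething() {
+ *   assertEquals("testSomething", methodName.getMethodName());
+ * }
+ * }</pre>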
+ */ +public class TestName implements BeforeEachCallback { + + private volatile String name; + + @Override + public void beforeEach(ExtensionContext extensionContext) throws Exception { + name = extensionContext.getTestMethod().get().getName(); + } + + public String getMethodName() { + return this.name; + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/wrappedio/impl/TestWrappedIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/wrappedio/impl/TestWrappedIO.java index edbe06b8fe031..63986accb7cc8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/wrappedio/impl/TestWrappedIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/wrappedio/impl/TestWrappedIO.java @@ -29,8 +29,9 @@ import java.util.Map; import org.assertj.core.api.Assertions; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,7 +80,7 @@ public class TestWrappedIO extends AbstractFSContractTestBase { */ private DynamicWrappedStatistics statistics; - @Before + @BeforeEach public void setup() throws Exception { super.setup(); @@ -88,6 +89,7 @@ public void setup() throws Exception { statistics.iostatisticsContext_reset(); } + @AfterEach @Override public void teardown() throws Exception { super.teardown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppend.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppend.java index 5a9395757fc07..695ec80f73862 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppend.java @@ -19,20 +19,20 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractAppendTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; /** * Test append operations on the Router-based FS. 
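+ * The test cluster is created once for the whole class: {@code @BeforeAll}
+ * and {@code @AfterAll}, like the JUnit 4 annotations they replace, must be
+ * declared on static methods.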
*/ public class TestRouterHDFSContractAppend extends AbstractContractAppendTest { - @BeforeClass + @BeforeAll public static void createCluster() throws IOException { RouterHDFSContract.createCluster(); } - @AfterClass + @AfterAll public static void teardownCluster() throws IOException { RouterHDFSContract.destroyCluster(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java index 33f59f02f7af2..9f23f978e1ec4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java @@ -18,8 +18,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractAppendTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; /** * Test secure append operations on the Router-based FS. @@ -27,12 +27,12 @@ public class TestRouterHDFSContractAppendSecure extends AbstractContractAppendTest { - @BeforeClass + @BeforeAll public static void createCluster() throws Exception { RouterHDFSContract.createCluster(true); } - @AfterClass + @AfterAll public static void teardownCluster() throws IOException { RouterHDFSContract.destroyCluster(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcat.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcat.java index 96ee71894525c..76af6ff5646fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcat.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcat.java @@ -22,8 +22,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.AbstractContractConcatTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import java.io.IOException; @@ -32,14 +32,14 @@ */ public class TestRouterHDFSContractConcat extends AbstractContractConcatTest { - @BeforeClass + @BeforeAll public static void createCluster() throws IOException { RouterHDFSContract.createCluster(); // perform a simple operation on the cluster to verify it is up RouterHDFSContract.getFileSystem().getDefaultBlockSize(new Path("/")); } - @AfterClass + @AfterAll public static void teardownCluster() throws IOException { RouterHDFSContract.destroyCluster(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java index d45f639c99be1..4e9e0fff746cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java @@ -18,8 +18,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.AbstractContractConcatTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import java.io.IOException; @@ -29,14 +29,14 @@ public class TestRouterHDFSContractConcatSecure extends AbstractContractConcatTest { - @BeforeClass + @BeforeAll public static void createCluster() throws Exception { RouterHDFSContract.createCluster(true); // perform a simple operation on the cluster to verify it is up RouterHDFSContract.getFileSystem().getDefaultBlockSize(new Path("/")); } - @AfterClass + @AfterAll public static void teardownCluster() throws IOException { RouterHDFSContract.destroyCluster(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreate.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreate.java index 530b3068ef0f9..e1e6a70ebcd61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreate.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreate.java @@ -21,8 +21,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractCreateTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import java.io.IOException; @@ -31,12 +31,12 @@ */ public class TestRouterHDFSContractCreate extends AbstractContractCreateTest { - @BeforeClass + @BeforeAll public static void createCluster() throws IOException { RouterHDFSContract.createCluster(); } - @AfterClass + @AfterAll public static void teardownCluster() throws IOException { RouterHDFSContract.destroyCluster(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java index 9327c1ba86639..88e2398aa5bec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java @@ -17,8 +17,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractCreateTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import java.io.IOException; @@ -28,12 +28,12 @@ public class TestRouterHDFSContractCreateSecure extends AbstractContractCreateTest { - @BeforeClass + @BeforeAll public static void createCluster() throws Exception { RouterHDFSContract.createCluster(true); } - @AfterClass + @AfterAll public static void teardownCluster() throws IOException { RouterHDFSContract.destroyCluster(); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java index 77cb602ae0414..8ed6f65c57379 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java @@ -32,11 +32,9 @@ import org.apache.hadoop.hdfs.server.federation.metrics.RouterMBean; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; /** * Test to verify router contracts for delegation token operations. @@ -44,12 +42,12 @@ public class TestRouterHDFSContractDelegationToken extends AbstractFSContractTestBase { - @BeforeClass + @BeforeAll public static void createCluster() throws Exception { RouterHDFSContract.createCluster(false, 1, true); } - @AfterClass + @AfterAll public static void teardownCluster() throws IOException { RouterHDFSContract.destroyCluster(); } @@ -59,9 +57,6 @@ protected AbstractFSContract createContract(Configuration conf) { return new RouterHDFSContract(conf); } - @Rule - public ExpectedException exceptionRule = ExpectedException.none(); - @Test public void testRouterDelegationToken() throws Exception { RouterMBean bean = FederationTestUtils.getBean( @@ -109,7 +104,8 @@ public void testRouterDelegationToken() throws Exception { assertEquals(0, bean.getCurrentTokensCount()); // Renew a cancelled token - exceptionRule.expect(SecretManager.InvalidToken.class); - token.renew(initSecurity()); + assertThrows(SecretManager.InvalidToken.class, () -> { + token.renew(initSecurity()); + }); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelete.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelete.java index a7d488127ad19..1f6500b8c9b13 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelete.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelete.java @@ -21,8 +21,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import java.io.IOException; @@ -31,12 +31,12 @@ */ public class TestRouterHDFSContractDelete extends AbstractContractDeleteTest { - @BeforeClass + @BeforeAll public static void createCluster() throws IOException { RouterHDFSContract.createCluster(); } - @AfterClass + @AfterAll public static void teardownCluster() throws IOException { RouterHDFSContract.destroyCluster(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java
index 43af1a7900f84..90a28c043126d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java
@@ -18,8 +18,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Test secure delete operations on the Router-based FS.
@@ -27,12 +27,12 @@
 public class TestRouterHDFSContractDeleteSecure
     extends AbstractContractDeleteTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatus.java
index b06c570f70429..3ead69c23441f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatus.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 public class TestRouterHDFSContractGetFileStatus
     extends AbstractContractGetFileStatusTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java
index 5643cfaaaa960..711e91ac725f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java
@@ -18,8 +18,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Test secure get file status operations on the Router-based FS.
@@ -27,12 +27,12 @@
 public class TestRouterHDFSContractGetFileStatusSecure
     extends AbstractContractGetFileStatusTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdir.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdir.java
index 8c683163ccab6..5809409977e47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdir.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
  */
 public class TestRouterHDFSContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java
index bb1564fd879e0..f6eecade90cb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java
@@ -17,8 +17,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@
 public class TestRouterHDFSContractMkdirSecure
     extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpen.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpen.java
index 5e8826e96e4df..90680196efb9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpen.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpen.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
  */
 public class TestRouterHDFSContractOpen extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java
index 91749fd54cadb..06227383d7c1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java
@@ -17,8 +17,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -27,12 +27,12 @@
  */
 public class TestRouterHDFSContractOpenSecure extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRename.java
index a90fe0a1e4c7f..4265a9a4f58e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRename.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
  */
 public class TestRouterHDFSContractRename extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java
index 3de39fd83e7c5..ee988dbdcf911 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java
@@ -17,8 +17,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@
 public class TestRouterHDFSContractRenameSecure
     extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java
index 21f5ee7bd758d..59f9b84b1373c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 public class TestRouterHDFSContractRootDirectory
     extends AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
index f7fb59c465d04..ce580a68c9c78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
@@ -17,8 +17,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@
 public class TestRouterHDFSContractRootDirectorySecure
     extends AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeek.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeek.java
index 587704241c6af..6ab4c7f2e4e73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeek.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeek.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
  */
 public class TestRouterHDFSContractSeek extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java
index e318f0ec256f0..3046c8615cf00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java
@@ -17,8 +17,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -27,12 +27,12 @@
 */
 public class TestRouterHDFSContractSeekSecure extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimes.java
index e7d157e81b174..425e7b270a48a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimes.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 public class TestRouterHDFSContractSetTimes
     extends AbstractContractSetTimesTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java
index 69123f6dc1191..129c7e9923bad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java
@@ -17,8 +17,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@
 public class TestRouterHDFSContractSetTimesSecure
     extends AbstractContractSetTimesTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractAppend.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractAppend.java
index 40278c204b149..88ba4c5a77e23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractAppend.java
@@ -17,8 +17,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@
 public class TestRouterWebHDFSContractAppend
     extends AbstractContractAppendTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractConcat.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractConcat.java
index b82a8e10e6c79..718ba33580973 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractConcat.java
@@ -22,8 +22,8 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -33,14 +33,14 @@
 public class TestRouterWebHDFSContractConcat
     extends AbstractContractConcatTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
     // perform a simple operation on the cluster to verify it is up
     RouterWebHDFSContract.getFileSystem().getDefaultBlockSize(new Path("/"));
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractCreate.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractCreate.java
index ff1c610220f83..48eb472f957a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractCreate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractCreate.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 public class TestRouterWebHDFSContractCreate
     extends AbstractContractCreateTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractDelete.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractDelete.java
index dede65234b64e..02c0c916545f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractDelete.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 public class TestRouterWebHDFSContractDelete
     extends AbstractContractDeleteTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractMkdir.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractMkdir.java
index 9db4114be7071..ef89bd79e6329 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractMkdir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractMkdir.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
 */
 public class TestRouterWebHDFSContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractOpen.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractOpen.java
index f5517ddaaf136..1171282e1901e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractOpen.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractOpen.java
@@ -21,9 +21,9 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 */
 public class TestRouterWebHDFSContractOpen extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRename.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRename.java
index a426ae0f002f4..7aa99e2f86bcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRename.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 public class TestRouterWebHDFSContractRename
     extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java
index b1e4a05500fda..27e81e9000e59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 public class TestRouterWebHDFSContractRootDirectory
     extends AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractSeek.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractSeek.java
index 5fbbc9b1e5a31..a18311aa5b4dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractSeek.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractSeek.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
 */
 public class TestRouterWebHDFSContractSeek extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }
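[Everything from the secure concat test down to the WebHDFS seek test above is the same mechanical translation, so the mapping is worth stating once: the JUnit 4 class-lifecycle annotations move to the org.junit.jupiter.api package under new names, and the static-method requirement carries over. A minimal before/after sketch (ExampleContractTest is a hypothetical name, not a class in this patch):

    // JUnit 4 -> JUnit 5 mapping applied uniformly in the hunks above:
    //   org.junit.BeforeClass -> org.junit.jupiter.api.BeforeAll
    //   org.junit.AfterClass  -> org.junit.jupiter.api.AfterAll
    import java.io.IOException;

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;

    public class ExampleContractTest {

      @BeforeAll  // like @BeforeClass, must be static unless the class
      public static void createCluster() throws IOException {
        RouterHDFSContract.createCluster();  // opts into @TestInstance(Lifecycle.PER_CLASS)
      }

      @AfterAll
      public static void teardownCluster() throws IOException {
        RouterHDFSContract.destroyCluster();
      }
    }
]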
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestDFSWrappedIO.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestDFSWrappedIO.java
index 2b874fd532034..9536bc2d3e664 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestDFSWrappedIO.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestDFSWrappedIO.java
@@ -20,8 +20,8 @@
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
@@ -32,12 +32,12 @@
 */
 public class TestDFSWrappedIO extends TestWrappedIO {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
index 897354c1386f3..ed3cead8a2812 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
@@ -17,19 +17,19 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractAppend extends AbstractContractAppendTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractBulkDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractBulkDelete.java
index 3a851b6ff1c37..19a1e4e7277dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractBulkDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractBulkDelete.java
@@ -20,8 +20,8 @@
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractBulkDeleteTest;
@@ -37,12 +37,12 @@ protected AbstractFSContract createContract(Configuration conf) {
     return new HDFSContract(conf);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
index 05587ce7e40fa..ddb5cc2560c78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,14 +31,14 @@
 */
 public class TestHDFSContractConcat extends AbstractContractConcatTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
     // perform a simple operation on the cluster to verify it is up
     HDFSContract.getCluster().getFileSystem().getDefaultBlockSize();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
index b209bf130e2be..8c9cbaf395854 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
@@ -21,19 +21,19 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractCreate extends AbstractContractCreateTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
index 4dc4af05addac..47fbf30d42e3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
 */
 public class TestHDFSContractDelete extends AbstractContractDeleteTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
index d81d3c200fee2..81b04407e76aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
@@ -21,20 +21,20 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractGetFileStatus extends
     AbstractContractGetFileStatusTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractLeaseRecovery.java
index c3ad3c694acca..8a2a1354c9ceb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractLeaseRecovery.java
@@ -20,8 +20,8 @@
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractLeaseRecoveryTest;
@@ -32,12 +32,12 @@
 */
 public class TestHDFSContractLeaseRecovery extends AbstractContractLeaseRecoveryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
index 053429dec803f..fae15f4a3c31a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
 */
 public class TestHDFSContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
index 0efb33f5db200..72f306113fc6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
@@ -19,8 +19,8 @@
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,12 +37,12 @@ public class TestHDFSContractMultipartUploader extends
   protected static final Logger LOG =
       LoggerFactory.getLogger(TestHDFSContractMultipartUploader.class);
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
index 0d9e8103208ee..3a11f1b85b9ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
 */
 public class TestHDFSContractOpen extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
index c65a60b18b195..240bf51cc99f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
@@ -18,11 +18,10 @@
 package org.apache.hadoop.fs.contract.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -33,17 +32,15 @@ public class TestHDFSContractPathHandle
     extends AbstractContractPathHandleTest {
 
-  public TestHDFSContractPathHandle(String testname, Options.HandleOpt[] opts,
-      boolean serialized) {
-    super(testname, opts, serialized);
+  public TestHDFSContractPathHandle() {
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
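[TestHDFSContractPathHandle above loses the constructor that JUnit 4's Parameterized runner used to inject the handle options. The runner-based mechanism does not exist in Jupiter; the usual replacement is @ParameterizedTest with a provider such as @MethodSource, where arguments arrive per invocation through the test method instead of the constructor. This patch does not show how AbstractContractPathHandleTest itself was reworked, so the following is only a sketch of the general Jupiter pattern under that assumption (class, method, and option names are illustrative; requires the junit-jupiter-params artifact):

    import java.util.stream.Stream;

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    public class PathHandleOptionsTest {  // hypothetical

      // Replaces the JUnit 4 @Parameterized.Parameters factory: each element
      // becomes one invocation of every @ParameterizedTest method below.
      static Stream<String> handleOptions() {
        return Stream.of("exact", "content", "path");
      }

      @ParameterizedTest
      @MethodSource("handleOptions")
      void openFileByHandle(String option) {
        // the option arrives as a method argument, not via a constructor
      }
    }

The same reasoning applies to the TestHDFSContractVectoredRead constructor removal further below.]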
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
index 706b0cf826494..f96dfae62296b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
@@ -21,19 +21,19 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractRename extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
index fc1851db5fb08..dc9fdd7989a6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@
 public class TestHDFSContractRootDirectory extends
     AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSafeMode.java
index 13621327fe15f..319e4d552b5a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSafeMode.java
@@ -20,8 +20,8 @@
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSafeModeTest;
@@ -32,12 +32,12 @@
 */
 public class TestHDFSContractSafeMode extends AbstractContractSafeModeTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
index 259ffce824c57..b12d6aa7039a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
@@ -21,8 +21,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@
 */
 public class TestHDFSContractSeek extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
index 4899189b01477..8ae002a3525c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
@@ -21,19 +21,19 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractSetTimes extends AbstractContractSetTimesTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
index 54b8bf1c700e6..297bc53fe35bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
@@ -22,19 +22,19 @@
 import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractUnbuffer extends AbstractContractUnbufferTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java
index 374dcedcbd300..217395c972793 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java
@@ -20,8 +20,8 @@
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest;
@@ -33,16 +33,15 @@ public class TestHDFSContractVectoredRead
     extends AbstractContractVectoredReadTest {
 
-  public TestHDFSContractVectoredRead(final String bufferType) {
-    super(bufferType);
+  public TestHDFSContractVectoredRead() {
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/AbstractManifestCommitterTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/AbstractManifestCommitterTest.java
index 57c0c39ed9b7f..5be964751f25b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/AbstractManifestCommitterTest.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/AbstractManifestCommitterTest.java
@@ -36,7 +36,8 @@
 import java.util.stream.IntStream;
 
 import org.assertj.core.api.Assertions;
-import org.junit.AfterClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -309,6 +310,7 @@ protected Configuration createConfiguration() {
     return enableManifestCommitter(super.createConfiguration());
   }
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
 
@@ -444,7 +446,7 @@ protected static ThreadLeakTracker getThreadLeakTracker() {
   /**
   * Make sure there's no thread leakage.
   */
-  @AfterClass
+  @AfterAll
   public static void threadLeakage() {
     THREAD_LEAK_TRACKER.assertNoThreadLeakage();
   }
@@ -452,7 +454,7 @@ public static void threadLeakage() {
   /**
   * Dump the filesystem statistics after the class.
   */
-  @AfterClass
+  @AfterAll
   public static void dumpFileSystemIOStatistics() {
     LOG.info("Aggregate FileSystem Statistics {}",
         ioStatisticsToPrettyString(FILESYSTEM_IOSTATS));
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCleanupStage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCleanupStage.java
index c8c766a43cff3..da59dd1b11803 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCleanupStage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCleanupStage.java
@@ -20,7 +20,8 @@
 import java.util.List;
 
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.lib.output.committer.manifest.files.TaskManifest;
@@ -62,6 +63,7 @@ public class TestCleanupStage extends AbstractManifestCommitterTest {
   */
   private List<TaskManifest> manifests;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCommitTaskStage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCommitTaskStage.java
index 95de9a32eecd1..40645537d922b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCommitTaskStage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCommitTaskStage.java
@@ -22,7 +22,8 @@
 import java.net.SocketTimeoutException;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -62,6 +63,7 @@ public class TestCommitTaskStage extends AbstractManifestCommitterTest {
   public static final String TASK1_ATTEMPT1 =
       String.format("%s_%02d", TASK1, 1);
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
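[The stage tests here all restate @BeforeEach (and, further below, @AfterEach) on setup()/teardown() overrides that previously carried no annotation of their own. The reason is a behavioural difference between the frameworks: JUnit 4 invoked the superclass's annotated @Before method reflectively, so virtual dispatch still ran an un-annotated override; Jupiter excludes a lifecycle method that has been overridden without the annotation being restated, so without the added line the override would silently never execute. A sketch of the rule (class names here are illustrative):

    import org.junit.jupiter.api.BeforeEach;

    class BaseStageTest {                       // hypothetical base
      @BeforeEach
      public void setup() throws Exception {
        // shared fixture creation
      }
    }

    class ExampleStageTest extends BaseStageTest {
      @BeforeEach  // mandatory: Jupiter ignores the inherited annotation once
      @Override    // the method is overridden, so without this line setup()
      public void setup() throws Exception {    // would never run
        super.setup();
        // stage-specific fixtures
      }
    }
]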
import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore; @@ -75,6 +76,7 @@ public class TestCreateOutputDirectoriesStage extends AbstractManifestCommitterT private StageConfig stageConfig; private IOStatisticsStore iostats; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestJobThroughManifestCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestJobThroughManifestCommitter.java index 152b2c86e0f9c..bb8129884345e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestJobThroughManifestCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestJobThroughManifestCommitter.java @@ -26,9 +26,10 @@ import org.assertj.core.api.Assertions; import org.assertj.core.api.Assumptions; -import org.junit.FixMethodOrder; -import org.junit.Test; -import org.junit.runners.MethodSorters; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem; @@ -83,7 +84,7 @@ * after each test case. * The last test case MUST perform the cleanup. 
*/ -@FixMethodOrder(MethodSorters.NAME_ASCENDING) +@TestMethodOrder(MethodOrderer.MethodName.class) public class TestJobThroughManifestCommitter extends AbstractManifestCommitterTest { @@ -152,6 +153,7 @@ public class TestJobThroughManifestCommitter private static LoadedManifestData loadedManifestData; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestLoadManifestsStage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestLoadManifestsStage.java index ce20e02457a89..45e205a4b29f6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestLoadManifestsStage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestLoadManifestsStage.java @@ -23,7 +23,9 @@ import java.util.Set; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -71,6 +73,7 @@ protected int numberOfTaskAttempts() { return ManifestCommitterTestSupport.NUMBER_OF_TASK_ATTEMPTS; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -80,6 +83,7 @@ public void setup() throws Exception { .isGreaterThan(0); } + @AfterEach @Override public void teardown() throws Exception { if (entryFile != null) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestManifestCommitProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestManifestCommitProtocol.java index 3037bf33ad62f..43d0e36942b35 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestManifestCommitProtocol.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestManifestCommitProtocol.java @@ -29,8 +29,10 @@ import java.util.concurrent.TimeUnit; import org.assertj.core.api.Assertions; -import org.junit.AfterClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -228,6 +230,7 @@ protected String getMethodName() { return suitename() + "-" + super.getMethodName(); } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -236,6 +239,7 @@ public void setup() throws Exception { cleanupOutputDir(); } + @AfterEach @Override public void teardown() throws Exception { describe("teardown"); @@ -254,7 +258,7 @@ public void teardown() throws Exception { super.teardown(); } - @AfterClass + @AfterAll public static void logAggregateIOStatistics() { LOG.info("Final IOStatistics {}",
ioStatisticsToPrettyString(IOSTATISTICS)); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestRenameStageFailure.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestRenameStageFailure.java index 5c80aee5b5f98..079140127591c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestRenameStageFailure.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestRenameStageFailure.java @@ -26,8 +26,8 @@ import java.util.List; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.fs.CommonPathCapabilities; @@ -57,6 +57,7 @@ import static org.apache.hadoop.mapreduce.lib.output.committer.manifest.impl.UnreliableManifestStoreOperations.SIMULATED_FAILURE; import static org.apache.hadoop.mapreduce.lib.output.committer.manifest.stages.AbstractJobOrTaskStage.FAILED_TO_RENAME_PREFIX; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * Test renaming files with fault injection. @@ -103,6 +104,7 @@ protected boolean isEtagsSupported() { return etagsSupported; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -269,8 +271,8 @@ public void testRenameReturnsFalse() throws Throwable { describe("commit where rename() returns false for one file." + " Expect failure to be escalated to an IOE"); - Assume.assumeTrue("not used when resilient commits are available", - !resilientCommit); + assumeTrue(!resilientCommit, + "not used when resilient commits are available"); // destination directory. 
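The Assume-to-Assumptions change in TestRenameStageFailure below also flips the argument order, a pattern repeated for assertions throughout the rest of this patch: JUnit 4 takes the message as the first parameter, JUnit 5 takes it as the last. A minimal, self-contained sketch (the class name is illustrative, not from the patch):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import org.junit.jupiter.api.Test;

class MessageOrderExample {
  @Test
  void messageIsNowTheLastArgument() {
    boolean supported = true;
    // JUnit 4: Assume.assumeTrue("not supported here", supported);
    assumeTrue(supported, "not supported here");
    // JUnit 4: assertEquals("wrong sum", 4, 2 + 2);
    assertEquals(4, 2 + 2, "wrong sum");
  }
}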
Path destDir = methodPath(); StageConfig stageConfig = createStageConfigForJob(JOB1, destDir); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestTaskManifestFileIO.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestTaskManifestFileIO.java index 3bd75e5750c75..02df50484fae6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestTaskManifestFileIO.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestTaskManifestFileIO.java @@ -21,7 +21,8 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.lib.output.committer.manifest.files.DirEntry; @@ -50,6 +51,7 @@ public class TestTaskManifestFileIO extends AbstractManifestCommitterTest { private Path taPath; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/impl/TestEntryFileIO.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/impl/TestEntryFileIO.java index 93f5050287551..77feed7b4e038 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/impl/TestEntryFileIO.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/impl/TestEntryFileIO.java @@ -26,9 +26,9 @@ import java.util.concurrent.atomic.AtomicLong; import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.slf4j.Logger; @@ -73,7 +73,7 @@ public class TestEntryFileIO extends AbstractManifestCommitterTest { /** * Create an entry file during setup. */ - @Before + @BeforeEach public void setup() throws Exception { entryFileIO = new EntryFileIO(new Configuration()); createEntryFile(); @@ -83,7 +83,7 @@ public void setup() throws Exception { * Teardown deletes any entry file. 
* @throws Exception on any failure */ - @After + @AfterEach public void teardown() throws Exception { Thread.currentThread().setName("teardown"); if (getEntryFile() != null) { diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index d210994a83450..673f88208f2a4 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -184,7 +184,7 @@ --add-opens=java.base/sun.security.x509=ALL-UNNAMED - -Xmx2048m -Xss2m -XX:+HeapDumpOnOutOfMemoryError ${extraJavaTestArgs} + -Xmx4096m -Xss2m -XX:+HeapDumpOnOutOfMemoryError ${extraJavaTestArgs} 3.0.0-M4 ${maven-surefire-plugin.version} ${maven-surefire-plugin.version} diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatusV1List.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatusV1List.java index 2f1213c65ded2..f27d7957b4ad1 100644 --- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatusV1List.java +++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatusV1List.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.aliyun.oss.Constants; import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.AfterEach; /** * Test getFileStatus and related listing operations, @@ -35,6 +36,7 @@ protected AbstractFSContract createContract(Configuration conf) { return new AliyunOSSContract(conf); } + @AfterEach @Override public void teardown() throws Exception { getLogger().info("FS details {}", getFileSystem()); diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java index d9b367440a27f..debea20c922e1 100644 --- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java +++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java @@ -24,7 +24,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.AbstractContractSeekTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/yarn/TestOSSMiniYarnCluster.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/yarn/TestOSSMiniYarnCluster.java index 439aa0c8a3c8a..5ccbbbce1d61c 100644 --- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/yarn/TestOSSMiniYarnCluster.java +++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/yarn/TestOSSMiniYarnCluster.java @@ -37,7 +37,9 @@ import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.yarn.server.MiniYARNCluster; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.EnumSet; @@ -60,6 +62,7 @@ 
protected AbstractFSContract createContract(Configuration configuration) { return new AliyunOSSContract(conf); } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -97,7 +100,7 @@ public void testWithMiniCluster() throws Exception { FileOutputFormat.setOutputPath(job, output); int exitCode = (job.waitForCompletion(true) ? 0 : 1); - assertEquals("Returned error code.", 0, exitCode); + assertEquals(0, exitCode, "Returned error code."); assertTrue(fs.exists(new Path(output, "_SUCCESS"))); String outputAsStr = readStringFromFile(new Path(output, "part-r-00000")); @@ -118,9 +121,8 @@ private Map<String, Integer> getResultAsMap(String outputAsStr) { Map<String, Integer> result = new HashMap<>(); for (String line : outputAsStr.split("\n")) { String[] tokens = line.split("\t"); - assertTrue("Not enough tokens in in string \" " - + line + "\" from output \"" + outputAsStr + "\"", - tokens.length > 1); + assertTrue(tokens.length > 1, "Not enough tokens in string \" " + + line + "\" from output \"" + outputAsStr + "\""); result.put(tokens[0], Integer.parseInt(tokens[1])); } return result; @@ -149,6 +151,7 @@ private String readStringFromFile(Path path) throws IOException { } } + @AfterEach @Override public void teardown() throws Exception { if (yarnCluster != null) { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java index 8cf182680c350..41a107c36f07a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.fs.s3a.S3ATestUtils.enableAnalyticsAccelerator; import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipForAnyEncryptionExceptSSES3; @@ -35,8 +36,13 @@ */ public class ITestS3AContractAnalyticsStreamVectoredRead extends AbstractContractVectoredReadTest { - public ITestS3AContractAnalyticsStreamVectoredRead(String bufferType) { - super(bufferType); + public ITestS3AContractAnalyticsStreamVectoredRead() { + } + + @BeforeEach + @Override + public void setup() throws Exception { + super.setup(); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java index 71c3a30359e10..48173fe230b6a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java @@ -23,9 +23,8 @@ import java.util.List; import org.assertj.core.api.Assertions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,7 +53,7 @@ /** * Contract tests for bulk delete operation for S3A Implementation.
*/ -@RunWith(Parameterized.class) + public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest { private static final Logger LOG = LoggerFactory.getLogger(ITestS3AContractBulkDelete.class); @@ -67,9 +66,8 @@ public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest { */ private static final int DELETE_PAGE_SIZE = 20; - private final boolean enableMultiObjectDelete; + private boolean enableMultiObjectDelete; - @Parameterized.Parameters(name = "enableMultiObjectDelete = {0}") public static Iterable<Object[]> enableMultiObjectDelete() { return Arrays.asList(new Object[][]{ {true}, @@ -77,8 +75,8 @@ public static Iterable<Object[]> enableMultiObjectDelete() { }); } - public ITestS3AContractBulkDelete(boolean enableMultiObjectDelete) { - this.enableMultiObjectDelete = enableMultiObjectDelete; + public void initITestS3AContractBulkDelete(boolean pEnableMultiObjectDelete) { + this.enableMultiObjectDelete = pEnableMultiObjectDelete; } @Override @@ -119,8 +117,8 @@ public void validatePageSize() throws Exception { .isEqualTo(getExpectedPageSize()); } - @Test - public void testBulkDeleteZeroPageSizePrecondition() throws Exception { + @MethodSource("enableMultiObjectDelete") + @ParameterizedTest(name = "enableMultiObjectDelete = {0}") + public void testBulkDeleteZeroPageSizePrecondition( + boolean pEnableMultiObjectDelete) throws Exception { + initITestS3AContractBulkDelete(pEnableMultiObjectDelete); if (!enableMultiObjectDelete) { // if multi-object delete is disabled, skip this test as // page size is always 1. @@ -135,8 +136,11 @@ } } - @Test - public void testPageSizeWhenMultiObjectsDisabled() throws Exception { + @MethodSource("enableMultiObjectDelete") + @ParameterizedTest(name = "enableMultiObjectDelete = {0}") + public void testPageSizeWhenMultiObjectsDisabled( + boolean pEnableMultiObjectDelete) throws Exception { + initITestS3AContractBulkDelete(pEnableMultiObjectDelete); Configuration conf = getContract().getConf(); conf.setBoolean(Constants.ENABLE_MULTI_DELETE, false); Path testPath = path(getMethodName()); @@ -165,8 +169,11 @@ public void testDeletePathsDirectory() throws Exception { assertIsDirectory(dirPath); } - @Test - public void testBulkDeleteParentDirectoryWithDirectories() throws Exception { + @MethodSource("enableMultiObjectDelete") + @ParameterizedTest(name = "enableMultiObjectDelete = {0}") + public void testBulkDeleteParentDirectoryWithDirectories( + boolean pEnableMultiObjectDelete) throws Exception { + initITestS3AContractBulkDelete(pEnableMultiObjectDelete); List<Path> paths = new ArrayList<>(); Path dirPath = new Path(basePath, "dir"); fs.mkdirs(dirPath); @@ -195,7 +202,10 @@ public void testBulkDeleteParentDirectoryWithFiles() throws Exception { } - @Test - public void testRateLimiting() throws Exception { + @MethodSource("enableMultiObjectDelete") + @ParameterizedTest(name = "enableMultiObjectDelete = {0}") + public void testRateLimiting( + boolean pEnableMultiObjectDelete) throws Exception { + initITestS3AContractBulkDelete(pEnableMultiObjectDelete); if (!enableMultiObjectDelete) { skip("Multi-object delete is disabled so hard to trigger rate limiting"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractContentSummary.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractContentSummary.java index ad83cfe52dadc..5544ab7bad68a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractContentSummary.java +++
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractContentSummary.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.contract.s3a; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractCreate.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractCreate.java index f346064d4d291..03f5611b187b3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractCreate.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractCreate.java @@ -21,8 +21,8 @@ import java.util.Arrays; import java.util.Collection; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractCreateTest; @@ -41,7 +41,6 @@ * Parameterized on the create performance flag as all overwrite * tests are required to fail in create performance mode. */ -@RunWith(Parameterized.class) public class ITestS3AContractCreate extends AbstractContractCreateTest { /** @@ -49,7 +48,6 @@ public class ITestS3AContractCreate extends AbstractContractCreateTest { * options. * @return a list of test parameters. */ - @Parameterized.Parameters public static Collection<Object[]> params() { return Arrays.asList(new Object[][]{ {false, false}, @@ -60,17 +58,17 @@ public static Collection<Object[]> params() { /** * Is this test run in create performance mode? */ - private final boolean createPerformance; + private boolean createPerformance; /** * Expect a 100-continue response? */ - private final boolean expectContinue; + private boolean expectContinue; - public ITestS3AContractCreate(final boolean createPerformance, - final boolean expectContinue) { - this.createPerformance = createPerformance; - this.expectContinue = expectContinue; + public void initITestS3AContractCreate(final boolean pCreatePerformance, + final boolean pExpectContinue) { + this.createPerformance = pCreatePerformance; + this.expectContinue = pExpectContinue; } @Override @@ -94,29 +92,31 @@ protected Configuration createConfiguration() { return conf; } - @Override - public void testOverwriteExistingFile() throws Throwable { - // Currently analytics accelerator does not support reading of files that have been overwritten. - // This is because the analytics accelerator library caches metadata, and when a file is - // overwritten, the old metadata continues to be used, until it is removed from the cache over - // time. This will be fixed in https://github.com/awslabs/analytics-accelerator-s3/issues/218.
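The ITestS3AContractCreate conversion here follows the idiom used for every parameterized suite in this patch: JUnit 5 has no direct replacement for @RunWith(Parameterized.class) constructor injection, so the constructor becomes an init method that each @ParameterizedTest invokes first. A minimal sketch of the shape (illustrative names, not from the patch):

import java.util.Arrays;
import java.util.Collection;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertTrue;

class ParameterizedMigrationExample {

  private boolean flag; // was final and assigned in the JUnit 4 constructor

  public static Collection<Object[]> params() {
    return Arrays.asList(new Object[][]{{true}, {false}});
  }

  // Replaces the JUnit 4 constructor: parameters now arrive per test method.
  public void initParameterizedMigrationExample(boolean pFlag) {
    this.flag = pFlag;
  }

  @MethodSource("params")
  @ParameterizedTest(name = "flag = {0}")
  public void testSomething(boolean pFlag) {
    initParameterizedMigrationExample(pFlag);
    assertTrue(flag || !flag); // stand-in assertion using the injected state
  }
}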
- skipIfAnalyticsAcceleratorEnabled(getContract().getConf(), - "Analytics Accelerator currently does not support reading of over written files"); - super.testOverwriteExistingFile(); - } - - @Override - public void testOverwriteNonEmptyDirectory() { + @MethodSource("params") + @ParameterizedTest + public void testOverwriteNonEmptyDirectory(boolean pCreatePerformance, + boolean pExpectContinue) throws Throwable { + initITestS3AContractCreate(pCreatePerformance, pExpectContinue); try { - super.testOverwriteNonEmptyDirectory(); - failWithCreatePerformance(); + // Currently analytics accelerator does not support reading of files that have been overwritten. + // This is because the analytics accelerator library caches metadata, and when a file is + // overwritten, the old metadata continues to be used, until it is removed from the cache over + // time. This will be fixed in https://github.com/awslabs/analytics-accelerator-s3/issues/218. + skipIfAnalyticsAcceleratorEnabled(getContract().getConf(), + "Analytics Accelerator currently does not support reading of overwritten files"); + + super.testOverwriteNonEmptyDirectory(); + failWithCreatePerformance(); } catch (AssertionError e) { swallowWithCreatePerformance(e); } } - @Override - public void testOverwriteEmptyDirectory() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testOverwriteEmptyDirectory(boolean pCreatePerformance, + boolean pExpectContinue) throws Throwable { + initITestS3AContractCreate(pCreatePerformance, pExpectContinue); try { super.testOverwriteEmptyDirectory(); failWithCreatePerformance(); @@ -125,8 +125,11 @@ } } - @Override - public void testCreateFileOverExistingFileNoOverwrite() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateFileOverExistingFileNoOverwrite(boolean pCreatePerformance, + boolean pExpectContinue) throws Throwable { + initITestS3AContractCreate(pCreatePerformance, pExpectContinue); try { super.testCreateFileOverExistingFileNoOverwrite(); failWithCreatePerformance(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java index f91479abe86e9..1d35439d4fee2 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java @@ -66,17 +66,16 @@ protected S3AContract createContract(Configuration conf) { public void testDistCpWithIterator() throws Exception { final long renames = getRenameOperationCount(); super.testDistCpWithIterator(); - assertEquals("Expected no renames for a direct write distcp", - getRenameOperationCount(), - renames); + assertEquals(getRenameOperationCount(), + renames, "Expected no renames for a direct write distcp"); } @Override public void testNonDirectWrite() throws Exception { final long renames = getRenameOperationCount(); super.testNonDirectWrite(); - assertEquals("Expected 2 renames for a non-direct write distcp", 2L, - getRenameOperationCount() - renames); + assertEquals(2L, getRenameOperationCount() - renames, + "Expected 2 renames for a non-direct write distcp"); } @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractGetFileStatus.java
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractGetFileStatus.java index a68b0ea12af57..c1f0056bcca60 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractGetFileStatus.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractGetFileStatus.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.S3ATestConstants; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.junit.jupiter.api.AfterEach; /** * S3A contract tests covering getFileStatus. @@ -37,6 +38,7 @@ protected AbstractFSContract createContract(Configuration conf) { return new S3AContract(conf); } + @AfterEach @Override public void teardown() throws Exception { getLogger().info("FS details {}", getFileSystem()); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdirWithCreatePerf.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdirWithCreatePerf.java index 45dfc391b001d..f36094e4283ed 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdirWithCreatePerf.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdirWithCreatePerf.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.contract.s3a; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -61,7 +61,7 @@ public void testMkdirOverParentFile() throws Throwable { createFile(getFileSystem(), path, false, dataset); Path child = new Path(path, "child-to-mkdir"); boolean childCreated = fs.mkdirs(child); - assertTrue("Child dir is created", childCreated); + assertTrue(childCreated, "Child dir is created"); assertIsFile(path); byte[] bytes = ContractTestUtils.readDataset(getFileSystem(), path, dataset.length); ContractTestUtils.compareByteArrays(dataset, bytes, dataset.length); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java index 9a945ad0ee710..550a1caacd02a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; import static org.apache.hadoop.fs.s3a.S3ATestConstants.DEFAULT_SCALE_TESTS_ENABLED; @@ -91,6 +92,7 @@ protected boolean finalizeConsumesUploadIdImmediately() { return false; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractOpen.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractOpen.java index 82a7a3c63b37f..cc39e2a338f60 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractOpen.java +++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractOpen.java @@ -20,7 +20,7 @@ import java.io.FileNotFoundException; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java index d3ba7373cc944..eaf10b7ed9789 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.contract.s3a; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -73,7 +73,7 @@ public void testRenameDirIntoExistingDir() throws Throwable { assertIsFile(destFilePath); boolean rename = fs.rename(srcDir, destDir); - assertFalse("s3a doesn't support rename to non-empty directory", rename); + assertFalse(rename, "s3a doesn't support rename to non-empty directory"); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java index cd5c078a9ed2b..4f999c99dfa3a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java @@ -18,7 +18,8 @@ package org.apache.hadoop.fs.contract.s3a; -import org.junit.Ignore; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,6 +39,7 @@ public class ITestS3AContractRootDir extends private static final Logger LOG = LoggerFactory.getLogger(ITestS3AContractRootDir.class); + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -55,7 +57,7 @@ public S3AFileSystem getFileSystem() { } @Override - @Ignore("S3 always return false when non-recursively remove root dir") + @Disabled("S3 always return false when non-recursively remove root dir") public void testRmNonEmptyRootDirNonRecursive() throws Throwable { } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java index 60e3d66317a4c..e8b015fafbede 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java @@ -24,10 +24,9 @@ import java.util.Arrays; import java.util.Collection; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,7 +63,6 @@ /** * S3A contract tests covering file seek. 
*/ -@RunWith(Parameterized.class) public class ITestS3AContractSeek extends AbstractContractSeekTest { private static final Logger LOG = @@ -72,8 +70,8 @@ public class ITestS3AContractSeek extends AbstractContractSeekTest { protected static final int READAHEAD = 1024; - private final String seekPolicy; - private final DelegatingSSLSocketFactory.SSLChannelMode sslChannelMode; + private String seekPolicy; + private DelegatingSSLSocketFactory.SSLChannelMode sslChannelMode; public static final int DATASET_LEN = READAHEAD * 2; @@ -84,7 +82,6 @@ public class ITestS3AContractSeek extends AbstractContractSeekTest { * which S3A Supports. * @return a list of seek policies to test. */ - @Parameterized.Parameters(name="policy={0}") public static Collection<Object[]> params() { return Arrays.asList(new Object[][]{ {FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL, Default_JSSE}, @@ -97,10 +94,11 @@ * Run the test with a chosen seek policy. * @param seekPolicy fadvise policy to use. */ - public ITestS3AContractSeek(final String seekPolicy, - final DelegatingSSLSocketFactory.SSLChannelMode sslChannelMode) { - this.seekPolicy = seekPolicy; - this.sslChannelMode = sslChannelMode; + public void initITestS3AContractSeek(final String pSeekPolicy, + final DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) { + this.seekPolicy = pSeekPolicy; + this.sslChannelMode = pSslChannelMode; + validateSSLChannelMode(); } /** @@ -136,6 +134,7 @@ protected AbstractFSContract createContract(Configuration conf) { return new S3AContract(conf); } + @AfterEach @Override public void teardown() throws Exception { super.teardown(); @@ -191,10 +190,10 @@ private void assertDatasetEquals( int length) { for (int i = 0; i < length; i++) { int o = readOffset + i; - assertEquals(operation + " with seek policy " + seekPolicy + assertEquals(DATASET[o], data[i], + operation + " with seek policy " + seekPolicy + "and read offset " + readOffset - + ": data[" + i + "] != DATASET[" + o + "]", - DATASET[o], data[i]); + + ": data[" + i + "] != DATASET[" + o + "]"); } } @@ -203,7 +202,6 @@ public S3AFileSystem getFileSystem() { return (S3AFileSystem) super.getFileSystem(); } - @Before public void validateSSLChannelMode() { if (this.sslChannelMode == OpenSSL) { assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && @@ -211,8 +209,11 @@ } - @Test - public void testReadPolicyInFS() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name="policy={0}") + public void testReadPolicyInFS(String pSeekPolicy, + DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { + initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); describe("Verify the read policy is being consistently set"); S3AFileSystem fs = getFileSystem(); assertEquals(S3AInputPolicy.getPolicy(seekPolicy, S3AInputPolicy.Normal), @@ -224,8 +225,11 @@ * This sets up a read which will span the active readahead and, * in random IO mode, a subsequent GET.
*/ - @Test - public void testReadAcrossReadahead() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name="policy={0}") + public void testReadAcrossReadahead(String pSeekPolicy, + DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { + initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); describe("Sets up a read which will span the active readahead" + " and the rest of the file."); Path path = path("testReadAcrossReadahead"); @@ -243,7 +247,7 @@ public void testReadAcrossReadahead() throws Throwable { try (FSDataInputStream in = fs.open(path)) { final byte[] temp = new byte[5]; readAtEndAndReturn(in); - assertEquals("current position", 1, (int)(in.getPos())); + assertEquals(1, (int)(in.getPos()), "current position"); in.readFully(READAHEAD, temp); assertDatasetEquals(READAHEAD, "read exactly on boundary", temp, temp.length); @@ -254,8 +258,11 @@ public void testReadAcrossReadahead() throws Throwable { * Read across the end of the read buffer using the readByte call, * which will read a single byte only. */ - @Test - public void testReadSingleByteAcrossReadahead() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name="policy={0}") + public void testReadSingleByteAcrossReadahead(String pSeekPolicy, + DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { + initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); describe("Read over boundary using read()/readByte() calls."); Path path = path("testReadSingleByteAcrossReadahead"); writeTestDataset(path); @@ -275,8 +282,11 @@ public void testReadSingleByteAcrossReadahead() throws Throwable { } } - @Test - public void testSeekToReadaheadAndRead() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name="policy={0}") + public void testSeekToReadaheadAndRead(String pSeekPolicy, + DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { + initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); describe("Seek to just before readahead limit and call" + " InputStream.read(byte[])"); Path path = path("testSeekToReadaheadAndRead"); @@ -289,14 +299,17 @@ public void testSeekToReadaheadAndRead() throws Throwable { in.seek(offset); // expect to read at least one byte. int l = in.read(temp); - assertTrue("Reading in temp data", l > 0); + assertTrue(l > 0, "Reading in temp data"); LOG.info("Read of byte array at offset {} returned {} bytes", offset, l); assertDatasetEquals(offset, "read at end of boundary", temp, l); } } - @Test - public void testSeekToReadaheadExactlyAndRead() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name="policy={0}") + public void testSeekToReadaheadExactlyAndRead(String pSeekPolicy, + DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { + initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); describe("Seek to exactly the readahead limit and call" + " InputStream.read(byte[])"); Path path = path("testSeekToReadaheadExactlyAndRead"); @@ -310,13 +323,16 @@ public void testSeekToReadaheadExactlyAndRead() throws Throwable { // expect to read at least one byte. 
int l = in.read(temp); LOG.info("Read of byte array at offset {} returned {} bytes", offset, l); - assertTrue("Reading in temp data", l > 0); + assertTrue(l > 0, "Reading in temp data"); assertDatasetEquals(offset, "read at end of boundary", temp, l); } } - @Test - public void testSeekToReadaheadExactlyAndReadByte() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name="policy={0}") + public void testSeekToReadaheadExactlyAndReadByte(String pSeekPolicy, + DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { + initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); describe("Seek to exactly the readahead limit and call" + " readByte()"); Path path = path("testSeekToReadaheadExactlyAndReadByte"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractVectoredRead.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractVectoredRead.java index bf489fc44a5ff..ef47acb459587 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractVectoredRead.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractVectoredRead.java @@ -30,7 +30,8 @@ import java.util.concurrent.TimeUnit; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -80,8 +81,7 @@ public class ITestS3AContractVectoredRead extends AbstractContractVectoredReadTe private static final Logger LOG = LoggerFactory.getLogger(ITestS3AContractVectoredRead.class); - public ITestS3AContractVectoredRead(String bufferType) { - super(bufferType); + public ITestS3AContractVectoredRead() { } @Override @@ -107,8 +107,10 @@ public void setup() throws Exception { * this test thinks the file is longer than it is, so the call * fails in the GET request. 
*/ - @Test - public void testEOFRanges416Handling() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testEOFRanges416Handling(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); FileSystem fs = getFileSystem(); final int extendedLen = DATASET_LEN + 1024; @@ -151,8 +153,10 @@ } - @Test - public void testMinSeekAndMaxSizeConfigsPropagation() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testMinSeekAndMaxSizeConfigsPropagation(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); Configuration conf = getFileSystem().getConf(); S3ATestUtils.removeBaseAndBucketOverrides(conf, AWS_S3_VECTOR_READS_MAX_MERGED_READ_SIZE, @@ -174,8 +178,11 @@ } } - @Test - public void testMinSeekAndMaxSizeDefaultValues() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testMinSeekAndMaxSizeDefaultValues(String pBufferType) + throws Exception { + initAbstractContractVectoredReadTest(pBufferType); Configuration conf = getFileSystem().getConf(); S3ATestUtils.removeBaseAndBucketOverrides(conf, AWS_S3_VECTOR_READS_MIN_SEEK_SIZE, @@ -192,9 +199,10 @@ } } - @Test - public void testStopVectoredIoOperationsCloseStream() throws Exception { - + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testStopVectoredIoOperationsCloseStream(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List<FileRange> fileRanges = createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()){ in.readVectored(fileRanges, getAllocate()); @@ -214,9 +222,10 @@ * There's a small risk of a race condition where the unbuffer() call * is made after the vector reads have completed. */ - @Test - public void testStopVectoredIoOperationsUnbuffer() throws Exception { - + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testStopVectoredIoOperationsUnbuffer(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); List<FileRange> fileRanges = createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()){ in.readVectored(fileRanges, getAllocate()); @@ -234,9 +243,10 @@ * As the minimum seek value is 4*1024, the first three ranges will be * merged into and other two will remain as it is.
* */ - @Test - public void testNormalReadVsVectoredReadStatsCollection() throws Exception { - + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testNormalReadVsVectoredReadStatsCollection(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); try (S3AFileSystem fs = getTestFileSystemWithReadAheadDisabled()) { List<FileRange> fileRanges = new ArrayList<>(); range(fileRanges, 10 * 1024, 100); @@ -354,8 +364,10 @@ } } - @Test - public void testMultiVectoredReadStatsCollection() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "Buffer type : {0}") + public void testMultiVectoredReadStatsCollection(String pBufferType) throws Exception { + initAbstractContractVectoredReadTest(pBufferType); try (S3AFileSystem fs = getTestFileSystemWithReadAheadDisabled()) { List<FileRange> ranges1 = getConsecutiveRanges(); List<FileRange> ranges2 = getConsecutiveRanges(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java index a8f509727e4a9..2cbae974ea211 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java @@ -31,8 +31,9 @@ import org.apache.hadoop.fs.store.audit.AuditSpanSource; import org.apache.hadoop.io.IOUtils; -import org.junit.AfterClass; -import org.junit.Assume; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,6 +44,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString; import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.snapshotIOStatistics; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * An extension of the contract test base set up for S3A tests. @@ -93,6 +95,7 @@ protected AbstractFSContract createContract(Configuration conf) { return new S3AContract(conf, false); } + @BeforeEach @Override public void setup() throws Exception { Thread.currentThread().setName("setup"); @@ -110,6 +113,7 @@ public void setup() throws Exception { IOStatisticsContext.getCurrentIOStatisticsContext().reset(); } + @AfterEach @Override public void teardown() throws Exception { Thread.currentThread().setName("teardown"); @@ -125,7 +129,7 @@ public void teardown() throws Exception { /** * Dump the filesystem statistics after the class. */ - @AfterClass + @AfterAll public static void dumpFileSystemIOStatistics() { LOG.info("Aggregate FileSystem Statistics {}", ioStatisticsToPrettyString(FILESYSTEM_IOSTATS)); @@ -231,8 +235,8 @@ protected AuditSpan span(AuditSpanSource source) throws IOException { * Method to assume that S3 client side encryption is disabled on a test.
*/ public void skipIfClientSideEncryption() { - Assume.assumeTrue("Skipping test if CSE is enabled", - !getFileSystem().isCSEEnabled()); + assumeTrue(!getFileSystem().isCSEEnabled(), + "Skipping test if CSE is enabled"); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractTestS3AEncryption.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractTestS3AEncryption.java index 55cebeab8ef32..f9e45a17f400f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractTestS3AEncryption.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractTestS3AEncryption.java @@ -21,7 +21,8 @@ import java.io.IOException; import java.nio.file.AccessDeniedException; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -99,6 +100,7 @@ protected void requireEncryptedFileSystem() { * S3 throw AmazonS3Exception with status 403 AccessDenied * then it is translated into AccessDeniedException by S3AUtils.translateException(...) */ + @BeforeEach @Override public void setup() throws Exception { try { @@ -119,8 +121,8 @@ public void testEncryptionSettingPropagation() throws Throwable { S3AFileSystem fs = getFileSystem(); S3AEncryptionMethods algorithm = getEncryptionAlgorithm( fs.getBucket(), fs.getConf()); - assertEquals("Configuration has wrong encryption algorithm", - getSSEAlgorithm(), algorithm); + assertEquals(getSSEAlgorithm(), algorithm, + "Configuration has wrong encryption algorithm"); } @Test @@ -158,10 +160,10 @@ public void testEncryptionOverRename() throws Throwable { * @param secrets encryption secrets of the filesystem. */ protected void validateEncryptionSecrets(final EncryptionSecrets secrets) { - assertNotNull("No encryption secrets for filesystem", secrets); + assertNotNull(secrets, "No encryption secrets for filesystem"); S3AEncryptionMethods sseAlgorithm = getSSEAlgorithm(); - assertEquals("Filesystem has wrong encryption algorithm", - sseAlgorithm, secrets.getEncryptionMethod()); + assertEquals(sseAlgorithm, secrets.getEncryptionMethod(), + "Filesystem has wrong encryption algorithm"); } protected void validateEncryptionForFilesize(int len) throws IOException { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java index 86e9f15568155..5fd16db3f0852 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java index fcf412dac8a79..2c3db2b26daf9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestLocatedFileStatusFetcher.java @@ -21,7 +21,8 @@ import java.nio.charset.StandardCharsets; import org.assertj.core.api.Assertions; -import org.junit.Test; +import 
org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -109,6 +110,7 @@ public class ITestLocatedFileStatusFetcher extends AbstractS3ATestBase { private Configuration listConfig; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java index b0e15adacd886..18f665ecef2ce 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java @@ -27,8 +27,8 @@ import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics; import org.apache.hadoop.io.IOUtils; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.io.InputStream; @@ -51,7 +51,7 @@ public class ITestS3ABlockOutputArray extends AbstractS3ATestBase { private static byte[] dataset; - @BeforeClass + @BeforeAll public static void setupDataset() { dataset = ContractTestUtils.dataset(BLOCK_SIZE, 0, 256); } @@ -80,19 +80,21 @@ public void testRegularUpload() throws IOException { verifyUpload("regular", 1024); } - @Test(expected = IOException.class) + @Test public void testWriteAfterStreamClose() throws Throwable { - Path dest = path("testWriteAfterStreamClose"); - describe(" testWriteAfterStreamClose"); - FSDataOutputStream stream = getFileSystem().create(dest, true); - byte[] data = ContractTestUtils.dataset(16, 'a', 26); - try { - stream.write(data); - stream.close(); - stream.write(data); - } finally { - IOUtils.closeStream(stream); - } + assertThrows(IOException.class, () -> { + Path dest = path("testWriteAfterStreamClose"); + describe(" testWriteAfterStreamClose"); + FSDataOutputStream stream = getFileSystem().create(dest, true); + byte[] data = ContractTestUtils.dataset(16, 'a', 26); + try { + stream.write(data); + stream.close(); + stream.write(data); + } finally { + IOUtils.closeStream(stream); + } + }); } @Test @@ -106,10 +108,10 @@ public void testBlocksClosed() throws Throwable { stream.write(data); LOG.info("closing output stream"); stream.close(); - assertEquals("total allocated blocks in " + statistics, - 1, statistics.getBlocksAllocated()); - assertEquals("actively allocated blocks in " + statistics, - 0, statistics.getBlocksActivelyAllocated()); + assertEquals(1, statistics.getBlocksAllocated(), + "total allocated blocks in " + statistics); + assertEquals(0, statistics.getBlocksActivelyAllocated(), + "actively allocated blocks in " + statistics); LOG.info("end of test case"); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlocksize.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlocksize.java index 2f630abe576f0..865bfc7b010e0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlocksize.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlocksize.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,8 +43,8 @@ public class ITestS3ABlocksize extends AbstractS3ATestBase { public 
void testBlockSize() throws Exception { FileSystem fs = getFileSystem(); long defaultBlockSize = fs.getDefaultBlockSize(); - assertEquals("incorrect blocksize", - S3AFileSystem.DEFAULT_BLOCKSIZE, defaultBlockSize); + assertEquals(S3AFileSystem.DEFAULT_BLOCKSIZE, defaultBlockSize, + "incorrect blocksize"); long newBlockSize = defaultBlockSize * 2; fs.getConf().setLong(Constants.FS_S3A_BLOCK_SIZE, newBlockSize); @@ -52,9 +52,8 @@ public void testBlockSize() throws Exception { Path file = new Path(dir, "file"); createFile(fs, file, true, dataset(1024, 'a', 'z' - 'a')); FileStatus fileStatus = fs.getFileStatus(file); - assertEquals("Double default block size in stat(): " + fileStatus, - newBlockSize, - fileStatus.getBlockSize()); + assertEquals(newBlockSize, fileStatus.getBlockSize(), + "Double default block size in stat(): " + fileStatus); // check the listing & assert that the block size is picked up by // this route too. @@ -64,20 +63,18 @@ public void testBlockSize() throws Exception { LOG.info("entry: {}", stat); if (file.equals(stat.getPath())) { found = true; - assertEquals("Double default block size in ls(): " + stat, - newBlockSize, - stat.getBlockSize()); + assertEquals(newBlockSize, stat.getBlockSize(), + "Double default block size in ls(): " + stat); } } - assertTrue("Did not find " + fileStatsToString(listing, ", "), found); + assertTrue(found, "Did not find " + fileStatsToString(listing, ", ")); } @Test public void testRootFileStatusHasBlocksize() throws Throwable { FileSystem fs = getFileSystem(); FileStatus status = fs.getFileStatus(new Path("/")); - assertTrue("Invalid root blocksize", - status.getBlockSize() >= 0); + assertTrue(status.getBlockSize() >= 0, "Invalid root blocksize"); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABucketExistence.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABucketExistence.java index ce6d8a7e1ef6f..8b9a202f620e0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABucketExistence.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABucketExistence.java @@ -23,7 +23,8 @@ import java.util.UUID; import java.util.concurrent.Callable; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -69,9 +70,9 @@ public void testNoBucketProbing() throws Exception { Path root = new Path(uri); //See HADOOP-17323. - assertTrue("root path should always exist", fs.exists(root)); - assertTrue("getFileStatus on root should always return a directory", - fs.getFileStatus(root).isDirectory()); + assertTrue(fs.exists(root), "root path should always exist"); + assertTrue(fs.getFileStatus(root).isDirectory(), + "getFileStatus on root should always return a directory"); try { expectUnknownStore( @@ -91,9 +92,8 @@ public void testNoBucketProbing() throws Exception { expectUnknownStore(() -> fs.exists(src)); // now that isFile() only does a HEAD, it will get a 404 without // the no-such-bucket error. 
- assertFalse("isFile(" + src + ")" - + " was expected to complete by returning false", - fs.isFile(src)); + assertFalse(fs.isFile(src), "isFile(" + src + ")" + + " was expected to complete by returning false"); expectUnknownStore(() -> fs.isDirectory(src)); expectUnknownStore(() -> fs.mkdirs(src)); expectUnknownStore(() -> fs.delete(src)); @@ -171,9 +171,9 @@ public void testBucketProbing3() throws Exception { fs = FileSystem.get(uri, configuration); Path root = new Path(uri); - assertTrue("root path should always exist", fs.exists(root)); - assertTrue("getFileStatus on root should always return a directory", - fs.getFileStatus(root).isDirectory()); + assertTrue(fs.exists(root), "root path should always exist"); + assertTrue(fs.getFileStatus(root).isDirectory(), + "getFileStatus on root should always return a directory"); } @Test @@ -233,6 +233,7 @@ protected Configuration getConfiguration() { return configuration; } + @AfterEach @Override public void teardown() throws Exception { IOUtils.cleanupWithLogger(getLogger(), fs); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACannedACLs.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACannedACLs.java index e8dcca6df4baa..70ba5450fabdc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACannedACLs.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACannedACLs.java @@ -28,7 +28,7 @@ import software.amazon.awssdk.services.s3.model.Permission; import software.amazon.awssdk.services.s3.model.Type; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java index 396a9f60a054d..96470b70d1489 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java @@ -27,7 +27,7 @@ import java.util.Map; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import org.apache.hadoop.conf.Configuration; @@ -179,26 +179,25 @@ KEY_SCALE_TESTS_ENABLED, getTestPropertyBool( try (FSDataInputStream in = fs.open(filePath)) { // Verify random IO. in.seek(BIG_FILE_SIZE - 4); - assertEquals("Byte at a specific position not equal to actual byte", - offsetSeek, in.read()); + assertEquals(offsetSeek, in.read(), + "Byte at a specific position not equal to actual byte"); in.seek(0); - assertEquals("Byte at a specific position not equal to actual byte", - 'a', in.read()); + assertEquals('a', in.read(), + "Byte at a specific position not equal to actual byte"); // Verify seek-read between two multipart blocks. 
in.seek(MULTIPART_MIN_SIZE - 1); int byteBeforeBlockEnd = fileContent[MULTIPART_MIN_SIZE]; - assertEquals("Byte before multipart block end mismatch", - byteBeforeBlockEnd - 1, in.read()); - assertEquals("Byte at multipart end mismatch", - byteBeforeBlockEnd, in.read()); - assertEquals("Byte after multipart end mismatch", - byteBeforeBlockEnd + 1, in.read()); + assertEquals(byteBeforeBlockEnd - 1, in.read(), + "Byte before multipart block end mismatch"); + assertEquals(byteBeforeBlockEnd, in.read(), + "Byte at multipart end mismatch"); + assertEquals(byteBeforeBlockEnd + 1, in.read(), + "Byte after multipart end mismatch"); // Verify end of file seek read. in.seek(BIG_FILE_SIZE + 1); - assertEquals("Byte at eof mismatch", - -1, in.read()); + assertEquals(-1, in.read(), "Byte at eof mismatch"); // Verify full read. in.readFully(0, fileContent); @@ -265,8 +264,8 @@ public void testEncryptionEnabledAndDisabledFS() throws Exception { cseDisabledFS.getFileStatus(encryptedFilePath); // Due to padding and encryption, content written and length shouldn't be // equal to what a CSE disabled FS would read. - assertNotEquals("Mismatch in content length", 1, - unEncryptedFSFileStatus.getLen()); + assertNotEquals(1, unEncryptedFSFileStatus.getLen(), + "Mismatch in content length"); Assertions.assertThat(in.read()) .describedAs("Encrypted data shouldn't be equal to actual content " + "without deciphering") diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryptionKms.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryptionKms.java index 4f1dcdfd5238b..d77c567d70235 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryptionKms.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryptionKms.java @@ -67,9 +67,10 @@ protected void assertEncrypted(Path path) throws IOException { String xAttrPrefix = "header."; // Assert KeyWrap Algo - assertEquals("Key wrap algo isn't same as expected", KMS_KEY_WRAP_ALGO, + assertEquals(KMS_KEY_WRAP_ALGO, processHeader(fsXAttrs, - xAttrPrefix + AWSHeaders.CRYPTO_KEYWRAP_ALGORITHM)); + xAttrPrefix + AWSHeaders.CRYPTO_KEYWRAP_ALGORITHM), + "Key wrap algo isn't same as expected"); // Assert content encryption algo for KMS, is present in the // materials description and KMS key ID isn't. 
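
Most of the changes in this part of the patch are this mechanical swap: JUnit 4's org.junit.Assert takes the failure message as the first argument, while JUnit Jupiter's org.junit.jupiter.api.Assertions takes it as the last. A small self-contained sketch (class name and values are illustrative, not from the patch):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

// Hypothetical example: the message moves from the first parameter
// (JUnit 4) to the last (JUnit 5); the expected-before-actual ordering
// of the values themselves is unchanged.
public class MessageOrderExample {

  @Test
  public void testMessageIsLastArgument() {
    int expected = 4;
    int actual = 2 + 2;
    // JUnit 4: assertEquals("sum mismatch", expected, actual);
    assertEquals(expected, actual, "sum mismatch");
    // JUnit 4: assertTrue("should be positive", actual > 0);
    assertTrue(actual > 0, "should be positive");
  }
}

The case to watch in review is a leftover message-first call whose final argument happens to be a String: it can still compile against the JUnit 5 overloads with the operands silently misassigned, which is why these flips are done exhaustively rather than left to the compiler.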
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java index 327b0fab288f7..2f8eef5ac68d2 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClosedFS.java @@ -22,8 +22,10 @@ import java.util.Set; import org.assertj.core.api.Assertions; -import org.junit.AfterClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.Path; @@ -39,6 +41,7 @@ public class ITestS3AClosedFS extends AbstractS3ATestBase { private Path root = new Path("/"); + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -46,6 +49,7 @@ public void setup() throws Exception { getFileSystem().close(); } + @AfterEach @Override public void teardown() { // no op, as the FS is closed @@ -54,7 +58,7 @@ public void teardown() { private static final Set THREAD_SET = listInitialThreadsForLifecycleChecks(); - @AfterClass + @AfterAll public static void checkForThreadLeakage() { Assertions.assertThat(getCurrentThreadNames()) .describedAs("The threads at the end of the test run") diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContentEncoding.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContentEncoding.java index ed86143100ac3..48847892aa168 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContentEncoding.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContentEncoding.java @@ -22,7 +22,7 @@ import java.util.Map; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContractGetFileStatusV1List.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContractGetFileStatusV1List.java index e9329bb418228..06521e97526d1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContractGetFileStatusV1List.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContractGetFileStatusV1List.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.s3a.S3AContract; +import org.junit.jupiter.api.AfterEach; import static org.apache.hadoop.fs.s3a.Constants.LIST_VERSION; import static org.apache.hadoop.fs.s3a.S3ATestConstants.KEY_LIST_V1_ENABLED; @@ -41,6 +42,7 @@ protected AbstractFSContract createContract(Configuration conf) { return new S3AContract(conf); } + @AfterEach @Override public void teardown() throws Exception { getLogger().info("FS details {}", getFileSystem());
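
The ITestS3AClosedFS and ITestS3AContractGetFileStatusV1List hunks above show the lifecycle half of the migration: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll on static methods, @Before/@After become @BeforeEach/@AfterEach, and every overridden setup()/teardown() gains an explicit annotation, since JUnit Jupiter only inherits a lifecycle method that is not overridden. A hypothetical base/subclass pair (not from the patch) showing the shape:

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

// Hypothetical sketch: JUnit 4's @Before/@BeforeClass become
// @BeforeEach/@BeforeAll, and a subclass that overrides a lifecycle
// method re-declares the annotation, as the patch does for the
// setup()/teardown() overrides.
class ExampleTestBase {
  @BeforeEach
  public void setup() throws Exception {
    // shared fixture setup
  }
}

public class ExampleTest extends ExampleTestBase {

  @BeforeEach            // re-declared on the override
  @Override
  public void setup() throws Exception {
    super.setup();       // keep the base behaviour, then specialise
  }

  @AfterAll              // JUnit 4: @AfterClass
  public static void afterAllTests() {
    // class-level cleanup; must be static unless PER_CLASS lifecycle is used
  }

  @Test
  public void testSomething() {
  }
}

Keeping the super.setup() call preserves the base fixture; the annotation on the override is what keeps the method in the lifecycle at all under Jupiter.

diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACopyFromLocalFile.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACopyFromLocalFile.java index ffc8a990ed900..2777a013e0a0f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACopyFromLocalFile.java +++ 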
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACopyFromLocalFile.java @@ -31,9 +31,8 @@ import org.apache.hadoop.fs.Path; import org.assertj.core.api.Assertions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.s3a.Constants.OPTIMIZED_COPY_FROM_LOCAL; import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching; @@ -46,23 +45,21 @@ * Parameterized on whether or not the optimized * copyFromLocalFile is enabled. */ -@RunWith(Parameterized.class) public class ITestS3ACopyFromLocalFile extends AbstractContractCopyFromLocalTest { /** * Parameterization. */ - @Parameterized.Parameters(name = "enabled={0}") public static Collection params() { return Arrays.asList(new Object[][]{ {true}, {false}, }); } - private final boolean enabled; + private boolean enabled; - public ITestS3ACopyFromLocalFile(final boolean enabled) { - this.enabled = enabled; + public void initITestS3ACopyFromLocalFile(final boolean pEnabled) { + this.enabled = pEnabled; } @Override @@ -81,8 +78,10 @@ protected AbstractFSContract createContract(Configuration conf) { return new S3AContract(conf); } - @Test - public void testOptionPropagation() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "enabled={0}") + public void testOptionPropagation(boolean pEnabled) throws Throwable { + initITestS3ACopyFromLocalFile(pEnabled); Assertions.assertThat(getFileSystem().hasPathCapability(new Path("/"), OPTIMIZED_COPY_FROM_LOCAL)) .describedAs("path capability of %s", OPTIMIZED_COPY_FROM_LOCAL) @@ -90,8 +89,10 @@ } - @Test - public void testLocalFilesOnly() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "enabled={0}") + public void testLocalFilesOnly(boolean pEnabled) throws Throwable { + initITestS3ACopyFromLocalFile(pEnabled); describe("Copying into other file systems must fail"); Path dest = fileToPath(createTempDirectory("someDir")); @@ -99,8 +100,10 @@ () -> getFileSystem().copyFromLocalFile(false, true, dest, dest)); } - @Test - public void testOnlyFromLocal() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "enabled={0}") + public void testOnlyFromLocal(boolean pEnabled) throws Throwable { + initITestS3ACopyFromLocalFile(pEnabled); describe("Copying must be from a local file system"); File source = createTempFile("someFile"); Path dest = copyFromLocal(source, true); @@ -109,8 +112,10 @@ () -> getFileSystem().copyFromLocalFile(true, true, dest, dest)); } - @Test - public void testCopyFromLocalWithNoFileScheme() throws IOException { + @MethodSource("params") + @ParameterizedTest(name = "enabled={0}") + public void testCopyFromLocalWithNoFileScheme(boolean pEnabled) throws IOException { + initITestS3ACopyFromLocalFile(pEnabled); describe("Copying from local file with no file scheme to remote s3 destination"); File source = createTempFile("tempData"); Path dest = path(getMethodName());
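
The ITestS3ACopyFromLocalFile conversion above is the template this patch uses for every @RunWith(Parameterized.class) suite: the static params() method is kept and referenced by @MethodSource, the JUnit 4 constructor becomes an init method, the parameter field loses final, and each test method receives the parameter and calls the init method first, because JUnit Jupiter injects parameters per test method rather than through the constructor. A self-contained sketch of the pattern with hypothetical names:

import java.util.Arrays;
import java.util.Collection;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertEquals;

// Hypothetical sketch, not from the patch: the JUnit 4 constructor
// becomes an init method that every @ParameterizedTest calls first.
public class ParameterizedMigrationExample {

  public static Collection<Object[]> params() {
    return Arrays.asList(new Object[][]{
        {true},
        {false},
    });
  }

  private boolean enabled;   // was final and constructor-assigned in JUnit 4

  public void initParameterizedMigrationExample(boolean pEnabled) {
    this.enabled = pEnabled;
  }

  @MethodSource("params")
  @ParameterizedTest(name = "enabled={0}")
  public void testFlagIsApplied(boolean pEnabled) {
    initParameterizedMigrationExample(pEnabled);
    assertEquals(pEnabled, enabled);
  }
}

The name attribute on @ParameterizedTest carries over the display-name pattern that JUnit 4 expressed as @Parameterized.Parameters(name = "enabled={0}"), so per-parameter runs stay distinguishable in test reports.

diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADSSEEncryptionWithDefaultS3Settings.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADSSEEncryptionWithDefaultS3Settings.java index a39490174424c..732f95702d993 100644 --- 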
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADSSEEncryptionWithDefaultS3Settings.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADSSEEncryptionWithDefaultS3Settings.java @@ -21,7 +21,8 @@ import java.io.IOException; import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -49,6 +50,7 @@ public class ITestS3ADSSEEncryptionWithDefaultS3Settings extends AbstractTestS3AEncryption { + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADelayedFNF.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADelayedFNF.java index ca9d185c3e9e1..cfdfcded4eecb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADelayedFNF.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADelayedFNF.java @@ -27,7 +27,7 @@ import org.apache.hadoop.test.LambdaTestUtils; import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.FileNotFoundException; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADeleteOnExit.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADeleteOnExit.java index 31c58de629b5f..fd66dd9c012b8 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADeleteOnExit.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ADeleteOnExit.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs.s3a; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEmptyDirectory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEmptyDirectory.java index aecfca71e2ef7..e38bc6fe749f1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEmptyDirectory.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEmptyDirectory.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; import org.apache.hadoop.fs.store.audit.AuditSpan; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; @@ -56,7 +56,7 @@ private static void assertEmptyDirectory(boolean isEmpty, S3AFileStatus s) { // Should *not* be Tristate.UNKNOWN since we request a definitive value // in getS3AFileStatus() below Tristate expected = Tristate.fromBool(isEmpty); - assertEquals(msg, expected, s.isEmptyDirectory()); + assertEquals(expected, s.isEmptyDirectory(), msg); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionAlgorithmValidation.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionAlgorithmValidation.java index 7e6aeb2eb07cf..d2e239140bc98 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionAlgorithmValidation.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionAlgorithmValidation.java @@ -27,14 +27,14 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.s3a.S3AContract; 
-import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; /** * Test whether or not encryption settings propagate by choosing an invalid * one. We expect the S3AFileSystem to fail to initialize. */ -@Ignore +@Disabled public class ITestS3AEncryptionAlgorithmValidation extends AbstractS3ATestBase { @@ -51,11 +51,11 @@ public void testEncryptionAlgorithmSetToDES() throws Throwable { contract.init(); //extract the test FS FileSystem fileSystem = contract.getTestFileSystem(); - assertNotNull("null filesystem", fileSystem); + assertNotNull(fileSystem, "null filesystem"); URI fsURI = fileSystem.getUri(); LOG.info("Test filesystem = {} implemented by {}", fsURI, fileSystem); - assertEquals("wrong filesystem of " + fsURI, - contract.getScheme(), fsURI.getScheme()); + assertEquals(contract.getScheme(), fsURI.getScheme(), + "wrong filesystem of " + fsURI); fileSystem.initialize(fsURI, conf); return fileSystem; }); @@ -69,22 +69,22 @@ public void testEncryptionAlgorithmSSECWithNoEncryptionKey() throws intercept(IllegalArgumentException.class, "The value of property " + Constants.S3_ENCRYPTION_KEY + " must not be null", () -> { - Configuration conf = super.createConfiguration(); - //SSE-C must be configured with an encryption key - conf.set(Constants.S3_ENCRYPTION_ALGORITHM, - S3AEncryptionMethods.SSE_C.getMethod()); - conf.set(Constants.S3_ENCRYPTION_KEY, null); - S3AContract contract = (S3AContract) createContract(conf); - contract.init(); - //extract the test FS - FileSystem fileSystem = contract.getTestFileSystem(); - assertNotNull("null filesystem", fileSystem); - URI fsURI = fileSystem.getUri(); - LOG.info("Test filesystem = {} implemented by {}", fsURI, fileSystem); - assertEquals("wrong filesystem of " + fsURI, - contract.getScheme(), fsURI.getScheme()); - fileSystem.initialize(fsURI, conf); - return fileSystem; + Configuration conf = super.createConfiguration(); + //SSE-C must be configured with an encryption key + conf.set(Constants.S3_ENCRYPTION_ALGORITHM, + S3AEncryptionMethods.SSE_C.getMethod()); + conf.set(Constants.S3_ENCRYPTION_KEY, null); + S3AContract contract = (S3AContract) createContract(conf); + contract.init(); + //extract the test FS + FileSystem fileSystem = contract.getTestFileSystem(); + assertNotNull(fileSystem, "null filesystem"); + URI fsURI = fileSystem.getUri(); + LOG.info("Test filesystem = {} implemented by {}", fsURI, fileSystem); + assertEquals(contract.getScheme(), fsURI.getScheme(), + "wrong filesystem of " + fsURI); + fileSystem.initialize(fsURI, conf); + return fileSystem; }); } @@ -102,11 +102,11 @@ public void testEncryptionAlgorithmSSECWithBlankEncryptionKey() throws contract.init(); //extract the test FS FileSystem fileSystem = contract.getTestFileSystem(); - assertNotNull("null filesystem", fileSystem); + assertNotNull(fileSystem, "null filesystem"); URI fsURI = fileSystem.getUri(); LOG.info("Test filesystem = {} implemented by {}", fsURI, fileSystem); - assertEquals("wrong filesystem of " + fsURI, - contract.getScheme(), fsURI.getScheme()); + assertEquals(contract.getScheme(), fsURI.getScheme(), + "wrong filesystem of " + fsURI); fileSystem.initialize(fsURI, conf); return fileSystem; }); @@ -129,11 +129,11 @@ public void testEncryptionAlgorithmSSES3WithEncryptionKey() throws contract.init(); //extract the test FS FileSystem fileSystem = contract.getTestFileSystem(); - assertNotNull("null filesystem", fileSystem); + assertNotNull(fileSystem, "null filesystem"); URI fsURI = 
fileSystem.getUri(); LOG.info("Test filesystem = {} implemented by {}", fsURI, fileSystem); - assertEquals("wrong filesystem of " + fsURI, - contract.getScheme(), fsURI.getScheme()); + assertEquals(contract.getScheme(), fsURI.getScheme(), + "wrong filesystem of " + fsURI); fileSystem.initialize(fsURI, conf); return fileSystem; }); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java index 0f79881466f1e..9f7f93e5bf9b7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEC.java @@ -22,7 +22,9 @@ import java.nio.file.AccessDeniedException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -93,6 +95,7 @@ protected Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -105,6 +108,7 @@ public void setup() throws Exception { assumeStoreAwsHosted(getFileSystem()); } + @AfterEach @Override public void teardown() throws Exception { super.teardown(); @@ -272,7 +276,7 @@ public void testListStatusEncryptedDir() throws Exception { @Test public void testListStatusEncryptedFile() throws Exception { Path pathABC = new Path(methodPath(), "a/b/c/"); - assertTrue("mkdirs failed", getFileSystem().mkdirs(pathABC)); + assertTrue(getFileSystem().mkdirs(pathABC), "mkdirs failed"); Path fileToStat = new Path(pathABC, "fileToStat.txt"); writeThenReadFile(fileToStat, TEST_FILE_LEN); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSDefaultKey.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSDefaultKey.java index f35f15c1131ac..48c8aae875da7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSDefaultKey.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSDefaultKey.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.Optional; -import org.junit.Test; +import org.junit.jupiter.api.Test; import software.amazon.awssdk.services.s3.model.HeadObjectResponse; import org.apache.hadoop.conf.Configuration; @@ -31,7 +31,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset; import static org.apache.hadoop.fs.s3a.EncryptionTestUtils.validateEncryptionFileAttributes; -import static org.hamcrest.CoreMatchers.containsString; +import static org.assertj.core.api.Assertions.assertThat; /** * Concrete class that extends {@link AbstractTestS3AEncryption} @@ -58,9 +58,9 @@ protected S3AEncryptionMethods getSSEAlgorithm() { @Override protected void assertEncrypted(Path path) throws IOException { HeadObjectResponse md = getS3AInternals().getObjectMetadata(path); - assertEquals("SSE Algorithm", EncryptionTestUtils.AWS_KMS_SSE_ALGORITHM, - md.serverSideEncryptionAsString()); - assertThat(md.ssekmsKeyId(), containsString("arn:aws:kms:")); + assertEquals(EncryptionTestUtils.AWS_KMS_SSE_ALGORITHM, + md.serverSideEncryptionAsString(), "SSE Algorithm"); + 
assertThat(md.ssekmsKeyId()).contains("arn:aws:kms:"); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java index 4fc63cd4e1b18..4f3b454cc3c2b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionWithDefaultS3Settings.java @@ -21,8 +21,9 @@ import java.io.IOException; import java.util.Optional; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -55,6 +56,7 @@ public class ITestS3AEncryptionWithDefaultS3Settings extends AbstractTestS3AEncryption { + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -117,13 +119,13 @@ public void testEncryptionFileAttributes() throws Exception { @Override - @Ignore + @Disabled @Test public void testEncryptionSettingPropagation() throws Throwable { } @Override - @Ignore + @Disabled @Test public void testEncryption() throws Throwable { } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java index 62f2ffbc0df5d..ae0daf879b03c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEndpointRegion.java @@ -27,7 +27,8 @@ import java.util.concurrent.atomic.AtomicReference; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import software.amazon.awssdk.awscore.AwsExecutionAttribute; import software.amazon.awssdk.awscore.exception.AwsServiceException; import software.amazon.awssdk.core.interceptor.Context; @@ -116,6 +117,7 @@ public class ITestS3AEndpointRegion extends AbstractS3ATestBase { */ private S3AFileSystem newFS; + @AfterEach @Override public void teardown() throws Exception { closeStream(newFS); @@ -482,7 +484,6 @@ public void testCentralEndpointAndNullRegionWithCRUD() throws Throwable { describe("Access the test bucket using central endpoint and" + " null region, perform file system CRUD operations"); final Configuration conf = getConfiguration(); - assumeStoreAwsHosted(getFileSystem()); final Configuration newConf = new Configuration(conf); @@ -583,7 +584,7 @@ private void assertOpsUsingNewFs() throws IOException { .isFalse(); } - private static final class RegionInterceptor implements ExecutionInterceptor { + private final class RegionInterceptor implements ExecutionInterceptor { private final String endpoint; private final String region; private final boolean isFips; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java index b550fc5864b73..1b6d4ad4e0511 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFailureHandling.java @@ -31,7 +31,7 @@ import 
org.apache.hadoop.fs.statistics.StoreStatisticNames; import org.apache.hadoop.fs.store.audit.AuditSpan; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.ArrayList; @@ -72,8 +72,8 @@ protected Configuration createConfiguration() { * @param readResult result */ private void assertIsEOF(String operation, int readResult) { - assertEquals("Expected EOF from "+ operation - + "; got char " + (char) readResult, -1, readResult); + assertEquals(-1, readResult, "Expected EOF from "+ operation + + "; got char " + (char) readResult); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java index 3e3f8245e7c85..fe378bdfbcd87 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; import org.apache.hadoop.fs.s3a.performance.AbstractS3ACostTest; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.assertj.core.api.Assertions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -186,7 +186,7 @@ public void testCostOfGetFileStatusOnFile() throws Throwable { S3AFileStatus status = verifyInnerGetFileStatus(simpleFile, true, StatusProbeEnum.ALL, GET_FILE_STATUS_ON_FILE); - assertTrue("not a file: " + status, status.isFile()); + assertTrue(status.isFile(), "not a file: " + status); } @Test @@ -196,8 +196,8 @@ public void testCostOfGetFileStatusOnEmptyDir() throws Throwable { S3AFileStatus status = verifyInnerGetFileStatus(dir, true, StatusProbeEnum.ALL, GET_FILE_STATUS_ON_DIR_MARKER); - assertSame("not empty: " + status, Tristate.TRUE, - status.isEmptyDirectory()); + assertSame(Tristate.TRUE, + status.isEmptyDirectory(), "not empty: " + status); // but now only ask for the directories and the file check is skipped. 
verifyInnerGetFileStatus(dir, false, StatusProbeEnum.DIRECTORIES, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemIsolatedClassloader.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemIsolatedClassloader.java index 05635ca213be0..947bd8ab56085 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemIsolatedClassloader.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemIsolatedClassloader.java @@ -24,7 +24,7 @@ import java.util.function.Consumer; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java index daf5306dc399e..12a1cd7d8f63e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AIOStatisticsContext.java @@ -24,7 +24,9 @@ import java.util.concurrent.ExecutorService; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -74,6 +76,7 @@ protected Configuration createConfiguration() { return configuration; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -85,6 +88,7 @@ public void setup() throws Exception { } + @AfterEach @Override public void teardown() throws Exception { if (executor != null) { @@ -161,8 +165,8 @@ public void testS3AInputStreamIOStatisticsContext() * @return thread context */ private static IOStatisticsContext getAndResetThreadStatisticsContext() { - assertTrue("thread-level IOStatistics should be enabled by default", - IOStatisticsContext.enabled()); + assertTrue(IOStatisticsContext.enabled(), + "thread-level IOStatistics should be enabled by default"); IOStatisticsContext context = IOStatisticsContext.getCurrentIOStatisticsContext(); context.reset(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AInputStreamLeakage.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AInputStreamLeakage.java index f26a585776a21..31fe270a8b9e3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AInputStreamLeakage.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AInputStreamLeakage.java @@ -22,7 +22,8 @@ import java.time.Duration; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,6 +59,7 @@ public class ITestS3AInputStreamLeakage extends AbstractS3ATestBase { */ public static final long GC_DELAY = Duration.ofSeconds(1).toMillis(); + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java index 4ec579ce4f649..2ae28c74fe5b7 100644 --- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMetrics.java @@ -23,7 +23,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.io.InputStream; @@ -46,8 +46,8 @@ public void testMetricsRegister() MutableCounterLong fileCreated = (MutableCounterLong) fs.getInstrumentation().getRegistry() .get(Statistic.FILES_CREATED.getSymbol()); - assertEquals("Metrics system should report single file created event", - 1, fileCreated.value()); + assertEquals(1, fileCreated.value(), + "Metrics system should report single file created event"); } @Test @@ -87,8 +87,8 @@ public void testStreamStatistics() throws IOException { MutableCounterLong read = (MutableCounterLong) instrumentation.getRegistry() .get(statName); - assertEquals("Stream statistics were not merged", expectedBytesRead, - read.value()); + assertEquals(expectedBytesRead, + read.value(), "Stream statistics were not merged"); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java index 3b30a8e05dca5..7dbba293a6b70 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java @@ -22,9 +22,10 @@ import java.net.URI; import java.nio.charset.StandardCharsets; +import org.junit.jupiter.api.BeforeEach; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; @@ -58,6 +59,7 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase { private static final byte[] HELLO = "hello".getBytes(StandardCharsets.UTF_8); + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -165,7 +167,7 @@ public void testChecksumDisabled() throws Throwable { EtagChecksum checksum1 = fs.getFileChecksum(file1, 0); assertLacksPathCapabilities(fs, file1, CommonPathCapabilities.FS_CHECKSUMS); - assertNull("Checksums are being generated", checksum1); + assertNull(checksum1, "Checksums are being generated"); } /** @@ -179,10 +181,10 @@ public void testNonEmptyFileChecksums() throws Throwable { final Path file3 = mkFile("file3", HELLO); final EtagChecksum checksum1 = fs.getFileChecksum(file3, 0); - assertNotNull("file 3 checksum", checksum1); + assertNotNull(checksum1, "file 3 checksum"); final Path file4 = touchFile("file4"); final EtagChecksum checksum2 = fs.getFileChecksum(file4, 0); - assertNotEquals("checksums", checksum1, checksum2); + assertNotEquals(checksum1, checksum2, "checksums"); // overwrite createFile(fs, file4, true, "hello, world".getBytes(StandardCharsets.UTF_8)); @@ -215,7 +217,7 @@ public void testChecksumLengthPastEOF() throws Throwable { final S3AFileSystem fs = getFileSystem(); Path f = mkFile("file5", HELLO); EtagChecksum l = fs.getFileChecksum(f, HELLO.length); - assertNotNull("Null checksum", l); + assertNotNull(l, "Null checksum"); assertEquals(l, fs.getFileChecksum(f, HELLO.length * 2)); } @@ -325,10 +327,8 @@ public void testRootPathFixup() throws Throwable { */ private static T verifyTrailingSlash(String role, T o) { String s = 
o.toString(); - assertTrue(role + " lacks trailing slash " + s, - s.endsWith("/")); - assertFalse(role + " has double trailing slash " + s, - s.endsWith("//")); + assertTrue(s.endsWith("/"), role + " lacks trailing slash " + s); + assertFalse(s.endsWith("//"), role + " has double trailing slash " + s); return o; } @@ -342,8 +342,7 @@ private static T verifyTrailingSlash(String role, T o) { */ private static T verifyNoTrailingSlash(String role, T o) { String s = o.toString(); - assertFalse(role + " has trailing slash " + s, - s.endsWith("/")); + assertFalse(s.endsWith("/"), role + " has trailing slash " + s); return o; } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMultipartUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMultipartUtils.java index e0559b7c49edc..1fcc41a3bde28 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMultipartUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMultipartUtils.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.s3a; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import software.amazon.awssdk.services.s3.model.MultipartUpload; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java index 39acaa717a7f2..5707e3b0c940e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java @@ -22,8 +22,9 @@ import java.util.UUID; import org.assertj.core.api.Assertions; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,7 +69,7 @@ public class ITestS3APrefetchingCacheFiles extends AbstractS3ACostTest { private String bufferDir; - @Before + @BeforeEach public void setUp() throws Exception { super.setup(); // Sets BUFFER_DIR by calling S3ATestUtils#prepareTestConfiguration @@ -96,6 +97,7 @@ public Configuration createConfiguration() { return configuration; } + @AfterEach @Override public synchronized void teardown() throws Exception { super.teardown(); @@ -141,14 +143,14 @@ public void testCacheFileExistence() throws Throwable { Path path = new Path(tmpFile.getAbsolutePath()); FileStatus stat = localFs.getFileStatus(path); ContractTestUtils.assertIsFile(path, stat); - assertEquals("File length not matching with prefetchBlockSize", prefetchBlockSize, - stat.getLen()); - assertEquals("User permissions should be RW", FsAction.READ_WRITE, - stat.getPermission().getUserAction()); - assertEquals("Group permissions should be NONE", FsAction.NONE, - stat.getPermission().getGroupAction()); - assertEquals("Other permissions should be NONE", FsAction.NONE, - stat.getPermission().getOtherAction()); + assertEquals(prefetchBlockSize, + stat.getLen(), "File length not matching with prefetchBlockSize"); + assertEquals(FsAction.READ_WRITE, + stat.getPermission().getUserAction(), "User permissions should be RW"); + assertEquals(FsAction.NONE, + stat.getPermission().getGroupAction(), "Group permissions should be NONE"); + assertEquals(FsAction.NONE, + 
stat.getPermission().getOtherAction(), "Other permissions should be NONE"); } } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java index d894adb66c738..5e66d6911cb83 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -255,10 +255,10 @@ public void testStatusProbesAfterClosingStream() throws Throwable { S3AInputStreamStatistics inputStreamStatistics = ((S3APrefetchingInputStream) (in.getWrappedStream())).getS3AStreamStatistics(); - assertNotNull("Prefetching input IO stats should not be null", ioStats); - assertNotNull("Prefetching input stream stats should not be null", inputStreamStatistics); - assertNotEquals("Position retrieved from prefetching input stream should be greater than 0", 0, - pos); + assertNotNull(ioStats, "Prefetching input IO stats should not be null"); + assertNotNull(inputStreamStatistics, "Prefetching input stream stats should not be null"); + assertNotEquals(0, pos, + "Position retrieved from prefetching input stream should be greater than 0"); in.close(); @@ -268,21 +268,22 @@ public void testStatusProbesAfterClosingStream() throws Throwable { S3AInputStreamStatistics newInputStreamStatistics = ((S3APrefetchingInputStream) (in.getWrappedStream())).getS3AStreamStatistics(); - assertNotNull("Prefetching input IO stats should not be null", newIoStats); - assertNotNull("Prefetching input stream stats should not be null", newInputStreamStatistics); - assertNotEquals("Position retrieved from prefetching input stream should be greater than 0", 0, - newPos); + assertNotNull(newIoStats, "Prefetching input IO stats should not be null"); + assertNotNull(newInputStreamStatistics, "Prefetching input stream stats should not be null"); + assertNotEquals(0, newPos, + "Position retrieved from prefetching input stream should be greater than 0"); // compare status probes after closing of the stream with status probes done before // closing the stream - assertEquals("Position retrieved through stream before and after closing should match", pos, - newPos); - assertEquals("IO stats retrieved through stream before and after closing should match", ioStats, - newIoStats); - assertEquals("Stream stats retrieved through stream before and after closing should match", - inputStreamStatistics, newInputStreamStatistics); - - assertFalse("seekToNewSource() not supported with prefetch", in.seekToNewSource(10)); + assertEquals(pos, newPos, + "Position retrieved through stream before and after closing should match"); + assertEquals(ioStats, newIoStats, + "IO stats retrieved through stream before and after closing should match"); + assertEquals(inputStreamStatistics, newInputStreamStatistics, + "Stream stats retrieved through stream before and after closing should match"); + + assertFalse(in.seekToNewSource(10), + "seekToNewSource() not supported with prefetch"); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java index 5cc948a044dbb..50b4bcd0dd4a4 
100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java @@ -28,9 +28,8 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,12 +54,10 @@ /** * Test the prefetching input stream with LRU cache eviction on S3ACachingInputStream. */ -@RunWith(Parameterized.class) public class ITestS3APrefetchingLruEviction extends AbstractS3ACostTest { - private final String maxBlocks; + private String maxBlocks; - @Parameterized.Parameters(name = "max-blocks-{0}") public static Collection params() { return Arrays.asList(new Object[][]{ {"1"}, @@ -68,8 +65,8 @@ public static Collection params() { }); } - public ITestS3APrefetchingLruEviction(final String maxBlocks) { - this.maxBlocks = maxBlocks; + public void initITestS3APrefetchingLruEviction(final String pMaxBlocks) { + this.maxBlocks = pMaxBlocks; } private static final Logger LOG = @@ -94,8 +91,10 @@ public Configuration createConfiguration() { return conf; } - @Test - public void testSeeksWithLruEviction() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "max-blocks-{0}") + public void testSeeksWithLruEviction(String pMaxBlocks) throws Throwable { + initITestS3APrefetchingLruEviction(pMaxBlocks); IOStatistics ioStats; byte[] data = ContractTestUtils.dataset(SMALL_FILE_SIZE, 'x', 26); // Path for file which should have length > block size so S3ACachingInputStream is used diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARequesterPays.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARequesterPays.java index eadc398e61ab1..25dc421544be4 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARequesterPays.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARequesterPays.java @@ -20,7 +20,7 @@ import java.nio.file.AccessDeniedException; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AStorageClass.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AStorageClass.java index 56cb541e8233a..17b686236cc36 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AStorageClass.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AStorageClass.java @@ -24,9 +24,8 @@ import java.util.Map; import org.assertj.core.api.Assertions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -50,14 +49,12 @@ /** * Tests of storage class. */ -@RunWith(Parameterized.class) public class ITestS3AStorageClass extends AbstractS3ATestBase { /** * HADOOP-18339. 
Parameterized the test for different fast upload buffer types * to ensure the storage class configuration works with all of them. */ - @Parameterized.Parameters(name = "fast-upload-buffer-{0}") public static Collection params() { return Arrays.asList(new Object[][]{ {FAST_UPLOAD_BUFFER_DISK}, @@ -65,10 +62,10 @@ public static Collection params() { }); } - private final String fastUploadBufferType; + private String fastUploadBufferType; - public ITestS3AStorageClass(String fastUploadBufferType) { - this.fastUploadBufferType = fastUploadBufferType; + public void initITestS3AStorageClass(String pFastUploadBufferType) { + this.fastUploadBufferType = pFastUploadBufferType; } @Override @@ -86,8 +83,11 @@ protected Configuration createConfiguration() { * This test ensures the default storage class configuration (no config or null) * works well with create and copy operations */ - @Test - public void testCreateAndCopyObjectWithStorageClassDefault() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "fast-upload-buffer-{0}") + public void testCreateAndCopyObjectWithStorageClassDefault( + String pFastUploadBufferType) throws Throwable { + initITestS3AStorageClass(pFastUploadBufferType); Configuration conf = this.createConfiguration(); S3AContract contract = (S3AContract) createContract(conf); contract.init(); @@ -108,8 +108,11 @@ public void testCreateAndCopyObjectWithStorageClassDefault() throws Throwable { * Verify object can be created and copied correctly * with specified storage class */ - @Test - public void testCreateAndCopyObjectWithStorageClassReducedRedundancy() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "fast-upload-buffer-{0}") + public void testCreateAndCopyObjectWithStorageClassReducedRedundancy( + String pFastUploadBufferType) throws Throwable { + initITestS3AStorageClass(pFastUploadBufferType); Configuration conf = this.createConfiguration(); conf.set(STORAGE_CLASS, STORAGE_CLASS_REDUCED_REDUNDANCY); S3AContract contract = (S3AContract) createContract(conf); @@ -133,8 +136,11 @@ public void testCreateAndCopyObjectWithStorageClassReducedRedundancy() throws Th * Archive storage classes have different behavior * from general storage classes */ - @Test - public void testCreateAndCopyObjectWithStorageClassGlacier() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "fast-upload-buffer-{0}") + public void testCreateAndCopyObjectWithStorageClassGlacier( + String pFastUploadBufferType) throws Throwable { + initITestS3AStorageClass(pFastUploadBufferType); Configuration conf = this.createConfiguration(); conf.set(STORAGE_CLASS, STORAGE_CLASS_GLACIER); S3AContract contract = (S3AContract) createContract(conf); @@ -162,8 +168,11 @@ public void testCreateAndCopyObjectWithStorageClassGlacier() throws Throwable { * Verify object can be created and copied correctly * with completely invalid storage class */ - @Test - public void testCreateAndCopyObjectWithStorageClassInvalid() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "fast-upload-buffer-{0}") + public void testCreateAndCopyObjectWithStorageClassInvalid( + String pFastUploadBufferType) throws Throwable { + initITestS3AStorageClass(pFastUploadBufferType); Configuration conf = this.createConfiguration(); conf.set(STORAGE_CLASS, "testing"); S3AContract contract = (S3AContract) createContract(conf); @@ -187,8 +196,11 @@ public void testCreateAndCopyObjectWithStorageClassInvalid() throws Throwable { * Verify object can be created and copied correctly 
* with empty string configuration */ - @Test - public void testCreateAndCopyObjectWithStorageClassEmpty() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "fast-upload-buffer-{0}") + public void testCreateAndCopyObjectWithStorageClassEmpty( + String pFastUploadBufferType) throws Throwable { + initITestS3AStorageClass(pFastUploadBufferType); Configuration conf = this.createConfiguration(); conf.set(STORAGE_CLASS, ""); S3AContract contract = (S3AContract) createContract(conf); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java index 290a4d995c757..f8455efc0a03a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATemporaryCredentials.java @@ -25,11 +25,13 @@ import java.time.OffsetDateTime; import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import software.amazon.awssdk.services.sts.StsClient; import software.amazon.awssdk.services.sts.StsClientBuilder; import software.amazon.awssdk.services.sts.model.Credentials; import org.hamcrest.Matchers; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -81,12 +83,14 @@ public class ITestS3ATemporaryCredentials extends AbstractS3ATestBase { private AWSCredentialProviderList credentials; + @BeforeEach @Override public void setup() throws Exception { super.setup(); assumeSessionTestsEnabled(getConfiguration()); } + @AfterEach @Override public void teardown() throws Exception { S3AUtils.closeAutocloseables(LOG, credentials); @@ -224,9 +228,9 @@ public void testSessionTokenExpiry() throws Exception { long permittedExpiryOffset = 60; OffsetDateTime expirationTimestamp = sc.getExpirationDateTime().get(); OffsetDateTime localTimestamp = OffsetDateTime.now(); - assertTrue("local time of " + localTimestamp - + " is after expiry time of " + expirationTimestamp, - localTimestamp.isBefore(expirationTimestamp)); + assertTrue(localTimestamp.isBefore(expirationTimestamp), + "local time of " + localTimestamp + + " is after expiry time of " + expirationTimestamp); // what is the interval Duration actualDuration = Duration.between(localTimestamp, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java index 3a2d1b1b09a49..536212a8eb28b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUnbuffer.java @@ -30,7 +30,8 @@ import org.apache.hadoop.io.IOUtils; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.IOException; @@ -56,6 +57,7 @@ public class ITestS3AUnbuffer extends AbstractS3ATestBase { private Path dest; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -79,8 +81,8 @@ public void testUnbuffer() throws IOException { int bytesToRead = 8; readAndAssertBytesRead(inputStream, bytesToRead); assertTrue(isObjectStreamOpen(inputStream)); - assertTrue("No IOstatistics from " + inputStream, - iostats.aggregate(inputStream.getIOStatistics())); + 
assertTrue(iostats.aggregate(inputStream.getIOStatistics()), + "No IOstatistics from " + inputStream); verifyStatisticCounterValue(iostats, StreamStatisticNames.STREAM_READ_BYTES, bytesToRead); @@ -186,10 +188,10 @@ public void testUnbufferStreamStatistics() throws IOException { .hasFieldOrPropertyWithValue("bytesRead", expectedFinalBytesRead) .hasFieldOrPropertyWithValue("totalBytesRead", expectedTotalBytesRead); - assertEquals("S3AInputStream statistics were not updated properly in " - + streamStatsStr, - expectedFinalBytesRead, - streamStatistics.getBytesRead()); + assertEquals(expectedFinalBytesRead, + streamStatistics.getBytesRead(), + "S3AInputStream statistics were not updated properly in " + + streamStatsStr); } private boolean isObjectStreamOpen(FSDataInputStream inputStream) { @@ -209,8 +211,9 @@ private void skipIfCannotUnbuffer(FSDataInputStream inputStream) { */ private static void readAndAssertBytesRead(FSDataInputStream inputStream, int bytesToRead) throws IOException { - assertEquals("S3AInputStream#read did not read the correct number of " + - "bytes", bytesToRead, - inputStream.read(new byte[bytesToRead])); + assertEquals(bytesToRead, + inputStream.read(new byte[bytesToRead]), + "S3AInputStream#read did not read the correct number of " + + "bytes"); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUrlScheme.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUrlScheme.java index cfe46440c7512..dd76da692a3ab 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUrlScheme.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AUrlScheme.java @@ -22,7 +22,7 @@ import java.net.URI; import java.net.URISyntaxException; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java index d51bc954a6329..17519720f9325 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3AAWSCredentialsProvider.java @@ -36,7 +36,7 @@ import javax.annotation.Nullable; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; @@ -208,7 +208,7 @@ public void testFallbackToDefaults() throws Throwable { Arrays.asList( EnvironmentVariableCredentialsProvider.class), Sets.newHashSet()); - assertTrue("empty credentials", credentials.size() > 0); + assertTrue(credentials.size() > 0, "empty credentials"); } /** @@ -449,13 +449,12 @@ private static void assertCredentialProviders( Class expectedClass = expectedClasses.get(i); AwsCredentialsProvider provider = providers.get(i); - assertNotNull( + assertNotNull(provider, String.format("At position %d, expected class is %s, but found null.", - i, expectedClass), provider); - assertTrue( + i, expectedClass)); + assertTrue(expectedClass.isAssignableFrom(provider.getClass()), String.format("At position %d, expected class is %s, but found %s.", - i, expectedClass, provider.getClass()), - expectedClass.isAssignableFrom(provider.getClass())); + i, expectedClass, 
provider.getClass())); } } @@ -466,12 +465,12 @@ private static void assertCredentialProviders( @Test public void testAuthenticationContainsProbes() { Configuration conf = new Configuration(false); - assertFalse("found AssumedRoleCredentialProvider", - authenticationContains(conf, AssumedRoleCredentialProvider.NAME)); + assertFalse(authenticationContains(conf, AssumedRoleCredentialProvider.NAME), + "found AssumedRoleCredentialProvider"); conf.set(AWS_CREDENTIALS_PROVIDER, AssumedRoleCredentialProvider.NAME); - assertTrue("didn't find AssumedRoleCredentialProvider", - authenticationContains(conf, AssumedRoleCredentialProvider.NAME)); + assertTrue(authenticationContains(conf, AssumedRoleCredentialProvider.NAME), + "didn't find AssumedRoleCredentialProvider"); } @Test @@ -486,17 +485,17 @@ public void testExceptionLogic() throws Throwable { providers.close(); S3ARetryPolicy retryPolicy = new S3ARetryPolicy(new Configuration(false)); - assertEquals("Expected no retry on auth failure", - RetryPolicy.RetryAction.FAIL.action, - retryPolicy.shouldRetry(noAuth, 0, 0, true).action); + assertEquals(RetryPolicy.RetryAction.FAIL.action, + retryPolicy.shouldRetry(noAuth, 0, 0, true).action, + "Expected no retry on auth failure"); try { throw S3AUtils.translateException("login", "", noAuth); } catch (AccessDeniedException expected) { // this is what we want; other exceptions will be passed up - assertEquals("Expected no retry on AccessDeniedException", - RetryPolicy.RetryAction.FAIL.action, - retryPolicy.shouldRetry(expected, 0, 0, true).action); + assertEquals(RetryPolicy.RetryAction.FAIL.action, + retryPolicy.shouldRetry(expected, 0, 0, true).action, + "Expected no retry on AccessDeniedException"); } } @@ -505,31 +504,30 @@ public void testExceptionLogic() throws Throwable { public void testRefCounting() throws Throwable { AWSCredentialProviderList providers = new AWSCredentialProviderList(); - assertEquals("Ref count for " + providers, - 1, providers.getRefCount()); + assertEquals(1, providers.getRefCount(), "Ref count for " + providers); AWSCredentialProviderList replicate = providers.share(); assertEquals(providers, replicate); - assertEquals("Ref count after replication for " + providers, - 2, providers.getRefCount()); - assertFalse("Was closed " + providers, providers.isClosed()); + assertEquals(2, providers.getRefCount(), + "Ref count after replication for " + providers); + assertFalse(providers.isClosed(), "Was closed " + providers); providers.close(); - assertFalse("Was closed " + providers, providers.isClosed()); - assertEquals("Ref count after close() for " + providers, - 1, providers.getRefCount()); + assertFalse(providers.isClosed(), "Was closed " + providers); + assertEquals(1, providers.getRefCount(), + "Ref count after close() for " + providers); // this should now close it providers.close(); - assertTrue("Was not closed " + providers, providers.isClosed()); - assertEquals("Ref count after close() for " + providers, - 0, providers.getRefCount()); - assertEquals("Ref count after second close() for " + providers, - 0, providers.getRefCount()); + assertTrue(providers.isClosed(), "Was not closed " + providers); + assertEquals(0, providers.getRefCount(), + "Ref count after close() for " + providers); + assertEquals(0, providers.getRefCount(), + "Ref count after second close() for " + providers); intercept(IllegalStateException.class, "closed", () -> providers.share()); // final call harmless providers.close(); - assertEquals("Ref count after close() for " + providers, - 0, 
providers.getRefCount()); + assertEquals(0, providers.getRefCount(), + "Ref count after close() for " + providers); intercept(NoAuthWithAWSException.class, AWSCredentialProviderList.CREDENTIALS_REQUESTED_WHEN_CLOSED, @@ -597,12 +595,10 @@ public void testConcurrentAuthentication() throws Throwable { List> results = new ArrayList<>(); try { - assertFalse( - "Provider not initialized. isInitialized should be false", - provider.isInitialized()); - assertFalse( - "Provider not initialized. hasCredentials should be false", - provider.hasCredentials()); + assertFalse(provider.isInitialized(), + "Provider not initialized. isInitialized should be false"); + assertFalse(provider.hasCredentials(), + "Provider not initialized. hasCredentials should be false"); if (provider.getInitializationException() != null) { throw new AssertionError( "Provider not initialized. getInitializationException should return null", @@ -625,12 +621,10 @@ public void testConcurrentAuthentication() throws Throwable { pool.shutdown(); } - assertTrue( - "Provider initialized without errors. isInitialized should be true", - provider.isInitialized()); - assertTrue( - "Provider initialized without errors. hasCredentials should be true", - provider.hasCredentials()); + assertTrue(provider.isInitialized(), + "Provider initialized without errors. isInitialized should be true"); + assertTrue(provider.hasCredentials(), + "Provider initialized without errors. hasCredentials should be true"); if (provider.getInitializationException() != null) { throw new AssertionError( "Provider initialized without errors. getInitializationException should return null", @@ -666,10 +660,10 @@ public void testConcurrentAuthenticationError() throws Throwable { List> results = new ArrayList<>(); try { - assertFalse("Provider not initialized. isInitialized should be false", - provider.isInitialized()); - assertFalse("Provider not initialized. hasCredentials should be false", - provider.hasCredentials()); + assertFalse(provider.isInitialized(), + "Provider not initialized. isInitialized should be false"); + assertFalse(provider.hasCredentials(), + "Provider not initialized. hasCredentials should be false"); if (provider.getInitializationException() != null) { throw new AssertionError( "Provider not initialized. getInitializationException should return null", @@ -691,15 +685,14 @@ public void testConcurrentAuthenticationError() throws Throwable { pool.shutdown(); } - assertTrue( - "Provider initialization failed. isInitialized should be true", - provider.isInitialized()); - assertFalse( - "Provider initialization failed. hasCredentials should be false", - provider.hasCredentials()); - assertTrue( - "Provider initialization failed. getInitializationException should contain the error", - provider.getInitializationException().getMessage().contains("expected error")); + assertTrue(provider.isInitialized(), + "Provider initialization failed. isInitialized should be true"); + assertFalse(provider.hasCredentials(), + "Provider initialization failed. hasCredentials should be false"); + assertTrue(provider.getInitializationException(). + getMessage().contains("expected error"), + "Provider initialization failed. 
" + + "getInitializationException should contain the error"); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java index 3fc82c69c98d9..573de535efeb9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java @@ -21,7 +21,8 @@ import java.io.FileNotFoundException; import java.io.IOException; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,6 +69,7 @@ public Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java index 26618e34b4bc3..56eede93f416e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java @@ -22,7 +22,7 @@ import java.util.EnumSet; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.S3AFileSystem; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManagerDisabled.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManagerDisabled.java index d9135509a422d..ffc4dafb9c506 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManagerDisabled.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManagerDisabled.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.s3a.audit; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.S3AFileSystem; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java index 592529b553d24..d9c84fa6914c9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java @@ -30,12 +30,14 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.services.s3.model.MultipartUpload; import software.amazon.awssdk.services.sts.model.StsException; import com.fasterxml.jackson.core.JsonProcessingException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -119,6 +121,7 @@ protected Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -126,6 +129,7 @@ public void setup() throws Exception { uri = 
requireAnonymousDataPath(getConfiguration()).toUri(); } + @AfterEach @Override public void teardown() throws Exception { cleanupWithLogger(LOG, roleFS); @@ -167,7 +171,7 @@ public void testCreateCredentialProvider() throws IOException { = new AssumedRoleCredentialProvider(uri, conf)) { LOG.info("Provider is {}", provider); AwsCredentials credentials = provider.resolveCredentials(); - assertNotNull("Null credentials from " + provider, credentials); + assertNotNull(credentials, "Null credentials from " + provider); } } @@ -180,7 +184,7 @@ public void testCreateCredentialProviderNoURI() throws IOException { = new AssumedRoleCredentialProvider(null, conf)) { LOG.info("Provider is {}", provider); AwsCredentials credentials = provider.resolveCredentials(); - assertNotNull("Null credentials from " + provider, credentials); + assertNotNull(credentials, "Null credentials from " + provider); } } @@ -679,7 +683,7 @@ public void testRestrictedCommitActions() throws Throwable { public void assertCommitAccessDenied(final Path path, final CommitOperations.MaybeIOE maybeIOE) { IOException ex = maybeIOE.getException(); - assertNotNull("no IOE in " + maybeIOE + " for " + path, ex); + assertNotNull(ex, "no IOE in " + maybeIOE + " for " + path); if (!(ex instanceof AccessDeniedException)) { ContractTestUtils.fail("Wrong exception class for commit to " + path, ex); @@ -854,8 +858,8 @@ public void executePartialDelete(final Configuration conf, // and although you can't delete under the path, if the file doesn't // exist, the delete call fails fast. Path pathWhichDoesntExist = new Path(readOnlyDir, "no-such-path"); - assertFalse("deleting " + pathWhichDoesntExist, - roleFS.delete(pathWhichDoesntExist, true)); + assertFalse(roleFS.delete(pathWhichDoesntExist, true), + "deleting " + pathWhichDoesntExist); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java index 0cb42b0f31096..29201a8d5a0dc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java @@ -20,6 +20,8 @@ import java.io.IOException; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,6 +61,7 @@ protected Configuration createConfiguration() { return disableCreateSession(super.createConfiguration()); } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -76,6 +79,7 @@ public void setup() throws Exception { roleFS = (S3AFileSystem) restrictedDir.getFileSystem(conf); } + @AfterEach @Override public void teardown() throws Exception { cleanupWithLogger(LOG, roleFS); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java index e6eb60ad06134..1e7765801bff5 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java @@ -27,8 +27,9 @@ import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import 
org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import software.amazon.awssdk.auth.signer.Aws4Signer; import software.amazon.awssdk.auth.signer.AwsS3V4Signer; import software.amazon.awssdk.auth.signer.internal.AbstractAwsS3V4Signer; @@ -36,7 +37,6 @@ import software.amazon.awssdk.core.signer.Signer; import software.amazon.awssdk.http.SdkHttpFullRequest; import org.assertj.core.api.Assertions; -import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,7 +65,6 @@ * Because the v2 sdk has had some problems with bulk delete * and custom signing, this suite is parameterized. */ -@RunWith(Parameterized.class) public class ITestCustomSigner extends AbstractS3ATestBase { private static final Logger LOG = LoggerFactory @@ -77,7 +76,6 @@ public class ITestCustomSigner extends AbstractS3ATestBase { /** * Parameterization. */ - @Parameterized.Parameters(name = "{0}") public static Collection params() { return Arrays.asList(new Object[][]{ {"bulk delete", true}, @@ -85,7 +83,7 @@ public static Collection params() { }); } - private final boolean bulkDelete; + private boolean bulkDelete; private final UserGroupInformation ugi1 = UserGroupInformation.createRemoteUser("user1"); @@ -95,10 +93,11 @@ public static Collection params() { private String endpoint; - public ITestCustomSigner( + public void initITestCustomSigner( final String ignored, - final boolean bulkDelete) { - this.bulkDelete = bulkDelete; + final boolean pBulkDelete) throws Exception { + this.bulkDelete = pBulkDelete; + setup(); } @Override @@ -120,6 +119,7 @@ public void setup() throws Exception { /** * Teardown closes all filesystems for the test UGIs. */ + @AfterEach @Override public void teardown() throws Exception { super.teardown(); @@ -127,10 +127,11 @@ public void teardown() throws Exception { FileSystem.closeAllForUGI(ugi2); } - @Test - public void testCustomSignerAndInitializer() - throws IOException, InterruptedException { - + @MethodSource("params") + @ParameterizedTest(name = "{0}") + public void testCustomSignerAndInitializer(final String ignored, + final boolean pBulkDelete) throws Exception { + initITestCustomSigner(ignored, pBulkDelete); final Path basePath = path(getMethodName()); FileSystem fs1 = runStoreOperationsAndVerify(ugi1, new Path(basePath, "customsignerpath1"), "id1"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestHttpSigner.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestHttpSigner.java index db0aaa6be0eca..1a60c012ba7c8 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestHttpSigner.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestHttpSigner.java @@ -21,7 +21,9 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -62,6 +64,7 @@ public class ITestHttpSigner extends AbstractS3ATestBase { private String endpoint; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -82,6 +85,7 @@ private String determineRegion(String bucketName) throws IOException { return getS3AInternals().getBucketLocation(bucketName); } + @AfterEach @Override public void teardown() throws Exception { super.teardown(); diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestJceksIO.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestJceksIO.java index 3649a6731a023..d13af064e1a88 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestJceksIO.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestJceksIO.java @@ -25,8 +25,10 @@ import java.nio.charset.StandardCharsets; import org.assertj.core.api.Assertions; -import org.junit.AfterClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,6 +62,7 @@ public class ITestJceksIO extends AbstractS3ATestBase { private ByteArrayOutputStream stdout, stderr; private PrintStream printStdout, printStderr; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -74,6 +77,7 @@ public void setup() throws Exception { System.setErr(printStderr); } + @AfterEach @Override public void teardown() throws Exception { System.setOut(oldStdout); @@ -86,7 +90,7 @@ public void teardown() throws Exception { * Shut down all filesystems for this user to avoid * leaking those used by credential providers. */ - @AfterClass + @AfterAll public static void closeAllFilesystems() { try { LOG.info("Closing down all filesystems for current user"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java index 7151c38ad3e27..ae05a8dfc2076 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java @@ -25,7 +25,9 @@ import java.util.concurrent.Callable; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -154,12 +156,14 @@ public class ITestRestrictedReadAccess extends AbstractS3ATestBase { */ private S3AFileSystem readonlyFS; + @BeforeEach @Override public void setup() throws Exception { super.setup(); assumeRoleTests(); } + @AfterEach @Override public void teardown() throws Exception { try { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDelegationIT.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDelegationIT.java index 67ed3d5e0a2f8..f5a4c6bba8149 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDelegationIT.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDelegationIT.java @@ -67,9 +67,7 @@ public static AbstractS3ATokenIdentifier lookupToken( requireNonNull( lookupS3ADelegationToken(submittedCredentials, uri), "No Token for " + uri); - assertEquals("Kind of token " + token, - kind, - token.getKind()); + assertEquals(kind, token.getKind(), "Kind of token " + token); AbstractS3ATokenIdentifier tid = token.decodeIdentifier(); LOG.info("Found for URI {}, token {}", uri, tid); @@ -112,10 +110,10 @@ protected static S3AFileSystem newS3AInstance(final URI uri, protected static void 
assertBoundToDT(final S3AFileSystem fs, final Text tokenKind) { final S3ADelegationTokens dtSupport = fs.getDelegationTokens().get(); - assertTrue("Expected bound to a delegation token: " + dtSupport, - dtSupport.isBoundToDT()); - assertEquals("Wrong token kind", - tokenKind, dtSupport.getBoundDT().get().getKind()); + assertTrue(dtSupport.isBoundToDT(), + "Expected bound to a delegation token: " + dtSupport); + assertEquals(tokenKind, dtSupport.getBoundDT().get().getKind(), + "Wrong token kind"); } /** @@ -126,9 +124,8 @@ protected static void assertBoundToDT(final S3AFileSystem fs, */ protected static void assertTokenCreationCount(final S3AFileSystem fs, final int expected) { - assertEquals("DT creation count from " + fs.getDelegationTokens().get(), - expected, - getTokenCreationCount(fs)); + assertEquals(expected, getTokenCreationCount(fs), + "DT creation count from " + fs.getDelegationTokens().get()); } /** @@ -173,7 +170,7 @@ protected void bindProviderList(String bucket, Configuration config, String... providerClassnames) { removeBaseAndBucketOverrides(bucket, config, AWS_CREDENTIALS_PROVIDER); - assertTrue("No providers to bind to", providerClassnames.length > 0); + assertTrue(providerClassnames.length > 0, "No providers to bind to"); config.setStrings(AWS_CREDENTIALS_PROVIDER, providerClassnames); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java index 3b21a08e30a0a..317fc2e2edd15 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java @@ -29,7 +29,8 @@ import java.util.concurrent.ExecutorService; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -116,14 +117,14 @@ protected String getDelegationBinding() { return DELEGATION_TOKEN_SESSION_BINDING; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); assumeSessionTestsEnabled(getConfiguration()); S3AFileSystem fileSystem = getFileSystem(); - assertNotNull( - "No delegation tokens in FS", - fileSystem.getCanonicalServiceName()); + assertNotNull(fileSystem.getCanonicalServiceName(), + "No delegation tokens in FS"); dataDir = GenericTestUtils.getTestDir("kerberos"); dataDir.mkdirs(); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestDelegatedMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestDelegatedMRJob.java index 4aaf35f0613e0..e53772a3b9287 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestDelegatedMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestDelegatedMRJob.java @@ -20,11 +20,11 @@ import java.util.Arrays; import java.util.Collection; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import 
org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,7 +86,6 @@ * This is needed to verify that job resources have their tokens extracted * too. */ -@RunWith(Parameterized.class) public class ITestDelegatedMRJob extends AbstractDelegationIT { private static final Logger LOG = @@ -98,11 +97,11 @@ public class ITestDelegatedMRJob extends AbstractDelegationIT { @SuppressWarnings("StaticNonFinalField") private static MiniKerberizedHadoopCluster cluster; - private final String name; + private String name; - private final String tokenBinding; + private String tokenBinding; - private final Text tokenKind; + private Text tokenKind; /** * Created in test setup. @@ -127,7 +126,6 @@ public class ITestDelegatedMRJob extends AbstractDelegationIT { * Test array for parameterized test runs. * @return a list of parameter tuples. */ - @Parameterized.Parameters public static Collection params() { return Arrays.asList(new Object[][]{ {"session", DELEGATION_TOKEN_SESSION_BINDING, SESSION_TOKEN_KIND}, @@ -136,16 +134,18 @@ public static Collection params() { }); } - public ITestDelegatedMRJob(String name, String tokenBinding, Text tokenKind) { - this.name = name; - this.tokenBinding = tokenBinding; - this.tokenKind = tokenKind; + public void initITestDelegatedMRJob(String pName, String pTokenBinding, Text pTokenKind) + throws Exception { + this.name = pName; + this.tokenBinding = pTokenBinding; + this.tokenKind = pTokenKind; + setup(); } /*** * Set up the clusters. */ - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { JobConf conf = new JobConf(); assumeSessionTestsEnabled(conf); @@ -156,7 +156,7 @@ public static void setupCluster() throws Exception { /** * Tear down the cluster. 
*/ - @AfterClass + @AfterAll public static void teardownCluster() throws Exception { cluster = terminateService(cluster); } @@ -213,6 +213,7 @@ public void setup() throws Exception { } + @AfterEach @Override public void teardown() throws Exception { describe("Teardown operations"); @@ -241,17 +242,23 @@ protected int getTestTimeoutMillis() { return getTestTimeoutSeconds() * 1000; } - @Test - public void testCommonCrawlLookup() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCommonCrawlLookup(String pName, String pTokenBinding, + Text pTokenKind) throws Throwable { + initITestDelegatedMRJob(pName, pTokenBinding, pTokenKind); FileSystem resourceFS = extraJobResourcePath.getFileSystem( getConfiguration()); FileStatus status = resourceFS.getFileStatus(extraJobResourcePath); LOG.info("Extra job resource is {}", status); - assertTrue("Not encrypted: " + status, status.isEncrypted()); + assertTrue(status.isEncrypted(), "Not encrypted: " + status); } - @Test - public void testJobSubmissionCollectsTokens() throws Exception { + @MethodSource("params") + @ParameterizedTest + public void testJobSubmissionCollectsTokens(String pName, String pTokenBinding, + Text pTokenKind) throws Exception { + initITestDelegatedMRJob(pName, pTokenBinding, pTokenKind); describe("Mock Job test"); JobConf conf = new JobConf(getConfiguration()); if (isUsingDefaultExternalDataFile(conf)) { @@ -298,10 +305,8 @@ public void testJobSubmissionCollectsTokens() throws Exception { job.submit(); final JobStatus status = job.getStatus(); - assertEquals("not a mock job", - MockJob.NAME, status.getSchedulingInfo()); - assertEquals("Job State", - JobStatus.State.RUNNING, status.getState()); + assertEquals(MockJob.NAME, status.getSchedulingInfo(), "not a mock job"); + assertEquals(JobStatus.State.RUNNING, status.getState(), "Job State"); final Credentials submittedCredentials = requireNonNull(job.getSubmittedCredentials(), diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFilesystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFilesystem.java index 08dba4b798214..bfae4b94fc6d6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFilesystem.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationInFilesystem.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.io.Text; +import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.probeForAssumedRoleARN; import static org.apache.hadoop.fs.s3a.auth.delegation.DelegationConstants.DELEGATION_TOKEN_ROLE_BINDING; @@ -35,6 +36,7 @@ public class ITestRoleDelegationInFilesystem extends ITestSessionDelegationInFilesystem { + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationTokens.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationTokens.java index 1085c262ffea5..a7b11716ac8b6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationTokens.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestRoleDelegationTokens.java @@ -21,7 +21,8 @@ import java.util.EnumSet; import 
java.util.List; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,6 +57,7 @@ public Text getTokenKind() { return ROLE_TOKEN_KIND; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -117,7 +119,7 @@ public void testCreateRoleModel() throws Throwable { S3AFileSystem fs = getFileSystem(); List rules = fs.listAWSPolicyRules( access); - assertTrue("No AWS policy rules from FS", !rules.isEmpty()); + assertTrue(!rules.isEmpty(), "No AWS policy rules from FS"); String ruleset = new RoleModel().toJson(new RoleModel.Policy(rules)); LOG.info("Access policy for {}\n{}", fs.getUri(), ruleset); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFilesystem.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFilesystem.java index b2be0bc7d75ed..5930141c7d4ab 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFilesystem.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationInFilesystem.java @@ -28,9 +28,11 @@ import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.HeadBucketResponse; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -82,8 +84,7 @@ import static org.apache.hadoop.fs.s3a.test.PublicDatasetTestUtils.requireAnonymousDataPath; import static org.apache.hadoop.test.LambdaTestUtils.doAs; import static org.apache.hadoop.test.LambdaTestUtils.intercept; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.collection.IsCollectionWithSize.hasSize; +import static org.assertj.core.api.Assertions.assertThat; /** * Tests use of Hadoop delegation tokens within the FS itself. @@ -107,7 +108,7 @@ public class ITestSessionDelegationInFilesystem extends AbstractDelegationIT { /*** * Set up a mini Cluster with two users in the keytab. */ - @BeforeClass + @BeforeAll public static void setupCluster() throws Exception { cluster = new MiniKerberizedHadoopCluster(); cluster.init(new Configuration()); @@ -118,7 +119,7 @@ public static void setupCluster() throws Exception { * Tear down the Cluster. */ @SuppressWarnings("ThrowableNotThrown") - @AfterClass + @AfterAll public static void teardownCluster() throws Exception { ServiceOperations.stopQuietly(LOG, cluster); } @@ -194,6 +195,7 @@ protected Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { // clear any existing tokens from the FS @@ -209,15 +211,15 @@ public void setup() throws Exception { super.setup(); S3AFileSystem fs = getFileSystem(); // make sure there aren't any tokens - assertNull("Unexpectedly found an S3A token", - lookupS3ADelegationToken( + assertNull(lookupS3ADelegationToken( UserGroupInformation.getCurrentUser().getCredentials(), - fs.getUri())); + fs.getUri()), "Unexpectedly found an S3A token"); // DTs are inited but not started. 
delegationTokens = instantiateDTSupport(getConfiguration()); } + @AfterEach @SuppressWarnings("ThrowableNotThrown") @Override public void teardown() throws Exception { @@ -242,8 +244,8 @@ public void testGetDTfromFileSystem() throws Throwable { describe("Enable delegation tokens and request one"); delegationTokens.start(); S3AFileSystem fs = getFileSystem(); - assertNotNull("No tokens from " + fs, - fs.getCanonicalServiceName()); + assertNotNull(fs.getCanonicalServiceName(), + "No tokens from " + fs); S3ATestUtils.MetricDiff invocationDiff = new S3ATestUtils.MetricDiff(fs, Statistic.INVOCATION_GET_DELEGATION_TOKEN); S3ATestUtils.MetricDiff issueDiff = new S3ATestUtils.MetricDiff(fs, @@ -251,7 +253,7 @@ Token token = requireNonNull(fs.getDelegationToken(""), "no token from filesystem " + fs); - assertEquals("token kind", getTokenKind(), token.getKind()); + assertEquals(getTokenKind(), token.getKind(), "token kind"); assertTokenCreationCount(fs, 1); final String fsInfo = fs.toString(); invocationDiff.assertDiffEquals("getDelegationToken() in " + fsInfo, @@ -260,11 +262,11 @@ 1); Text service = delegationTokens.getService(); - assertEquals("service name", service, token.getService()); + assertEquals(service, token.getService(), "service name"); Credentials creds = new Credentials(); creds.addToken(service, token); - assertEquals("retrieve token from " + creds, - token, creds.getToken(service)); + assertEquals(token, creds.getToken(service), + "retrieve token from " + creds); } @Test @@ -273,7 +275,7 @@ public void testAddTokensFromFileSystem() throws Throwable { S3AFileSystem fs = getFileSystem(); Credentials cred = new Credentials(); Token[] tokens = fs.addDelegationTokens(YARN_RM, cred); - assertEquals("Number of tokens", 1, tokens.length); + assertEquals(1, tokens.length, "Number of tokens"); Token token = requireNonNull(tokens[0], "token"); LOG.info("FS token is {}", token); Text service = delegationTokens.getService(); @@ -284,8 +286,7 @@ // this only sneaks in because there isn't a state check here delegationTokens.resetTokenBindingToDT( (Token) retrieved); - assertTrue("bind to existing DT failed", - delegationTokens.isBoundToDT()); + assertTrue(delegationTokens.isBoundToDT(), "bind to existing DT failed"); AWSCredentialProviderList providerList = requireNonNull( delegationTokens.getCredentialProviders(), "providers"); @@ -306,9 +307,9 @@ public void testCanRetrieveTokenFromCurrentUserCreds() throws Throwable { LOG.info("Token = " + token0); Token token1 = requireNonNull( ugi.getCredentials().getToken(service), "Token from " + service); - assertEquals("retrieved token", token0, token1); - assertNotNull("token identifier of " + token1, - token1.getIdentifier()); + assertEquals(token0, token1, "retrieved token"); + assertNotNull(token1.getIdentifier(), + "token identifier of " + token1); } @Test @@ -316,11 +317,12 @@ public void testDTCredentialProviderFromCurrentUserCreds() throws Throwable { describe("Add credentials to the current user, " + "then verify that they can be found when S3ADelegationTokens binds"); Credentials cred = createDelegationTokens(); - assertThat("Token size", cred.getAllTokens(), hasSize(1)); + assertThat(cred.getAllTokens()).as("Token size"). 
+ hasSize(1); UserGroupInformation.getCurrentUser().addCredentials(cred); delegationTokens.start(); - assertTrue("bind to existing DT failed", - delegationTokens.isBoundToDT()); + assertTrue(delegationTokens.isBoundToDT(), + "bind to existing DT failed"); } /** @@ -394,11 +396,11 @@ public void testDelegatedFileSystem() throws Throwable { LOG.info("Delegated filesystem is: {}", delegatedFS); assertBoundToDT(delegatedFS, tokenKind); if (encryptionTestEnabled()) { - assertNotNull("Encryption propagation failed", - delegatedFS.getS3EncryptionAlgorithm()); - assertEquals("Encryption propagation failed", - fs.getS3EncryptionAlgorithm(), - delegatedFS.getS3EncryptionAlgorithm()); + assertNotNull(delegatedFS.getS3EncryptionAlgorithm(), + "Encryption propagation failed"); + assertEquals(fs.getS3EncryptionAlgorithm(), + delegatedFS.getS3EncryptionAlgorithm(), + "Encryption propagation failed"); } verifyRestrictedPermissions(delegatedFS); @@ -414,30 +416,27 @@ AbstractS3ATokenIdentifier tokenFromDelegatedFS = requireNonNull(delegatedFS.getDelegationToken(""), "New token").decodeIdentifier(); - assertEquals("Newly issued token != old one", - origTokenId, - tokenFromDelegatedFS); + assertEquals(origTokenId, + tokenFromDelegatedFS, "Newly issued token != old one"); issueDiff.assertDiffEquals("DTs issued in " + delegatedFS, 0); } // the DT auth chain should override the original one. - assertEquals("invocation count", - originalCount, - CountInvocationsProvider.getInvocationCount()); + assertEquals(originalCount, + CountInvocationsProvider.getInvocationCount(), "invocation count"); // create a second instance, which will pick up the same value try (S3AFileSystem secondDelegate = newS3AInstance(uri, conf)) { assertBoundToDT(secondDelegate, tokenKind); if (encryptionTestEnabled()) { - assertNotNull("Encryption propagation failed", - secondDelegate.getS3EncryptionAlgorithm()); - assertEquals("Encryption propagation failed", - fs.getS3EncryptionAlgorithm(), - secondDelegate.getS3EncryptionAlgorithm()); + assertNotNull( + secondDelegate.getS3EncryptionAlgorithm(), "Encryption propagation failed"); + assertEquals(fs.getS3EncryptionAlgorithm(), + secondDelegate.getS3EncryptionAlgorithm(), + "Encryption propagation failed"); } ContractTestUtils.assertDeleted(secondDelegate, testPath, true); - assertNotNull("unbounded DT", - secondDelegate.getDelegationToken("")); + assertNotNull(secondDelegate.getDelegationToken(""), "unbounded DT"); } } @@ -533,8 +532,8 @@ public void testDelegationBindingMismatch2() throws Throwable { Token secondDT = fullFS.getDelegationToken( "second"); assertTokenCreationCount(fullFS, 3); - assertNotEquals("DT identifiers", - firstDT.getIdentifier(), secondDT.getIdentifier()); + assertNotEquals(firstDT.getIdentifier(), secondDT.getIdentifier(), + "DT identifiers"); } // expect a token @@ -555,9 +554,8 @@ delegatedFS.getDelegationToken(""), "New token") .decodeIdentifier(); assertTokenCreationCount(delegatedFS, 0); - assertEquals("Newly issued token != old one", - origTokenId, - tokenFromDelegatedFS); + assertEquals(origTokenId, + tokenFromDelegatedFS, "Newly issued token != old one"); } // now create a configuration which expects a session token. 
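The Hamcrest-to-AssertJ conversions in the hunks above hinge on one subtle AssertJ rule: a description set with as() applies only to assertions chained after it, so it must come before the check or the failure message is silently dropped. A minimal self-contained sketch of the pattern follows; the DemoAssertJDescription class and its sample values are illustrative only, not part of this patch.

import static org.assertj.core.api.Assertions.assertThat;

import java.util.Arrays;
import java.util.List;

/** Illustrative sketch of the Hamcrest-to-AssertJ migration pattern. */
public final class DemoAssertJDescription {
  public static void main(String[] args) {
    List<String> tokens = Arrays.asList("session-token");
    // JUnit 4 / Hamcrest style was: assertThat("Token size", tokens, hasSize(1));
    // AssertJ style: chain as() first, then the assertion, so the
    // description is attached to the check that follows it.
    assertThat(tokens).as("Token size").hasSize(1);

    String printed = "s3a://bucket; kind = S3ADelegationToken/Session";
    // Hamcrest containsString(...) maps to AssertJ contains(...).
    assertThat(printed).as("token dump").contains("S3ADelegationToken");
  }
}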
@@ -631,11 +629,8 @@ public void testYarnCredentialPickup() throws Throwable { Configuration conf = getConfiguration(); S3AFileSystem fs = getFileSystem(); TokenCache.obtainTokensForNamenodes(cred, paths, conf); - assertNotNull("No Token in credentials file", - lookupToken( - cred, - fs.getUri(), - getTokenKind())); + assertNotNull(lookupToken(cred, fs.getUri(), getTokenKind()), + "No Token in credentials file"); } /** @@ -663,8 +658,8 @@ public void testHDFSFetchDTCommand() throws Throwable { doAs(bobUser, () -> DelegationTokenFetcher.main(conf, args("--webservice", fsurl, tokenFilePath))); - assertTrue("token file was not created: " + tokenfile, - tokenfile.exists()); + assertTrue(tokenfile.exists(), + "token file was not created: " + tokenfile); // print to stdout String s = DelegationTokenFetcher.printTokensToString(conf, @@ -683,11 +678,10 @@ public void testHDFSFetchDTCommand() throws Throwable { creds, fsUri, getTokenKind()), "Token lookup"); - assertEquals("encryption secrets", - fs.getEncryptionSecrets(), - identifier.getEncryptionSecrets()); - assertEquals("Username of decoded token", - bobUser.getUserName(), identifier.getUser().getUserName()); + assertEquals(fs.getEncryptionSecrets(), + identifier.getEncryptionSecrets(), "encryption secrets"); + assertEquals(bobUser.getUserName(), identifier.getUser().getUserName(), + "Username of decoded token"); // renew DelegationTokenFetcher.main(conf, args("--renew", tokenFilePath)); @@ -722,25 +716,23 @@ public void testFileSystemBoundToCreator() throws Throwable { describe("Run tests to verify the DT Setup is bound to the creator"); // quick sanity check to make sure alice and bob are different - assertNotEquals("Alice and Bob logins", - aliceUser.getUserName(), bobUser.getUserName()); + assertNotEquals(aliceUser.getUserName(), bobUser.getUserName(), + "Alice and Bob logins"); final S3AFileSystem fs = getFileSystem(); - assertEquals("FS username in doAs()", - ALICE, - doAs(bobUser, () -> fs.getUsername())); + assertEquals(ALICE, + doAs(bobUser, () -> fs.getUsername()), "FS username in doAs()"); UserGroupInformation fsOwner = doAs(bobUser, () -> fs.getDelegationTokens().get().getOwner()); - assertEquals("username mismatch", - aliceUser.getUserName(), fsOwner.getUserName()); + assertEquals(aliceUser.getUserName(), fsOwner.getUserName(), + "username mismatch"); Token dt = fs.getDelegationToken(ALICE); AbstractS3ATokenIdentifier identifier = dt.decodeIdentifier(); UserGroupInformation user = identifier.getUser(); - assertEquals("User in DT", - aliceUser.getUserName(), user.getUserName()); + assertEquals(aliceUser.getUserName(), user.getUserName(), "User in DT"); } @@ -768,16 +760,14 @@ public void testDTUtilShell() throws Throwable { "get", fsURI, "-format", "protobuf", tfs); - assertTrue("not created: " + tokenfile, - tokenfile.exists()); - assertTrue("File is empty" + tokenfile, - tokenfile.length() > 0); - assertTrue("File only contains header" + tokenfile, - tokenfile.length() > 6); + assertTrue(tokenfile.exists(), "not created: " + tokenfile); + assertTrue(tokenfile.length() > 0, "File is empty" + tokenfile); + assertTrue(tokenfile.length() > 6, + "File only contains header" + tokenfile); String printed = dtutil(0, "print", tfs); - assertThat(printed, containsString(fsURI)); - assertThat(printed, containsString(getTokenKind().toString())); + assertThat(printed).contains(fsURI); + assertThat(printed).contains(getTokenKind().toString()); } diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationTokens.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationTokens.java index b58ca24aaa832..939fa4fc5e551 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationTokens.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestSessionDelegationTokens.java @@ -22,10 +22,11 @@ import java.io.IOException; import java.net.URI; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; -import org.hamcrest.Matchers; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,6 +50,7 @@ import static org.apache.hadoop.fs.s3a.auth.delegation.DelegationConstants.DELEGATION_TOKEN_SESSION_BINDING; import static org.apache.hadoop.fs.s3a.auth.delegation.DelegationConstants.SESSION_TOKEN_KIND; import static org.apache.hadoop.fs.s3a.auth.delegation.SessionTokenBinding.CREDENTIALS_CONVERTED_TO_DELEGATION_TOKEN; +import static org.assertj.core.api.Assertions.assertThat; /** * Tests use of Hadoop delegation tokens to marshall S3 credentials. @@ -81,6 +83,7 @@ protected Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -90,6 +93,7 @@ public void setup() throws Exception { delegationTokens.start(); } + @AfterEach @Override public void teardown() throws Exception { IOUtils.cleanupWithLogger(LOG, delegationTokens); @@ -104,12 +108,12 @@ public void teardown() throws Exception { @Test public void testCanonicalization() throws Throwable { S3AFileSystem fs = getFileSystem(); - assertEquals("Default port has changed", - 0, fs.getDefaultPort()); + assertEquals(0, fs.getDefaultPort(), + "Default port has changed"); URI uri = fs.getCanonicalUri(); String service = fs.getCanonicalServiceName(); - assertEquals("canonical URI and service name mismatch", - uri, new URI(service)); + assertEquals(uri, new URI(service), + "canonical URI and service name mismatch"); } @Test @@ -121,10 +125,10 @@ public void testSaveLoadTokens() throws Throwable { = delegationTokens.createDelegationToken(encryptionSecrets, null); final SessionTokenIdentifier origIdentifier = (SessionTokenIdentifier) dt.decodeIdentifier(); - assertEquals("kind in " + dt, getTokenKind(), dt.getKind()); + assertEquals(getTokenKind(), dt.getKind(), "kind in " + dt); Configuration conf = getConfiguration(); saveDT(tokenFile, dt); - assertTrue("Empty token file", tokenFile.length() > 0); + assertTrue(tokenFile.length() > 0, "Empty token file"); Credentials creds = Credentials.readTokenStorageFile(tokenFile, conf); Text serviceId = delegationTokens.getService(); Token token = requireNonNull( @@ -133,13 +137,13 @@ public void testSaveLoadTokens() throws Throwable { SessionTokenIdentifier decoded = (SessionTokenIdentifier) token.decodeIdentifier(); decoded.validate(); - assertEquals("token identifier ", origIdentifier, decoded); - assertEquals("Origin in " + decoded, - origIdentifier.getOrigin(), decoded.getOrigin()); - assertEquals("Expiry time", - origIdentifier.getExpiryTime(), decoded.getExpiryTime()); - assertEquals("Encryption Secrets", - encryptionSecrets, decoded.getEncryptionSecrets()); + 
assertEquals(origIdentifier, decoded, "token identifier "); + assertEquals(origIdentifier.getOrigin(), decoded.getOrigin(), + "Origin in " + decoded); + assertEquals(origIdentifier.getExpiryTime(), decoded.getExpiryTime(), + "Expiry time"); + assertEquals(encryptionSecrets, decoded.getEncryptionSecrets(), + "Encryption Secrets"); } /** @@ -168,13 +172,13 @@ public void testCreateAndUseDT() throws Throwable { final S3AFileSystem fs = getFileSystem(); final Configuration conf = fs.getConf(); - assertNull("Current User has delegation token", - delegationTokens.selectTokenFromFSOwner()); + assertNull(delegationTokens.selectTokenFromFSOwner(), + "Current User has delegation token"); EncryptionSecrets secrets = new EncryptionSecrets( S3AEncryptionMethods.SSE_KMS, KMS_KEY, ""); Token originalDT = delegationTokens.createDelegationToken(secrets, null); - assertEquals("Token kind mismatch", getTokenKind(), originalDT.getKind()); + assertEquals(getTokenKind(), originalDT.getKind(), "Token kind mismatch"); // decode to get the binding info SessionTokenIdentifier issued = @@ -200,7 +204,7 @@ Token boundDT = dt2.getBoundOrNewDT(secrets, null); - assertEquals("Delegation Tokens", originalDT, boundDT); + assertEquals(originalDT, boundDT, "Delegation Tokens"); // simulate marshall and transmission creds = roundTrip(origCreds, conf); SessionTokenIdentifier reissued @@ -208,9 +212,8 @@ .decodeIdentifier(); reissued.validate(); String userAgentField = dt2.getUserAgentField(); - assertThat("UA field does not contain UUID", - userAgentField, - Matchers.containsString(issued.getUuid())); + assertThat(userAgentField).as("UA field does not contain UUID"). + contains(issued.getUuid()); } // now use those chained credentials to create a new FS instance @@ -226,13 +229,13 @@ public void testCreateWithRenewer() throws Throwable { final Configuration conf = fs.getConf(); final Text renewer = new Text("yarn"); - assertNull("Current User has delegation token", - delegationTokens.selectTokenFromFSOwner()); + assertNull(delegationTokens.selectTokenFromFSOwner(), + "Current User has delegation token"); EncryptionSecrets secrets = new EncryptionSecrets( S3AEncryptionMethods.SSE_KMS, KMS_KEY, ""); Token dt = delegationTokens.createDelegationToken(secrets, renewer); - assertEquals("Token kind mismatch", getTokenKind(), dt.getKind()); + assertEquals(getTokenKind(), dt.getKind(), "Token kind mismatch"); // decode to get the binding info SessionTokenIdentifier issued = @@ -240,7 +243,7 @@ (SessionTokenIdentifier) dt.decodeIdentifier(), () -> "no identifier in " + dt); issued.validate(); - assertEquals("Token renewer mismatch", renewer, issued.getRenewer()); + assertEquals(renewer, issued.getRenewer(), "Token renewer mismatch"); } /** @@ -283,10 +286,10 @@ protected AbstractS3ATokenIdentifier verifyCredentialPropagation( final MarshalledCredentials creds2 = fromAWSCredentials( verifySessionCredentials( delegationTokens2.getCredentialProviders().resolveCredentials())); - assertEquals("Credentials", session, creds2); - assertTrue("Origin in " + boundId, - boundId.getOrigin() - .contains(CREDENTIALS_CONVERTED_TO_DELEGATION_TOKEN)); + assertEquals(session, creds2, "Credentials"); + assertTrue(boundId.getOrigin() + .contains(CREDENTIALS_CONVERTED_TO_DELEGATION_TOKEN), + "Origin in " + boundId); return boundId; } } @@ -294,9 +297,9 @@ protected AbstractS3ATokenIdentifier 
verifyCredentialPropagation( private AwsSessionCredentials verifySessionCredentials( final AwsCredentials creds) { AwsSessionCredentials session = (AwsSessionCredentials) creds; - assertNotNull("access key", session.accessKeyId()); - assertNotNull("secret key", session.secretAccessKey()); - assertNotNull("session token", session.sessionToken()); + assertNotNull(session.accessKeyId(), "access key"); + assertNotNull(session.secretAccessKey(), "secret key"); + assertNotNull(session.sessionToken(), "session token"); return session; } @@ -306,8 +309,8 @@ public void testDBindingReentrancyLock() throws Throwable { + " is no token"); S3ADelegationTokens delegation = instantiateDTSupport(getConfiguration()); delegation.start(); - assertFalse("Delegation is bound to a DT: " + delegation, - delegation.isBoundToDT()); + assertFalse(delegation.isBoundToDT(), + "Delegation is bound to a DT: " + delegation); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java index 7b1dee4fd12b9..d0642fb237213 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java @@ -26,7 +26,8 @@ import java.util.List; import org.assertj.core.api.Assertions; -import org.junit.AfterClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -116,7 +117,7 @@ protected Configuration createConfiguration() { return conf; } - @AfterClass + @AfterAll public static void printStatistics() { LOG.info("Aggregate job statistics {}\n", IOStatisticsLogging.ioStatisticsToPrettyString(JOB_STATISTICS)); @@ -138,6 +139,7 @@ protected File getReportDir() { return reportDir; } + @BeforeEach @Override public void setup() throws Exception { // set the manifest committer to a localfs path for reports across @@ -233,8 +235,8 @@ protected void abortMultipartUploadsUnderPath(Path path) throws IOException { * @throws IOException IO failure */ protected void assertMultipartUploadsPending(Path path) throws IOException { - assertTrue("No multipart uploads in progress under " + path, - countMultipartUploads(path) > 0); + assertTrue(countMultipartUploads(path) > 0, + "No multipart uploads in progress under " + path); } /** @@ -392,8 +394,8 @@ public static SuccessData validateSuccessFile(final Path outputPath, LOG.info("Diagnostics\n{}", successData.dumpDiagnostics(" ", " = ", "\n")); if (!committerName.isEmpty()) { - assertEquals("Wrong committer in " + commitDetails, - committerName, successData.getCommitter()); + assertEquals(committerName, successData.getCommitter(), + "Wrong committer in " + commitDetails); } Assertions.assertThat(successData.getFilenames()) .describedAs("Files committed in " + commitDetails) @@ -437,12 +439,12 @@ public static SuccessData loadSuccessFile(final FileSystem fs, + " from " + origin + " not found: Job may have failed", success); - assertTrue("_SUCCESS outout from " + origin + " is not a file " + status, - status.isFile()); - assertTrue("0 byte success file " - + success + " from " + origin - + "; an S3A committer was not used", - status.getLen() > 0); + assertTrue(status.isFile(), + "_SUCCESS output from " + origin + " is not a file " + status); + assertTrue(status.getLen() > 0, + "0 byte success file " + + success + " from " + origin + + 
"; an S3A committer was not used"); String body = ContractTestUtils.readUTF8(fs, success, -1); LOG.info("Loading committer success file {}. Actual contents=\n{}", success, body); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java index 165379d1dc0c8..3921c640a2272 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractITCommitProtocol.java @@ -29,8 +29,10 @@ import java.util.stream.Collectors; import org.assertj.core.api.Assertions; -import org.junit.AfterClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -162,6 +164,7 @@ protected String getMethodName() { return suitename() + "-" + super.getMethodName(); } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -176,6 +179,7 @@ public void setup() throws Exception { cleanupDestDir(); } + @AfterEach @Override public void teardown() throws Exception { describe("teardown"); @@ -201,7 +205,7 @@ public void teardown() throws Exception { * This only looks for leakage of committer thread pools, * and not any other leaked threads, such as those from S3A FS instances. */ - @AfterClass + @AfterAll public static void checkForThreadLeakage() { List committerThreads = getCurrentThreadNames().stream() .filter(n -> n.startsWith(AbstractS3ACommitter.THREAD_PREFIX)) @@ -637,10 +641,10 @@ public void testRecoveryAndCleanup() throws Exception { TaskAttemptContext tContext = jobData.tContext; AbstractS3ACommitter committer = jobData.committer; - assertNotNull("null workPath in committer " + committer, - committer.getWorkPath()); - assertNotNull("null outputPath in committer " + committer, - committer.getOutputPath()); + assertNotNull(committer.getWorkPath(), + "null workPath in committer " + committer); + assertNotNull(committer.getOutputPath(), + "null outputPath in committer " + committer); // note the task attempt path. Path job1TaskAttempt0Path = committer.getTaskAttemptPath(tContext); @@ -659,8 +663,8 @@ public void testRecoveryAndCleanup() throws Exception { AbstractS3ACommitter committer2 = createCommitter(tContext2); committer2.setupJob(tContext2); - assertFalse("recoverySupported in " + committer2, - committer2.isRecoverySupported()); + assertFalse(committer2.isRecoverySupported(), + "recoverySupported in " + committer2); intercept(PathCommitException.class, "recover", () -> committer2.recoverTask(tContext2)); @@ -669,9 +673,8 @@ public void testRecoveryAndCleanup() throws Exception { final Path job2TaskAttempt0Path = committer2.getTaskAttemptPath(tContext2); LOG.info("Job attempt 1 task attempt path {}; attempt 2 path {}", job1TaskAttempt0Path, job2TaskAttempt0Path); - assertNotEquals("Task attempt paths must differ", - job1TaskAttempt0Path, - job2TaskAttempt0Path); + assertNotEquals(job1TaskAttempt0Path, + job2TaskAttempt0Path, "Task attempt paths must differ"); // at this point, task attempt 0 has failed to recover // it should be abortable though. This will be a no-op as it already @@ -826,8 +829,8 @@ public void testCommitLifecycle() throws Exception { dumpMultipartUploads(); describe("2. 
Committing task"); - assertTrue("No files to commit were found by " + committer, - committer.needsTaskCommit(tContext)); + assertTrue(committer.needsTaskCommit(tContext), + "No files to commit were found by " + committer); commitTask(committer, tContext); // this is only task commit; there MUST be no part- files in the dest dir @@ -1239,8 +1242,8 @@ public void assertJobAbortCleanedUp(JobData jobData) throws Exception { if (children.length != 0) { lsR(fs, outDir, true); } - assertArrayEquals("Output directory not empty " + ls(outDir), - new FileStatus[0], children); + assertArrayEquals(new FileStatus[0], children, + "Output directory not empty " + ls(outDir)); } catch (FileNotFoundException e) { // this is a valid failure mode; it means the dest dir doesn't exist yet. } @@ -1434,8 +1437,8 @@ public void testOutputFormatIntegration() throws Throwable { if (!isTrackMagicCommitsInMemoryEnabled(conf)) { validateTaskAttemptPathAfterWrite(dest, expectedLength); } - assertTrue("Committer does not have data to commit " + committer, - committer.needsTaskCommit(tContext)); + assertTrue(committer.needsTaskCommit(tContext), + "Committer does not have data to commit " + committer); commitTask(committer, tContext); // at this point the committer tasks stats should be current. IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot( @@ -1484,7 +1487,7 @@ public void testAMWorkflow() throws Throwable { = ReflectionUtils.newInstance(newAttempt .getOutputFormatClass(), conf); Path outputPath = FileOutputFormat.getOutputPath(newAttempt); - assertNotNull("null output path in new task attempt", outputPath); + assertNotNull(outputPath, "null output path in new task attempt"); AbstractS3ACommitter committer2 = (AbstractS3ACommitter) outputFormat.getOutputCommitter(newAttempt); @@ -1533,13 +1536,11 @@ public void testParallelJobsToAdjacentPaths() throws Throwable { setup(jobData2); abortInTeardown(jobData2); // make sure the directories are different - assertNotEquals("Committer output paths", - committer1.getOutputPath(), - committer2.getOutputPath()); + assertNotEquals(committer1.getOutputPath(), + committer2.getOutputPath(), "Committer output paths"); - assertNotEquals("job UUIDs", - committer1.getUUID(), - committer2.getUUID()); + assertNotEquals(committer1.getUUID(), + committer2.getUUID(), "job UUIDs"); // job2 setup, write some data there writeTextOutput(tContext2); @@ -1703,8 +1704,8 @@ public void testParallelJobsToSameDestination() throws Throwable { // validate the output Path job1Output = new Path(outDir, job1TaskOutputFile.getName()); Path job2Output = new Path(outDir, job2TaskOutputFile.getName()); - assertNotEquals("Job output file filenames must be different", - job1Output, job2Output); + assertNotEquals(job1Output, job2Output, + "Job output file filenames must be different"); // job1 output must be there assertPathExists("job 1 output", job1Output); @@ -1761,9 +1762,8 @@ public void testSelfGeneratedUUID() throws Throwable { Assertions.assertThat(committer2.getUUIDSource()) .describedAs("UUID source of %s", committer2) .isEqualTo(AbstractS3ACommitter.JobUUIDSource.GeneratedLocally); - assertNotEquals("job UUIDs", - committer.getUUID(), - committer2.getUUID()); + assertNotEquals(committer.getUUID(), + committer2.getUUID(), "job UUIDs"); // Task setup MUST fail. 
intercept(PathCommitException.class, E_SELF_GENERATED_JOB_UUID, () -> { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java index 39265f1d8eab2..cb250ff43ae40 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java @@ -18,13 +18,13 @@ package org.apache.hadoop.fs.s3a.commit; +import java.io.File; import java.io.IOException; import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; -import org.junit.AfterClass; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -93,7 +93,7 @@ public abstract class AbstractYarnClusterITest extends AbstractCommitITest { private static ClusterBinding clusterBinding; - @AfterClass + @AfterAll public static void teardownClusters() throws IOException { terminateCluster(clusterBinding); clusterBinding = null; @@ -226,8 +226,7 @@ protected FileSystem getClusterFS() throws IOException { * the user's home directory, as that is often rejected by CI test * runners. */ - @Rule - public final TemporaryFolder stagingFilesDir = new TemporaryFolder(); + public File stagingFilesDir; /** * The name of the committer as returned by @@ -245,6 +244,7 @@ protected ClusterBinding demandCreateClusterBinding() throws Exception { return createCluster(new JobConf(), false); } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -256,9 +256,10 @@ public void setup() throws Exception { if (getClusterBinding() == null) { clusterBinding = demandCreateClusterBinding(); } - assertNotNull("cluster is not bound", - getClusterBinding()); - + assertNotNull( + getClusterBinding(), "cluster is not bound"); + String methodName = getMethodName(); + stagingFilesDir = java.nio.file.Files.createTempDirectory(methodName).toFile(); } @Override @@ -303,7 +304,7 @@ protected Configuration patchConfigurationForCommitter( // pass down the scale test flag jobConf.setBoolean(KEY_SCALE_TESTS_ENABLED, isScaleTest()); // and fix the commit dir to the local FS across all workers. 
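Reviewer note: the @Rule TemporaryFolder here is replaced by a manually created directory rather than by JUnit 5's injected equivalent (the hunk above uses java.nio.file.Files.createTempDirectory so that the staging location is a directory, not a zero-byte file). A hedged alternative sketch, assuming nothing else needs the directory created inside setup(): @TempDir restores the create-before/delete-after lifecycle that TemporaryFolder provided.

    import java.io.File;
    import org.junit.jupiter.api.io.TempDir;

    public abstract class StagingDirSketch {
      // Injected fresh before each test and removed afterwards,
      // matching the old TemporaryFolder rule's behaviour.
      @TempDir
      protected File stagingFilesDir;

      protected String stagingPath() {
        return stagingFilesDir.getAbsolutePath();
      }
    }

Either way the staging location must be a directory, since it is handed to fs.s3a.committer.staging.tmp.path.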
- String staging = stagingFilesDir.getRoot().getAbsolutePath(); + String staging = stagingFilesDir.getAbsolutePath(); LOG.info("Staging temp dir is {}", staging); jobConf.set(FS_S3A_COMMITTER_STAGING_TMP_PATH, staging); return jobConf; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperationCost.java index 02f0251e8f055..9ad2c0625a094 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperationCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperationCost.java @@ -24,7 +24,9 @@ import java.util.List; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -78,12 +80,14 @@ public class ITestCommitOperationCost extends AbstractS3ACostTest { */ private CommitterTestHelper testHelper; + @BeforeEach @Override public void setup() throws Exception { super.setup(); testHelper = new CommitterTestHelper(getFileSystem()); } + @AfterEach @Override public void teardown() throws Exception { try { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java index 34b856c21b7e8..ddd306ddc25ad 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java @@ -27,7 +27,8 @@ import java.util.UUID; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -88,6 +89,7 @@ protected Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { FileSystem.closeAll(); @@ -251,8 +253,8 @@ public void testCommitterFactoryDefault() throws Throwable { methodPath(), new TaskAttemptContextImpl(getConfiguration(), new TaskAttemptID(new TaskID(), 1))); - assertEquals("Wrong committer", - MagicS3GuardCommitter.class, committer.getClass()); + assertEquals(MagicS3GuardCommitter.class, committer.getClass(), + "Wrong committer"); } @Test @@ -430,7 +432,7 @@ private Path validatePendingCommitData(String filename, filename + PENDING_SUFFIX); FileStatus fileStatus = verifyPathExists(fs, "no pending file", pendingDataPath); - assertTrue("No data in " + fileStatus, fileStatus.getLen() > 0); + assertTrue(fileStatus.getLen() > 0, "No data in " + fileStatus); String data = read(fs, pendingDataPath); LOG.info("Contents of {}: \n{}", pendingDataPath, data); // really read it in and parse @@ -581,8 +583,8 @@ public void testWriteNormalStream() throws Throwable { Path destFile = path("normal"); try (FSDataOutputStream out = fs.create(destFile, true)) { out.writeChars("data"); - assertFalse("stream has magic output: " + out, - out.hasCapability(STREAM_CAPABILITY_MAGIC_OUTPUT)); + assertFalse(out.hasCapability(STREAM_CAPABILITY_MAGIC_OUTPUT), + "stream has magic output: " + out); } FileStatus status = fs.getFileStatus(destFile); Assertions.assertThat(status.getLen()) diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestS3ACommitterFactory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestS3ACommitterFactory.java index 2561a69f60b59..ed3deb38de480 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestS3ACommitterFactory.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestS3ACommitterFactory.java @@ -22,9 +22,8 @@ import java.util.Arrays; import java.util.Collection; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,7 +53,6 @@ /** * Tests for the committer factory creation/override process. */ -@RunWith(Parameterized.class) public final class ITestS3ACommitterFactory extends AbstractCommitITest { private static final Logger LOG = LoggerFactory.getLogger( ITestS3ACommitterFactory.class); @@ -109,7 +107,6 @@ public final class ITestS3ACommitterFactory extends AbstractCommitITest { * * @return the committer binding for this run. */ - @Parameterized.Parameters(name = "{3}-fs=[{0}]-task=[{1}]-[{2}]") public static Collection params() { return Arrays.asList(BINDINGS); } @@ -117,40 +114,41 @@ public static Collection params() { /** * Name of committer to set in filesystem config. If "" do not set one. */ - private final String fsCommitterName; + private String fsCommitterName; /** * Name of committer to set in job config. */ - private final String jobCommitterName; + private String jobCommitterName; /** * Expected committer class. * If null: an exception is expected */ - private final Class committerClass; + private Class committerClass; /** * Description from parameters, simply for thread names to be more informative. */ - private final String description; + private String description; /** * Create a parameterized instance. - * @param fsCommitterName committer to set in filesystem config - * @param jobCommitterName committer to set in job config - * @param committerClass expected committer class - * @param description debug text for thread names. + * @param pFsCommitterName committer to set in filesystem config + * @param pJobCommitterName committer to set in job config + * @param pCommitterClass expected committer class + * @param pDescription debug text for thread names. */ - public ITestS3ACommitterFactory( - final String fsCommitterName, - final String jobCommitterName, - final Class committerClass, - final String description) { - this.fsCommitterName = fsCommitterName; - this.jobCommitterName = jobCommitterName; - this.committerClass = committerClass; - this.description = description; + public void initITestS3ACommitterFactory( + final String pFsCommitterName, + final String pJobCommitterName, + final Class pCommitterClass, + final String pDescription) throws Exception { + this.fsCommitterName = pFsCommitterName; + this.jobCommitterName = pJobCommitterName; + this.committerClass = pCommitterClass; + this.description = pDescription; + setup(); } @Override @@ -212,8 +210,14 @@ protected void deleteTestDirInTeardown() { * Verify that if all config options are unset, the FileOutputCommitter * is returned. 
*/ - @Test - public void testBinding() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{3}-fs=[{0}]-task=[{1}]-[{2}]") + public void testBinding(String pFsCommitterName, + String pJobCommitterName, + Class pCommitterClass, + String pDescription) throws Throwable { + initITestS3ACommitterFactory(pFsCommitterName, pJobCommitterName, pCommitterClass, + pDescription); assertFactoryCreatesExpectedCommitter(committerClass); } @@ -229,9 +233,8 @@ private void assertFactoryCreatesExpectedCommitter( throws Exception { describe("Creating committer: expected class \"%s\"", expected); if (expected != null) { - assertEquals("Wrong Committer from factory", - expected, - createCommitter().getClass()); + assertEquals(expected, createCommitter().getClass(), + "Wrong Committer from factory"); } else { intercept(PathCommitException.class, this::createCommitter); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestUploadRecovery.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestUploadRecovery.java index 2ede6d82798d0..6e0970207f742 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestUploadRecovery.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestUploadRecovery.java @@ -27,9 +27,10 @@ import org.assertj.core.api.Assertions; import org.assertj.core.api.Assumptions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.core.interceptor.Context; @@ -67,7 +68,7 @@ * Test upload recovery by injecting failures into the response chain. * The tests are parameterized on upload buffering. *

- * The test case {@link #testCommitOperations()} is independent of this option; + * The test case {@link #testCommitOperations(String, boolean)} is independent of this option; * the test parameterization only runs this once. * A bit inelegant but as the fault injection code is shared and the problem "adjacent" * this isolates all forms of upload recovery into the same test class without @@ -75,7 +76,6 @@ *
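Reviewer note: the parameterized suites in this file and the ones above all migrate the same way: @RunWith(Parameterized.class) and the constructor go away, the static params() method stays, and each test method receives the parameters and forwards them to an initXxx() method before doing anything else. A condensed, hypothetical sketch of that shape (names and values are illustrative, not from this patch):

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    public class UploadRecoverySketch {

      public static Collection<Object[]> params() {
        return Arrays.asList(new Object[][]{
            {"array", true},
            {"disk", false},
        });
      }

      private String buffer;
      private boolean includeCommitTest;

      // Replaces the JUnit 4 constructor; must run before the test body.
      private void init(String pBuffer, boolean pIncludeCommitTest) {
        this.buffer = pBuffer;
        this.includeCommitTest = pIncludeCommitTest;
      }

      @ParameterizedTest(name = "{0}-commit-{1}")
      @MethodSource("params")
      public void testPutRecovery(String pBuffer, boolean pIncludeCommitTest) {
        init(pBuffer, pIncludeCommitTest);
        assertNotNull(buffer, "buffer type");
      }
    }

One consequence worth keeping in mind: unlike the JUnit 4 runner, the fields are still unset while @BeforeEach methods run, which is why several of the converted classes call setup() from their init method instead of, or as well as, annotating it.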

* Fault injection is implemented in {@link SdkFaultInjector}. */ -@RunWith(Parameterized.class) public class ITestUploadRecovery extends AbstractS3ACostTest { private static final Logger LOG = @@ -84,7 +84,6 @@ public class ITestUploadRecovery extends AbstractS3ACostTest { /** * Parameterization. */ - @Parameterized.Parameters(name = "{0}-commit-{1}") public static Collection params() { return Arrays.asList(new Object[][]{ {FAST_UPLOAD_BUFFER_ARRAY, true}, @@ -103,21 +102,22 @@ public static Collection params() { /** * should the commit test be included? */ - private final boolean includeCommitTest; + private boolean includeCommitTest; /** * Buffer type for this test run. */ - private final String buffer; + private String buffer; /** * Parameterized test suite. - * @param buffer buffer type - * @param includeCommitTest should the commit upload test be included? + * @param pBuffer buffer type + * @param pIncludeCommitTest should the commit upload test be included? */ - public ITestUploadRecovery(final String buffer, final boolean includeCommitTest) { - this.includeCommitTest = includeCommitTest; - this.buffer = buffer; + public void initITestUploadRecovery(final String pBuffer, + final boolean pIncludeCommitTest) { + this.includeCommitTest = pIncludeCommitTest; + this.buffer = pBuffer; } @Override @@ -152,12 +152,14 @@ public Configuration createConfiguration() { /** * Setup MUST set up the evaluator before the FS is created. */ + @BeforeEach @Override public void setup() throws Exception { SdkFaultInjector.resetFaultInjector(); super.setup(); } + @AfterEach @Override public void teardown() throws Exception { // safety check in case the evaluation is failing any @@ -170,8 +172,11 @@ public void teardown() throws Exception { /** * Verify that failures of simple PUT requests can be recovered from. */ - @Test - public void testPutRecovery() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-commit-{1}") + public void testPutRecovery(String pBuffer, + boolean pIncludeCommitTest) throws Throwable { + initITestUploadRecovery(pBuffer, pIncludeCommitTest); describe("test put recovery"); final S3AFileSystem fs = getFileSystem(); final Path path = methodPath(); @@ -187,8 +192,11 @@ public void testPutRecovery() throws Throwable { /** * Validate recovery of multipart uploads within a magic write sequence. */ - @Test - public void testMagicWriteRecovery() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-commit-{1}") + public void testMagicWriteRecovery(String pBuffer, + boolean pIncludeCommitTest) throws Throwable { + initITestUploadRecovery(pBuffer, pIncludeCommitTest); describe("test magic write recovery with multipart uploads"); final S3AFileSystem fs = getFileSystem(); @@ -227,8 +235,11 @@ public void testMagicWriteRecovery() throws Throwable { /** * Test the commit operations iff {@link #includeCommitTest} is true. 
*/ - @Test - public void testCommitOperations() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-commit-{1}") + public void testCommitOperations(String pBuffer, + boolean pIncludeCommitTest) throws Throwable { + initITestUploadRecovery(pBuffer, pIncludeCommitTest); skipIfClientSideEncryption(); Assumptions.assumeThat(includeCommitTest) .describedAs("commit test excluded") diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java index 7488de41ce638..24da4b27b5676 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java @@ -37,13 +37,11 @@ import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.util.Sets; import org.assertj.core.api.Assertions; -import org.junit.FixMethodOrder; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.MethodSorters; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -104,27 +102,31 @@ * *

  • * The test suites are declared to be executed in ascending order, so - * that for a specific binding, the order is {@link #test_000()}, - * {@link #test_100()} {@link #test_200_execute()} and finally - * {@link #test_500()}. + * that for a specific binding, the order is + * {@link #test_000(CommitterTestBinding)}, + * {@link #test_100(CommitterTestBinding)} + * {@link #test_200_execute(CommitterTestBinding, java.nio.file.Path)} and finally + * {@link #test_500(CommitterTestBinding)}. *
  • *
  • - * {@link #test_000()} calls {@link CommitterTestBinding#validate()} to + * {@link #test_000(CommitterTestBinding)} calls + * {@link CommitterTestBinding#validate()} to * as to validate the state of the committer. This is primarily to * verify that the binding setup mechanism is working. *
  • *
  • - * {@link #test_100()} is relayed to + * {@link #test_100(CommitterTestBinding)} is relayed to * {@link CommitterTestBinding#test_100()}, * for any preflight tests. *
  • *
  • - * The {@link #test_200_execute()} test runs the MR job for that + * The {@link #test_200_execute(CommitterTestBinding, java.nio.file.Path)} + * test runs the MR job for that * particular binding with standard reporting and verification of the * outcome. *
  • *
  • - * {@link #test_500()} test is relayed to + * {@link #test_500(CommitterTestBinding)} test is relayed to * {@link CommitterTestBinding#test_500()}, for any post-MR-job tests. * * @@ -135,8 +137,7 @@ * generally no useful information about the job in the local S3AFileSystem * instance. */ -@RunWith(Parameterized.class) -@FixMethodOrder(MethodSorters.NAME_ASCENDING) +@TestMethodOrder(MethodOrderer.Alphanumeric.class) public class ITestS3ACommitterMRJob extends AbstractYarnClusterITest { private static final Logger LOG = @@ -147,7 +148,6 @@ public class ITestS3ACommitterMRJob extends AbstractYarnClusterITest { * * @return the committer binding for this run. */ - @Parameterized.Parameters(name = "{0}") public static Collection params() { return Arrays.asList(new Object[][]{ {new DirectoryCommitterTestBinding()}, @@ -159,15 +159,16 @@ public static Collection params() { /** * The committer binding for this instance. */ - private final CommitterTestBinding committerTestBinding; + private CommitterTestBinding committerTestBinding; /** * Parameterized constructor. - * @param committerTestBinding binding for the test. + * @param pCommitterTestBinding binding for the test. */ - public ITestS3ACommitterMRJob( - final CommitterTestBinding committerTestBinding) { - this.committerTestBinding = committerTestBinding; + public void initITestS3ACommitterMRJob( + final CommitterTestBinding pCommitterTestBinding) throws Exception { + this.committerTestBinding = pCommitterTestBinding; + setup(); } @Override @@ -184,9 +185,6 @@ protected Configuration createConfiguration() { return conf; } - @Rule - public final TemporaryFolder localFilesDir = new TemporaryFolder(); - @Override protected String committerName() { return committerTestBinding.getCommitterName(); @@ -195,18 +193,25 @@ protected String committerName() { /** * Verify that the committer binding is happy. 
*/ - @Test - public void test_000() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}") + public void test_000(CommitterTestBinding pCommitterTestBinding) throws Throwable { + initITestS3ACommitterMRJob(pCommitterTestBinding); committerTestBinding.validate(); } - @Test - public void test_100() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}") + public void test_100(CommitterTestBinding pCommitterTestBinding) throws Throwable { + initITestS3ACommitterMRJob(pCommitterTestBinding); committerTestBinding.test_100(); } - @Test - public void test_200_execute() throws Exception { + @MethodSource("params") + @ParameterizedTest(name = "{0}") + public void test_200_execute(CommitterTestBinding pCommitterTestBinding, + @TempDir java.nio.file.Path localFilesDir) throws Exception { + initITestS3ACommitterMRJob(pCommitterTestBinding); describe("Run an MR with committer %s", committerName()); S3AFileSystem fs = getFileSystem(); @@ -224,7 +229,7 @@ List expectedFiles = new ArrayList<>(numFiles); Set expectedKeys = Sets.newHashSet(); for (int i = 0; i < numFiles; i += 1) { - File file = localFilesDir.newFile(i + ".text"); + File file = localFilesDir.resolve(i + ".text").toFile(); try (FileOutputStream out = new FileOutputStream(file)) { out.write(("file " + i).getBytes(StandardCharsets.UTF_8)); } @@ -241,7 +246,7 @@ mrJob.setOutputFormatClass(LoggingTextOutputFormat.class); FileOutputFormat.setOutputPath(mrJob, outputPath); - File mockResultsFile = localFilesDir.newFile("committer.bin"); + File mockResultsFile = localFilesDir.resolve("committer.bin").toFile(); mockResultsFile.delete(); String committerPath = "file:" + mockResultsFile; jobConf.set("mock-results-file", committerPath); @@ -251,7 +256,7 @@ mrJob.setInputFormatClass(TextInputFormat.class); FileInputFormat.addInputPath(mrJob, - new Path(localFilesDir.getRoot().toURI())); + new Path(localFilesDir.toUri())); mrJob.setMapperClass(MapClass.class); mrJob.setNumReduceTasks(0); @@ -354,8 +359,10 @@ protected void customPostExecutionValidation(final Path destPath, /** * This is the extra test which committer test bindings can add. */ - @Test - public void test_500() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}") + public void test_500(CommitterTestBinding pCommitterTestBinding) throws Throwable { + initITestS3ACommitterMRJob(pCommitterTestBinding); committerTestBinding.test_500(); } @@ -492,7 +499,8 @@ protected void validateResult(Path destPath, } /** - * A test to run before the main {@link #test_200_execute()} test is + * A test to run before the main + * {@link #test_200_execute(CommitterTestBinding, java.nio.file.Path)} test is * invoked. * @throws Throwable failure. */ @@ -501,7 +509,8 @@ void test_100() throws Throwable { } /** - * A test to run after the main {@link #test_200_execute()} test is + * A test to run after the main + * {@link #test_200_execute(CommitterTestBinding, java.nio.file.Path)} test is * invoked. * @throws Throwable failure. */ @@ -511,14 +520,14 @@ void test_500() throws Throwable { } /** * Validate the state of the binding. - * This is called in {@link #test_000()} so will + * This is called in {@link #test_000(CommitterTestBinding)} so will * fail independently of the other tests. * @throws Throwable failure. 
*/ public void validate() throws Throwable { - assertNotNull("Not bound to a cluster", binding); - assertNotNull("No cluster filesystem", getClusterFS()); - assertNotNull("No yarn cluster", binding.getYarn()); + assertNotNull(binding, "Not bound to a cluster"); + assertNotNull(getClusterFS(), "No cluster filesystem"); + assertNotNull(binding.getYarn(), "No yarn cluster"); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java index cbfc23a2a29b6..90f0150a2f7fe 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java @@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration; import org.assertj.core.api.Assertions; -import org.junit.Test; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; @@ -42,8 +41,8 @@ import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; import static org.apache.hadoop.fs.s3a.S3AUtils.listAndFilter; @@ -54,10 +53,9 @@ /** * Test the magic committer's commit protocol. */ -@RunWith(Parameterized.class) public class ITestMagicCommitProtocol extends AbstractITCommitProtocol { - private final boolean trackCommitsInMemory; + private boolean trackCommitsInMemory; @Override protected String suitename() { @@ -80,7 +78,6 @@ public void setup() throws Exception { CommitUtils.verifyIsMagicCommitFS(getFileSystem()); } - @Parameterized.Parameters(name = "track-commit-in-memory-{0}") public static Collection params() { return Arrays.asList(new Object[][]{ {false}, @@ -88,8 +85,10 @@ public static Collection params() { }); } - public ITestMagicCommitProtocol(boolean trackCommitsInMemory) { - this.trackCommitsInMemory = trackCommitsInMemory; + public void initITestMagicCommitProtocol(boolean pTrackCommitsInMemory) + throws Exception { + this.trackCommitsInMemory = pTrackCommitsInMemory; + setup(); } @Override @@ -183,8 +182,10 @@ protected void validateTaskAttemptWorkingDirectory( * committer UUID to ensure uniqueness in the case of more than * one job writing to the same destination path. 
*/ - @Test - public void testCommittersPathsHaveUUID() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "track-commit-in-memory-{0}") + public void testCommittersPathsHaveUUID(boolean pTrackCommitsInMemory) throws Throwable { + initITestMagicCommitProtocol(pTrackCommitsInMemory); TaskAttemptContext tContext = new TaskAttemptContextImpl( getConfiguration(), getTaskAttempt0()); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocolFailure.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocolFailure.java index 41593c2b26304..780ca9ca69080 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocolFailure.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocolFailure.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a.commit.magic; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java index 116d48e9de5fc..45d7469f7970a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java @@ -23,7 +23,8 @@ import java.util.Map; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -97,6 +98,7 @@ protected boolean expectImmediateFileVisibility() { return false; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -144,8 +146,8 @@ public void test_030_postCreationAssertions() throws Throwable { // as a 0-byte marker is created, there is a file at the end path, // it just MUST be 0-bytes long FileStatus status = fs.getFileStatus(magicOutputFile); - assertEquals("Non empty marker file " + status, - 0, status.getLen()); + assertEquals(0, status.getLen(), + "Non empty marker file " + status); final Map xAttr = fs.getXAttrs(magicOutputFile); final String header = XA_MAGIC_MARKER; Assertions.assertThat(xAttr) @@ -164,7 +166,7 @@ public void test_030_postCreationAssertions() throws Throwable { Assertions.assertThat(listMultipartUploads(fs, destDirKey)) .describedAs("Pending uploads") .hasSize(1); - assertNotNull("jobDir", jobDir); + assertNotNull(jobDir, "jobDir"); try(CommitContext commitContext = operations.createCommitContextForTesting(jobDir, null, COMMITTER_THREADS)) { Pair>> diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestDirectoryCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestDirectoryCommitProtocol.java index b19662c0117fd..43b4d3d8095ac 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestDirectoryCommitProtocol.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestDirectoryCommitProtocol.java @@ -22,7 +22,7 @@ import java.util.Arrays; import java.util.stream.Collectors; 
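Reviewer note: the recurring + @BeforeEach and + @AfterEach annotations on overridden setup() and teardown() methods throughout this patch are required, not cosmetic. JUnit 4 ran an inherited @Before method through virtual dispatch, so an un-annotated override still executed; JUnit Jupiter skips a superclass lifecycle method once it is overridden, so every override must re-declare the annotation. A sketch of the rule:

    import org.junit.jupiter.api.BeforeEach;

    abstract class BaseSketch {
      @BeforeEach
      public void setup() throws Exception {
        // shared wiring
      }
    }

    class ChildSketch extends BaseSketch {
      @BeforeEach   // without this line, setup() would never run under JUnit 5
      @Override
      public void setup() throws Exception {
        super.setup();
        // subclass wiring
      }
    }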
-import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -90,14 +90,14 @@ public void testValidateDefaultConflictMode() throws Throwable { .collect(Collectors.joining(",")); String baseConfVal = baseConf .getTrimmed(FS_S3A_COMMITTER_STAGING_CONFLICT_MODE); - assertEquals("conflict mode in core config from "+ sourceStr, - CONFLICT_MODE_APPEND, baseConfVal); + assertEquals(CONFLICT_MODE_APPEND, baseConfVal, + "conflict mode in core config from "+ sourceStr); Configuration fsConf = getFileSystem().getConf(); String conflictModeDefVal = fsConf .getTrimmed(FS_S3A_COMMITTER_STAGING_CONFLICT_MODE); - assertEquals("conflict mode in filesystem", - CONFLICT_MODE_APPEND, conflictModeDefVal); + assertEquals(CONFLICT_MODE_APPEND, conflictModeDefVal, + "conflict mode in filesystem"); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocol.java index 81c3af812ab95..b74d95ecfd173 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocol.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocol.java @@ -21,7 +21,8 @@ import java.io.IOException; import java.util.UUID; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; @@ -62,6 +63,7 @@ protected Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -73,10 +75,9 @@ public void setup() throws Exception { uuid); Pair t3 = AbstractS3ACommitter .buildJobUUID(conf, JobID.forName("job_" + getJobId())); - assertEquals("Job UUID", uuid, t3.getLeft()); - assertEquals("Job UUID source: " + t3, - AbstractS3ACommitter.JobUUIDSource.SparkWriteUUID, - t3.getRight()); + assertEquals(uuid, t3.getLeft(), "Job UUID"); + assertEquals(AbstractS3ACommitter.JobUUIDSource.SparkWriteUUID, + t3.getRight(), "Job UUID source: " + t3); Path tempDir = Paths.getLocalTaskAttemptTempDir(conf, uuid, getTaskAttempt0()); rmdir(tempDir, conf); @@ -124,7 +125,7 @@ protected void validateTaskAttemptPathAfterWrite(Path p, FileSystem localFS = getLocalFS(); ContractTestUtils.assertPathExists(localFS, "task attempt", p); FileStatus st = localFS.getFileStatus(p); - assertEquals("file length in " + st, expectedLength, st.getLen()); + assertEquals(expectedLength, st.getLen(), "file length in " + st); } protected FileSystem getLocalFS() throws IOException { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocolFailure.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocolFailure.java index 08b6c21a863d5..6355e31345e35 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocolFailure.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/integration/ITestStagingCommitProtocolFailure.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a.commit.staging.integration; -import org.junit.Test; +import 
org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java index da1580076dbb8..a068431ed6169 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java @@ -29,12 +29,11 @@ import java.util.Optional; import java.util.function.Consumer; -import org.junit.Assume; -import org.junit.FixMethodOrder; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.MethodSorters; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,6 +56,7 @@ import static java.util.Optional.empty; import static org.apache.hadoop.fs.s3a.S3ATestUtils.lsR; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * Runs Terasort against S3A. @@ -72,8 +72,7 @@ * Before anyone calls that out as slow: try running the test with the file * committer. */ -@FixMethodOrder(MethodSorters.NAME_ASCENDING) -@RunWith(Parameterized.class) +@TestMethodOrder(MethodOrderer.Alphanumeric.class) @SuppressWarnings("StaticNonFinalField") public class ITestTerasortOnS3A extends AbstractYarnClusterITest { @@ -88,7 +87,7 @@ public class ITestTerasortOnS3A extends AbstractYarnClusterITest { /** * Duration tracker created in the first of the test cases and closed - * in {@link #test_140_teracomplete()}. + * in {@link #test_140_teracomplete(String, boolean)}. */ private static Optional terasortDuration = empty(); @@ -98,10 +97,10 @@ public class ITestTerasortOnS3A extends AbstractYarnClusterITest { private static Map completedStages = new HashMap<>(); /** Name of the committer for this run. */ - private final String committerName; + private String committerName; /** Should Magic committer track pending commits in-memory. */ - private final boolean trackCommitsInMemory; + private boolean trackCommitsInMemory; /** Base path for all the terasort input and output paths. */ private Path terasortPath; @@ -120,7 +119,6 @@ public class ITestTerasortOnS3A extends AbstractYarnClusterITest { * * @return the committer binding for this run. 
*/ - @Parameterized.Parameters(name = "{0}-memory={1}") public static Collection params() { return Arrays.asList(new Object[][]{ {DirectoryStagingCommitter.NAME, false}, @@ -128,9 +126,10 @@ public static Collection params() { {MagicS3GuardCommitter.NAME, true}}); } - public ITestTerasortOnS3A(final String committerName, final boolean trackCommitsInMemory) { - this.committerName = committerName; - this.trackCommitsInMemory = trackCommitsInMemory; + public void initITestTerasortOnS3A( + final String pCommitterName, final boolean pTrackCommitsInMemory) throws Exception { + this.committerName = pCommitterName; + this.trackCommitsInMemory = pTrackCommitsInMemory; } @Override @@ -138,6 +137,7 @@ protected String committerName() { return committerName; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -212,9 +212,8 @@ private static void completedStage(final String stage, * @param stage stage name */ private static void requireStage(final String stage) { - Assume.assumeTrue( - "Required stage was not completed: " + stage, - completedStages.get(stage) != null); + assumeTrue(completedStages.get(stage) != null, + "Required stage was not completed: " + stage); } /** @@ -243,9 +242,9 @@ private void executeStage( d.close(); } dumpOutputTree(dest); - assertEquals(stage + assertEquals(0, result, stage + "(" + StringUtils.join(", ", args) + ")" - + " failed", 0, result); + + " failed"); validateSuccessFile(dest, committerName(), getFileSystem(), stage, minimumFileCount, ""); completedStage(stage, d); @@ -259,8 +258,11 @@ private void executeStage( * It is where all variables which need to be reset for each run need * to be reset. */ - @Test - public void test_100_terasort_setup() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-memory={1}") + public void test_100_terasort_setup(String pCommitterName, + boolean pTrackCommitsInMemory) throws Throwable { + initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); describe("Setting up for a terasort with path of %s", terasortPath); getFileSystem().delete(terasortPath, true); @@ -268,8 +270,11 @@ public void test_100_terasort_setup() throws Throwable { terasortDuration = Optional.of(new DurationInfo(LOG, false, "Terasort")); } - @Test - public void test_110_teragen() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-memory={1}") + public void test_110_teragen(String pCommitterName, + boolean pTrackCommitsInMemory) throws Throwable { + initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); describe("Teragen to %s", sortInput); getFileSystem().delete(sortInput, true); @@ -284,8 +289,11 @@ public void test_110_teragen() throws Throwable { } - @Test - public void test_120_terasort() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-memory={1}") + public void test_120_terasort(String pCommitterName, + boolean pTrackCommitsInMemory) throws Throwable { + initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); describe("Terasort from %s to %s", sortInput, sortOutput); requireStage("teragen"); getFileSystem().delete(sortOutput, true); @@ -301,8 +309,11 @@ public void test_120_terasort() throws Throwable { 1); } - @Test - public void test_130_teravalidate() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-memory={1}") + public void test_130_teravalidate(String pCommitterName, + boolean pTrackCommitsInMemory) throws Throwable { + initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); 
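Reviewer note: assumptions move the same way as assertions: org.junit.Assume.assumeTrue(message, condition) becomes org.junit.jupiter.api.Assumptions.assumeTrue(condition, message), and a failed assumption still skips the test rather than failing it. A minimal sketch of the requireStage() conversion above (the map and values are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    public class StageGateSketch {
      private static final Map<String, Object> COMPLETED = new HashMap<>();

      // Skips (rather than fails) the calling test when an earlier
      // stage of the ordered suite did not complete.
      static void requireStage(String stage) {
        assumeTrue(COMPLETED.get(stage) != null,
            "Required stage was not completed: " + stage);
      }
    }

The staged ordering itself comes from @TestMethodOrder(MethodOrderer.Alphanumeric.class), the Jupiter replacement for @FixMethodOrder(MethodSorters.NAME_ASCENDING); note that Alphanumeric appears deprecated in recent JUnit 5 releases in favour of MethodOrderer.MethodName.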
describe("TeraValidate from %s to %s", sortOutput, sortValidate); requireStage("terasort"); getFileSystem().delete(sortValidate, true); @@ -321,8 +332,11 @@ public void test_130_teravalidate() throws Throwable { * Print the results, and save to the base dir as a CSV file. * Why there? Makes it easy to list and compare. */ - @Test - public void test_140_teracomplete() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-memory={1}") + public void test_140_teracomplete(String pCommitterName, + boolean pTrackCommitsInMemory) throws Throwable { + initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); terasortDuration.ifPresent(d -> { d.close(); completedStage("overall", d); @@ -357,13 +371,19 @@ public void test_140_teracomplete() throws Throwable { * Without this the total execution time is reported as from the start of * the first test suite to the end of the second. */ - @Test - public void test_150_teracleanup() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-memory={1}") + public void test_150_teracleanup(String pCommitterName, + boolean pTrackCommitsInMemory) throws Throwable { + initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); terasortDuration = Optional.empty(); } - @Test - public void test_200_directory_deletion() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-memory={1}") + public void test_200_directory_deletion(String pCommitterName, + boolean pTrackCommitsInMemory) throws Throwable { + initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); getFileSystem().delete(terasortPath, true); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestAwsSdkWorkarounds.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestAwsSdkWorkarounds.java index d18a722a0e2cc..dea4c29796ed1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestAwsSdkWorkarounds.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestAwsSdkWorkarounds.java @@ -21,7 +21,7 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestConnectionTimeouts.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestConnectionTimeouts.java index aeb9629b3a6d1..09911b339d2b9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestConnectionTimeouts.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestConnectionTimeouts.java @@ -24,7 +24,8 @@ import java.util.concurrent.atomic.AtomicLong; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -133,6 +134,7 @@ private Configuration timingOutConfiguration() { return conf; } + @AfterEach @Override public void teardown() throws Exception { AWSClientConfig.resetMinimumOperationDuration(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java index 40318be35bae4..d33359dbecf55 100644 --- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java @@ -29,9 +29,9 @@ import java.util.stream.Collectors; import org.assertj.core.api.Assertions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -91,7 +91,6 @@ * */ @SuppressWarnings("ThrowableNotThrown") -@RunWith(Parameterized.class) public class ITestPartialRenamesDeletes extends AbstractS3ATestBase { private static final Logger LOG = @@ -165,7 +164,7 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase { private Path noReadDir; /** delete policy: single or multi? */ - private final boolean multiDelete; + private boolean multiDelete; /** * Configuration for the assume role FS. @@ -190,7 +189,6 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase { * * @return a list of parameter tuples. */ - @Parameterized.Parameters(name = "bulk-delete={0}") public static Collection params() { return Arrays.asList(new Object[][]{ {false}, @@ -200,10 +198,12 @@ public static Collection params() { /** * Constructor. - * @param multiDelete single vs multi delete in the role FS? + * @param pMultiDelete single vs multi delete in the role FS? */ - public ITestPartialRenamesDeletes(final boolean multiDelete) { - this.multiDelete = multiDelete; + public void initITestPartialRenamesDeletes(final boolean pMultiDelete) + throws Exception { + this.multiDelete = pMultiDelete; + setup(); } /** @@ -256,6 +256,7 @@ public void setup() throws Exception { dirDepth = scaleTest ? DEPTH_SCALED : DEPTH; } + @AfterEach @Override public void teardown() throws Exception { cleanupWithLogger(LOG, roleFS); @@ -335,8 +336,10 @@ private Path uniquePath() throws IOException { /** * This is here to verify role and path setup. 
*/ - @Test - public void testCannotTouchUnderRODir() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testCannotTouchUnderRODir(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); forbidden("touching the empty child " + readOnlyChild, "", () -> { @@ -344,8 +347,10 @@ public void testCannotTouchUnderRODir() throws Throwable { return readOnlyChild; }); } - @Test - public void testCannotReadUnderNoReadDir() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testCannotReadUnderNoReadDir(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); Path path = new Path(noReadDir, "unreadable.txt"); createFile(getFileSystem(), path, true, "readonly".getBytes()); forbidden("trying to read " + path, @@ -353,8 +358,10 @@ public void testCannotReadUnderNoReadDir() throws Throwable { () -> readUTF8(roleFS, path, -1)); } - @Test - public void testMultiDeleteOptionPropagated() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testMultiDeleteOptionPropagated(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("Verify the test parameter propagates to the store context"); StoreContext ctx = roleFS.createStoreContext(); Assertions.assertThat(ctx.isMultiObjectDeleteEnabled()) @@ -365,8 +372,10 @@ public void testMultiDeleteOptionPropagated() throws Throwable { /** * Execute a sequence of rename operations with access locked down. */ - @Test - public void testRenameParentPathNotWriteable() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testRenameParentPathNotWriteable(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("rename with parent paths not writeable; multi=%s", multiDelete); final Configuration conf = createAssumedRoleConfig(); bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW, @@ -397,8 +406,10 @@ public void testRenameParentPathNotWriteable() throws Throwable { roleFS.delete(writableDir, true); } - @Test - public void testRenameSingleFileFailsInDelete() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testRenameSingleFileFailsInDelete(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("rename with source read only; multi=%s", multiDelete); Path readOnlyFile = readOnlyChild; @@ -443,8 +454,10 @@ public void testRenameSingleFileFailsInDelete() throws Throwable { * it's a filesystem forever.
  • * */ - @Test - public void testRenameDirFailsInDelete() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testRenameDirFailsInDelete(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("rename with source read only; multi=%s", multiDelete); // the full FS @@ -492,8 +505,10 @@ public void testRenameDirFailsInDelete() throws Throwable { } } - @Test - public void testRenameFileFailsNoWrite() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testRenameFileFailsNoWrite(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("Try to rename to a write-only destination fails with src" + " & dest unchanged."); roleFS.mkdirs(writableDir); @@ -509,8 +524,10 @@ public void testRenameFileFailsNoWrite() throws Throwable { assertPathDoesNotExist("rename destination", dest); } - @Test - public void testCopyDirFailsToReadOnlyDir() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testCopyDirFailsToReadOnlyDir(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("Try to copy to a read-only destination"); roleFS.mkdirs(writableDir); S3AFileSystem fs = getFileSystem(); @@ -525,8 +542,10 @@ public void testCopyDirFailsToReadOnlyDir() throws Throwable { writableDir, files.size()); } - @Test - public void testCopyFileFailsOnSourceRead() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testCopyFileFailsOnSourceRead(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("The source file isn't readable, so the COPY fails"); Path source = new Path(noReadDir, "source"); S3AFileSystem fs = getFileSystem(); @@ -538,8 +557,10 @@ public void testCopyFileFailsOnSourceRead() throws Throwable { assertPathDoesNotExist("rename destination", dest); } - @Test - public void testCopyDirFailsOnSourceRead() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testCopyDirFailsOnSourceRead(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("The source file isn't readable, so the COPY fails"); S3AFileSystem fs = getFileSystem(); List files = createFiles(fs, noReadDir, dirDepth, fileCount, @@ -557,8 +578,10 @@ public void testCopyDirFailsOnSourceRead() throws Throwable { * This verifies that failures in the delete fake dir stage. * are not visible. */ - @Test - public void testPartialEmptyDirDelete() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testPartialEmptyDirDelete(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("delete an empty directory with parent dir r/o" + " multidelete=%s", multiDelete); @@ -579,8 +602,10 @@ public void testPartialEmptyDirDelete() throws Throwable { * Have a directory with full R/W permissions, but then remove * write access underneath, and try to delete it. 
*/ - @Test - public void testPartialDirDelete() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testPartialDirDelete(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("delete with part of the child tree read only;" + " multidelete=%s", multiDelete); @@ -599,8 +624,8 @@ public void testPartialDirDelete() throws Throwable { // as a safety check, verify that one of the deletable files can be deleted Path head = deletableFiles.remove(0); - assertTrue("delete " + head + " failed", - roleFS.delete(head, false)); + assertTrue(roleFS.delete(head, false), + "delete " + head + " failed"); // this set can be deleted by the role FS MetricDiff rejectionCount = new MetricDiff(roleFS, FILES_DELETE_REJECTED); @@ -727,8 +752,10 @@ private Set listFilesUnderPath(Path path, boolean recursive) *

    * See HADOOP-17621. */ - @Test - public void testRenamePermissionRequirements() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "bulk-delete={0}") + public void testRenamePermissionRequirements(boolean pMultiDelete) throws Throwable { + initITestPartialRenamesDeletes(pMultiDelete); describe("Verify rename() only needs s3:DeleteObject permission"); // close the existing roleFS IOUtils.cleanupWithLogger(LOG, roleFS); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java index d16d09d068b47..c54534da0bdf9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java @@ -24,7 +24,7 @@ import java.util.concurrent.TimeUnit; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.core.exception.SdkException; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestTreewalkProblems.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestTreewalkProblems.java index 6c84b374b93e9..afc4a62a8e87e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestTreewalkProblems.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestTreewalkProblems.java @@ -25,7 +25,8 @@ import java.util.stream.Collectors; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import software.amazon.awssdk.services.s3.model.MultipartUpload; import org.apache.hadoop.conf.Configuration; @@ -97,6 +98,7 @@ public Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestUploadPurgeOnDirectoryOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestUploadPurgeOnDirectoryOperations.java index 80a44e22b8de7..301f348981462 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestUploadPurgeOnDirectoryOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestUploadPurgeOnDirectoryOperations.java @@ -21,7 +21,8 @@ import java.io.IOException; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import software.amazon.awssdk.services.s3.model.MultipartUpload; import org.apache.hadoop.conf.Configuration; @@ -59,6 +60,7 @@ public Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java index 665c8fdf4d30a..9fef319a241b6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java @@ -25,7 +25,7 @@ import org.assertj.core.api.AbstractStringAssert; import 
org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java index 4d9fa06b00f02..0be658a4d7bf4 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum; import org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum; import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; @@ -98,6 +99,7 @@ public Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java index c1dda84d7e00b..1d8a36b64ec42 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java @@ -24,9 +24,6 @@ import java.util.concurrent.atomic.AtomicLong; import org.assertj.core.api.Assertions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -37,6 +34,8 @@ import org.apache.hadoop.fs.s3a.RemoteFileChangedException; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import static java.util.Objects.requireNonNull; @@ -63,7 +62,6 @@ * with the FS_S3A_CREATE_PERFORMANCE option. */ @SuppressWarnings("resource") -@RunWith(Parameterized.class) public class ITestCreateFileCost extends AbstractS3ACostTest { /** @@ -71,7 +69,6 @@ public class ITestCreateFileCost extends AbstractS3ACostTest { * options. * @return a list of test parameters. */ - @Parameterized.Parameters public static Collection params() { return Arrays.asList(new Object[][]{ {false}, @@ -82,14 +79,14 @@ public static Collection params() { /** * Flag for performance creation; all cost asserts need changing. */ - private final boolean createPerformance; + private boolean createPerformance; /** * Create. 
- * @param createPerformance use the performance flag + * @param pCreatePerformance use the performance flag */ - public ITestCreateFileCost(final boolean createPerformance) { - this.createPerformance = createPerformance; + public void initITestCreateFileCost(final boolean pCreatePerformance) { + this.createPerformance = pCreatePerformance; } /** @@ -111,8 +108,10 @@ public Configuration createConfiguration() { return conf; } - @Test - public void testCreateNoOverwrite() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateNoOverwrite(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("Test file creation without overwrite"); Path testFile = methodPath(); // when overwrite is false, the path is checked for existence. @@ -120,16 +119,20 @@ public void testCreateNoOverwrite() throws Throwable { expected(CREATE_FILE_NO_OVERWRITE)); } - @Test - public void testCreateOverwrite() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateOverwrite(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("Test file creation with overwrite"); Path testFile = methodPath(); // when overwrite is true: only the directory checks take place. create(testFile, true, expected(CREATE_FILE_OVERWRITE)); } - @Test - public void testCreateNoOverwriteFileExists() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateNoOverwriteFileExists(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("Test cost of create file failing with existing file"); Path testFile = file(methodPath()); @@ -144,8 +147,10 @@ public void testCreateNoOverwriteFileExists() throws Throwable { } } - @Test - public void testCreateFileOverDirNoOverwrite() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateFileOverDirNoOverwrite(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("Test cost of create file overwrite=false failing with existing dir"); Path testFile = dir(methodPath()); @@ -160,8 +165,10 @@ public void testCreateFileOverDirNoOverwrite() throws Throwable { } } - @Test - public void testCreateFileOverDirWithOverwrite() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateFileOverDirWithOverwrite(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("Test cost of create file overwrite=false failing with existing dir"); Path testFile = dir(methodPath()); @@ -180,8 +187,10 @@ public void testCreateFileOverDirWithOverwrite() throws Throwable { * Use the builder API. * on s3a this skips parent checks, always. 
*/ - @Test - public void testCreateBuilderSequence() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateBuilderSequence(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("Test builder file creation cost"); Path testFile = methodPath(); dir(testFile.getParent()); @@ -207,8 +216,10 @@ public void testCreateBuilderSequence() throws Throwable { } } - @Test - public void testCreateFilePerformanceFlag() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateFilePerformanceFlag(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("createFile with performance flag skips safety checks"); S3AFileSystem fs = getFileSystem(); @@ -234,8 +245,10 @@ public void testCreateFilePerformanceFlag() throws Throwable { .isGreaterThanOrEqualTo(1); } - @Test - public void testCreateFileRecursive() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateFileRecursive(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("createFile without performance flag performs overwrite safety checks"); S3AFileSystem fs = getFileSystem(); @@ -261,8 +274,10 @@ public void testCreateFileRecursive() throws Throwable { .isEqualTo(custom); } - @Test - public void testCreateFileNonRecursive() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateFileNonRecursive(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("nonrecursive createFile does not check parents"); S3AFileSystem fs = getFileSystem(); @@ -272,8 +287,10 @@ public void testCreateFileNonRecursive() throws Throwable { } - @Test - public void testCreateNonRecursive() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testCreateNonRecursive(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("nonrecursive createFile does not check parents"); S3AFileSystem fs = getFileSystem(); @@ -296,8 +313,10 @@ private FSDataOutputStream build(final FSDataOutputStreamBuilder builder) /** * Shows how the performance option allows the FS to become ill-formed. 
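
The ITestCreateFileCost hunks above all repeat one mechanical transformation. The following is a minimal, self-contained sketch of that pattern, not Hadoop code: the class name, parameter name, and assertion are invented for illustration. The JUnit 4 runner and constructor disappear; each test method receives the parameter directly and forwards it to an init method that replaces the constructor body.

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    import static org.junit.jupiter.api.Assertions.assertEquals;

    public class ExampleParameterizedMigration {

      /** No longer final: assigned per test invocation, not per instance. */
      private boolean createPerformance;

      /** Replaces @Parameterized.Parameters; looked up by @MethodSource. */
      public static Collection<Object[]> params() {
        return Arrays.asList(new Object[][]{{false}, {true}});
      }

      /** Replaces the JUnit 4 constructor. */
      public void initExampleParameterizedMigration(boolean pCreatePerformance) {
        this.createPerformance = pCreatePerformance;
      }

      @MethodSource("params")
      @ParameterizedTest(name = "performance={0}")
      public void testFlagIsApplied(boolean pCreatePerformance) {
        initExampleParameterizedMigration(pCreatePerformance);
        assertEquals(pCreatePerformance, createPerformance);
      }
    }

One consequence of this pattern: JUnit 5 injects the arguments into the test method itself, so anything the old constructor did must now happen inside the test body, after the init call.
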
*/ - @Test - public void testPerformanceFlagPermitsInvalidStores() throws Throwable { + @MethodSource("params") + @ParameterizedTest + public void testPerformanceFlagPermitsInvalidStores(boolean pCreatePerformance) throws Throwable { + initITestCreateFileCost(pCreatePerformance); describe("createFile with performance flag over a directory"); S3AFileSystem fs = getFileSystem(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateSessionTimeout.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateSessionTimeout.java index ebd771bddb3ff..0b8a749c1e77f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateSessionTimeout.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateSessionTimeout.java @@ -25,7 +25,8 @@ import java.util.concurrent.atomic.AtomicLong; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.http.SdkHttpRequest; @@ -114,6 +115,7 @@ public Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { // remove the safety check on minimum durations. diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java index 475d1d658dd08..3d30d4161985f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java @@ -27,7 +27,9 @@ import java.util.stream.Collectors; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.core.exception.SdkException; @@ -168,6 +170,7 @@ protected Configuration createConfiguration() { /** * The setup phase includes creating the test objects. */ + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -183,6 +186,7 @@ public void setup() throws Exception { * Teardown deletes the objects created before * the superclass does the directory cleanup. 
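
The recurring @BeforeEach and @AfterEach additions on overridden setup() and teardown() methods in these files are not cosmetic. In JUnit 5 a lifecycle method inherited from a superclass stops being a lifecycle method once a subclass overrides it without repeating the annotation, so every override must be re-annotated. A minimal sketch, with invented class names:

    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    abstract class ExampleBase {
      @BeforeEach
      public void setup() throws Exception {
        // shared wiring
      }
    }

    class ExampleSub extends ExampleBase {

      // Without its own @BeforeEach this override would never run
      // automatically: overriding strips the inherited annotation.
      @BeforeEach
      @Override
      public void setup() throws Exception {
        super.setup();
        // subclass wiring
      }

      @Test
      void testSomething() {
        // runs after setup()
      }
    }
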
*/ + @AfterEach @Override public void teardown() throws Exception { if (s3client != null) { @@ -515,8 +519,8 @@ public void testRenameEmptyDirOverMarker() throws Throwable { head(srcKey); Path dest = markerDir; // renamed into the dest dir - assertFalse("rename(" + src + ", " + dest + ") should have failed", - getFileSystem().rename(src, dest)); + assertFalse(getFileSystem().rename(src, dest), + "rename(" + src + ", " + dest + ") should have failed"); // source is still there assertIsDirectory(src); head(srcKey); @@ -654,7 +658,7 @@ private void assertIsFileUnderMarker(final FileStatus stat) { * @param stat status object */ private void assertIsFileAtPath(final Path path, final FileStatus stat) { - assertTrue("Is not file " + stat, stat.isFile()); + assertTrue(stat.isFile(), "Is not file " + stat); assertPathEquals(path, stat); } @@ -664,8 +668,8 @@ private void assertIsFileAtPath(final Path path, final FileStatus stat) { * @param stat status object */ private void assertPathEquals(final Path path, final FileStatus stat) { - assertEquals("filename is not the expected path :" + stat, - path, stat.getPath()); + assertEquals(path, stat.getPath(), + "filename is not the expected path :" + stat); } /** @@ -719,8 +723,8 @@ private List dump(List l) { */ private void assertRenamed(final Path src, final Path dest) throws IOException { - assertTrue("rename(" + src + ", " + dest + ") failed", - getFileSystem().rename(src, dest)); + assertTrue(getFileSystem().rename(src, dest), + "rename(" + src + ", " + dest + ") failed"); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ADeleteCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ADeleteCost.java index b923fca47bc5c..27276655541cb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ADeleteCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ADeleteCost.java @@ -23,7 +23,8 @@ import java.util.Arrays; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -63,6 +64,7 @@ public Configuration createConfiguration() { ""); } + @AfterEach @Override public void teardown() throws Exception { // do this ourselves to avoid audits teardown failing diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java index bfffc498b71aa..19384b04510ff 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java @@ -24,9 +24,8 @@ import java.util.Collection; import org.assertj.core.api.Assertions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,7 +49,6 @@ /** * Use metrics to assert about the cost of misc operations. 
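
Most of the churn in the assertion hunks above is the failure message moving from the first argument to the last: org.junit.Assert placed the message first, while org.junit.jupiter.api.Assertions places it after the expected and actual values. An illustrative before/after, with invented values:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class ExampleAssertionOrder {

      void check(long expected, long actual, boolean deleted) {
        // JUnit 4: Assert.assertEquals("bytes read from file", expected, actual);
        // JUnit 5: the message is the trailing argument.
        assertEquals(expected, actual, "bytes read from file");

        // JUnit 4: Assert.assertTrue("delete failed", deleted);
        assertTrue(deleted, "delete failed");
      }
    }

Getting the swap wrong usually fails to compile, but for three string arguments a message-first call still compiles and silently compares the message against the first value, which is why these hunks merit review.
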
*/ -@RunWith(Parameterized.class) public class ITestS3AMiscOperationCost extends AbstractS3ACostTest { private static final Logger LOG = @@ -59,12 +57,11 @@ public class ITestS3AMiscOperationCost extends AbstractS3ACostTest { /** * Parameter: should auditing be enabled? */ - private final boolean auditing; + private boolean auditing; /** * Parameterization. */ - @Parameterized.Parameters(name = "{0}") public static Collection params() { return Arrays.asList(new Object[][]{ {"auditing", true}, @@ -72,9 +69,9 @@ public static Collection params() { }); } - public ITestS3AMiscOperationCost(final String name, - final boolean auditing) { - this.auditing = auditing; + public void initITestS3AMiscOperationCost(final String pName, + final boolean pAuditing) throws Exception { + this.auditing = pAuditing; } @Override @@ -101,8 +98,11 @@ protected OperationCostValidator.ExpectedProbe withAuditCount( /** * Common operation which should be low cost as possible. */ - @Test - public void testMkdirOverDir() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}") + public void testMkdirOverDir(String pName, + boolean pAuditing) throws Throwable { + initITestS3AMiscOperationCost(pName, pAuditing); describe("create a dir over a dir"); S3AFileSystem fs = getFileSystem(); // create base dir with marker @@ -116,8 +116,11 @@ public void testMkdirOverDir() throws Throwable { with(OBJECT_LIST_REQUEST, FILESTATUS_DIR_PROBE_L)); } - @Test - public void testGetContentSummaryRoot() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}") + public void testGetContentSummaryRoot(String pName, + boolean pAuditing) throws Throwable { + initITestS3AMiscOperationCost(pName, pAuditing); describe("getContentSummary on Root"); S3AFileSystem fs = getFileSystem(); @@ -126,8 +129,11 @@ public void testGetContentSummaryRoot() throws Throwable { with(INVOCATION_GET_CONTENT_SUMMARY, 1)); } - @Test - public void testGetContentSummaryDir() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}") + public void testGetContentSummaryDir(String pName, + boolean pAuditing) throws Throwable { + initITestS3AMiscOperationCost(pName, pAuditing); describe("getContentSummary on test dir with children"); S3AFileSystem fs = getFileSystem(); Path baseDir = methodPath(); @@ -151,8 +157,11 @@ public void testGetContentSummaryDir() throws Throwable { .isEqualTo(1); } - @Test - public void testGetContentMissingPath() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}") + public void testGetContentMissingPath(String pName, + boolean pAuditing) throws Throwable { + initITestS3AMiscOperationCost(pName, pAuditing); describe("getContentSummary on a missing path"); Path baseDir = methodPath(); verifyMetricsIntercepting(FileNotFoundException.class, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java index 262a99fdb48fd..43978beab7337 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a.performance; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AOpenCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AOpenCost.java index febc6bb82c410..c1c03ca6e7212 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AOpenCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AOpenCost.java @@ -26,7 +26,8 @@ import java.util.concurrent.TimeUnit; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -108,6 +109,7 @@ public Configuration createConfiguration() { * Setup creates a test file, saves is status and length * to fields. */ + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -166,7 +168,7 @@ public void testOpenFileWithStatusOfOtherFS() throws Throwable { readStream(in), always(NO_HEAD_OR_LIST), with(STREAM_READ_OPENED, 1)); - assertEquals("bytes read from file", fileLength, readLen); + assertEquals(fileLength, readLen, "bytes read from file"); } @Test @@ -228,7 +230,7 @@ public void testOpenFileShorterLength() throws Throwable { LOG.info("Statistics of read stream {}", statsString); - assertEquals("bytes read from file", shortLen, r2); + assertEquals(shortLen, r2, "bytes read from file"); // no bytes were discarded. bytesDiscarded.assertDiffEquals(0); } @@ -254,7 +256,7 @@ public void testOpenFileLongerLengthReadFully() throws Throwable { return in; }); in.seek(longLen - 1); - assertEquals("read past real EOF on " + in, -1, in.read()); + assertEquals(-1, in.read(), "read past real EOF on " + in); return in.toString(); } }, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ARenameCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ARenameCost.java index 9717d6455d09c..04e0295f84ecf 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ARenameCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ARenameCost.java @@ -21,7 +21,7 @@ import java.util.UUID; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestUnbufferDraining.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestUnbufferDraining.java index e68ea9a031521..6b94c2e3ca0d8 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestUnbufferDraining.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestUnbufferDraining.java @@ -22,7 +22,9 @@ import java.time.Duration; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,6 +116,7 @@ public Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -140,6 +143,7 @@ public void setup() throws Exception { } } + @AfterEach @Override public void teardown() throws Exception { super.teardown(); diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java index d4deb85f4470d..99287e7cfdac5 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java @@ -24,7 +24,9 @@ import java.util.Arrays; import java.util.List; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; @@ -70,7 +72,7 @@ protected static void expectResult(int expected, String message, S3GuardTool tool, String... args) throws Exception { - assertEquals(message, expected, tool.run(args)); + assertEquals(expected, tool.run(args), message); } /** @@ -131,11 +133,13 @@ protected void runToFailure(int status, Configuration conf, Object... args) } } + @BeforeEach @Override public void setup() throws Exception { super.setup(); } + @AfterEach @Override public void teardown() throws Exception { super.teardown(); @@ -153,8 +157,8 @@ public void testBucketInfoUnguarded() throws Exception { "-" + S3GuardTool.BucketInfo.UNGUARDED_FLAG, fsUri.toString()); - assertTrue("Output should contain information about S3A client " + info, - info.contains("S3A Client")); + assertTrue(info.contains("S3A Client"), + "Output should contain information about S3A client " + info); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardTool.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardTool.java index 59787617b884f..995a20ec6b6a9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardTool.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardTool.java @@ -27,7 +27,7 @@ import java.util.Arrays; import java.util.List; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -150,7 +150,7 @@ public void testUploads() throws Throwable { describe("Uploading single part."); createPartUpload(fs, key, 128, 1); - assertEquals("Should be one upload", 1, countUploadsAt(fs, path)); + assertEquals(1, countUploadsAt(fs, path), "Should be one upload"); // 6. Confirm part exists via CLI, direct path and parent path describe("Confirming CLI lists one part"); @@ -160,7 +160,7 @@ public void testUploads() throws Throwable { // 8. Confirm deletion via API describe("Confirming deletion via API"); - assertEquals("Should be no uploads", 0, countUploadsAt(fs, path)); + assertEquals(0, countUploadsAt(fs, path), "Should be no uploads"); // 9. Confirm no uploads are listed via CLI describe("Confirming CLI lists nothing."); @@ -193,7 +193,7 @@ public void testUploadListByAge() throws Throwable { try { // 3. Confirm it exists via API - assertEquals("Should be one upload", 1, countUploadsAt(fs, path)); + assertEquals(1, countUploadsAt(fs, path), "Should be one upload"); // 4. 
Confirm part does appear in listing with long age filter describe("Confirming CLI older age doesn't list"); @@ -216,7 +216,7 @@ public void testUploadListByAge() throws Throwable { describe("Doing aged deletion"); uploadCommandAssertCount(fs, ABORT_FORCE_OPTIONS, path, 1, 1); describe("Confirming age deletion happened"); - assertEquals("Should be no uploads", 0, countUploadsAt(fs, path)); + assertEquals(0, countUploadsAt(fs, path), "Should be no uploads"); } catch (Throwable t) { // Clean up on intermediate failure clearAnyUploads(fs, path); @@ -291,8 +291,8 @@ private void uploadCommandAssertCount(S3AFileSystem fs, String[] options, Path p int parsedUploads = Integer.parseInt(fields[1]); LOG.debug("Matched CLI output: {} {} {} {}", fields[0], fields[1], fields[2], fields[3]); - assertEquals("Unexpected number of uploads", numUploads, - parsedUploads); + assertEquals(numUploads, + parsedUploads, "Unexpected number of uploads"); return; } LOG.debug("Not matched: {}", line); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java index 70cab0d75544e..9c864d10dd37c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java @@ -27,9 +27,10 @@ import java.util.function.IntFunction; import org.assertj.core.api.Assertions; -import org.junit.FixMethodOrder; -import org.junit.Test; -import org.junit.runners.MethodSorters; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -82,7 +83,7 @@ * exists. Even so: they should all have a {@link #assumeHugeFileExists()} * check at the start, in case an individual test is executed. */ -@FixMethodOrder(MethodSorters.NAME_ASCENDING) +@TestMethodOrder(MethodOrderer.Alphanumeric.class) public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase { private static final Logger LOG = LoggerFactory.getLogger( @@ -97,6 +98,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase { private int partitionSize; private long filesize; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -186,14 +188,14 @@ public void test_010_CreateHugeFile() throws IOException { // assume 1 MB/s upload bandwidth int bandwidth = _1MB; long uploadTime = filesize / bandwidth; - assertTrue(String.format("Timeout set in %s seconds is too low;" + - " estimating upload time of %d seconds at 1 MB/s." + - " Rerun tests with -D%s=%d", - timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2), - uploadTime < timeout); - assertEquals("File size set in " + KEY_HUGE_FILESIZE + " = " + filesize - + " is not a multiple of " + uploadBlockSize, - 0, filesize % uploadBlockSize); + assertTrue(uploadTime < timeout, + String.format("Timeout set in %s seconds is too low;" + + " estimating upload time of %d seconds at 1 MB/s." 
+ + " Rerun tests with -D%s=%d", + timeout, uploadTime, KEY_TEST_TIMEOUT, uploadTime * 2)); + assertEquals(0, filesize % uploadBlockSize, + "File size set in " + KEY_HUGE_FILESIZE + " = " + filesize + + " is not a multiple of " + uploadBlockSize); byte[] data = new byte[uploadBlockSize]; for (int i = 0; i < uploadBlockSize; i++) { @@ -311,8 +313,8 @@ public void test_010_CreateHugeFile() throws IOException { progress.verifyNoFailures( "Put file " + fileToCreate + " of size " + filesize); - assertEquals("actively allocated blocks in " + streamStatistics, - 0, streamStatistics.getBlocksActivelyAllocated()); + assertEquals(0, streamStatistics.getBlocksActivelyAllocated(), + "actively allocated blocks in " + streamStatistics); } /** @@ -402,7 +404,7 @@ private void assumeFileExists(Path file) throws IOException { file); FileStatus status = fs.getFileStatus(file); ContractTestUtils.assertIsFile(file, status); - assertTrue("File " + file + " is empty", status.getLen() > 0); + assertTrue(status.getLen() > 0, "File " + file + " is empty"); } private void logFSState() { @@ -421,7 +423,7 @@ public void test_030_postCreationAssertions() throws Throwable { FileStatus status = fs.getFileStatus(hugefile); ContractTestUtils.assertIsFile(hugefile, status); LOG.info("Huge File Status: {}", status); - assertEquals("File size in " + status, filesize, status.getLen()); + assertEquals(filesize, status.getLen(), "File size in " + status); // now do some etag status checks asserting they are always the same // across listing operations. diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java index b586fb7dbabc6..f9bdba69808e3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java @@ -29,16 +29,14 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import software.amazon.awssdk.services.s3.model.ObjectIdentifier; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.assertj.core.api.Assertions; -import org.junit.Assume; -import org.junit.FixMethodOrder; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.MethodSorters; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.MethodOrderer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,6 +57,7 @@ import static org.apache.hadoop.fs.s3a.Constants.ENABLE_MULTI_DELETE; import static org.apache.hadoop.fs.s3a.Constants.USER_AGENT_PREFIX; import static org.apache.hadoop.fs.s3a.impl.InternalConstants.MAX_ENTRIES_TO_DELETE; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * Test some scalable operations related to file renaming and deletion. @@ -69,8 +68,7 @@ * Note: UA field includes the configuration tested for the benefit * of anyone looking through the server logs. 
 */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
-@RunWith(Parameterized.class)
+@TestMethodOrder(MethodOrderer.Alphanumeric.class)
 public class ILoadTestS3ABulkDeleteThrottling extends S3AScaleTestBase {

   private static final Logger LOG =
@@ -101,9 +99,9 @@ public class ILoadTestS3ABulkDeleteThrottling extends S3AScaleTestBase {

   private File dataDir;

-  private final boolean throttle;
-  private final int pageSize;
-  private final int requests;
+  private boolean throttle;
+  private int pageSize;
+  private int requests;

   /**
    * Test array for parameterized test runs.
@@ -114,8 +112,6 @@ public class ILoadTestS3ABulkDeleteThrottling extends S3AScaleTestBase {
    *
    * @return a list of parameter tuples.
    */
-  @Parameterized.Parameters(
-      name = "bulk-delete-aws-retry={0}-requests={2}-size={1}")
   public static Collection params() {
     return Arrays.asList(new Object[][]{
         {false, SMALL, SMALL_REQS},
@@ -127,20 +123,21 @@ public static Collection params() {

   /**
    * Parameterized constructor.
-   * @param throttle AWS client throttle on/off
-   * @param pageSize Page size
-   * @param requests request count;
+   * @param pThrottle AWS client throttle on/off
+   * @param pPageSize Page size
+   * @param pRequests request count;
    */
-  public ILoadTestS3ABulkDeleteThrottling(
-      final boolean throttle,
-      final int pageSize,
-      final int requests) {
-    this.throttle = throttle;
-    Preconditions.checkArgument(pageSize > 0,
-        "page size too low %s", pageSize);
-    this.pageSize = pageSize;
-    this.requests = requests;
+  public void initILoadTestS3ABulkDeleteThrottling(
+      final boolean pThrottle,
+      final int pPageSize,
+      final int pRequests) throws Exception {
+    this.throttle = pThrottle;
+    Preconditions.checkArgument(pPageSize > 0,
+        "page size too low %s", pPageSize);
+    this.pageSize = pPageSize;
+    this.requests = pRequests;
+    setup();
   }

   @Override
@@ -166,8 +163,8 @@ protected Configuration createScaleConfiguration() {
   public void setup() throws Exception {
     final Configuration conf = getConf();
     super.setup();
-    Assume.assumeTrue("multipart delete disabled",
-        conf.getBoolean(ENABLE_MULTI_DELETE, true));
+    assumeTrue(conf.getBoolean(ENABLE_MULTI_DELETE, true),
+        "multipart delete disabled");
     dataDir = GenericTestUtils.getTestDir("throttling");
     dataDir.mkdirs();
     final String size = getFileSystem().getConf().get(BULK_DELETE_PAGE_SIZE);
@@ -180,13 +177,19 @@ public void setup() throws Exception {
   }

-  @Test
-  public void test_010_Reset() throws Throwable {
+  @MethodSource("params")
+  @ParameterizedTest(name = "bulk-delete-aws-retry={0}-requests={2}-size={1}")
+  public void test_010_Reset(final boolean pThrottle,
+      final int pPageSize, final int pRequests) throws Throwable {
+    initILoadTestS3ABulkDeleteThrottling(pThrottle, pPageSize, pRequests);
     testWasThrottled = false;
   }

-  @Test
-  public void test_020_DeleteThrottling() throws Throwable {
+  @MethodSource("params")
+  @ParameterizedTest(name = "bulk-delete-aws-retry={0}-requests={2}-size={1}")
+  public void test_020_DeleteThrottling(final boolean pThrottle,
+      final int pPageSize, final int pRequests) throws Throwable {
+    initILoadTestS3ABulkDeleteThrottling(pThrottle, pPageSize, pRequests);
     describe("test how S3 reacts to massive multipart deletion requests");
     final File results = deleteFiles(requests, pageSize);
     LOG.info("Test run completed against {}:\n see {}", getFileSystem(),
@@ -198,8 +201,11 @@
     }
   }

-  @Test
-  public void test_030_Sleep() throws Throwable {
+  @MethodSource("params")
+  @ParameterizedTest(name = "bulk-delete-aws-retry={0}-requests={2}-size={1}")
+  public void test_030_Sleep(final boolean
pThrottle, + final int pPageSize, final int pRequests) throws Throwable { + initILoadTestS3ABulkDeleteThrottling(pThrottle, pPageSize, pRequests); maybeSleep(); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ABlockOutputStreamInterruption.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ABlockOutputStreamInterruption.java index 24ba519adf0cc..9b16e66c1f3b7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ABlockOutputStreamInterruption.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ABlockOutputStreamInterruption.java @@ -27,9 +27,9 @@ import java.util.concurrent.atomic.AtomicReference; import org.assertj.core.api.Assertions; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Abortable; @@ -83,7 +83,6 @@ * Marked as a scale test even though it tries to aggressively abort streams being written * and should, if working, complete fast. */ -@RunWith(Parameterized.class) public class ITestS3ABlockOutputStreamInterruption extends S3AScaleTestBase { public static final int MAX_RETRIES_IN_SDK = 2; @@ -92,7 +91,6 @@ public class ITestS3ABlockOutputStreamInterruption extends S3AScaleTestBase { * Parameterized on (buffer type, active blocks). * @return parameters */ - @Parameterized.Parameters(name = "{0}-{1}") public static Collection params() { return Arrays.asList(new Object[][]{ {FAST_UPLOAD_BUFFER_DISK, 2}, @@ -106,22 +104,23 @@ public static Collection params() { /** * Buffer type. */ - private final String bufferType; + private String bufferType; /** * How many blocks can a stream have uploading? */ - private final int activeBlocks; + private int activeBlocks; /** * Constructor. - * @param bufferType buffer type - * @param activeBlocks number of active blocks which can be uploaded + * @param pBufferType buffer type + * @param pActiveBlocks number of active blocks which can be uploaded */ - public ITestS3ABlockOutputStreamInterruption(final String bufferType, - int activeBlocks) { - this.bufferType = requireNonNull(bufferType); - this.activeBlocks = activeBlocks; + public void initITestS3ABlockOutputStreamInterruption(final String pBufferType, + int pActiveBlocks) throws Exception { + this.bufferType = requireNonNull(pBufferType); + this.activeBlocks = pActiveBlocks; + setup(); } /** @@ -170,6 +169,7 @@ public void setup() throws Exception { super.setup(); } + @AfterEach @Override public void teardown() throws Exception { // safety check in case the evaluation is failing any @@ -179,8 +179,11 @@ public void teardown() throws Exception { super.teardown(); } - @Test - public void testInterruptMultipart() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-{1}") + public void testInterruptMultipart(String pBufferType, + int pActiveBlocks) throws Throwable { + initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); describe("Interrupt a thread performing close() on a multipart upload"); interruptMultipartUpload(methodPath(), 6 * _1MB); @@ -225,8 +228,11 @@ private void interruptMultipartUpload(final Path path, int len) throws Exception * then go on to simulate an NPE in the part upload and verify * that this does not get escalated. 
*/ - @Test - public void testAbortDuringUpload() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-{1}") + public void testAbortDuringUpload(String pBufferType, + int pActiveBlocks) throws Throwable { + initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); describe("Abort during multipart upload"); int len = 6 * _1MB; final byte[] dataset = dataset(len, 'a', 'z' - 'a'); @@ -280,8 +286,11 @@ public void testAbortDuringUpload() throws Throwable { * Test that a part upload failure is propagated to * the close() call. */ - @Test - public void testPartUploadFailure() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-{1}") + public void testPartUploadFailure(String pBufferType, + int pActiveBlocks) throws Throwable { + initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); describe("Trigger a failure during a multipart upload"); int len = 6 * _1MB; final byte[] dataset = dataset(len, 'a', 'z' - 'a'); @@ -330,8 +339,11 @@ private static void assertBytesTransferred( /** * Write a small dataset and interrupt the close() operation. */ - @Test - public void testInterruptMagicWrite() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-{1}") + public void testInterruptMagicWrite(String pBufferType, + int pActiveBlocks) throws Throwable { + initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); describe("Interrupt a thread performing close() on a magic upload"); // write a smaller file to a magic path and assert multipart outcome @@ -342,8 +354,11 @@ public void testInterruptMagicWrite() throws Throwable { /** * Write a small dataset and interrupt the close() operation. */ - @Test - public void testInterruptWhenAbortingAnUpload() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-{1}") + public void testInterruptWhenAbortingAnUpload(String pBufferType, + int pActiveBlocks) throws Throwable { + initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); describe("Interrupt a thread performing close() on a magic upload"); // fail more than the SDK will retry @@ -369,8 +384,11 @@ public void testInterruptWhenAbortingAnUpload() throws Throwable { * a {@code InterruptedIOException} and the count of interrupted events * to increase. */ - @Test - public void testInterruptSimplePut() throws Throwable { + @MethodSource("params") + @ParameterizedTest(name = "{0}-{1}") + public void testInterruptSimplePut(String pBufferType, + int pActiveBlocks) throws Throwable { + initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); describe("Interrupt simple object PUT"); // dataset is less than one block @@ -484,7 +502,7 @@ public void progressChanged(final ProgressListenerEvent eventType, * Assert that the trigger took place. 
*/ private void assertTriggered() { - assertTrue("Not triggered", triggered.get()); + assertTrue(triggered.get(), "Not triggered"); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java index 5b63b20dc67d4..6c83163786751 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java @@ -38,8 +38,9 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,6 +72,7 @@ protected Configuration createScaleConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -92,7 +94,7 @@ private S3AFileSystem getNormalFileSystem() throws Exception { return s3a; } - @After + @AfterEach public void teardown() throws Exception { super.teardown(); if (auxFs != null) { @@ -152,7 +154,7 @@ public Thread newThread(Runnable r) { LOG.info("Deadlock may have occurred if nothing else is logged" + " or the test times out"); for (int i = 0; i < concurrentRenames; i++) { - assertTrue("No future " + i, futures[i].get()); + assertTrue(futures[i].get(), "No future " + i); assertPathExists("target path", target[i]); assertPathDoesNotExist("source path", source[i]); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java index fd32ba5bb62ed..7b83442ed9519 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java @@ -20,7 +20,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3AFileSystem; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,6 +40,7 @@ public class ITestS3ACreatePerformance extends S3AScaleTestBase { private int basePathDepth; private static final int PATH_DEPTH = 10; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -70,8 +72,8 @@ public void testDeepSequentialCreate() throws Exception { /* Get a unique path of depth totalDepth for given test iteration. 
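
A subtlety in the two parameterized scale tests above: their init methods end with an explicit setup() call. The inherited @BeforeEach setup() would run before the test method receives its arguments, so a parameter that feeds createConfiguration() would arrive after the filesystem was already built. These classes therefore override setup() without re-annotating it, which suppresses the automatic run, and let init drive it once the fields are assigned. A sketch of the flow, with invented names:

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.ValueSource;

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class ExampleManualLifecycle {

      private String bufferType;
      private boolean fsCreated;

      // Stands in for an inherited @BeforeEach setup(): overriding it
      // without re-annotating means it only runs when init() calls it.
      public void setup() throws Exception {
        // in the real tests this is where the filesystem is created,
        // using configuration derived from the fields init() just set
        fsCreated = true;
      }

      /** Replaces the JUnit 4 constructor. */
      public void init(String pBufferType) throws Exception {
        this.bufferType = pBufferType;
        setup();
      }

      @ValueSource(strings = {"disk", "array", "bytebuffer"})
      @ParameterizedTest(name = "{0}")
      void testBufferType(String pBufferType) throws Exception {
        init(pBufferType);
        assertTrue(fsCreated, "setup() was not driven by init()");
        assertEquals(pBufferType, bufferType);
      }
    }

Classes whose parameters never reach createConfiguration() can simply assign fields in init without re-running setup(), which is why some init methods in this patch call it and others do not.
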
*/ private Path getPathIteration(long iter, int totalDepth) throws Exception { - assertTrue("Test path too long, increase PATH_DEPTH in test.", - totalDepth > basePathDepth); + assertTrue(totalDepth > basePathDepth, + "Test path too long, increase PATH_DEPTH in test."); int neededDirs = totalDepth - basePathDepth - 1; StringBuilder sb = new StringBuilder(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java index dbdd8b5da6a3c..eebb72f349c3c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java @@ -29,7 +29,7 @@ import org.apache.hadoop.util.DurationInfo; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -102,7 +102,7 @@ public void testBulkRenameAndDelete() throws Throwable { long sourceSize = Arrays.stream(statuses) .mapToLong(FileStatus::getLen) .sum(); - assertEquals("Source file Count", count, nSrcFiles); + assertEquals(count, nSrcFiles, "Source file Count"); ContractTestUtils.NanoTimer renameTimer = new ContractTestUtils.NanoTimer(); try (DurationInfo ignored = new DurationInfo(LOG, "Rename %s to %s", srcDir, finalDir)) { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java index e1a01eee2142f..dcbf61574fb9d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.util.functional.RemoteIterators; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.assertj.core.api.Assertions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -124,9 +124,9 @@ public void testListOperations() throws Throwable { listContinueRequests, listStatusCalls, getFileStatusCalls); - assertEquals("Files found in listFiles(recursive=true) " + - " created=" + created + " listed=" + treewalkResults, - created.getFileCount(), treewalkResults.getFileCount()); + assertEquals(created.getFileCount(), treewalkResults.getFileCount(), + "Files found in listFiles(recursive=true) " + + " created=" + created + " listed=" + treewalkResults); describe("Listing files via listFiles(recursive=true)"); // listFiles() does the recursion internally @@ -136,9 +136,9 @@ public void testListOperations() throws Throwable { fs.listFiles(listDir, true)); listFilesRecursiveTimer.end("listFiles(recursive=true) of %s", created); - assertEquals("Files found in listFiles(recursive=true) " + - " created=" + created + " listed=" + listFilesResults, - created.getFileCount(), listFilesResults.getFileCount()); + assertEquals(created.getFileCount(), listFilesResults.getFileCount(), + "Files found in listFiles(recursive=true) " + + " created=" + created + " listed=" + listFilesResults); // only two list operations should have taken place print(LOG, @@ -147,7 +147,7 @@ public void testListOperations() throws Throwable { listContinueRequests, listStatusCalls, getFileStatusCalls); - 
assertEquals(listRequests.toString(), 1, listRequests.diff()); + assertEquals(1, listRequests.diff(), listRequests.toString()); reset(metadataRequests, listRequests, listContinueRequests, @@ -170,21 +170,21 @@ public void testListOperations() throws Throwable { listContinueRequests, listStatusCalls, getFileStatusCalls); - assertEquals(listRequests.toString(), 2, listRequests.diff()); + assertEquals(2, listRequests.diff(), listRequests.toString()); reset(metadataRequests, listRequests, listContinueRequests, listStatusCalls, getFileStatusCalls); - assertTrue("Root directory count should be > test path", - rootPathSummary.getDirectoryCount() > testPathSummary.getDirectoryCount()); - assertTrue("Root file count should be >= to test path", - rootPathSummary.getFileCount() >= testPathSummary.getFileCount()); - assertEquals("Incorrect directory count", created.getDirCount() + 1, - testPathSummary.getDirectoryCount()); - assertEquals("Incorrect file count", created.getFileCount(), - testPathSummary.getFileCount()); + assertTrue(rootPathSummary.getDirectoryCount() > testPathSummary.getDirectoryCount(), + "Root directory count should be > test path"); + assertTrue(rootPathSummary.getFileCount() >= testPathSummary.getFileCount(), + "Root file count should be >= to test path"); + assertEquals(created.getDirCount() + 1, + testPathSummary.getDirectoryCount(), "Incorrect directory count"); + assertEquals(created.getFileCount(), + testPathSummary.getFileCount(), "Incorrect file count"); } finally { describe("deletion"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java index f2c549bf8101c..7031bd937b14f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.s3a.EncryptionTestUtils; import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.fs.s3a.Constants.S3_ENCRYPTION_ALGORITHM; import static org.apache.hadoop.fs.s3a.S3AEncryptionMethods.DSSE_KMS; @@ -44,6 +45,7 @@ */ public class ITestS3AHugeFilesEncryption extends AbstractSTestS3AHugeFiles { + @BeforeEach @Override public void setup() throws Exception { Configuration c = new Configuration(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java index 0203b00caab69..401db0969c657 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.s3a.AWSUnsupportedFeatureException; import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; +import org.junit.jupiter.api.BeforeEach; import java.nio.file.AccessDeniedException; @@ -51,6 +52,7 @@ public class ITestS3AHugeFilesSSECDiskBlocks * S3 throw AmazonS3Exception with status 403 AccessDenied * then it is translated into AccessDeniedException by S3AUtils.translateException(...) 
*/ + @BeforeEach @Override public void setup() throws Exception { try { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesStorageClass.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesStorageClass.java index 58988c9c41bf8..9fb78ad2cbd99 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesStorageClass.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesStorageClass.java @@ -123,8 +123,7 @@ protected void assertStorageClass(Path hugeFile) throws IOException { String actual = getS3AInternals().getObjectMetadata(hugeFile).storageClassAsString(); - assertTrue( - "Storage class of object is " + actual + ", expected " + STORAGE_CLASS_REDUCED_REDUNDANCY, - STORAGE_CLASS_REDUCED_REDUNDANCY.equalsIgnoreCase(actual)); + assertTrue(STORAGE_CLASS_REDUCED_REDUNDANCY.equalsIgnoreCase(actual), + "Storage class of object is " + actual + ", expected " + STORAGE_CLASS_REDUCED_REDUNDANCY); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java index e0c71136e8023..c401c8205392a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java @@ -42,10 +42,10 @@ import org.apache.hadoop.util.LineReader; import org.assertj.core.api.Assertions; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,7 +114,7 @@ protected Configuration createScaleConfiguration() { * Open the FS and the test data. The input stream is always set up here. * @throws IOException IO Problems. */ - @Before + @BeforeEach public void openFS() throws IOException { Configuration conf = getConf(); conf.setInt(SOCKET_SEND_BUFFER, S_16K); @@ -148,7 +148,7 @@ private void bindS3aFS(Path path) throws IOException { /** * Cleanup: close the stream, close the FS. 
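
The ITestS3AInputStreamPerformance hunks here exercise the remaining lifecycle renames in one place: @Before becomes @BeforeEach, @After becomes @AfterEach, and @AfterClass becomes @AfterAll, which must still be static. Schematically, with invented bodies:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class ExampleLifecycleRenames {

      @BeforeEach            // was @org.junit.Before
      void openFS() {
        // open per-test resources
      }

      @AfterEach             // was @org.junit.After
      void cleanup() {
        // close per-test resources
      }

      @AfterAll              // was @org.junit.AfterClass; must stay static
      static void dumpIOStatistics() {
        // report aggregate statistics once per class
      }

      @Test
      void testRead() {
      }
    }
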
*/ - @After + @AfterEach public void cleanup() { describe("cleanup"); IOUtils.closeStream(in); @@ -167,7 +167,7 @@ public void cleanup() { } } - @AfterClass + @AfterAll public static void dumpIOStatistics() { LOG.info("Aggregate Stream Statistics {}", IOSTATS); } @@ -249,8 +249,8 @@ protected void assertStreamOpenedExactlyOnce() { * @param expected the expected number */ private void assertOpenOperationCount(long expected) { - assertEquals("open operations in\n" + in, - expected, streamStatistics.getOpenOperations()); + assertEquals(expected, streamStatistics.getOpenOperations(), + "open operations in\n" + in); } /** @@ -361,7 +361,7 @@ public void testLazySeekEnabled() throws Throwable { logTimePerIOP("seek()", timer, blockCount); logStreamStatistics(); assertOpenOperationCount(0); - assertEquals("bytes read", 0, streamStatistics.getBytesRead()); + assertEquals(0, streamStatistics.getBytesRead(), "bytes read"); } @Test @@ -432,7 +432,7 @@ private void executeDecompression(long readahead, readahead); logTimePerIOP("line read", timer, lines); logStreamStatistics(); - assertNotNull("No IOStatistics through line reader", readerStatistics); + assertNotNull(readerStatistics, "No IOStatistics through line reader"); LOG.info("statistics from reader {}", ioStatisticsToString(readerStatistics)); } @@ -483,8 +483,8 @@ protected void executeSeekReadSequence(long blockSize, public void testRandomIORandomPolicy() throws Throwable { skipIfClientSideEncryption(); executeRandomIO(S3AInputPolicy.Random, (long) RANDOM_IO_SEQUENCE.length); - assertEquals("streams aborted in " + streamStatistics, - 0, streamStatistics.getAborted()); + assertEquals(0, streamStatistics.getAborted(), + "streams aborted in " + streamStatistics); } @Test @@ -492,13 +492,12 @@ public void testRandomIONormalPolicy() throws Throwable { skipIfClientSideEncryption(); long expectedOpenCount = RANDOM_IO_SEQUENCE.length; executeRandomIO(S3AInputPolicy.Normal, expectedOpenCount); - assertEquals("streams aborted in " + streamStatistics, - 1, streamStatistics.getAborted()); - assertEquals("policy changes in " + streamStatistics, - 2, streamStatistics.getPolicySetCount()); - assertEquals("input policy in " + streamStatistics, - S3AInputPolicy.Random.ordinal(), - streamStatistics.getInputPolicy()); + assertEquals(1, streamStatistics.getAborted(), + "streams aborted in " + streamStatistics); + assertEquals(2, streamStatistics.getPolicySetCount(), + "policy changes in " + streamStatistics); + assertEquals(S3AInputPolicy.Random.ordinal(), + streamStatistics.getInputPolicy(), "input policy in " + streamStatistics); IOStatistics ioStatistics = streamStatistics.getIOStatistics(); verifyStatisticCounterValue( ioStatistics, @@ -592,12 +591,9 @@ public void testRandomReadOverBuffer() throws Throwable { byte[] oneByte = new byte[1]; assertEquals(1, in.read(0, oneByte, 0, 1)); // make some assertions about the current state - assertEquals("remaining in\n" + in, - readahead - 1, s3aStream.remainingInCurrentRequest()); - assertEquals("range start in\n" + in, - 0, s3aStream.getContentRangeStart()); - assertEquals("range finish in\n" + in, - readahead, s3aStream.getContentRangeFinish()); + assertEquals(readahead - 1, s3aStream.remainingInCurrentRequest(), "remaining in\n" + in); + assertEquals(0, s3aStream.getContentRangeStart(), "range start in\n" + in); + assertEquals(readahead, s3aStream.getContentRangeFinish(), "range finish in\n" + in); assertStreamOpenedExactlyOnce(); @@ -615,15 +611,15 @@ public void testRandomReadOverBuffer() throws Throwable { 
bytesRead += read; offset += read; readOps++; - assertEquals("open operations on request #" + readOps - + " after reading " + bytesRead - + " current position in stream " + currentPos - + " in\n" + fs - + "\n " + in, - 1, streamStatistics.getOpenOperations()); + assertEquals(1, streamStatistics.getOpenOperations(), + "open operations on request #" + readOps + + " after reading " + bytesRead + + " current position in stream " + currentPos + + " in\n" + fs + + "\n " + in); for (int i = currentPos; i < currentPos + read; i++) { - assertEquals("Wrong value from byte " + i, - sourceData[i], buffer[i]); + assertEquals( + sourceData[i], buffer[i], "Wrong value from byte " + i); } currentPos += read; } @@ -640,10 +636,10 @@ public void testRandomReadOverBuffer() throws Throwable { describe("read last byte"); // read one more int read = in.read(currentPos, buffer, bytesRead, 1); - assertTrue("-1 from last read", read >= 0); + assertTrue(read >= 0, "-1 from last read"); assertOpenOperationCount(2); - assertEquals("Wrong value from read ", sourceData[currentPos], - (int) buffer[currentPos]); + assertEquals(sourceData[currentPos], (int) buffer[currentPos], + "Wrong value from read "); currentPos++; @@ -657,11 +653,9 @@ public void testRandomReadOverBuffer() throws Throwable { LOG.info("reading"); while(currentPos < datasetLen) { int r = in.read(); - assertTrue("Negative read() at position " + currentPos + " in\n" + in, - r >= 0); + assertTrue(r >= 0, "Negative read() at position " + currentPos + " in\n" + in); buffer[currentPos] = (byte)r; - assertEquals("Wrong value from read from\n" + in, - sourceData[currentPos], r); + assertEquals(sourceData[currentPos], r, "Wrong value from read from\n" + in); currentPos++; readCount++; } @@ -670,6 +664,6 @@ public void testRandomReadOverBuffer() throws Throwable { LOG.info("Time per read(): {} nS", toHuman(timer.nanosPerOperation(readCount))); - assertEquals("last read in " + in, -1, in.read()); + assertEquals(-1, in.read(), "last read in " + in); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java index b83d12b4c1a66..f3d5d5515e3ec 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java @@ -21,7 +21,7 @@ import java.io.File; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java index 38839ba0ddce7..e9886e33912c7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.s3a.S3ATestConstants; import org.apache.hadoop.fs.s3a.Statistic; +import org.junit.jupiter.api.BeforeEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -80,6 +81,7 @@ public Configuration getConf() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestSelectUnsupported.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestSelectUnsupported.java index 1b115158294f2..63884958c4afb 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestSelectUnsupported.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/select/ITestSelectUnsupported.java @@ -19,7 +19,7 @@ package org.apache.hadoop.fs.s3a.select; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java index 594cb0cdafb87..7e6b5992744e7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAWSStatisticCollection.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.s3a.statistics; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAggregateIOStatistics.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAggregateIOStatistics.java index c85651d8ab6c9..4a353547de729 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAggregateIOStatistics.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestAggregateIOStatistics.java @@ -23,7 +23,7 @@ import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AFileSystemStatistic.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AFileSystemStatistic.java index 376dcdf727fa8..2f54cab00b13d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AFileSystemStatistic.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AFileSystemStatistic.java @@ -20,7 +20,7 @@ import java.io.IOException; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -75,7 +75,7 @@ public void testBytesReadWithStream() throws IOException { FileSystem.Statistics fsStats = fs.getFsStatistics(); // Verifying that total bytes read by FS is equal to 2KB. 
- assertEquals("Mismatch in number of FS bytes read by InputStreams", TWO_KB, - fsStats.getBytesRead()); + assertEquals(TWO_KB, fsStats.getBytesRead(), + "Mismatch in number of FS bytes read by InputStreams"); } } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestBucketTool.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestBucketTool.java index b37e6eec7c822..bbe9d74824b7a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestBucketTool.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestBucketTool.java @@ -21,7 +21,8 @@ import java.io.IOException; import java.net.UnknownHostException; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -101,6 +102,7 @@ public class ITestBucketTool extends AbstractS3ATestBase { private S3AFileSystem fs; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerTool.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerTool.java index b6d41c4139ac9..6c2aeab268244 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerTool.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerTool.java @@ -24,7 +24,7 @@ import java.util.List; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java index 6d50fa7230335..f9861e06bcc63 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java @@ -20,9 +20,10 @@ import java.io.File; -import org.junit.FixMethodOrder; -import org.junit.Test; -import org.junit.runners.MethodSorters; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; import org.apache.hadoop.fs.Path; @@ -35,11 +36,12 @@ /** * Marker tool tests against the root FS; run in the sequential phase. 
*/ -@FixMethodOrder(MethodSorters.NAME_ASCENDING) +@TestMethodOrder(MethodOrderer.MethodName.class) public class ITestMarkerToolRootOperations extends AbstractMarkerToolTest { private Path rootPath; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3A.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3A.java index 037eda974276d..0cb57003b2e56 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3A.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3A.java @@ -19,9 +19,8 @@ import java.util.EnumSet; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; @@ -30,17 +29,16 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.junit.jupiter.api.Timeout; /** * S3A tests through the {@link FileContext} API. */ +@Timeout(90) public class ITestS3A extends AbstractS3ATestBase { private FileContext fc; - @Rule - public final Timeout testTimeout = new Timeout(90000); - - + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -51,13 +49,12 @@ public void setup() throws Exception { public void testS3AStatus() throws Exception { FsStatus fsStatus = fc.getFsStatus(null); assertNotNull(fsStatus); - assertTrue("Used capacity should be positive: " + fsStatus.getUsed(), - fsStatus.getUsed() >= 0); - assertTrue("Remaining capacity should be positive: " + fsStatus - .getRemaining(), - fsStatus.getRemaining() >= 0); - assertTrue("Capacity should be positive: " + fsStatus.getCapacity(), - fsStatus.getCapacity() >= 0); + assertTrue(fsStatus.getUsed() >= 0, + "Used capacity should be non-negative: " + fsStatus.getUsed()); + assertTrue(fsStatus.getRemaining() >= 0, + "Remaining capacity should be non-negative: " + fsStatus.getRemaining()); + assertTrue(fsStatus.getCapacity() >= 0, + "Capacity should be non-negative: " + fsStatus.getCapacity()); } @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java index 87b37b7f8ffbc..c74faa24f9933 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java @@ -42,7 +42,9 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.yarn.server.MiniYARNCluster; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.FS_S3A_COMMITTER_NAME; import static org.apache.hadoop.fs.s3a.commit.CommitConstants.FS_S3A_COMMITTER_STAGING_UNIQUE_FILENAMES; @@ -66,6 +68,7 @@ protected Configuration createConfiguration() { return conf; } + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -84,6 +87,7 @@ public void setup() throws Exception { yarnCluster.start(); } + @AfterEach @Override public void teardown() throws Exception { if (yarnCluster != null) { @@ -115,12 +119,12 @@
public void testWithMiniCluster() throws Exception { FileOutputFormat.setOutputPath(job, output); int exitCode = (job.waitForCompletion(true) ? 0 : 1); - assertEquals("Returned error code.", 0, exitCode); + assertEquals(0, exitCode, "Returned error code."); Path success = new Path(output, _SUCCESS); FileStatus status = fs.getFileStatus(success); - assertTrue("0 byte success file - not an S3A committer " + success, - status.getLen() > 0); + assertTrue(status.getLen() > 0, + "0 byte success file - not an S3A committer " + success); SuccessData successData = SuccessData.load(fs, success); String commitDetails = successData.toString(); LOG.info("Committer details \n{}", commitDetails); @@ -142,9 +146,9 @@ private Map getResultAsMap(String outputAsStr) Map result = new HashMap<>(); for (String line : outputAsStr.split("\n")) { String[] tokens = line.split("\t"); - assertTrue("Not enough tokens in in string \" "+ line - + "\" from output \"" + outputAsStr + "\"", - tokens.length > 1); + assertTrue(tokens.length > 1, + "Not enough tokens in string \"" + line + + "\" from output \"" + outputAsStr + "\""); result.put(tokens[0], Integer.parseInt(tokens[1])); } return result; diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractAppendLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractAppendLive.java index ffe6dd3826f12..66c941b65cb38 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractAppendLive.java +++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractAppendLive.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.contract.AbstractContractAppendTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * Test Append on Adl file system.
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java index 60d30ac42e9c5..71c8906597fa2 100644 --- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java +++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlContractConcatLive.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.AbstractContractConcatTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/AbstractAbfsClusterITest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/AbstractAbfsClusterITest.java index 35d15f6c472d7..00e46cc98de12 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/AbstractAbfsClusterITest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/AbstractAbfsClusterITest.java @@ -22,9 +22,10 @@ import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; -import org.junit.AfterClass; +import org.junit.jupiter.api.AfterAll; import org.junit.Assume; import org.junit.Rule; +import org.junit.jupiter.api.BeforeEach; import org.junit.rules.TemporaryFolder; import org.apache.hadoop.conf.Configuration; @@ -80,6 +81,7 @@ protected int getTestTimeoutMillis() { return AzureTestConstants.SCALE_TEST_TIMEOUT_MILLIS; } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); @@ -88,10 +90,10 @@ public void setup() throws Exception { if (getClusterBinding() == null) { clusterBinding = demandCreateClusterBinding(); } - assertNotNull("cluster is not bound", getClusterBinding()); + assertNotNull(getClusterBinding(), "cluster is not bound"); } - @AfterClass + @AfterAll public static void teardownClusters() throws IOException { terminateCluster(clusterBinding); clusterBinding = null; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCleanupStage.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCleanupStage.java index a597c35376a71..28bdf91fcf129 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCleanupStage.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCleanupStage.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.azurebfs.contract.AbfsFileSystemContract; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.mapreduce.lib.output.committer.manifest.TestCleanupStage; +import org.junit.jupiter.api.BeforeEach; /** * Cleanup logic on ABFS. 
@@ -35,6 +36,7 @@ public ITestAbfsCleanupStage() throws Exception { binding = new ABFSContractTestBinding(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCommitTaskStage.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCommitTaskStage.java index a0aaec8532850..13520db94749f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCommitTaskStage.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCommitTaskStage.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.azurebfs.contract.AbfsFileSystemContract; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.mapreduce.lib.output.committer.manifest.TestCommitTaskStage; +import org.junit.jupiter.api.BeforeEach; /** * ABFS storage test of task committer. @@ -35,6 +36,7 @@ public ITestAbfsCommitTaskStage() throws Exception { binding = new ABFSContractTestBinding(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCreateOutputDirectoriesStage.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCreateOutputDirectoriesStage.java index 6621b80da00c1..0684075920277 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCreateOutputDirectoriesStage.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsCreateOutputDirectoriesStage.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.azurebfs.contract.AbfsFileSystemContract; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.mapreduce.lib.output.committer.manifest.TestCreateOutputDirectoriesStage; +import org.junit.jupiter.api.BeforeEach; /** * ABFS storage test of directory creation. 
@@ -35,6 +36,7 @@ public ITestAbfsCreateOutputDirectoriesStage() throws Exception { binding = new ABFSContractTestBinding(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsJobThroughManifestCommitter.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsJobThroughManifestCommitter.java index 4e4c4f5996bc2..ce1d183c1ef44 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsJobThroughManifestCommitter.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsJobThroughManifestCommitter.java @@ -23,6 +23,7 @@ import org.assertj.core.api.Assertions; import org.junit.FixMethodOrder; +import org.junit.jupiter.api.BeforeEach; import org.junit.runners.MethodSorters; import org.apache.hadoop.conf.Configuration; @@ -52,6 +53,7 @@ public ITestAbfsJobThroughManifestCommitter() throws Exception { binding = new ABFSContractTestBinding(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsLoadManifestsStage.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsLoadManifestsStage.java index 367692fca5d22..2979afc79e5ce 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsLoadManifestsStage.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsLoadManifestsStage.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.mapreduce.lib.output.committer.manifest.ManifestCommitterTestSupport; import org.apache.hadoop.mapreduce.lib.output.committer.manifest.TestLoadManifestsStage; +import org.junit.jupiter.api.BeforeEach; /** * ABFS storage test of saving and loading a large number @@ -38,6 +39,7 @@ public ITestAbfsLoadManifestsStage() throws Exception { binding = new ABFSContractTestBinding(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestCommitProtocol.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestCommitProtocol.java index aac06f952dab2..88a5202ba4c9f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestCommitProtocol.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestCommitProtocol.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.azurebfs.contract.AbfsFileSystemContract; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.mapreduce.lib.output.committer.manifest.TestManifestCommitProtocol; +import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.fs.azurebfs.commit.AbfsCommitTestHelper.prepareTestConfiguration; @@ -38,6 +39,7 @@ public ITestAbfsManifestCommitProtocol() throws Exception { binding = new ABFSContractTestBinding(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestStoreOperations.java 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestStoreOperations.java index 922782da29c5f..9fc06ee8250c5 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestStoreOperations.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestStoreOperations.java @@ -21,7 +21,8 @@ import java.nio.charset.StandardCharsets; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,6 +61,7 @@ public ITestAbfsManifestStoreOperations() throws Exception { binding = new ABFSContractTestBinding(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsRenameStageFailure.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsRenameStageFailure.java index 6b9830e8f33fc..b7ac25ec4c042 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsRenameStageFailure.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsRenameStageFailure.java @@ -26,6 +26,8 @@ import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.mapreduce.lib.output.committer.manifest.TestRenameStageFailure; +import org.junit.jupiter.api.BeforeEach; + /** * Rename failure logic on ABFS. * This will go through the resilient rename operation. @@ -48,6 +50,7 @@ protected boolean isNamespaceEnabled() throws AzureBlobFileSystemException { return fs.getAbfsStore().getIsNamespaceEnabled(AbstractAbfsIntegrationTest.getSampleTracingContext(fs, false)); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTaskManifestFileIO.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTaskManifestFileIO.java index d2fe9de115c3b..d6e39e4ddeb08 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTaskManifestFileIO.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTaskManifestFileIO.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.azurebfs.contract.AbfsFileSystemContract; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.mapreduce.lib.output.committer.manifest.TestTaskManifestFileIO; +import org.junit.jupiter.api.BeforeEach; /** * Test Reading/writing manifest file through ABFS. 
@@ -35,6 +36,7 @@ public ITestAbfsTaskManifestFileIO() throws Exception { binding = new ABFSContractTestBinding(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTerasort.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTerasort.java index 820938b2d68ef..5ccb5d7652224 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTerasort.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsTerasort.java @@ -28,7 +28,8 @@ import org.junit.Assume; import org.junit.FixMethodOrder; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.junit.runners.MethodSorters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -119,7 +120,7 @@ public class ITestAbfsTerasort extends AbstractAbfsClusterITest { public ITestAbfsTerasort() throws Exception { } - + @BeforeEach @Override public void setup() throws Exception { // superclass calls requireScaleTestsEnabled(); @@ -218,9 +219,9 @@ private ManifestSuccessData executeStage( d.close(); } dumpOutputTree(dest); - assertEquals(stage + assertEquals(0, result, stage + "(" + StringUtils.join(", ", args) + ")" - + " failed", 0, result); + + " failed"); final ManifestSuccessData successFile = validateSuccessFile(getFileSystem(), dest, minimumFileCount, ""); final IOStatistics iostats = successFile.getIOStatistics(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsContractBulkDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsContractBulkDelete.java index 7ec11abe733b7..49c1ffc49068e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsContractBulkDelete.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsContractBulkDelete.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractBulkDeleteTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; public class ITestAbfsContractBulkDelete extends AbstractContractBulkDeleteTest { @@ -32,6 +33,7 @@ public ITestAbfsContractBulkDelete() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsContractUnbuffer.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsContractUnbuffer.java index 3f7fce41a0bfd..3e0013c00b48a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsContractUnbuffer.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsContractUnbuffer.java @@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for unbuffer operation. 
@@ -36,6 +37,7 @@ public ITestAbfsContractUnbuffer() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractAppend.java index 59df4f0deb86d..036e58354abe8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractAppend.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractAppend.java @@ -21,7 +21,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractAppendTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; @@ -37,6 +38,7 @@ public ITestAbfsFileSystemContractAppend() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractConcat.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractConcat.java index c67e2bc5144ca..34eabfd2a7e96 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractConcat.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractConcat.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractConcatTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for concat operation. @@ -33,6 +34,7 @@ public ITestAbfsFileSystemContractConcat() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractCreate.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractCreate.java index 11d01641ead81..42cdc2e32dd8e 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractCreate.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractCreate.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractCreateTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for create operation. 
@@ -34,6 +35,7 @@ public ITestAbfsFileSystemContractCreate() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDelete.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDelete.java index 9d77829c6fbf4..e6cde6cab9b05 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDelete.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDelete.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for delete operation. @@ -34,6 +35,7 @@ public ITestAbfsFileSystemContractDelete() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractEtag.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractEtag.java index d498ae71a4b6f..f87da3238d5bd 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractEtag.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractEtag.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractEtagTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for etag support. @@ -34,6 +35,7 @@ public ITestAbfsFileSystemContractEtag() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractGetFileStatus.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractGetFileStatus.java index f64abf9cb3716..cb7a81e08f637 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractGetFileStatus.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractGetFileStatus.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for getFileStatus operation. 
@@ -33,6 +34,7 @@ public ITestAbfsFileSystemContractGetFileStatus() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractMkdir.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractMkdir.java index 99959ed2d02ed..571c11805c243 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractMkdir.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractMkdir.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for mkdir operation. @@ -34,6 +35,7 @@ public ITestAbfsFileSystemContractMkdir() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractOpen.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractOpen.java index 43552e50b7a59..db0769fed5885 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractOpen.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractOpen.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractOpenTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for open operation. @@ -34,6 +35,7 @@ public ITestAbfsFileSystemContractOpen() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRename.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRename.java index cd60e6d5ae498..537810cdaad6f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRename.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRename.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractRenameTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for rename operation. 
@@ -34,6 +35,7 @@ public ITestAbfsFileSystemContractRename() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRootDirectory.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRootDirectory.java index 5da2c76907e9f..78f495ddc9ea0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRootDirectory.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractRootDirectory.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.junit.Ignore; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for root directory operation. @@ -34,6 +35,7 @@ public ITestAbfsFileSystemContractRootDirectory() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSeek.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSeek.java index e8b044f92456c..749e32f7be5c5 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSeek.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSeek.java @@ -22,7 +22,8 @@ import java.util.concurrent.CompletableFuture; import org.assertj.core.api.Assertions; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -54,6 +55,7 @@ public ITestAbfsFileSystemContractSeek() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); @@ -91,8 +93,8 @@ public void testSeekAndReadWithReadAhead() throws IOException { AbfsInputStream inStream = ((AbfsInputStream) in.getWrappedStream()); AbfsInputStreamStatisticsImpl streamStatistics = (AbfsInputStreamStatisticsImpl) inStream.getStreamStatistics(); - assertEquals(String.format("Value of %s is not set correctly", AZURE_READ_AHEAD_RANGE), - MIN_BUFFER_SIZE, inStream.getReadAheadRange()); + assertEquals(MIN_BUFFER_SIZE, inStream.getReadAheadRange(), + String.format("Value of %s is not set correctly", AZURE_READ_AHEAD_RANGE)); long remoteReadOperationsOldVal = streamStatistics.getRemoteReadOperations(); Assertions.assertThat(remoteReadOperationsOldVal) diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSetTimes.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSetTimes.java index 40434842eb9d8..458604e17fa06 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSetTimes.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractSetTimes.java @@ -20,6 +20,7 @@ import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for setTimes operation. @@ -33,6 +34,7 @@ public ITestAbfsFileSystemContractSetTimes() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractVectoredRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractVectoredRead.java index e553989008313..88d542b47176b 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractVectoredRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractVectoredRead.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.junit.jupiter.api.BeforeEach; /** * Contract test for vectored reads through ABFS connector. @@ -30,12 +31,12 @@ public class ITestAbfsFileSystemContractVectoredRead private final boolean isSecure; private final ABFSContractTestBinding binding; - public ITestAbfsFileSystemContractVectoredRead(final String bufferType) throws Exception { - super(bufferType); + public ITestAbfsFileSystemContractVectoredRead() throws Exception { this.binding = new ABFSContractTestBinding(); this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index aa42cb968d61f..cb9d42b7168fe 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -51,9 +51,9 @@ import org.apache.hadoop.util.functional.RemoteIterators; import org.assertj.core.api.Assertions; -import org.junit.Before; +import org.junit.jupiter.api.BeforeEach; import org.junit.Rule; -import org.junit.Test; +import org.junit.jupiter.api.Test; import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -160,7 +160,7 @@ protected Configuration createConfiguration() { return newConf; } - @Before + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -287,12 +287,10 @@ void assertCounterInRange(Job job, Enum counter, long min, long max) String.format("%s value %s", c.getDisplayName(), value, false); if (min >= 0) { - assertTrue(description + " too below minimum " + min, - value >= min); + assertTrue(value >= min, description + " below minimum " + min); } if (max >= 0) { - assertTrue(description + " above maximum " + max, - value <= max); + assertTrue(value <= max, description + " above maximum " + max); } } @@ -479,14 +477,14 @@ public void testTrackDeepDirectoryStructureToRemote() throws Exception { } // look for the new file in both lists - assertTrue("No " + outputFileNew1 + " in source listing", - sourceFiles.containsValue(inputFileNew1));
- assertTrue("No " + outputFileNew1 + " in target listing", - targetFiles.containsValue(outputFileNew1)); - assertTrue("No " + outputSubDir4 + " in target listing", - targetFiles.containsValue(outputSubDir4)); - assertFalse("Found " + inputSubDir4 + " in source listing", - sourceFiles.containsValue(inputSubDir4)); + assertTrue(sourceFiles.containsValue(inputFileNew1), + "No " + outputFileNew1 + " in source listing"); + assertTrue(targetFiles.containsValue(outputFileNew1), + "No " + outputFileNew1 + " in target listing"); + assertTrue(targetFiles.containsValue(outputSubDir4), + "No " + outputSubDir4 + " in target listing"); + assertFalse(sourceFiles.containsValue(inputSubDir4), + "Found " + inputSubDir4 + " in source listing"); } @@ -531,8 +529,8 @@ public void testSetJobId() throws Exception { DistCpTestUtils .assertRunDistCp(DistCpConstants.SUCCESS, remoteDir.toString(), localDir.toString(), getDefaultCLIOptionsOrNull(), conf); - assertNotNull("DistCp job id isn't set", - conf.get(CONF_LABEL_DISTCP_JOB_ID)); + assertNotNull(conf.get(CONF_LABEL_DISTCP_JOB_ID), + "DistCp job id isn't set"); } /** @@ -646,9 +644,9 @@ private void runDistCp(Path src, Path dst) throws Exception { */ private Job runDistCp(final DistCpOptions options) throws Exception { Job job = new DistCp(conf, options).execute(); - assertNotNull("Unexpected null job returned from DistCp execution.", job); - assertTrue("DistCp job did not complete.", job.isComplete()); - assertTrue("DistCp job did not complete successfully.", job.isSuccessful()); + assertNotNull(job, "Unexpected null job returned from DistCp execution."); + assertTrue(job.isComplete(), "DistCp job did not complete."); + assertTrue(job.isSuccessful(), "DistCp job did not complete successfully."); return job; } @@ -672,7 +670,7 @@ private DistCpOptions buildWithStandardOptions( * @throws Exception if there is a failure */ private static void mkdirs(FileSystem fs, Path dir) throws Exception { - assertTrue("Failed to mkdir " + dir, fs.mkdirs(dir)); + assertTrue(fs.mkdirs(dir), "Failed to mkdir " + dir); } @Test @@ -996,9 +994,9 @@ private void verifySkipAndCopyCounter(Job job, long copyActualValue = job.getCounters() .findCounter(CopyMapper.Counter.COPY).getValue(); // Verify if the actual values equals the expected ones. 
- assertEquals("Mismatch in COPY counter value", copyExpectedValue, - copyActualValue); - assertEquals("Mismatch in SKIP counter value", skipExpectedValue, - skipActualValue); + assertEquals(copyExpectedValue, + copyActualValue, "Mismatch in COPY counter value"); + assertEquals(skipExpectedValue, + skipActualValue, "Mismatch in SKIP counter value"); } } \ No newline at end of file diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/TestHDFSContractDistCp.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/TestHDFSContractDistCp.java index 61a16b1e816fd..aaccacc334d6a 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/TestHDFSContractDistCp.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/TestHDFSContractDistCp.java @@ -21,8 +21,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.hdfs.HDFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import java.io.IOException; @@ -34,12 +34,12 @@ */ public class TestHDFSContractDistCp extends AbstractContractDistCpTest { - @BeforeClass + @BeforeAll public static void createCluster() throws IOException { HDFSContract.createCluster(); } - @AfterClass + @AfterAll public static void teardownCluster() throws IOException { HDFSContract.destroyCluster(); } From 9487ca28a983fd430d672254f0a89283899bab00 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Sat, 14 Jun 2025 13:18:53 +0800 Subject: [PATCH 2/3] HADOOP-19415. Fix CheckStyle Issue. --- .../contract/AbstractFSContractTestBase.java | 2 +- .../org/apache/hadoop/fs/extend/TestName.java | 39 ------------------- .../ITestAzureNativeContractDistCp.java | 2 + .../ITestAbfsFileSystemContractDistCp.java | 7 +++- .../azurebfs/contract/ITestAbfsWrappedIO.java | 2 + .../contract/AbstractContractDistCpTest.java | 6 +-- 6 files changed, 13 insertions(+), 45 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/extend/TestName.java diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java index baaf3c1426ffa..f6523d660adf1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.extend.TestName; +import org.apache.hadoop.test.TestName; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/extend/TestName.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/extend/TestName.java deleted file mode 100644 index 440726f3de424..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/extend/TestName.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.extend; - -import org.junit.jupiter.api.extension.BeforeEachCallback; -import org.junit.jupiter.api.extension.ExtensionContext; - -/** - * This is a custom JUnit5 `RegisterExtension` - * we created to obtain the methond name of the executing function. - */ -public class TestName implements BeforeEachCallback { - - private volatile String name; - - @Override - public void beforeEach(ExtensionContext extensionContext) throws Exception { - name = extensionContext.getTestMethod().get().getName(); - } - - public String getMethodName() { - return this.name; - } -} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java index 77695706d0b13..4d07886cf18f8 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/contract/ITestAzureNativeContractDistCp.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azure.integration.AzureTestConstants; import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; +import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.assumeScaleTestsEnabled; @@ -39,6 +40,7 @@ protected NativeAzureFileSystemContract createContract(Configuration conf) { return new NativeAzureFileSystemContract(conf); } + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDistCp.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDistCp.java index 3f06509241f74..402c75ad52c87 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDistCp.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractDistCp.java @@ -22,7 +22,9 @@ import org.apache.hadoop.fs.azure.integration.AzureTestConstants; import org.apache.hadoop.fs.azurebfs.services.AuthType; import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; -import org.junit.Assume; +import org.junit.jupiter.api.BeforeEach; + +import static org.junit.jupiter.api.Assumptions.assumeTrue; import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.assumeScaleTestsEnabled; @@ -39,9 +41,10 @@ protected int getTestTimeoutMillis() { public ITestAbfsFileSystemContractDistCp() throws Exception { binding = new ABFSContractTestBinding(); - 
Assume.assumeTrue(binding.getAuthType() != AuthType.OAuth); + assumeTrue(binding.getAuthType() != AuthType.OAuth); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsWrappedIO.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsWrappedIO.java index 28750fd6dc633..47cd54491dd40 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsWrappedIO.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsWrappedIO.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.io.wrappedio.impl.TestWrappedIO; +import org.junit.jupiter.api.BeforeEach; /** * Test WrappedIO access to ABFS. @@ -35,6 +36,7 @@ public ITestAbfsWrappedIO() throws Exception { this.isSecure = binding.isSecureMode(); } + @BeforeEach @Override public void setup() throws Exception { binding.setup(); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index cb9d42b7168fe..5c5f11a026c7b 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.TestName; import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCp; import org.apache.hadoop.tools.DistCpConstants; @@ -52,9 +53,8 @@ import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; -import org.junit.Rule; import org.junit.jupiter.api.Test; -import org.junit.rules.TestName; +import org.junit.jupiter.api.extension.RegisterExtension; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -99,7 +99,7 @@ public abstract class AbstractContractDistCpTest */ protected static final int DEFAULT_WIDTH = 2; - @Rule + @RegisterExtension public TestName testName = new TestName(); /** From 97bd0de98250a08df07214247c7d4ab3c84c4bb6 Mon Sep 17 00:00:00 2001 From: slfan1989 Date: Sat, 14 Jun 2025 20:30:10 +0800 Subject: [PATCH 3/3] HADOOP-19415. Fix CheckStyle Issue. 
--- .../hadoop/fs/contract/AbstractContractVectoredReadTest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java index 0de26a6376695..b2d70e9efe03b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java @@ -34,7 +34,6 @@ import java.util.function.IntFunction; import org.assertj.core.api.Assertions; -import org.junit.Test; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -49,7 +48,6 @@ import org.apache.hadoop.io.ElasticByteBufferPool; import org.apache.hadoop.io.WeakReferencedElasticByteBufferPool; import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.hadoop.util.functional.FutureIO; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_LENGTH; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY;