diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index d1ee94803b475..c8f263222c74c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -30,6 +30,7 @@ import java.util.concurrent.CompletableFuture; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -99,6 +100,7 @@ public void setup() throws Exception { } @Override + @AfterEach public void teardown() throws Exception { MultipartUploader uploader = getUploader(1); if (uploader != null) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java index 0c2fe326b0023..f4b66ff12179b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.test.tags.RootFilesystemTest; import static org.apache.commons.lang3.StringUtils.join; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; @@ -52,6 +53,7 @@ * Only subclass this for tests against transient filesystems where * you don't care about the data. */ +@RootFilesystemTest public abstract class AbstractContractRootDirectoryTest extends AbstractFSContractTestBase { private static final Logger LOG = LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java index ab1e78c5308e5..59eb57af8135e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java @@ -26,13 +26,19 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.tags.FlakyTest; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; /** * Contract tests for {@link org.apache.hadoop.fs.CanUnbuffer#unbuffer}. + * Some of these test cases can fail if the FS read() call returns less + * than requested, which is a valid (possibly correct) implementation + * of {@code InputStream.read(buffer[])} which may return only those bytes + * which can be returned without blocking for more data. 
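A note on the hazard the new javadoc describes, as a hedged sketch (nothing here is part of the patch; names are illustrative): callers that need a full buffer cannot rely on a single read(byte[]) call and must loop over short reads.

```java
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public final class ReadFully {

  private ReadFully() {
  }

  /**
   * Fill the whole buffer, looping because read() may legitimately
   * return fewer bytes than requested without that being an error.
   */
  public static void readFully(InputStream in, byte[] buffer)
      throws IOException {
    int filled = 0;
    while (filled < buffer.length) {
      int n = in.read(buffer, filled, buffer.length - filled);
      if (n < 0) {
        throw new EOFException("EOF after " + filled + " bytes");
      }
      filled += n;
    }
  }
}
```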
*/ +@FlakyTest("buffer underflow") public abstract class AbstractContractUnbufferTest extends AbstractFSContractTestBase { private Path file; @@ -105,6 +111,7 @@ public void testMultipleUnbuffers() throws IOException { } } + @Test public void testUnbufferMultipleReads() throws IOException { describe("unbuffer a file multiple times"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java index b2d70e9efe03b..7998a0fe4f685 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java @@ -34,8 +34,10 @@ import java.util.function.IntFunction; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,6 +70,8 @@ * Both the original readVectored(allocator) and the readVectored(allocator, release) * operations are tested. */ +@ParameterizedClass(name="buffer-{0}") +@MethodSource("params") public abstract class AbstractContractVectoredReadTest extends AbstractFSContractTestBase { private static final Logger LOG = @@ -80,15 +84,15 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac /** * Buffer allocator for vector IO. */ - protected IntFunction allocate; + private final IntFunction allocate; /** * Buffer pool for vector IO. */ - protected final ElasticByteBufferPool pool = + private final ElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool(); - protected String bufferType; + private final String bufferType; /** * Path to the vector file. 
@@ -106,8 +110,8 @@ public static List params() { return Arrays.asList("direct", "array"); } - public void initAbstractContractVectoredReadTest(String pBufferType) { - this.bufferType = pBufferType; + protected AbstractContractVectoredReadTest(String bufferType) { + this.bufferType = bufferType; final boolean isDirect = !"array".equals(bufferType); this.allocate = size -> pool.getBuffer(isDirect, size); } @@ -147,6 +151,7 @@ public void setup() throws Exception { createFile(fs, vectorPath, true, DATASET); } + @AfterEach @Override public void teardown() throws Exception { pool.release(); @@ -177,10 +182,8 @@ protected FSDataInputStream openVectorFile(final FileSystem fs) throws IOExcepti .build()); } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testVectoredReadMultipleRanges(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testVectoredReadMultipleRanges() throws Exception { List fileRanges = new ArrayList<>(); for (int i = 0; i < 10; i++) { FileRange fileRange = FileRange.createFileRange(i * 100, 100); @@ -201,10 +204,8 @@ public void testVectoredReadMultipleRanges(String pBufferType) throws Exception } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testVectoredReadAndReadFully(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testVectoredReadAndReadFully() throws Exception { List fileRanges = new ArrayList<>(); range(fileRanges, 100, 100); try (FSDataInputStream in = openVectorFile()) { @@ -219,10 +220,8 @@ public void testVectoredReadAndReadFully(String pBufferType) throws Exception { } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testVectoredReadWholeFile(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testVectoredReadWholeFile() throws Exception { describe("Read the whole file in one single vectored read"); List fileRanges = new ArrayList<>(); range(fileRanges, 0, DATASET_LEN); @@ -240,10 +239,8 @@ public void testVectoredReadWholeFile(String pBufferType) throws Exception { * As the minimum seek value is 4*1024,none of the below ranges * will get merged. */ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testDisjointRanges(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testDisjointRanges() throws Exception { List fileRanges = new ArrayList<>(); range(fileRanges, 0, 100); range(fileRanges, 4_000 + 101, 100); @@ -259,10 +256,8 @@ public void testDisjointRanges(String pBufferType) throws Exception { * As the minimum seek value is 4*1024, all the below ranges * will get merged into one. */ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testAllRangesMergedIntoOne(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testAllRangesMergedIntoOne() throws Exception { List fileRanges = new ArrayList<>(); final int length = 100; range(fileRanges, 0, length); @@ -279,10 +274,8 @@ public void testAllRangesMergedIntoOne(String pBufferType) throws Exception { * As the minimum seek value is 4*1024, the first three ranges will be * merged into and other two will remain as it is. 
*/ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testSomeRangesMergedSomeUnmerged(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testSomeRangesMergedSomeUnmerged() throws Exception { FileSystem fs = getFileSystem(); List fileRanges = new ArrayList<>(); range(fileRanges, 8 * 1024, 100); @@ -306,10 +299,8 @@ public void testSomeRangesMergedSomeUnmerged(String pBufferType) throws Exceptio * Most file systems won't support overlapping ranges. * Currently, only Raw Local supports it. */ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testOverlappingRanges(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testOverlappingRanges() throws Exception { if (!isSupported(VECTOR_IO_OVERLAPPING_RANGES)) { verifyExceptionalVectoredRead( getSampleOverlappingRanges(), @@ -327,10 +318,8 @@ public void testOverlappingRanges(String pBufferType) throws Exception { /** * Same ranges are special case of overlapping. */ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testSameRanges(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testSameRanges() throws Exception { if (!isSupported(VECTOR_IO_OVERLAPPING_RANGES)) { verifyExceptionalVectoredRead( getSampleSameRanges(), @@ -348,10 +337,8 @@ public void testSameRanges(String pBufferType) throws Exception { /** * A null range is not permitted. */ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testNullRange(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testNullRange() throws Exception { List fileRanges = new ArrayList<>(); range(fileRanges, 500, 100); fileRanges.add(null); @@ -362,19 +349,15 @@ public void testNullRange(String pBufferType) throws Exception { /** * A null range is not permitted. 
*/ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testNullRangeList(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testNullRangeList() throws Exception { verifyExceptionalVectoredRead( null, NullPointerException.class); } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testSomeRandomNonOverlappingRanges(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testSomeRandomNonOverlappingRanges() throws Exception { List fileRanges = new ArrayList<>(); range(fileRanges, 500, 100); range(fileRanges, 1000, 200); @@ -387,10 +370,8 @@ public void testSomeRandomNonOverlappingRanges(String pBufferType) throws Except } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testConsecutiveRanges(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testConsecutiveRanges() throws Exception { List fileRanges = new ArrayList<>(); final int offset = 500; final int length = 2011; @@ -403,10 +384,8 @@ public void testConsecutiveRanges(String pBufferType) throws Exception { } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testEmptyRanges(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testEmptyRanges() throws Exception { List fileRanges = new ArrayList<>(); try (FSDataInputStream in = openVectorFile()) { in.readVectored(fileRanges, allocate); @@ -425,10 +404,8 @@ public void testEmptyRanges(String pBufferType) throws Exception { * The contract option {@link ContractOptions#VECTOR_IO_EARLY_EOF_CHECK} is used * to determine which check to perform. 
*/ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testEOFRanges(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testEOFRanges() throws Exception { describe("Testing reading with an offset past the end of the file"); List fileRanges = range(DATASET_LEN + 1, 100); @@ -441,10 +418,8 @@ public void testEOFRanges(String pBufferType) throws Exception { } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testVectoredReadWholeFilePlusOne(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testVectoredReadWholeFilePlusOne() throws Exception { describe("Try to read whole file plus 1 byte"); List fileRanges = range(0, DATASET_LEN + 1); @@ -471,35 +446,29 @@ private void expectEOFinRead(final List fileRanges) throws Exception } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testNegativeLengthRange(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testNegativeLengthRange() throws Exception { + verifyExceptionalVectoredRead(range(0, -50), IllegalArgumentException.class); } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testNegativeOffsetRange(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testNegativeOffsetRange() throws Exception { verifyExceptionalVectoredRead(range(-1, 50), EOFException.class); } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testNullReleaseOperation(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testNullReleaseOperation() throws Exception { + final List range = range(0, 10); try (FSDataInputStream in = openVectorFile()) { - intercept(NullPointerException.class, () -> - in.readVectored(range, allocate, null)); + intercept(NullPointerException.class, () -> + in.readVectored(range, allocate, null)); } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testNormalReadAfterVectoredRead(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testNormalReadAfterVectoredRead() throws Exception { List fileRanges = createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()) { in.readVectored(fileRanges, allocate); @@ -514,10 +483,8 @@ public void testNormalReadAfterVectoredRead(String pBufferType) throws Exception } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testVectoredReadAfterNormalRead(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testVectoredReadAfterNormalRead() throws Exception { List fileRanges = createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()) { // read starting 200 bytes @@ -532,10 +499,8 @@ public void testVectoredReadAfterNormalRead(String pBufferType) throws Exception } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testMultipleVectoredReads(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testMultipleVectoredReads() throws Exception { List fileRanges1 = 
createSampleNonOverlappingRanges(); List fileRanges2 = createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()) { @@ -553,10 +518,8 @@ public void testMultipleVectoredReads(String pBufferType) throws Exception { * operation and then uses a separate thread pool to process the * results asynchronously. */ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testVectoredIOEndToEnd(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testVectoredIOEndToEnd() throws Exception { List fileRanges = new ArrayList<>(); range(fileRanges, 8 * 1024, 100); range(fileRanges, 14 * 1024, 100); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java index dcb474fa6f050..8cee3b2484586 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java @@ -34,15 +34,20 @@ import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.junit.jupiter.params.ParameterizedTest; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.contract.ContractTestUtils.validateVectoredReadResult; import static org.apache.hadoop.test.LambdaTestUtils.intercept; +@ParameterizedClass(name="buffer-{0}") +@MethodSource("params") public class TestLocalFSContractVectoredRead extends AbstractContractVectoredReadTest { - public TestLocalFSContractVectoredRead() { + public TestLocalFSContractVectoredRead(final String bufferType) { + super(bufferType); } @Override @@ -50,11 +55,8 @@ protected AbstractFSContract createContract(Configuration conf) { return new LocalFSContract(conf); } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testChecksumValidationDuringVectoredRead( - String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testChecksumValidationDuringVectoredRead() throws Exception { Path testPath = path("big_range_checksum_file"); List someRandomRanges = new ArrayList<>(); someRandomRanges.add(FileRange.createFileRange(10, 1024)); @@ -67,11 +69,8 @@ public void testChecksumValidationDuringVectoredRead( * Test for file size less than checksum chunk size. * {@code ChecksumFileSystem#bytesPerChecksum}. 
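For reviewers new to the JUnit 5.13 feature this patch migrates to, here is a self-contained sketch of class-level parameterization; the class and method names are illustrative, while the @ParameterizedClass/@MethodSource wiring and the constructor injection mirror the changes above.

```java
import java.util.Arrays;
import java.util.List;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedClass;
import org.junit.jupiter.params.provider.MethodSource;

// Every @Test method runs once per value returned by params().
// The current value arrives through the constructor, so the field
// can be final instead of being set by a per-test init method.
@ParameterizedClass(name = "buffer-{0}")
@MethodSource("params")
class BufferTypeSuite {

  public static List<String> params() {
    return Arrays.asList("direct", "array");
  }

  private final String bufferType;

  BufferTypeSuite(String bufferType) {
    this.bufferType = bufferType;
  }

  @Test
  void bufferTypeInjected() {
    Assertions.assertTrue(
        "direct".equals(bufferType) || "array".equals(bufferType));
  }
}
```

Subclasses repeat the two annotations and forward the parameter with super(bufferType), which is exactly the shape TestLocalFSContractVectoredRead takes above.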
*/ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testChecksumValidationDuringVectoredReadSmallFile( - String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testChecksumValidationDuringVectoredReadSmallFile() throws Exception { Path testPath = path("big_range_checksum_file"); final int length = 471; List smallFileRanges = new ArrayList<>(); @@ -110,10 +109,9 @@ private void validateCheckReadException(Path testPath, () -> validateVectoredReadResult(ranges, datasetCorrupted, 0)); } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void tesChecksumVectoredReadBoundaries(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + + @Test + public void tesChecksumVectoredReadBoundaries() throws Exception { Path testPath = path("boundary_range_checksum_file"); final int length = 1071; LocalFileSystem localFs = (LocalFileSystem) getFileSystem(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java index 7ec7403e181f6..6fe1f4e3a9745 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java @@ -18,13 +18,19 @@ package org.apache.hadoop.fs.contract.rawlocal; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +@ParameterizedClass(name="buffer-{0}") +@MethodSource("params") public class TestRawLocalContractVectoredRead extends AbstractContractVectoredReadTest { - public TestRawLocalContractVectoredRead() { + public TestRawLocalContractVectoredRead(final String bufferType) { + super(bufferType); } @Override diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/FlakyTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/FlakyTest.java new file mode 100644 index 0000000000000..f628dc814c7b9 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/FlakyTest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.test.tags; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.junit.jupiter.api.Tag; + +/** + * JUnit 5 tag to indicate that a test is flaky, which can be used by test runners + * to skip tests which may fail in CI builds. + *
<p>
The test runner tag to filter on is {@code flaky}. + * + *
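How the {@code flaky} tag is consumed is left to the runner. As one hedged illustration (not part of this patch): Maven Surefire maps JUnit 5 tags to its groups/excludedGroups filters, and the same exclusion can be done programmatically through the JUnit Platform Launcher.

```java
import static org.junit.platform.engine.discovery.DiscoverySelectors.selectPackage;

import org.junit.platform.launcher.Launcher;
import org.junit.platform.launcher.LauncherDiscoveryRequest;
import org.junit.platform.launcher.TagFilter;
import org.junit.platform.launcher.core.LauncherDiscoveryRequestBuilder;
import org.junit.platform.launcher.core.LauncherFactory;

public class NonFlakyTestRun {

  public static void main(String[] args) {
    // Discover everything under the package, then drop anything
    // tagged "flaky", i.e. classes/methods annotated @FlakyTest.
    LauncherDiscoveryRequest request = LauncherDiscoveryRequestBuilder.request()
        .selectors(selectPackage("org.apache.hadoop"))
        .filters(TagFilter.excludeTags("flaky"))
        .build();
    Launcher launcher = LauncherFactory.create();
    launcher.execute(request);
  }
}
```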
<p>
+ * Only use this for tests which really are flaky due to some external factor, such as + * network status. If a test fails intermittently due to some timing/race condition, + * fix that. + */ +@Target({ElementType.METHOD, ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@Tag("flaky") +@Inherited +public @interface FlakyTest { + String value(); +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/IntegrationTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/IntegrationTest.java new file mode 100644 index 0000000000000..6015c47f5efd8 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/IntegrationTest.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.test.tags; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.junit.jupiter.api.Tag; + +/** + * JUnit 5 tag to indicate that a test is an integration test which is + * to be executed against a running service -a service whose + * deployment and operation is not part of the codebase. + *
<p>
+ * This is primarily for test suites which are targeted at + * remote cloud stores. + *
<p>
Key aspects of these tests are not just that they depend upon + * an external service: the test run must be configured such that + * it can connect to and authenticate with the service. + *
<p>
+ * Consult the documentation of the specific module to + * determine how to do this. + * + *
<p>
The test runner tag to filter on is {@code integration}. + */ +@Target({ElementType.METHOD, ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@Inherited +@Tag("integration") +public @interface IntegrationTest { +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/LoadTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/LoadTest.java new file mode 100644 index 0000000000000..ba8d1c499e947 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/LoadTest.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.test.tags; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.junit.jupiter.api.Tag; + +/** + * JUnit 5 tag to indicate that a test suite is a load test suite, which is + * designed to load/overload the target system. + *
<p>
If this test is directed at cloud infrastructure, the load may be significant + * enough to trigger throttling, which may be observed not only by other tests, but + * by other users/applications using the same account. + *
<p>
The test runner tag to filter on is {@code load}. + *
<p>
Note: this annotation should be accompanied by the {@link ScaleTest} + * tag to indicate it is a specific type of scale test. */ +@Target({ElementType.METHOD, ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@Inherited +@Tag("load") +public @interface LoadTest { +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/RootFilesystemTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/RootFilesystemTest.java new file mode 100644 index 0000000000000..8ff284f304233 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/RootFilesystemTest.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.test.tags; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.junit.jupiter.api.Tag; + +/** + * JUnit 5 tag to indicate that a filesystem test works against the root FS, + * which can be used by test runners + * to skip tests which work on the root directory. + *
<p>
The test runner tag to filter on is {@code rootfilesystem}. */ +@Target({ElementType.METHOD, ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@Inherited +@Tag("rootfilesystem") +public @interface RootFilesystemTest { +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/ScaleTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/ScaleTest.java new file mode 100644 index 0000000000000..d58ec11c4628d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/ScaleTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.test.tags; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.junit.jupiter.api.Tag; + +/** + * JUnit 5 tag to indicate that a test is a scale test, which can be used by test runners + * to skip slower tests. + *
<p>
The test runner tag to filter on is {@code scale}. + */ +@Target({ElementType.METHOD, ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@Inherited +@Tag("scale") +public @interface ScaleTest { +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/package-info.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/package-info.java new file mode 100644 index 0000000000000..be38df9abd292 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/tags/package-info.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * JUnit 5 tags. + *
<p>
+ * For use in Hadoop's own test suites, and those which extend them, such as FileSystem contract + * tests. + */ +@org.apache.hadoop.classification.InterfaceStability.Unstable +@org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate("Derived Test Suites") +package org.apache.hadoop.test.tags; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java index 217395c972793..c4f08e5e7e438 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java @@ -22,6 +22,8 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest; @@ -30,10 +32,13 @@ /** * Contract test for vectored reads through HDFS connector. */ +@ParameterizedClass(name="buffer-{0}") +@MethodSource("params") public class TestHDFSContractVectoredRead extends AbstractContractVectoredReadTest { - public TestHDFSContractVectoredRead() { + public TestHDFSContractVectoredRead(final String bufferType) { + super(bufferType); } @BeforeAll diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java index 41a107c36f07a..351b263e56fb7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java @@ -21,7 +21,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.jupiter.api.BeforeEach; +import org.apache.hadoop.test.tags.IntegrationTest; + +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.s3a.S3ATestUtils.enableAnalyticsAccelerator; import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipForAnyEncryptionExceptSSES3; @@ -34,15 +37,13 @@ * implementation of readVectored {@link org.apache.hadoop.fs.PositionedReadable} * still works. 
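Because each of these annotations is meta-annotated with @Tag and marked @Inherited, they compose on a single class and propagate down test class hierarchies. A hypothetical suite (not part of this patch) combining them, per the LoadTest javadoc's advice to pair it with @ScaleTest:

```java
import org.junit.jupiter.api.Test;

import org.apache.hadoop.test.tags.IntegrationTest;
import org.apache.hadoop.test.tags.LoadTest;
import org.apache.hadoop.test.tags.ScaleTest;

// Selectable by any of the tags: integration, scale or load.
@IntegrationTest
@ScaleTest
@LoadTest
public class ILoadTestExampleThroughput {

  @Test
  public void testSustainedTraffic() {
    // drive sustained traffic at the target store here
  }
}
```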
*/ +@IntegrationTest +@ParameterizedClass(name="buffer-{0}") +@MethodSource("params") public class ITestS3AContractAnalyticsStreamVectoredRead extends AbstractContractVectoredReadTest { - public ITestS3AContractAnalyticsStreamVectoredRead() { - } - - @BeforeEach - @Override - public void setup() throws Exception { - super.setup(); + public ITestS3AContractAnalyticsStreamVectoredRead(String bufferType) { + super(bufferType); } /** diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java index 48173fe230b6a..da90fc0dfd709 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java @@ -23,7 +23,8 @@ import java.util.List; import org.assertj.core.api.Assertions; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,6 +39,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.statistics.MeanStatistic; +import org.apache.hadoop.test.tags.IntegrationTest; import static java.util.stream.Collectors.toList; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; @@ -53,7 +55,9 @@ /** * Contract tests for bulk delete operation for S3A Implementation. */ - +@IntegrationTest +@ParameterizedClass(name = "enableMultiObjectDelete={0}") +@MethodSource("enableMultiObjectDelete") public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest { private static final Logger LOG = LoggerFactory.getLogger(ITestS3AContractBulkDelete.class); @@ -66,7 +70,7 @@ public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest { */ private static final int DELETE_PAGE_SIZE = 20; - private boolean enableMultiObjectDelete; + private final boolean enableMultiObjectDelete; public static Iterable enableMultiObjectDelete() { return Arrays.asList(new Object[][]{ @@ -75,8 +79,8 @@ public static Iterable enableMultiObjectDelete() { }); } - public void initITestS3AContractBulkDelete(boolean pEnableMultiObjectDelete) { - this.enableMultiObjectDelete = pEnableMultiObjectDelete; + public ITestS3AContractBulkDelete(boolean enableMultiObjectDelete) { + this.enableMultiObjectDelete = enableMultiObjectDelete; } @Override @@ -117,11 +121,8 @@ public void validatePageSize() throws Exception { .isEqualTo(getExpectedPageSize()); } - @MethodSource("enableMultiObjectDelete") - @ParameterizedTest(name = "enableMultiObjectDelete = {0}") - public void testBulkDeleteZeroPageSizePrecondition( - boolean pEnableMultiObjectDelete) throws Exception { - initITestS3AContractBulkDelete(pEnableMultiObjectDelete); + @Test + public void testBulkDeleteZeroPageSizePrecondition() throws Exception { if (!enableMultiObjectDelete) { // if multi-object delete is disabled, skip this test as // page size is always 1. 
@@ -136,11 +137,8 @@ public void testBulkDeleteZeroPageSizePrecondition( } } - @MethodSource("enableMultiObjectDelete") - @ParameterizedTest(name = "enableMultiObjectDelete = {0}") - public void testPageSizeWhenMultiObjectsDisabled( - boolean pEnableMultiObjectDelete) throws Exception { - initITestS3AContractBulkDelete(pEnableMultiObjectDelete); + @Test + public void testPageSizeWhenMultiObjectsDisabled() throws Exception { Configuration conf = getContract().getConf(); conf.setBoolean(Constants.ENABLE_MULTI_DELETE, false); Path testPath = path(getMethodName()); @@ -153,6 +151,7 @@ public void testPageSizeWhenMultiObjectsDisabled( } @Override + @Test public void testDeletePathsDirectory() throws Exception { List paths = new ArrayList<>(); Path dirPath = new Path(basePath, "dir"); @@ -169,11 +168,8 @@ public void testDeletePathsDirectory() throws Exception { assertIsDirectory(dirPath); } - @MethodSource("enableMultiObjectDelete") - @ParameterizedTest(name = "enableMultiObjectDelete = {0}") - public void testBulkDeleteParentDirectoryWithDirectories( - boolean pEnableMultiObjectDelete) throws Exception { - initITestS3AContractBulkDelete(pEnableMultiObjectDelete); + @Test + public void testBulkDeleteParentDirectoryWithDirectories() throws Exception { List paths = new ArrayList<>(); Path dirPath = new Path(basePath, "dir"); fs.mkdirs(dirPath); @@ -187,6 +183,7 @@ public void testBulkDeleteParentDirectoryWithDirectories( assertIsDirectory(subDir); } + @Test public void testBulkDeleteParentDirectoryWithFiles() throws Exception { List paths = new ArrayList<>(); Path dirPath = new Path(basePath, "dir"); @@ -202,8 +199,7 @@ public void testBulkDeleteParentDirectoryWithFiles() throws Exception { } - @MethodSource("enableMultiObjectDelete") - @ParameterizedTest(name = "enableMultiObjectDelete = {0}") + @Test public void testRateLimiting() throws Exception { if (!enableMultiObjectDelete) { skip("Multi-object delete is disabled so hard to trigger rate limiting"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractContentSummary.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractContentSummary.java index 5544ab7bad68a..9ac2328ba4ba6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractContentSummary.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractContentSummary.java @@ -27,9 +27,11 @@ import org.apache.hadoop.fs.contract.AbstractContractContentSummaryTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.contract.ContractTestUtils.touch; +@IntegrationTest public class ITestS3AContractContentSummary extends AbstractContractContentSummaryTest { @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractCreate.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractCreate.java index 03f5611b187b3..65894cc65183d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractCreate.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractCreate.java @@ -21,13 +21,15 @@ import java.util.Arrays; import java.util.Collection; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractCreateTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.s3a.S3ATestConstants.KEY_PERFORMANCE_TESTS_ENABLED; import static org.apache.hadoop.fs.s3a.Constants.CONNECTION_EXPECT_CONTINUE; @@ -41,6 +43,9 @@ * Parameterized on the create performance flag as all overwrite * tests are required to fail in create performance mode. */ +@IntegrationTest +@ParameterizedClass(name="performance-{0}-continue={1}") +@MethodSource("params") public class ITestS3AContractCreate extends AbstractContractCreateTest { /** @@ -58,17 +63,17 @@ public static Collection params() { /** * Is this test run in create performance mode? */ - private boolean createPerformance; + private final boolean createPerformance; /** * Expect a 100-continue response? */ - private boolean expectContinue; + private final boolean expectContinue; - public void initITestS3AContractCreate(final boolean pCreatePerformance, - final boolean pExpectContinue) { - this.createPerformance = pCreatePerformance; - this.expectContinue = pExpectContinue; + public ITestS3AContractCreate(final boolean createPerformance, + final boolean expectContinue) { + this.createPerformance = createPerformance; + this.expectContinue = expectContinue; } @Override @@ -92,11 +97,8 @@ protected Configuration createConfiguration() { return conf; } - @MethodSource("params") - @ParameterizedTest - public void testOverwriteNonEmptyDirectory(boolean pCreatePerformance, - boolean pExpectContinue) throws Throwable { - initITestS3AContractCreate(pCreatePerformance, pExpectContinue); + @Test + public void testOverwriteNonEmptyDirectory() throws Throwable { try { // Currently analytics accelerator does not support reading of files that have been overwritten. 
// This is because the analytics accelerator library caches metadata, and when a file is @@ -112,11 +114,9 @@ public void testOverwriteNonEmptyDirectory(boolean pCreatePerformance, } } - @MethodSource("params") - @ParameterizedTest - public void testOverwriteEmptyDirectory(boolean pCreatePerformance, - boolean pExpectContinue) throws Throwable { - initITestS3AContractCreate(pCreatePerformance, pExpectContinue); + @Override + @Test + public void testOverwriteEmptyDirectory() throws Throwable { try { super.testOverwriteEmptyDirectory(); failWithCreatePerformance(); @@ -125,11 +125,9 @@ public void testOverwriteEmptyDirectory(boolean pCreatePerformance, } } - @MethodSource("params") - @ParameterizedTest - public void testCreateFileOverExistingFileNoOverwrite(boolean pCreatePerformance, - boolean pExpectContinue) throws Throwable { - initITestS3AContractCreate(pCreatePerformance, pExpectContinue); + @Test + @Override + public void testCreateFileOverExistingFileNoOverwrite() throws Throwable { try { super.testCreateFileOverExistingFileNoOverwrite(); failWithCreatePerformance(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDelete.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDelete.java index a47dcaef61ee1..9c90288f3b482 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDelete.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDelete.java @@ -21,10 +21,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.test.tags.IntegrationTest; /** * S3A contract tests covering deletes. */ +@IntegrationTest public class ITestS3AContractDelete extends AbstractContractDeleteTest { @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java index 1d35439d4fee2..4e3013e9d4335 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractDistCp.java @@ -18,12 +18,15 @@ package org.apache.hadoop.fs.contract.s3a; +import org.junit.jupiter.api.Test; + import static org.apache.hadoop.fs.s3a.Constants.*; import static org.apache.hadoop.fs.s3a.S3ATestConstants.SCALE_TEST_TIMEOUT_MILLIS; import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfAnalyticsAcceleratorEnabled; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageStatistics; +import org.apache.hadoop.test.tags.IntegrationTest; import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; /** @@ -31,6 +34,7 @@ * Uses the block output stream, buffered to disk. This is the * recommended output mechanism for DistCP due to its scalability. 
*/ +@IntegrationTest public class ITestS3AContractDistCp extends AbstractContractDistCpTest { private static final long MULTIPART_SETTING = MULTIPART_MIN_SIZE; @@ -62,6 +66,7 @@ protected S3AContract createContract(Configuration conf) { return new S3AContract(conf); } + @Test @Override public void testDistCpWithIterator() throws Exception { final long renames = getRenameOperationCount(); @@ -70,6 +75,7 @@ public void testDistCpWithIterator() throws Exception { renames, "Expected no renames for a direct write distcp"); } + @Test @Override public void testNonDirectWrite() throws Exception { final long renames = getRenameOperationCount(); @@ -78,6 +84,7 @@ public void testNonDirectWrite() throws Exception { "Expected 2 renames for a non-direct write distcp"); } + @Test @Override public void testDistCpUpdateCheckFileSkip() throws Exception { // Currently analytics accelerator does not support reading of files that have been overwritten. diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractEtag.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractEtag.java index 824b2dab1f8df..c02f368df181d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractEtag.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractEtag.java @@ -21,10 +21,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractEtagTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.test.tags.IntegrationTest; /** * Test S3A etag support. */ +@IntegrationTest public class ITestS3AContractEtag extends AbstractContractEtagTest { @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractGetFileStatus.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractGetFileStatus.java index c1f0056bcca60..28f4e4bb20466 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractGetFileStatus.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractGetFileStatus.java @@ -23,6 +23,8 @@ import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.S3ATestConstants; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; + import org.junit.jupiter.api.AfterEach; /** @@ -30,6 +32,7 @@ * Some of the tests can take too long when the fault injection rate is high, * so the test timeout is extended. 
*/ +@IntegrationTest public class ITestS3AContractGetFileStatus extends AbstractContractGetFileStatusTest { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdir.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdir.java index 847f6980b5619..4936615b28f9e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdir.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdir.java @@ -21,12 +21,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.s3a.S3ATestUtils.setPerformanceFlags; /** * Test dir operations on S3A. */ +@IntegrationTest public class ITestS3AContractMkdir extends AbstractContractMkdirTest { @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdirWithCreatePerf.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdirWithCreatePerf.java index f36094e4283ed..4570320029d59 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdirWithCreatePerf.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMkdirWithCreatePerf.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; @@ -36,6 +37,7 @@ /** * Test mkdir operations on S3A with create performance mode. */ +@IntegrationTest public class ITestS3AContractMkdirWithCreatePerf extends AbstractContractMkdirTest { @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java index 550a1caacd02a..0afdf20595bcc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractMultipartUploader.java @@ -22,7 +22,11 @@ import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.test.tags.IntegrationTest; +import org.apache.hadoop.test.tags.ScaleTest; + import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; import static org.apache.hadoop.fs.s3a.S3ATestConstants.DEFAULT_SCALE_TESTS_ENABLED; @@ -43,6 +47,8 @@ * to enable it, and partition size option to control the size of * parts uploaded. */ +@IntegrationTest +@ScaleTest public class ITestS3AContractMultipartUploader extends AbstractContractMultipartUploaderTest { @@ -112,21 +118,26 @@ public void setup() throws Exception { /** * S3 has no concept of directories, so this test does not apply. 
*/ + @Test + @Override public void testDirectoryInTheWay() throws Exception { skip("Unsupported"); } + @Test @Override public void testMultipartUploadReverseOrder() throws Exception { skip("skipped for speed"); } + @Test @Override public void testMultipartUploadReverseOrderNonContiguousPartNumbers() throws Exception { assumeNotS3ExpressFileSystem(getFileSystem()); super.testMultipartUploadReverseOrderNonContiguousPartNumbers(); } + @Test @Override public void testConcurrentUploads() throws Throwable { assumeNotS3ExpressFileSystem(getFileSystem()); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractOpen.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractOpen.java index cc39e2a338f60..4d71a0da7f39b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractOpen.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractOpen.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.AbstractContractOpenTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; @@ -37,6 +38,7 @@ /** * S3A contract tests opening files. */ +@IntegrationTest public class ITestS3AContractOpen extends AbstractContractOpenTest { @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java index eaf10b7ed9789..1eae7e7e1858a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRename.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; @@ -40,6 +41,7 @@ /** * S3A contract tests covering rename. 
*/ +@IntegrationTest public class ITestS3AContractRename extends AbstractContractRenameTest { public static final Logger LOG = LoggerFactory.getLogger( @@ -55,6 +57,7 @@ protected AbstractFSContract createContract(Configuration conf) { return new S3AContract(conf); } + @Test @Override public void testRenameDirIntoExistingDir() throws Throwable { describe("S3A rename into an existing directory returns false"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java index 4f999c99dfa3a..e4d0ffba31c37 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractRootDir.java @@ -20,6 +20,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -27,12 +28,14 @@ import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeSkipRootTests; /** * root dir operations against an S3 bucket. */ +@IntegrationTest public class ITestS3AContractRootDir extends AbstractContractRootDirectoryTest { @@ -57,6 +60,7 @@ public S3AFileSystem getFileSystem() { } @Override + @Test @Disabled("S3 always return false when non-recursively remove root dir") public void testRmNonEmptyRootDirNonRecursive() throws Throwable { } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java index e8b015fafbede..7ba5fe1177c76 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractSeek.java @@ -24,8 +24,10 @@ import java.util.Arrays; import java.util.Collection; +import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,6 +43,7 @@ import org.apache.hadoop.fs.s3a.S3AInputPolicy; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory; +import org.apache.hadoop.test.tags.IntegrationTest; import org.apache.hadoop.util.NativeCodeLoader; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY_DEFAULT; @@ -50,7 +53,6 @@ import static org.apache.hadoop.fs.s3a.Constants.INPUT_FADVISE; import static org.apache.hadoop.fs.s3a.Constants.READAHEAD_RANGE; import static org.apache.hadoop.fs.s3a.Constants.SSL_CHANNEL_MODE; -import static org.apache.hadoop.fs.s3a.S3ATestConstants.FS_S3A_IMPL_DISABLE_CACHE; import static org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory. SSLChannelMode.Default_JSSE; import static org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory. @@ -63,6 +65,9 @@ /** * S3A contract tests covering file seek. 
*/
+@IntegrationTest
+@ParameterizedClass(name="policy-{0}-ssl-{1}")
+@MethodSource("params")
 public class ITestS3AContractSeek extends AbstractContractSeekTest {

   private static final Logger LOG =
@@ -79,8 +84,8 @@ public class ITestS3AContractSeek extends AbstractContractSeekTest {

   /**
    * This test suite is parameterized for the different seek policies
-   * which S3A Supports.
-   * @return a list of seek policies to test.
+   * which S3A supports, and different SSL back ends.
+   * @return a list of (seek policy, SSL channel mode) pairs to test.
    */
   public static Collection params() {
     return Arrays.asList(new Object[][]{
@@ -94,7 +99,7 @@ public static Collection params() {
    * Create the test with a chosen seek policy and SSL channel mode.
    * @param pSeekPolicy fadvise policy to use.
    * @param pSslChannelMode SSL channel mode to use.
    */
-  public void initITestS3AContractSeek(final String pSeekPolicy,
+  public ITestS3AContractSeek(final String pSeekPolicy,
       final DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) {
     this.seekPolicy = pSeekPolicy;
     this.sslChannelMode = pSslChannelMode;
@@ -138,11 +143,7 @@ protected AbstractFSContract createContract(Configuration conf) {
   @Override
   public void teardown() throws Exception {
     super.teardown();
-    S3AFileSystem fs = getFileSystem();
-    if (fs != null && fs.getConf().getBoolean(FS_S3A_IMPL_DISABLE_CACHE,
-        false)) {
-      fs.close();
-    }
+    S3ATestUtils.maybeCloseFilesystem(getFileSystem());
   }

   /**
@@ -209,11 +210,9 @@ public void validateSSLChannelMode() {
     }
   }

-  @MethodSource("params")
-  @ParameterizedTest(name="policy={0}")
-  public void testReadPolicyInFS(String pSeekPolicy,
-      DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable {
-    initITestS3AContractSeek(pSeekPolicy, pSslChannelMode);
+
+  @Test
+  public void testReadPolicyInFS() throws Throwable {
     describe("Verify the read policy is being consistently set");
     S3AFileSystem fs = getFileSystem();
     assertEquals(S3AInputPolicy.getPolicy(seekPolicy, S3AInputPolicy.Normal),
@@ -225,11 +224,9 @@ public void testReadPolicyInFS(String pSeekPolicy,
    * This sets up a read which will span the active readahead and,
    * in random IO mode, a subsequent GET.
    */
-  @MethodSource("params")
-  @ParameterizedTest(name="policy={0}")
-  public void testReadAcrossReadahead(String pSeekPolicy,
-      DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable {
-    initITestS3AContractSeek(pSeekPolicy, pSslChannelMode);
+
+  @Test
+  public void testReadAcrossReadahead() throws Throwable {
     describe("Sets up a read which will span the active readahead"
         + " and the rest of the file.");
     Path path = path("testReadAcrossReadahead");
@@ -258,11 +255,9 @@ public void testReadAcrossReadahead(String pSeekPolicy,
    * Read across the end of the read buffer using the readByte call,
    * which will read a single byte only.
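The conversion above is the core pattern of this patch: per-method `@ParameterizedTest` runs with an `init...()` helper become a class-level parameterization, where JUnit Jupiter's `@ParameterizedClass` (introduced in 5.13) constructs one instance per parameter set and every plain `@Test` method runs against it. A condensed sketch of the shape, with illustrative names:

```java
import java.util.Arrays;
import java.util.Collection;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedClass;
import org.junit.jupiter.params.provider.MethodSource;

@ParameterizedClass(name = "policy-{0}")
@MethodSource("params")
class SeekPolicyExampleTest {

  public static Collection<Object[]> params() {
    return Arrays.asList(new Object[][]{
        {"sequential"},
        {"random"},
    });
  }

  // constructor injection lets the parameter field be final
  private final String seekPolicy;

  SeekPolicyExampleTest(final String seekPolicy) {
    this.seekPolicy = seekPolicy;
  }

  @Test
  void testPolicyApplied() {
    // runs once per parameter set, on a fresh instance
    System.out.println("seek policy: " + seekPolicy);
  }
}
```

This is also why the suites can replace mutable parameter fields with `final` ones: the parameters are fixed for the lifetime of each instance.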
*/ - @MethodSource("params") - @ParameterizedTest(name="policy={0}") - public void testReadSingleByteAcrossReadahead(String pSeekPolicy, - DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { - initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); + + @Test + public void testReadSingleByteAcrossReadahead() throws Throwable { describe("Read over boundary using read()/readByte() calls."); Path path = path("testReadSingleByteAcrossReadahead"); writeTestDataset(path); @@ -282,11 +277,9 @@ public void testReadSingleByteAcrossReadahead(String pSeekPolicy, } } - @MethodSource("params") - @ParameterizedTest(name="policy={0}") - public void testSeekToReadaheadAndRead(String pSeekPolicy, - DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { - initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); + + @Test + public void testSeekToReadaheadAndRead() throws Throwable { describe("Seek to just before readahead limit and call" + " InputStream.read(byte[])"); Path path = path("testSeekToReadaheadAndRead"); @@ -305,11 +298,9 @@ public void testSeekToReadaheadAndRead(String pSeekPolicy, } } - @MethodSource("params") - @ParameterizedTest(name="policy={0}") - public void testSeekToReadaheadExactlyAndRead(String pSeekPolicy, - DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { - initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); + + @Test + public void testSeekToReadaheadExactlyAndRead() throws Throwable { describe("Seek to exactly the readahead limit and call" + " InputStream.read(byte[])"); Path path = path("testSeekToReadaheadExactlyAndRead"); @@ -323,16 +314,16 @@ public void testSeekToReadaheadExactlyAndRead(String pSeekPolicy, // expect to read at least one byte. int l = in.read(temp); LOG.info("Read of byte array at offset {} returned {} bytes", offset, l); - assertTrue(l > 0, "Reading in temp data"); + Assertions.assertThat(l). 
+ describedAs("Reading in temp data") + .isGreaterThan(0); assertDatasetEquals(offset, "read at end of boundary", temp, l); } } - @MethodSource("params") - @ParameterizedTest(name="policy={0}") - public void testSeekToReadaheadExactlyAndReadByte(String pSeekPolicy, - DelegatingSSLSocketFactory.SSLChannelMode pSslChannelMode) throws Throwable { - initITestS3AContractSeek(pSeekPolicy, pSslChannelMode); + + @Test + public void testSeekToReadaheadExactlyAndReadByte() throws Throwable { describe("Seek to exactly the readahead limit and call" + " readByte()"); Path path = path("testSeekToReadaheadExactlyAndReadByte"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractUnbuffer.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractUnbuffer.java index 2c7149ff5cb15..8d5f16d6ea7d9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractUnbuffer.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractUnbuffer.java @@ -21,7 +21,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest; import org.apache.hadoop.fs.contract.AbstractFSContract; +import org.apache.hadoop.test.tags.IntegrationTest; +@IntegrationTest public class ITestS3AContractUnbuffer extends AbstractContractUnbufferTest { @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractVectoredRead.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractVectoredRead.java index ef47acb459587..ccd3814b0482a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractVectoredRead.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractVectoredRead.java @@ -30,7 +30,9 @@ import java.util.concurrent.TimeUnit; import org.assertj.core.api.Assertions; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,7 +46,6 @@ import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.s3a.Constants; -import org.apache.hadoop.fs.s3a.RangeNotSatisfiableEOFException; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3AInputPolicy; import org.apache.hadoop.fs.s3a.S3AInputStream; @@ -53,6 +54,7 @@ import org.apache.hadoop.fs.statistics.StoreStatisticNames; import org.apache.hadoop.fs.statistics.StreamStatisticNames; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_LENGTH; import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY; @@ -77,11 +79,15 @@ * This is a complex suite as it really is testing the store, so measurements of * what IO took place is also performed if the input stream is suitable for this. 
*/ +@IntegrationTest +@ParameterizedClass(name="buffer-{0}") +@MethodSource("params") public class ITestS3AContractVectoredRead extends AbstractContractVectoredReadTest { private static final Logger LOG = LoggerFactory.getLogger(ITestS3AContractVectoredRead.class); - public ITestS3AContractVectoredRead() { + public ITestS3AContractVectoredRead(String bufferType) { + super(bufferType); } @Override @@ -93,6 +99,7 @@ protected AbstractFSContract createContract(Configuration conf) { * Analytics Accelerator Library for Amazon S3 does not support Vectored Reads. * @throws Exception */ + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -107,10 +114,8 @@ public void setup() throws Exception { * this test thinks the file is longer than it is, so the call * fails in the GET request. */ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testEOFRanges416Handling(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testEOFRanges416Handling() throws Exception { FileSystem fs = getFileSystem(); final int extendedLen = DATASET_LEN + 1024; @@ -153,10 +158,8 @@ public void testEOFRanges416Handling(String pBufferType) throws Exception { } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testMinSeekAndMaxSizeConfigsPropagation(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testMinSeekAndMaxSizeConfigsPropagation() throws Exception { Configuration conf = getFileSystem().getConf(); S3ATestUtils.removeBaseAndBucketOverrides(conf, AWS_S3_VECTOR_READS_MAX_MERGED_READ_SIZE, @@ -178,11 +181,8 @@ public void testMinSeekAndMaxSizeConfigsPropagation(String pBufferType) throws E } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testMinSeekAndMaxSizeDefaultValues(String pBufferType) - throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testMinSeekAndMaxSizeDefaultValues() throws Exception { Configuration conf = getFileSystem().getConf(); S3ATestUtils.removeBaseAndBucketOverrides(conf, AWS_S3_VECTOR_READS_MIN_SEEK_SIZE, @@ -199,10 +199,9 @@ public void testMinSeekAndMaxSizeDefaultValues(String pBufferType) } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testStopVectoredIoOperationsCloseStream(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testStopVectoredIoOperationsCloseStream() throws Exception { + List fileRanges = createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()){ in.readVectored(fileRanges, getAllocate()); @@ -222,10 +221,9 @@ public void testStopVectoredIoOperationsCloseStream(String pBufferType) throws E * There's a small risk of a race condition where the unbuffer() call * is made after the vector reads have completed. 
*/ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testStopVectoredIoOperationsUnbuffer(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testStopVectoredIoOperationsUnbuffer() throws Exception { + List fileRanges = createSampleNonOverlappingRanges(); try (FSDataInputStream in = openVectorFile()){ in.readVectored(fileRanges, getAllocate()); @@ -243,10 +241,9 @@ public void testStopVectoredIoOperationsUnbuffer(String pBufferType) throws Exce * As the minimum seek value is 4*1024, the first three ranges will be * merged into and other two will remain as it is. * */ - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testNormalReadVsVectoredReadStatsCollection(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testNormalReadVsVectoredReadStatsCollection() throws Exception { + try (S3AFileSystem fs = getTestFileSystemWithReadAheadDisabled()) { List fileRanges = new ArrayList<>(); range(fileRanges, 10 * 1024, 100); @@ -364,10 +361,8 @@ public void testNormalReadVsVectoredReadStatsCollection(String pBufferType) thro } } - @MethodSource("params") - @ParameterizedTest(name = "Buffer type : {0}") - public void testMultiVectoredReadStatsCollection(String pBufferType) throws Exception { - initAbstractContractVectoredReadTest(pBufferType); + @Test + public void testMultiVectoredReadStatsCollection() throws Exception { try (S3AFileSystem fs = getTestFileSystemWithReadAheadDisabled()) { List ranges1 = getConsecutiveRanges(); List ranges2 = getConsecutiveRanges(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AWrappedIO.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AWrappedIO.java index 3a82441faef48..c83a8d447c498 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AWrappedIO.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AWrappedIO.java @@ -21,10 +21,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.io.wrappedio.impl.TestWrappedIO; +import org.apache.hadoop.test.tags.IntegrationTest; /** * Test S3A access through the wrapped operations class. */ +@IntegrationTest public class ITestS3AWrappedIO extends TestWrappedIO { @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java index 2cbae974ea211..cfc18f6ec1363 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.store.audit.AuditSpan; import org.apache.hadoop.fs.store.audit.AuditSpanSource; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.tags.IntegrationTest; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; @@ -49,6 +50,7 @@ /** * An extension of the contract test base set up for S3A tests. 
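With the vectored read base class now holding its parameters in private final fields, store-specific subclasses such as the one above forward the parameter through `super()` and re-declare the class-level parameterization. A sketch of the split, names illustrative:

```java
import java.util.Arrays;
import java.util.Collection;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedClass;
import org.junit.jupiter.params.provider.MethodSource;

@ParameterizedClass(name = "buffer-{0}")
@MethodSource("params")
abstract class AbstractBufferSuite {

  public static Collection<Object[]> params() {
    return Arrays.asList(new Object[][]{{"direct"}, {"array"}});
  }

  // private and final: subclasses go through the accessor
  private final String bufferType;

  protected AbstractBufferSuite(final String bufferType) {
    this.bufferType = bufferType;
  }

  protected String getBufferType() {
    return bufferType;
  }

  @Test
  public void testBufferTypeKnown() {
    Assertions.assertNotNull(getBufferType());
  }
}

// the concrete suite repeats the annotations and forwards the parameter
@ParameterizedClass(name = "buffer-{0}")
@MethodSource("params")
class StoreBufferSuite extends AbstractBufferSuite {
  StoreBufferSuite(final String bufferType) {
    super(bufferType);
  }
}
```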
*/ +@IntegrationTest public abstract class AbstractS3ATestBase extends AbstractFSContractTestBase implements S3ATestConstants { protected static final Logger LOG = diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java index baaa6eb37a4fe..d843043702403 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestBlockingThreadPoolExecutorService.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.s3a; +import org.apache.hadoop.test.AbstractHadoopTestBase; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.SemaphoredDelegatingExecutor; import org.apache.hadoop.util.StopWatch; @@ -40,7 +41,7 @@ * Basic test for S3A's blocking executor service. */ @Timeout(60) -public class ITestBlockingThreadPoolExecutorService { +public class ITestBlockingThreadPoolExecutorService extends AbstractHadoopTestBase { private static final Logger LOG = LoggerFactory.getLogger( ITestBlockingThreadPoolExecutorService.class); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java index 96470b70d1489..c3a65c8d363bd 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java @@ -46,6 +46,7 @@ import org.apache.hadoop.fs.statistics.IOStatisticAssertions; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.test.tags.ScaleTest; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; @@ -160,6 +161,7 @@ public void testDirectoryListingFileLengths() throws IOException { * verifying the contents of the uploaded file. */ @Test + @ScaleTest public void testBigFilePutAndGet() throws IOException { maybeSkipTest(); assume("Scale test disabled: to enable set property " + diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java index ad95d7ddbcd76..7222e923e963b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java @@ -60,6 +60,7 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.test.AbstractHadoopTestBase; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; import org.apache.hadoop.util.VersionInfo; import org.apache.http.HttpStatus; @@ -83,6 +84,7 @@ * S3A tests for configuration, especially credentials. 
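Tagging a single expensive case such as `testBigFilePutAndGet()` with `@ScaleTest` pays off at execution time, when tags can drive inclusion and exclusion. A sketch using the JUnit Platform launcher; the tag strings `"scale"` and `"load"` are assumptions about what the tag annotations expand to:

```java
import static org.junit.platform.engine.discovery.DiscoverySelectors.selectPackage;

import java.io.PrintWriter;

import org.junit.platform.launcher.Launcher;
import org.junit.platform.launcher.LauncherDiscoveryRequest;
import org.junit.platform.launcher.TagFilter;
import org.junit.platform.launcher.core.LauncherDiscoveryRequestBuilder;
import org.junit.platform.launcher.core.LauncherFactory;
import org.junit.platform.launcher.listeners.SummaryGeneratingListener;

public final class RunScaleTests {

  private RunScaleTests() {
  }

  public static void main(String[] args) {
    LauncherDiscoveryRequest request = LauncherDiscoveryRequestBuilder.request()
        .selectors(selectPackage("org.apache.hadoop.fs.s3a"))
        // run the scale suites, but keep the load tests out
        .filters(TagFilter.includeTags("scale"),
            TagFilter.excludeTags("load"))
        .build();
    Launcher launcher = LauncherFactory.create();
    SummaryGeneratingListener listener = new SummaryGeneratingListener();
    launcher.execute(request, listener);
    listener.getSummary().printTo(new PrintWriter(System.out));
  }
}
```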
*/ @Timeout(value = S3A_TEST_TIMEOUT, unit = TimeUnit.MILLISECONDS) +@IntegrationTest public class ITestS3AConfiguration extends AbstractHadoopTestBase { private static final String EXAMPLE_ID = "AKASOMEACCESSKEY"; private static final String EXAMPLE_KEY = diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContractGetFileStatusV1List.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContractGetFileStatusV1List.java index 06521e97526d1..355efe1357bcc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContractGetFileStatusV1List.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AContractGetFileStatusV1List.java @@ -22,6 +22,8 @@ import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.s3a.S3AContract; +import org.apache.hadoop.test.tags.IntegrationTest; + import org.junit.jupiter.api.AfterEach; import static org.apache.hadoop.fs.s3a.Constants.LIST_VERSION; @@ -33,6 +35,7 @@ /** * S3A contract tests for getFileStatus, using the v1 List Objects API. */ +@IntegrationTest public class ITestS3AContractGetFileStatusV1List extends AbstractContractGetFileStatusTest { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACopyFromLocalFile.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACopyFromLocalFile.java index 2777a013e0a0f..921f2c60e32ca 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACopyFromLocalFile.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACopyFromLocalFile.java @@ -29,9 +29,11 @@ import org.apache.hadoop.fs.contract.s3a.S3AContract; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.tags.IntegrationTest; import org.assertj.core.api.Assertions; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.s3a.Constants.OPTIMIZED_COPY_FROM_LOCAL; @@ -45,6 +47,9 @@ * Parameterized on whether or not the optimized * copyFromLocalFile is enabled. 
*/ +@IntegrationTest +@ParameterizedClass(name="optimized-{0}") +@MethodSource("params") public class ITestS3ACopyFromLocalFile extends AbstractContractCopyFromLocalTest { /** @@ -56,10 +61,10 @@ public static Collection params() { {false}, }); } - private boolean enabled; + private final boolean enabled; - public void initITestS3ACopyFromLocalFile(final boolean pEnabled) { - this.enabled = pEnabled; + public ITestS3ACopyFromLocalFile(final boolean enabled) { + this.enabled = enabled; } @Override @@ -78,10 +83,8 @@ protected AbstractFSContract createContract(Configuration conf) { return new S3AContract(conf); } - @MethodSource("params") - @ParameterizedTest - public void testOptionPropagation(boolean pEnabled) throws Throwable { - initITestS3ACopyFromLocalFile(pEnabled); + @Test + public void testOptionPropagation() throws Throwable { Assertions.assertThat(getFileSystem().hasPathCapability(new Path("/"), OPTIMIZED_COPY_FROM_LOCAL)) .describedAs("path capability of %s", OPTIMIZED_COPY_FROM_LOCAL) @@ -89,10 +92,8 @@ public void testOptionPropagation(boolean pEnabled) throws Throwable { } - @MethodSource("params") - @ParameterizedTest - public void testLocalFilesOnly(boolean pEnabled) throws Throwable { - initITestS3ACopyFromLocalFile(pEnabled); + @Test + public void testLocalFilesOnly() throws Throwable { describe("Copying into other file systems must fail"); Path dest = fileToPath(createTempDirectory("someDir")); @@ -100,10 +101,8 @@ public void testLocalFilesOnly(boolean pEnabled) throws Throwable { () -> getFileSystem().copyFromLocalFile(false, true, dest, dest)); } - @MethodSource("params") - @ParameterizedTest - public void testOnlyFromLocal(boolean pEnabled) throws Throwable { - initITestS3ACopyFromLocalFile(pEnabled); + @Test + public void testOnlyFromLocal() throws Throwable { describe("Copying must be from a local file system"); File source = createTempFile("someFile"); Path dest = copyFromLocal(source, true); @@ -112,10 +111,8 @@ public void testOnlyFromLocal(boolean pEnabled) throws Throwable { () -> getFileSystem().copyFromLocalFile(true, true, dest, dest)); } - @MethodSource("params") - @ParameterizedTest - public void testCopyFromLocalWithNoFileScheme(boolean pEnabled) throws IOException { - initITestS3ACopyFromLocalFile(pEnabled); + @Test + public void testCopyFromLocalWithNoFileScheme() throws IOException { describe("Copying from local file with no file scheme to remote s3 destination"); File source = createTempFile("tempData"); Path dest = path(getMethodName()); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java index 88b6ce169925e..f78f97a097ac3 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFSMainOperations.java @@ -25,6 +25,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.s3a.S3AContract; +import org.apache.hadoop.test.tags.IntegrationTest; + import org.junit.jupiter.api.Disabled; import static org.apache.hadoop.fs.s3a.S3ATestUtils.createTestPath; @@ -36,6 +38,7 @@ /** * S3A Test suite for the FSMainOperationsBaseTest tests. 
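`testOptionPropagation()` above leans on the `hasPathCapability()` probe: callers ask the filesystem whether an optional feature is active instead of assuming it. A self-contained sketch against the local filesystem, using a capability name from `CommonPathCapabilities`:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CapabilityProbe {

  private CapabilityProbe() {
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // probe rather than assume: capabilities vary by store and by config
    boolean acls = fs.hasPathCapability(new Path("/"),
        CommonPathCapabilities.FS_ACLS);
    System.out.println("ACL support: " + acls);
  }
}
```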
*/ +@IntegrationTest public class ITestS3AFSMainOperations extends FSMainOperationsBaseTest { private S3AContract contract; @@ -55,13 +58,6 @@ protected FileSystem createFileSystem() throws Exception { return contract.getTestFileSystem(); } - @Override - public void tearDown() throws Exception { - if (contract.getTestFileSystem() != null) { - super.tearDown(); - } - } - @Override @Disabled("Permissions not supported") public void testListStatusThrowsExceptionForUnreadableDir() { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java index 094b6dbb9c916..8667d2b646fd0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.contract.ContractTestUtils.skip; @@ -47,6 +48,7 @@ * Tests a live S3 system. If your keys and bucket aren't specified, all tests * are marked as passed. */ +@IntegrationTest public class ITestS3AFileSystemContract extends FileSystemContractBaseTest { protected static final Logger LOG = diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java index 50b4bcd0dd4a4..a3cb152aebc4c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java @@ -28,7 +28,9 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.junit.jupiter.params.ParameterizedTest; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,9 +56,11 @@ /** * Test the prefetching input stream with LRU cache eviction on S3ACachingInputStream. 
*/ +@ParameterizedClass(name="max-blocks-{0}") +@MethodSource("params") public class ITestS3APrefetchingLruEviction extends AbstractS3ACostTest { - private String maxBlocks; + private final String maxBlocks; public static Collection params() { return Arrays.asList(new Object[][]{ @@ -65,8 +69,8 @@ public static Collection params() { }); } - public void initITestS3APrefetchingLruEviction(final String pMaxBlocks) { - this.maxBlocks = pMaxBlocks; + public ITestS3APrefetchingLruEviction(final String maxBlocks) { + this.maxBlocks = maxBlocks; } private static final Logger LOG = @@ -91,10 +95,8 @@ public Configuration createConfiguration() { return conf; } - @MethodSource("params") - @ParameterizedTest(name = "max-blocks-{0}") - public void testSeeksWithLruEviction(String pMaxBlocks) throws Throwable { - initITestS3APrefetchingLruEviction(pMaxBlocks); + @Test + public void testSeeksWithLruEviction() throws Throwable { IOStatistics ioStats; byte[] data = ContractTestUtils.dataset(SMALL_FILE_SIZE, 'x', 26); // Path for file which should have length > block size so S3ACachingInputStream is used diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AStorageClass.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AStorageClass.java index 17b686236cc36..9ff731d483d2f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AStorageClass.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AStorageClass.java @@ -24,7 +24,8 @@ import java.util.Map; import org.assertj.core.api.Assertions; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; @@ -49,6 +50,8 @@ /** * Tests of storage class. 
*/ +@ParameterizedClass(name="-{0}") +@MethodSource("params") public class ITestS3AStorageClass extends AbstractS3ATestBase { /** @@ -62,10 +65,10 @@ public static Collection params() { }); } - private String fastUploadBufferType; + private final String fastUploadBufferType; - public void initITestS3AStorageClass(String pFastUploadBufferType) { - this.fastUploadBufferType = pFastUploadBufferType; + public ITestS3AStorageClass(String fastUploadBufferType) { + this.fastUploadBufferType = fastUploadBufferType; } @Override @@ -83,11 +86,8 @@ protected Configuration createConfiguration() { * This test ensures the default storage class configuration (no config or null) * works well with create and copy operations */ - @MethodSource("params") - @ParameterizedTest(name = "fast-upload-buffer-{0}") - public void testCreateAndCopyObjectWithStorageClassDefault( - String pFastUploadBufferType) throws Throwable { - initITestS3AStorageClass(pFastUploadBufferType); + @Test + public void testCreateAndCopyObjectWithStorageClassDefault() throws Throwable { Configuration conf = this.createConfiguration(); S3AContract contract = (S3AContract) createContract(conf); contract.init(); @@ -108,11 +108,8 @@ public void testCreateAndCopyObjectWithStorageClassDefault( * Verify object can be created and copied correctly * with specified storage class */ - @MethodSource("params") - @ParameterizedTest(name = "fast-upload-buffer-{0}") - public void testCreateAndCopyObjectWithStorageClassReducedRedundancy( - String pFastUploadBufferType) throws Throwable { - initITestS3AStorageClass(pFastUploadBufferType); + @Test + public void testCreateAndCopyObjectWithStorageClassReducedRedundancy() throws Throwable { Configuration conf = this.createConfiguration(); conf.set(STORAGE_CLASS, STORAGE_CLASS_REDUCED_REDUNDANCY); S3AContract contract = (S3AContract) createContract(conf); @@ -136,11 +133,8 @@ public void testCreateAndCopyObjectWithStorageClassReducedRedundancy( * Archive storage classes have different behavior * from general storage classes */ - @MethodSource("params") - @ParameterizedTest(name = "fast-upload-buffer-{0}") - public void testCreateAndCopyObjectWithStorageClassGlacier( - String pFastUploadBufferType) throws Throwable { - initITestS3AStorageClass(pFastUploadBufferType); + @Test + public void testCreateAndCopyObjectWithStorageClassGlacier() throws Throwable { Configuration conf = this.createConfiguration(); conf.set(STORAGE_CLASS, STORAGE_CLASS_GLACIER); S3AContract contract = (S3AContract) createContract(conf); @@ -168,11 +162,8 @@ public void testCreateAndCopyObjectWithStorageClassGlacier( * Verify object can be created and copied correctly * with completely invalid storage class */ - @MethodSource("params") - @ParameterizedTest(name = "fast-upload-buffer-{0}") - public void testCreateAndCopyObjectWithStorageClassInvalid( - String pFastUploadBufferType) throws Throwable { - initITestS3AStorageClass(pFastUploadBufferType); + @Test + public void testCreateAndCopyObjectWithStorageClassInvalid() throws Throwable { Configuration conf = this.createConfiguration(); conf.set(STORAGE_CLASS, "testing"); S3AContract contract = (S3AContract) createContract(conf); @@ -196,11 +187,8 @@ public void testCreateAndCopyObjectWithStorageClassInvalid( * Verify object can be created and copied correctly * with empty string configuration */ - @MethodSource("params") - @ParameterizedTest(name = "fast-upload-buffer-{0}") - public void testCreateAndCopyObjectWithStorageClassEmpty( - String pFastUploadBufferType) throws Throwable { - 
initITestS3AStorageClass(pFastUploadBufferType);
+  @Test
+  public void testCreateAndCopyObjectWithStorageClassEmpty() throws Throwable {
     Configuration conf = this.createConfiguration();
     conf.set(STORAGE_CLASS, "");
     S3AContract contract = (S3AContract) createContract(conf);
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATestUtils.java
index b7a44d142e764..8d23bcce74ba7 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ATestUtils.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.fs.s3a;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.tags.IntegrationTest;
+
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -31,6 +33,7 @@
  * Test the test utils. Why an integration test? It's needed to
  * verify property pushdown.
  */
+@IntegrationTest
 public class ITestS3ATestUtils extends Assertions {
   private static final Logger LOG =
       LoggerFactory.getLogger(ITestS3ATestUtils.class);
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index c9d71fa881af5..17159b901e256 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.s3a;

+import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -1197,6 +1198,17 @@ public static Configuration setPerformanceFlags(final Configuration conf,
     return conf;
   }

+  /**
+   * Close the filesystem if it is not in the cache.
+   * @param fs filesystem to check; may be null.
+   */
+  public static void maybeCloseFilesystem(final S3AFileSystem fs) {
+    if (fs != null && fs.getConf().getBoolean(FS_S3A_IMPL_DISABLE_CACHE,
+        false)) {
+      IOUtils.closeQuietly(fs);
+    }
+  }
+
   /**
    * Helper class to do diffs of metrics.
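The new `maybeCloseFilesystem()` helper centralizes the guarded-close idiom that the seek suite previously inlined in its `teardown()`: only close the instance when the suite explicitly disabled the FS cache, because a cached instance is shared with later suites. The idiom in isolation, with the cache-disable key written out:

```java
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileSystem;

public final class GuardedClose {

  private GuardedClose() {
  }

  /**
   * Close the filesystem only when the test created a private,
   * uncached instance; cached instances stay open for other suites.
   */
  public static void maybeClose(final FileSystem fs) {
    if (fs != null
        && fs.getConf().getBoolean("fs.s3a.impl.disable.cache", false)) {
      // swallow any IOException raised while closing
      IOUtils.closeQuietly(fs);
    }
  }
}
```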
*/ diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditSpanLifecycle.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditSpanLifecycle.java index 082b03c16c030..d783b6445f377 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditSpanLifecycle.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestAuditSpanLifecycle.java @@ -39,6 +39,7 @@ public class TestAuditSpanLifecycle extends AbstractAuditingTest { private AuditSpan resetSpan; @BeforeEach + @Override public void setup() throws Exception { super.setup(); resetSpan = getManager().getActiveAuditSpan(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestHttpReferrerAuditHeader.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestHttpReferrerAuditHeader.java index 9c2870dc0d5f7..42bdc7ab44f15 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestHttpReferrerAuditHeader.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestHttpReferrerAuditHeader.java @@ -71,6 +71,7 @@ public class TestHttpReferrerAuditHeader extends AbstractAuditingTest { private LoggingAuditor auditor; @BeforeEach + @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestLoggingAuditor.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestLoggingAuditor.java index eeb2d22d6088d..ecec6e85805d9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestLoggingAuditor.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/TestLoggingAuditor.java @@ -61,6 +61,7 @@ public class TestLoggingAuditor extends AbstractAuditingTest { private LoggingAuditor auditor; @BeforeEach + @Override public void setup() throws Exception { super.setup(); auditor = (LoggingAuditor) getManager().getAuditor(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java index 1e7765801bff5..1a4d354d5edc8 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestCustomSigner.java @@ -28,7 +28,9 @@ import java.util.concurrent.atomic.AtomicInteger; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import software.amazon.awssdk.auth.signer.Aws4Signer; import software.amazon.awssdk.auth.signer.AwsS3V4Signer; @@ -53,6 +55,7 @@ import org.apache.hadoop.fs.s3a.auth.delegation.DelegationTokenProvider; import org.apache.hadoop.security.UserGroupInformation; +import static org.apache.hadoop.fs.s3a.Constants.CHECKSUM_ALGORITHM; import static org.apache.hadoop.fs.s3a.Constants.CUSTOM_SIGNERS; import static org.apache.hadoop.fs.s3a.Constants.ENABLE_MULTI_DELETE; import static org.apache.hadoop.fs.s3a.Constants.SIGNING_ALGORITHM_S3; @@ -65,6 +68,8 @@ * Because the v2 sdk has had some problems with bulk delete * and custom signing, this suite is parameterized. 
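The `@BeforeEach @Override` additions to the audit tests above follow from the same inheritance rule as `@Test`: an overriding `setup()` is only treated as a lifecycle method if it carries `@BeforeEach` itself, and it must chain to `super.setup()` explicitly. A minimal sketch:

```java
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

abstract class AbstractAuditExample {

  protected boolean managerReady;

  @BeforeEach
  public void setup() throws Exception {
    managerReady = true;
  }
}

class AuditSpanExample extends AbstractAuditExample {

  private String activeSpan;

  // without @BeforeEach here, neither this method nor the superclass
  // version would run before each test
  @BeforeEach
  @Override
  public void setup() throws Exception {
    super.setup();
    activeSpan = "reset-span";
  }

  @Test
  void testSetupRan() {
    Assertions.assertTrue(managerReady);
    Assertions.assertNotNull(activeSpan);
  }
}
```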
*/
+@ParameterizedClass(name="bulk-delete={0}")
+@MethodSource("params")
 public class ITestCustomSigner extends AbstractS3ATestBase {

   private static final Logger LOG = LoggerFactory
@@ -83,7 +88,7 @@ public static Collection params() {
     });
   }

-  private boolean bulkDelete;
+  private final boolean bulkDelete;

   private final UserGroupInformation ugi1 =
       UserGroupInformation.createRemoteUser("user1");
@@ -93,14 +98,22 @@ public static Collection params() {

   private String endpoint;

-  public void initITestCustomSigner(
+  public ITestCustomSigner(
       final String ignored,
-      final boolean pBulkDelete) throws Exception {
-    this.bulkDelete = pBulkDelete;
-    setup();
+      final boolean bulkDelete) {
+    this.bulkDelete = bulkDelete;
   }

   @Override
+  protected Configuration createConfiguration() {
+    final Configuration conf = super.createConfiguration();
+    // the custom signer doesn't work with checksum algorithms
+    removeBaseAndBucketOverrides(conf, CHECKSUM_ALGORITHM);
+    return conf;
+  }
+
+  @Override
+  @BeforeEach
   public void setup() throws Exception {
     super.setup();
     final S3AFileSystem fs = getFileSystem();
@@ -127,11 +140,10 @@ public void teardown() throws Exception {
     FileSystem.closeAllForUGI(ugi2);
   }

-  @MethodSource("params")
-  @ParameterizedTest(name = "{0}")
-  public void testCustomSignerAndInitializer(final String ignored,
-      final boolean pBulkDelete) throws Exception {
-    initITestCustomSigner(ignored, pBulkDelete);
+  @Test
+  public void testCustomSignerAndInitializer()
+      throws IOException, InterruptedException {
+
     final Path basePath = path(getMethodName());
     FileSystem fs1 = runStoreOperationsAndVerify(ugi1,
         new Path(basePath, "customsignerpath1"), "id1");
@@ -221,7 +233,7 @@ private Configuration createTestConfig(String identifier) {
       // Having the checksum algorithm in this test causes
       // x-amz-sdk-checksum-algorithm specified, but no corresponding
       // x-amz-checksum-* or x-amz-trailer headers were found
-      conf.unset(Constants.CHECKSUM_ALGORITHM);
+      conf.unset(CHECKSUM_ALGORITHM);

       // make absolutely sure there is no caching.
       disableFilesystemCaching(conf);
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestRoleCredentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestRoleCredentials.java
index ffcb2fb902b7e..11f7201c61d8a 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestRoleCredentials.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestRoleCredentials.java
@@ -18,12 +18,17 @@
 package org.apache.hadoop.fs.s3a.auth.delegation;

+import org.apache.hadoop.test.tags.LoadTest;
+import org.apache.hadoop.test.tags.ScaleTest;
+
 import static org.apache.hadoop.fs.s3a.auth.delegation.DelegationConstants.DELEGATION_TOKEN_ROLE_BINDING;

 /**
  * This looks at the cost of assume role, to see if it is more expensive
  * than creating simple session credentials.
*/ +@LoadTest +@ScaleTest public class ILoadTestRoleCredentials extends ILoadTestSessionCredentials { @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java index 317fc2e2edd15..2fe229937e294 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java @@ -28,6 +28,8 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; +import org.apache.hadoop.test.tags.LoadTest; +import org.apache.hadoop.test.tags.ScaleTest; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -78,6 +80,8 @@ * @see * AWS STS login throttling statistics */ +@LoadTest +@ScaleTest public class ILoadTestSessionCredentials extends S3AScaleTestBase { private static final Logger LOG = diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestDelegatedMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestDelegatedMRJob.java index e53772a3b9287..119600e81b241 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestDelegatedMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ITestDelegatedMRJob.java @@ -23,7 +23,9 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,6 +88,8 @@ * This is needed to verify that job resources have their tokens extracted * too. */ +@ParameterizedClass(name="token={0}") +@MethodSource("params") public class ITestDelegatedMRJob extends AbstractDelegationIT { private static final Logger LOG = @@ -97,11 +101,11 @@ public class ITestDelegatedMRJob extends AbstractDelegationIT { @SuppressWarnings("StaticNonFinalField") private static MiniKerberizedHadoopCluster cluster; - private String name; + private final String name; - private String tokenBinding; + private final String tokenBinding; - private Text tokenKind; + private final Text tokenKind; /** * Created in test setup. 
@@ -134,12 +138,10 @@ public static Collection params() { }); } - public void initITestDelegatedMRJob(String pName, String pTokenBinding, Text pTokenKind) - throws Exception { - this.name = pName; - this.tokenBinding = pTokenBinding; - this.tokenKind = pTokenKind; - setup(); + public ITestDelegatedMRJob(String name, String tokenBinding, Text tokenKind) { + this.name = name; + this.tokenBinding = tokenBinding; + this.tokenKind = tokenKind; } /*** @@ -191,6 +193,7 @@ protected YarnConfiguration getConfiguration() { } @Override + @BeforeEach public void setup() throws Exception { cluster.loginPrincipal(); super.setup(); @@ -242,11 +245,8 @@ protected int getTestTimeoutMillis() { return getTestTimeoutSeconds() * 1000; } - @MethodSource("params") - @ParameterizedTest - public void testCommonCrawlLookup(String pName, String pTokenBinding, - Text pTokenKind) throws Throwable { - initITestDelegatedMRJob(pName, pTokenBinding, pTokenKind); + @Test + public void testCommonCrawlLookup() throws Throwable { FileSystem resourceFS = extraJobResourcePath.getFileSystem( getConfiguration()); FileStatus status = resourceFS.getFileStatus(extraJobResourcePath); @@ -254,11 +254,8 @@ public void testCommonCrawlLookup(String pName, String pTokenBinding, assertTrue(status.isEncrypted(), "Not encrypted: " + status); } - @MethodSource("params") - @ParameterizedTest - public void testJobSubmissionCollectsTokens(String pName, String pTokenBinding, - Text pTokenKind) throws Exception { - initITestDelegatedMRJob(pName, pTokenBinding, pTokenKind); + @Test + public void testJobSubmissionCollectsTokens() throws Exception { describe("Mock Job test"); JobConf conf = new JobConf(getConfiguration()); if (isUsingDefaultExternalDataFile(conf)) { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java index cb250ff43ae40..3ae00d3223c6c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractYarnClusterITest.java @@ -25,6 +25,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -226,6 +227,7 @@ protected FileSystem getClusterFS() throws IOException { * the user's home directory, as that is often rejected by CI test * runners. 
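The change that follows replaces a hand-rolled `File.createTempFile()` call in `setup()` (which creates a file, not the directory the field is meant to hold) with JUnit Jupiter's `@TempDir` injection, which provisions a fresh directory per test and deletes it afterwards. In isolation:

```java
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class StagingDirExample {

  // injected before each test and cleaned up afterwards by the engine
  @TempDir
  File stagingFilesDir;

  @Test
  void testStagingDirUsable() throws Exception {
    File part = new File(stagingFilesDir, "part-0000");
    Files.write(part.toPath(), "data".getBytes(StandardCharsets.UTF_8));
    Assertions.assertTrue(part.isFile());
    // the directory and its contents are removed after the test
  }
}
```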
*/ + @TempDir public File stagingFilesDir; /** @@ -259,7 +261,6 @@ public void setup() throws Exception { assertNotNull( getClusterBinding(), "cluster is not bound"); String methodName = getMethodName(); - stagingFilesDir = File.createTempFile(methodName, ""); } @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestS3ACommitterFactory.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestS3ACommitterFactory.java index ed3deb38de480..2956bfc3baa7e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestS3ACommitterFactory.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestS3ACommitterFactory.java @@ -22,7 +22,9 @@ import java.util.Arrays; import java.util.Collection; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,9 +55,13 @@ /** * Tests for the committer factory creation/override process. */ +@ParameterizedClass(name="committer={3}") +@MethodSource("params") public final class ITestS3ACommitterFactory extends AbstractCommitITest { + private static final Logger LOG = LoggerFactory.getLogger( ITestS3ACommitterFactory.class); + /** * Name for invalid committer: {@value}. */ @@ -114,18 +120,18 @@ public static Collection params() { /** * Name of committer to set in filesystem config. If "" do not set one. */ - private String fsCommitterName; + private final String fsCommitterName; /** * Name of committer to set in job config. */ - private String jobCommitterName; + private final String jobCommitterName; /** * Expected committer class. * If null: an exception is expected */ - private Class committerClass; + private final Class committerClass; /** * Description from parameters, simply for thread names to be more informative. @@ -134,21 +140,20 @@ public static Collection params() { /** * Create a parameterized instance. - * @param pFsCommitterName committer to set in filesystem config - * @param pJobCommitterName committer to set in job config - * @param pCommitterClass expected committer class - * @param pDescription debug text for thread names. + * @param fsCommitterName committer to set in filesystem config + * @param jobCommitterName committer to set in job config + * @param committerClass expected committer class + * @param description debug text for thread names. */ - public void initITestS3ACommitterFactory( - final String pFsCommitterName, - final String pJobCommitterName, - final Class pCommitterClass, - final String pDescription) throws Exception { - this.fsCommitterName = pFsCommitterName; - this.jobCommitterName = pJobCommitterName; - this.committerClass = pCommitterClass; - this.description = pDescription; - setup(); + public ITestS3ACommitterFactory( + final String fsCommitterName, + final String jobCommitterName, + final Class committerClass, + final String description) { + this.fsCommitterName = fsCommitterName; + this.jobCommitterName = jobCommitterName; + this.committerClass = committerClass; + this.description = description; } @Override @@ -176,6 +181,7 @@ private static void maybeSetCommitterName(final Configuration conf, final String } @Override + @BeforeEach public void setup() throws Exception { // destroy all filesystems from previous runs. 
FileSystem.closeAllForUGI(UserGroupInformation.getCurrentUser()); @@ -210,14 +216,8 @@ protected void deleteTestDirInTeardown() { * Verify that if all config options are unset, the FileOutputCommitter * is returned. */ - @MethodSource("params") - @ParameterizedTest(name = "{3}-fs=[{0}]-task=[{1}]-[{2}]") - public void testBinding(String pFsCommitterName, - String pJobCommitterName, - Class pCommitterClass, - String pDescription) throws Throwable { - initITestS3ACommitterFactory(pFsCommitterName, pJobCommitterName, pCommitterClass, - pDescription); + @Test + public void testBinding() throws Throwable { assertFactoryCreatesExpectedCommitter(committerClass); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestUploadRecovery.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestUploadRecovery.java index 6e0970207f742..c5144a466a935 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestUploadRecovery.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestUploadRecovery.java @@ -29,7 +29,8 @@ import org.assertj.core.api.Assumptions; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -68,7 +69,7 @@ * Test upload recovery by injecting failures into the response chain. * The tests are parameterized on upload buffering. *

- * The test case {@link #testCommitOperations(String, boolean)} is independent of this option; + * The test case {@link #testCommitOperations()} is independent of this option; * the test parameterization only runs this once. * A bit inelegant but as the fault injection code is shared and the problem "adjacent" * this isolates all forms of upload recovery into the same test class without @@ -76,6 +77,8 @@ *

* Fault injection is implemented in {@link SdkFaultInjector}. */ +@ParameterizedClass(name="buffer={0}-commit-test={1}") +@MethodSource("params") public class ITestUploadRecovery extends AbstractS3ACostTest { private static final Logger LOG = @@ -102,22 +105,21 @@ public static Collection params() { /** * should the commit test be included? */ - private boolean includeCommitTest; + private final boolean includeCommitTest; /** * Buffer type for this test run. */ - private String buffer; + private final String buffer; /** * Parameterized test suite. - * @param pBuffer buffer type - * @param pIncludeCommitTest should the commit upload test be included? + * @param buffer buffer type + * @param includeCommitTest should the commit upload test be included? */ - public void initITestUploadRecovery(final String pBuffer, - final boolean pIncludeCommitTest) { - this.includeCommitTest = pIncludeCommitTest; - this.buffer = pBuffer; + public ITestUploadRecovery(final String buffer, final boolean includeCommitTest) { + this.includeCommitTest = includeCommitTest; + this.buffer = buffer; } @Override @@ -172,11 +174,8 @@ public void teardown() throws Exception { /** * Verify that failures of simple PUT requests can be recovered from. */ - @MethodSource("params") - @ParameterizedTest(name = "{0}-commit-{1}") - public void testPutRecovery(String pBuffer, - boolean pIncludeCommitTest) throws Throwable { - initITestUploadRecovery(pBuffer, pIncludeCommitTest); + @Test + public void testPutRecovery() throws Throwable { describe("test put recovery"); final S3AFileSystem fs = getFileSystem(); final Path path = methodPath(); @@ -192,11 +191,8 @@ public void testPutRecovery(String pBuffer, /** * Validate recovery of multipart uploads within a magic write sequence. */ - @MethodSource("params") - @ParameterizedTest(name = "{0}-commit-{1}") - public void testMagicWriteRecovery(String pBuffer, - boolean pIncludeCommitTest) throws Throwable { - initITestUploadRecovery(pBuffer, pIncludeCommitTest); + @Test + public void testMagicWriteRecovery() throws Throwable { describe("test magic write recovery with multipart uploads"); final S3AFileSystem fs = getFileSystem(); @@ -235,11 +231,8 @@ public void testMagicWriteRecovery(String pBuffer, /** * Test the commit operations iff {@link #includeCommitTest} is true. 
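The commit test that follows only makes sense for some parameter sets, so it aborts via an assumption rather than failing: an assumption violation marks the test as skipped. A sketch of the AssertJ form used here, with the flag source as an illustrative stand-in:

```java
import static org.assertj.core.api.Assumptions.assumeThat;

import org.junit.jupiter.api.Test;

class ConditionalCaseExample {

  // stand-in for the includeCommitTest class parameter
  private final boolean includeCommitTest = Boolean.getBoolean("commit.test");

  @Test
  void testCommitOperations() {
    assumeThat(includeCommitTest)
        .describedAs("commit test excluded")
        .isTrue();
    // body runs only for parameter sets that include the commit test
  }
}
```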
*/ - @MethodSource("params") - @ParameterizedTest(name = "{0}-commit-{1}") - public void testCommitOperations(String pBuffer, - boolean pIncludeCommitTest) throws Throwable { - initITestUploadRecovery(pBuffer, pIncludeCommitTest); + @Test + public void testCommitOperations() throws Throwable { skipIfClientSideEncryption(); Assumptions.assumeThat(includeCommitTest) .describedAs("commit test excluded") diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java index 24da4b27b5676..4903c0132da51 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/integration/ITestS3ACommitterMRJob.java @@ -37,10 +37,12 @@ import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.util.Sets; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -126,7 +128,7 @@ * outcome. * *

- * {@link #test_500(CommitterTestBinding)} test is relayed to
+ * {@link #test_500()} test is relayed to
 * {@link CommitterTestBinding#test_500()}, for any post-MR-job tests.
 *
 *
@@ -138,6 +140,8 @@
 * instance.
 */
 @TestMethodOrder(MethodOrderer.Alphanumeric.class)
+@ParameterizedClass(name="binding={0}")
+@MethodSource("params")
 public class ITestS3ACommitterMRJob extends AbstractYarnClusterITest {

   private static final Logger LOG =
@@ -159,19 +163,19 @@ public static Collection params() {
   /**
    * The committer binding for this instance.
    */
-  private CommitterTestBinding committerTestBinding;
+  private final CommitterTestBinding committerTestBinding;

   /**
    * Parameterized constructor.
-   * @param pCommitterTestBinding binding for the test.
+   * @param committerTestBinding binding for the test.
    */
-  public void initITestS3ACommitterMRJob(
-      final CommitterTestBinding pCommitterTestBinding) throws Exception {
-    this.committerTestBinding = pCommitterTestBinding;
-    setup();
+  public ITestS3ACommitterMRJob(
+      final CommitterTestBinding committerTestBinding) {
+    this.committerTestBinding = committerTestBinding;
   }

   @Override
+  @BeforeEach
   public void setup() throws Exception {
     super.setup();
     // configure the test binding for this specific test case.
@@ -193,25 +197,19 @@ protected String committerName() {
   /**
    * Verify that the committer binding is happy.
    */
-  @MethodSource("params")
-  @ParameterizedTest(name = "{0}")
-  public void test_000(CommitterTestBinding pCommitterTestBinding) throws Throwable {
-    initITestS3ACommitterMRJob(pCommitterTestBinding);
+  @Test
+  public void test_000() throws Throwable {
     committerTestBinding.validate();
   }

-  @MethodSource("params")
-  @ParameterizedTest(name = "{0}")
-  public void test_100(CommitterTestBinding pCommitterTestBinding) throws Throwable {
-    initITestS3ACommitterMRJob(pCommitterTestBinding);
+  @Test
+  public void test_100() throws Throwable {
     committerTestBinding.test_100();
   }

-  @MethodSource("params")
-  @ParameterizedTest(name = "{0}")
-  public void test_200_execute(CommitterTestBinding pCommitterTestBinding,
+  @Test
+  public void test_200_execute(
       @TempDir java.nio.file.Path localFilesDir) throws Exception {
-    initITestS3ACommitterMRJob(pCommitterTestBinding);
     describe("Run an MR with committer %s", committerName());

     S3AFileSystem fs = getFileSystem();
@@ -359,10 +357,8 @@ protected void customPostExecutionValidation(final Path destPath,
   /**
    * This is the extra test which committer test bindings can add.
    */
-  @MethodSource("params")
-  @ParameterizedTest(name = "{0}")
-  public void test_500(CommitterTestBinding pCommitterTestBinding) throws Throwable {
-    initITestS3ACommitterMRJob(pCommitterTestBinding);
+  @Test
+  public void test_500() throws Throwable {
     committerTestBinding.test_500();
   }

@@ -499,8 +495,7 @@ protected void validateResult(Path destPath,
     }

     /**
-     * A test to run before the main
-     * {@link #test_200_execute(CommitterTestBinding, java.nio.file.Path)} test is
+     * A test to run before the main {@link #test_200_execute(java.nio.file.Path)} test is
      * invoked.
      * @throws Throwable failure.
      */
@@ -509,8 +504,7 @@ void test_100() throws Throwable {
     }

     /**
-     * A test to run after the main
-     * {@link #test_200_execute(CommitterTestBinding, java.nio.file.Path)} test is
+     * A test to run after the main {@link #test_200_execute(java.nio.file.Path)} test is
      * invoked.
      * @throws Throwable failure.
      */
@@ -520,7 +514,7 @@ void test_500() throws Throwable {

     /**
      * Validate the state of the binding.
- * This is called in {@link #test_000(CommitterTestBinding)} so will + * This is called in {@link #test_000()} so will * fail independently of the other tests. * @throws Throwable failure. */ diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java index b2c8ab28efaec..6de1a25f7b3ac 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestMagicCommitProtocol.java @@ -25,7 +25,6 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.assertj.core.api.Assertions; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; @@ -41,8 +40,10 @@ import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; + +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides; @@ -50,10 +51,13 @@ import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*; import static org.apache.hadoop.fs.s3a.commit.impl.CommitUtilsWithMR.getMagicJobPath; import static org.apache.hadoop.util.functional.RemoteIterators.toList; +import static org.assertj.core.api.Assertions.assertThat; /** * Test the magic committer's commit protocol. */ +@ParameterizedClass(name="track-commit-in-memory-{0}") +@MethodSource("params") public class ITestMagicCommitProtocol extends AbstractITCommitProtocol { private boolean trackCommitsInMemory; @@ -74,6 +78,7 @@ protected String getCommitterName() { } @Override + @BeforeEach public void setup() throws Exception { super.setup(); CommitUtils.verifyIsMagicCommitFS(getFileSystem()); @@ -86,10 +91,8 @@ public static Collection params() { }); } - public void initITestMagicCommitProtocol(boolean pTrackCommitsInMemory) - throws Exception { - this.trackCommitsInMemory = pTrackCommitsInMemory; - setup(); + public ITestMagicCommitProtocol(boolean trackCommitsInMemory) { + this.trackCommitsInMemory = trackCommitsInMemory; } @Override @@ -128,7 +131,7 @@ protected void validateTaskAttemptPathDuringWrite(Path p, final long expectedLength, String jobId) throws IOException { String pathStr = p.toString(); - Assertions.assertThat(pathStr) + assertThat(pathStr) .describedAs("Magic path") .contains("/" + MAGIC_PATH_PREFIX + jobId + "/"); assertPathDoesNotExist("task attempt visible", p); @@ -149,9 +152,9 @@ protected void validateTaskAttemptPathAfterWrite(Path marker, List filtered = toList(listAndFilter(fs, marker.getParent(), false, (path) -> path.getName().equals(name))); - Assertions.assertThat(filtered) + assertThat(filtered) .hasSize(1); - Assertions.assertThat(filtered.get(0)) + assertThat(filtered.get(0)) .matches(lst -> lst.getLen() == 0, "Listing should return 0 byte length"); @@ -174,7 +177,7 @@ protected void validateTaskAttemptWorkingDirectory( assertEquals("s3a", wd.getScheme(), "Wrong schema for working dir " + wd + " with committer " + committer); - Assertions.assertThat(wd.getPath()) + assertThat(wd.getPath()) .contains("/" + MAGIC_PATH_PREFIX + committer.getUUID() + "/"); 
} @@ -183,10 +186,8 @@ protected void validateTaskAttemptWorkingDirectory( * committer UUID to ensure uniqueness in the case of more than * one job writing to the same destination path. */ - @MethodSource("params") - @ParameterizedTest(name = "track-commit-in-memory-{0}") - public void testCommittersPathsHaveUUID(boolean pTrackCommitsInMemory) throws Throwable { - initITestMagicCommitProtocol(pTrackCommitsInMemory); + @Test + public void testCommittersPathsHaveUUID() throws Throwable { TaskAttemptContext tContext = new TaskAttemptContextImpl( getConfiguration(), getTaskAttempt0()); @@ -195,7 +196,7 @@ public void testCommittersPathsHaveUUID(boolean pTrackCommitsInMemory) throws Th String ta0 = getTaskAttempt0().toString(); // magic path for the task attempt Path taskAttemptPath = committer.getTaskAttemptPath(tContext); - Assertions.assertThat(taskAttemptPath.toString()) + assertThat(taskAttemptPath.toString()) .describedAs("task path of %s", committer) .contains(committer.getUUID()) .contains("/" + MAGIC_PATH_PREFIX + committer.getUUID() + "/") @@ -206,7 +207,7 @@ public void testCommittersPathsHaveUUID(boolean pTrackCommitsInMemory) throws Th // temp path for files which the TA will create with an absolute path // and which need renaming into place. Path tempTaskAttemptPath = committer.getTempTaskAttemptPath(tContext); - Assertions.assertThat(tempTaskAttemptPath.toString()) + assertThat(tempTaskAttemptPath.toString()) .describedAs("Temp task path of %s", committer) .contains(committer.getUUID()) .contains(TEMP_DATA) diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java index 45d7469f7970a..edcc7bfbf8310 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/magic/ITestS3AHugeMagicCommits.java @@ -24,7 +24,9 @@ import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,6 +44,7 @@ import org.apache.hadoop.fs.s3a.commit.impl.CommitOperations; import org.apache.hadoop.fs.s3a.scale.AbstractSTestS3AHugeFiles; import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.test.tags.ScaleTest; import static org.apache.hadoop.fs.s3a.MultipartTestUtils.listMultipartUploads; import static org.apache.hadoop.fs.s3a.Statistic.MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED; @@ -57,6 +60,8 @@ * * This is a scale test. 
*/ +@ScaleTest +@TestMethodOrder(MethodOrderer.Alphanumeric.class) public class ITestS3AHugeMagicCommits extends AbstractSTestS3AHugeFiles { private static final Logger LOG = LoggerFactory.getLogger( ITestS3AHugeMagicCommits.class); @@ -136,6 +141,7 @@ public void test_000_CleanupPendingUploads() throws IOException { LOG.info("Aborted {} uploads under {}", count, key); } + @Test @Override public void test_030_postCreationAssertions() throws Throwable { describe("Committing file"); @@ -191,21 +197,25 @@ private void skipQuietly(String text) { describe("Skipping: %s", text); } + @Test @Override public void test_040_PositionedReadHugeFile() { skipQuietly("test_040_PositionedReadHugeFile"); } + @Test @Override public void test_050_readHugeFile() { skipQuietly("readHugeFile"); } + @Test @Override public void test_100_renameHugeFile() { skipQuietly("renameHugeFile"); } + @Test @Override public void test_800_DeleteHugeFiles() throws IOException { if (getFileSystem() != null) { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java index 5661a4969f3f5..bc2bda09ec161 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java @@ -31,7 +31,9 @@ import java.util.UUID; import java.util.stream.Collectors; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest; @@ -81,6 +83,8 @@ * The main unit test suite of the staging committer. * Parameterized on thread count and unique filename policy. 
*/ +@ParameterizedClass(name="threads-{0}-unique-{1}") +@MethodSource("params") public class TestStagingCommitter extends StagingTestBase.MiniDFSTest { private static final JobID JOB_ID = new JobID("job", 1); @@ -94,8 +98,8 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest { private static final Logger LOG = LoggerFactory.getLogger(TestStagingCommitter.class); - private int numThreads; - private boolean uniqueFilenames; + private final int numThreads; + private final boolean uniqueFilenames; private JobContext job = null; private TaskAttemptContext tac = null; private Configuration conf = null; @@ -134,12 +138,12 @@ public static Collection params() { }); } - public void initTestStagingCommitter(int pNumThreads, boolean pNniqueFilenames) throws Exception { - this.numThreads = pNumThreads; - this.uniqueFilenames = pNniqueFilenames; - setupCommitter(); + public TestStagingCommitter(int numThreads, boolean uniqueFilenames) { + this.numThreads = numThreads; + this.uniqueFilenames = uniqueFilenames; } + @BeforeEach public void setupCommitter() throws Exception { JobConf jobConf = getConfiguration(); jobConf.setInt(FS_S3A_COMMITTER_THREADS, numThreads); @@ -198,10 +202,8 @@ private Configuration newConfig() { return new Configuration(false); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testMockFSclientWiredUp(int pNumThreads, boolean pNniqueFilenames) throws Throwable { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testMockFSclientWiredUp() throws Throwable { final S3Client client = mockFS.getS3AInternals().getAmazonS3Client("test"); assertThat(client) .describedAs("S3Client from FS") @@ -209,10 +211,8 @@ public void testMockFSclientWiredUp(int pNumThreads, boolean pNniqueFilenames) t .isSameAs(mockClient); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testUUIDPropagation(int pNumThreads, boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testUUIDPropagation() throws Exception { Configuration config = newConfig(); String uuid = uuid(); config.set(SPARK_WRITE_UUID, uuid); @@ -228,10 +228,8 @@ public void testUUIDPropagation(int pNumThreads, boolean pNniqueFilenames) throw * If the Spark UUID is required, then binding will fail * if a UUID did not get passed in. */ - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testUUIDValidation(int pNumThreads, boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testUUIDValidation() throws Exception { Configuration config = newConfig(); config.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true); intercept(PathCommitException.class, E_NO_SPARK_UUID, () -> @@ -241,10 +239,8 @@ public void testUUIDValidation(int pNumThreads, boolean pNniqueFilenames) throws /** * Validate ordering of UUID retrieval. 
*/ - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testUUIDLoadOrdering(int pNumThreads, boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testUUIDLoadOrdering() throws Exception { Configuration config = newConfig(); config.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true); String uuid = uuid(); @@ -262,10 +258,8 @@ public void testUUIDLoadOrdering(int pNumThreads, boolean pNniqueFilenames) thro * Verify that unless the config enables self-generation, JobIDs * are used. */ - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testJobIDIsUUID(int pNumThreads, boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testJobIDIsUUID() throws Exception { Configuration config = newConfig(); Pair t3 = AbstractS3ACommitter .buildJobUUID(config, JOB_ID); @@ -279,10 +273,8 @@ public void testJobIDIsUUID(int pNumThreads, boolean pNniqueFilenames) throws Ex * Verify self-generated UUIDs are supported when enabled, * and come before JobID. */ - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testSelfGeneratedUUID(int pNumThreads, boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testSelfGeneratedUUID() throws Exception { Configuration config = newConfig(); config.setBoolean(FS_S3A_COMMITTER_GENERATE_UUID, true); Pair t3 = AbstractS3ACommitter @@ -312,11 +304,8 @@ private String uuid() { return UUID.randomUUID().toString(); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testAttemptPathConstructionNoSchema(int pNumThreads, boolean pNniqueFilenames) - throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testAttemptPathConstructionNoSchema() throws Exception { Configuration config = newConfig(); final String jobUUID = addUUID(config); config.set(BUFFER_DIR, "/tmp/mr-local-0,/tmp/mr-local-1"); @@ -328,11 +317,8 @@ public void testAttemptPathConstructionNoSchema(int pNumThreads, boolean pNnique .contains(jobUUID); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testAttemptPathsDifferentByTaskAttempt(int pNumThreads, boolean pNniqueFilenames) - throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testAttemptPathsDifferentByTaskAttempt() throws Exception { Configuration config = newConfig(); final String jobUUID = addUUID(config); config.set(BUFFER_DIR, "file:/tmp/mr-local-0"); @@ -345,11 +331,8 @@ public void testAttemptPathsDifferentByTaskAttempt(int pNumThreads, boolean pNni .isNotEqualTo(attempt1Path); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testAttemptPathConstructionWithSchema(int pNumThreads, boolean pNniqueFilenames) - throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testAttemptPathConstructionWithSchema() throws Exception { Configuration config = newConfig(); final String jobUUID = addUUID(config); String commonPath = "file:/tmp/mr-local-"; @@ -364,11 +347,8 @@ public void testAttemptPathConstructionWithSchema(int pNumThreads, boolean pNniq .startsWith(commonPath); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void 
testAttemptPathConstructionWrongSchema(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testAttemptPathConstructionWrongSchema() throws Exception { Configuration config = newConfig(); final String jobUUID = addUUID(config); config.set(BUFFER_DIR, @@ -378,11 +358,8 @@ public void testAttemptPathConstructionWrongSchema(int pNumThreads, tac.getTaskAttemptID())); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testCommitPathConstruction(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testCommitPathConstruction() throws Exception { Path committedTaskPath = committer.getCommittedTaskPath(tac); assertEquals("hdfs", committedTaskPath.toUri().getScheme(), "Path should be in HDFS: " + committedTaskPath); @@ -391,11 +368,8 @@ public void testCommitPathConstruction(int pNumThreads, "Did not end with \"" + ending +"\" :" + committedTaskPath); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testSingleTaskCommit(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testSingleTaskCommit() throws Exception { Path file = new Path(commitTask(committer, tac, 1).iterator().next()); List uploads = results.getUploads(); @@ -425,11 +399,8 @@ public void testSingleTaskCommit(int pNumThreads, * This originally verified that empty files weren't PUT. They are now. * @throws Exception on a failure */ - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testSingleTaskEmptyFileCommit(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testSingleTaskEmptyFileCommit() throws Exception { committer.setupTask(tac); Path attemptPath = committer.getTaskAttemptPath(tac); @@ -457,11 +428,8 @@ public void testSingleTaskEmptyFileCommit(int pNumThreads, assertEquals(1, pending.size(), "Should have one pending commit"); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testSingleTaskMultiFileCommit(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testSingleTaskMultiFileCommit() throws Exception { int numFiles = 3; Set files = commitTask(committer, tac, numFiles); @@ -494,11 +462,8 @@ public void testSingleTaskMultiFileCommit(int pNumThreads, assertEquals(files, keys, "Should write to the correct key"); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testTaskInitializeFailure(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testTaskInitializeFailure() throws Exception { committer.setupTask(tac); errors.failOnInit(1); @@ -525,11 +490,8 @@ public void testTaskInitializeFailure(int pNumThreads, attemptPath); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testTaskSingleFileUploadFailure(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testTaskSingleFileUploadFailure() throws Exception { describe("Set up a 
single file upload to fail on upload 2"); committer.setupTask(tac); @@ -557,11 +519,8 @@ public void testTaskSingleFileUploadFailure(int pNumThreads, attemptPath); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testTaskMultiFileUploadFailure(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testTaskMultiFileUploadFailure() throws Exception { committer.setupTask(tac); errors.failOnUpload(5); @@ -590,11 +549,8 @@ public void testTaskMultiFileUploadFailure(int pNumThreads, attemptPath); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testTaskUploadAndAbortFailure(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testTaskUploadAndAbortFailure() throws Exception { committer.setupTask(tac); errors.failOnUpload(5); @@ -623,11 +579,8 @@ public void testTaskUploadAndAbortFailure(int pNumThreads, assertPathDoesNotExist(fs, "Should remove the attempt path", attemptPath); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testSingleTaskAbort(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testSingleTaskAbort() throws Exception { committer.setupTask(tac); Path attemptPath = committer.getTaskAttemptPath(tac); @@ -646,11 +599,8 @@ public void testSingleTaskAbort(int pNumThreads, } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testJobCommit(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testJobCommit() throws Exception { Path jobAttemptPath = jobCommitter.getJobAttemptPath(job); FileSystem fs = jobAttemptPath.getFileSystem(conf); @@ -673,11 +623,8 @@ public void testJobCommit(int pNumThreads, } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testJobCommitFailure(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testJobCommitFailure() throws Exception { Path jobAttemptPath = jobCommitter.getJobAttemptPath(job); FileSystem fs = jobAttemptPath.getFileSystem(conf); @@ -724,11 +671,8 @@ public void testJobCommitFailure(int pNumThreads, assertPathDoesNotExist(fs, "jobAttemptPath not deleted", jobAttemptPath); } - @ParameterizedTest(name = "threads-{0}-unique-{1}") - @MethodSource("params") - public void testJobAbort(int pNumThreads, - boolean pNniqueFilenames) throws Exception { - initTestStagingCommitter(pNumThreads, pNniqueFilenames); + @Test + public void testJobAbort() throws Exception { Path jobAttemptPath = jobCommitter.getJobAttemptPath(job); FileSystem fs = jobAttemptPath.getFileSystem(conf); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java index a068431ed6169..536a158dbb4b1 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/terasort/ITestTerasortOnS3A.java @@ -31,8 +31,9 @@ import 
org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,6 +50,7 @@ import org.apache.hadoop.fs.s3a.commit.magic.MagicS3GuardCommitter; import org.apache.hadoop.fs.s3a.commit.staging.DirectoryStagingCommitter; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.test.tags.ScaleTest; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; @@ -56,8 +58,10 @@ import static java.util.Optional.empty; import static org.apache.hadoop.fs.s3a.S3ATestUtils.lsR; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assumptions.assumeTrue; + /** * Runs Terasort against S3A. * @@ -72,7 +76,10 @@ * Before anyone calls that out as slow: try running the test with the file * committer. */ +@ScaleTest +@ParameterizedClass(name="-{0}") @TestMethodOrder(MethodOrderer.Alphanumeric.class) +@MethodSource("params") @SuppressWarnings("StaticNonFinalField") public class ITestTerasortOnS3A extends AbstractYarnClusterITest { @@ -87,7 +94,7 @@ public class ITestTerasortOnS3A extends AbstractYarnClusterITest { /** * Duration tracker created in the first of the test cases and closed - * in {@link #test_140_teracomplete(String, boolean)}. + * in {@link #test_140_teracomplete()}. */ private static Optional terasortDuration = empty(); @@ -97,10 +104,10 @@ public class ITestTerasortOnS3A extends AbstractYarnClusterITest { private static Map completedStages = new HashMap<>(); /** Name of the committer for this run. */ - private String committerName; + private final String committerName; /** Should Magic committer track pending commits in-memory. */ - private boolean trackCommitsInMemory; + private final boolean trackCommitsInMemory; /** Base path for all the terasort input and output paths. */ private Path terasortPath; @@ -126,10 +133,9 @@ public static Collection params() { {MagicS3GuardCommitter.NAME, true}}); } - public void initITestTerasortOnS3A( - final String pCommitterName, final boolean pTrackCommitsInMemory) throws Exception { - this.committerName = pCommitterName; - this.trackCommitsInMemory = pTrackCommitsInMemory; + public ITestTerasortOnS3A(final String committerName, final boolean trackCommitsInMemory) { + this.committerName = committerName; + this.trackCommitsInMemory = trackCommitsInMemory; } @Override @@ -242,9 +248,11 @@ private void executeStage( d.close(); } dumpOutputTree(dest); - assertEquals(0, result, stage - + "(" + StringUtils.join(", ", args) + ")" - + " failed"); + assertThat(result) + .describedAs(stage + + "(" + StringUtils.join(", ", args) + ")" + + " failed") + .isEqualTo(0); validateSuccessFile(dest, committerName(), getFileSystem(), stage, minimumFileCount, ""); completedStage(stage, d); @@ -258,11 +266,8 @@ private void executeStage( * It is where all variables which need to be reset for each run need * to be reset. 
*/ - @MethodSource("params") - @ParameterizedTest(name = "{0}-memory={1}") - public void test_100_terasort_setup(String pCommitterName, - boolean pTrackCommitsInMemory) throws Throwable { - initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); + @Test + public void test_100_terasort_setup() throws Throwable { describe("Setting up for a terasort with path of %s", terasortPath); getFileSystem().delete(terasortPath, true); @@ -270,11 +275,8 @@ public void test_100_terasort_setup(String pCommitterName, terasortDuration = Optional.of(new DurationInfo(LOG, false, "Terasort")); } - @MethodSource("params") - @ParameterizedTest(name = "{0}-memory={1}") - public void test_110_teragen(String pCommitterName, - boolean pTrackCommitsInMemory) throws Throwable { - initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); + @Test + public void test_110_teragen() throws Throwable { describe("Teragen to %s", sortInput); getFileSystem().delete(sortInput, true); @@ -289,11 +291,8 @@ public void test_110_teragen(String pCommitterName, } - @MethodSource("params") - @ParameterizedTest(name = "{0}-memory={1}") - public void test_120_terasort(String pCommitterName, - boolean pTrackCommitsInMemory) throws Throwable { - initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); + @Test + public void test_120_terasort() throws Throwable { describe("Terasort from %s to %s", sortInput, sortOutput); requireStage("teragen"); getFileSystem().delete(sortOutput, true); @@ -309,11 +308,8 @@ public void test_120_terasort(String pCommitterName, 1); } - @MethodSource("params") - @ParameterizedTest(name = "{0}-memory={1}") - public void test_130_teravalidate(String pCommitterName, - boolean pTrackCommitsInMemory) throws Throwable { - initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); + @Test + public void test_130_teravalidate() throws Throwable { describe("TeraValidate from %s to %s", sortOutput, sortValidate); requireStage("terasort"); getFileSystem().delete(sortValidate, true); @@ -332,11 +328,8 @@ public void test_130_teravalidate(String pCommitterName, * Print the results, and save to the base dir as a CSV file. * Why there? Makes it easy to list and compare. */ - @MethodSource("params") - @ParameterizedTest(name = "{0}-memory={1}") - public void test_140_teracomplete(String pCommitterName, - boolean pTrackCommitsInMemory) throws Throwable { - initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); + @Test + public void test_140_teracomplete() throws Throwable { terasortDuration.ifPresent(d -> { d.close(); completedStage("overall", d); @@ -371,19 +364,13 @@ public void test_140_teracomplete(String pCommitterName, * Without this the total execution time is reported as from the start of * the first test suite to the end of the second. 
*/ - @MethodSource("params") - @ParameterizedTest(name = "{0}-memory={1}") - public void test_150_teracleanup(String pCommitterName, - boolean pTrackCommitsInMemory) throws Throwable { - initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); + @Test + public void test_150_teracleanup() throws Throwable { terasortDuration = Optional.empty(); } - @MethodSource("params") - @ParameterizedTest(name = "{0}-memory={1}") - public void test_200_directory_deletion(String pCommitterName, - boolean pTrackCommitsInMemory) throws Throwable { - initITestTerasortOnS3A(pCommitterName, pTrackCommitsInMemory); + @Test + public void test_200_directory_deletion() throws Throwable { getFileSystem().delete(terasortPath, true); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContext.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContext.java index 1d36600cfb24e..fd2652f803eaf 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContext.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContext.java @@ -23,12 +23,14 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.TestFileContext; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.junit.jupiter.api.Assertions.assertEquals; /** * Implementation of TestFileContext for S3a. */ +@IntegrationTest public class ITestS3AFileContext extends TestFileContext { @Test diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdir.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdir.java index 65ec8678012e8..da54eac32145f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdir.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdir.java @@ -16,6 +16,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContextCreateMkdirBaseTest; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; + import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.fs.s3a.S3ATestUtils.setPerformanceFlags; @@ -23,6 +25,7 @@ /** * Extends FileContextCreateMkdirBaseTest for a S3a FileContext. 
*/ +@IntegrationTest public class ITestS3AFileContextCreateMkdir extends FileContextCreateMkdirBaseTest { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdirCreatePerf.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdirCreatePerf.java index 1834476ae321d..5532fc6b7452d 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdirCreatePerf.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextCreateMkdirCreatePerf.java @@ -19,6 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContextCreateMkdirBaseTest; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.s3a.S3ATestUtils.setPerformanceFlags; import static org.apache.hadoop.test.LambdaTestUtils.intercept; @@ -27,6 +28,7 @@ * Extends FileContextCreateMkdirBaseTest for a S3a FileContext with * create performance mode. */ +@IntegrationTest public class ITestS3AFileContextCreateMkdirCreatePerf extends FileContextCreateMkdirBaseTest { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextMainOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextMainOperations.java index 1e7829c901d00..890adca0e919a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextMainOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextMainOperations.java @@ -26,12 +26,14 @@ import org.apache.hadoop.fs.FileContextTestHelper; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.s3a.S3ATestUtils.setPerformanceFlags; /** * S3A implementation of FileContextMainOperationsBaseTest. */ +@IntegrationTest public class ITestS3AFileContextMainOperations extends FileContextMainOperationsBaseTest { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextStatistics.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextStatistics.java index f81a3882d8918..484d1cc2c3121 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextStatistics.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextStatistics.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3ATestUtils; import org.apache.hadoop.fs.s3a.auth.STSClientFactory; +import org.apache.hadoop.test.tags.IntegrationTest; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -35,6 +36,7 @@ /** * S3a implementation of FCStatisticsBaseTest. 
*/ +@IntegrationTest public class ITestS3AFileContextStatistics extends FCStatisticsBaseTest { private static final Logger LOG = diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextURI.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextURI.java index 5674a463b6611..0a1687355d6de 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextURI.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextURI.java @@ -17,6 +17,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContextURIBase; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; + import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -26,6 +28,7 @@ /** * S3a implementation of FileContextURIBase. */ +@IntegrationTest public class ITestS3AFileContextURI extends FileContextURIBase { private Configuration conf; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextUtil.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextUtil.java index 873de72961925..e66d0e3a01b63 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextUtil.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextUtil.java @@ -17,11 +17,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContextUtilBase; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.test.tags.IntegrationTest; + import org.junit.jupiter.api.BeforeEach; /** * S3A implementation of FileContextUtilBase. */ +@IntegrationTest public class ITestS3AFileContextUtil extends FileContextUtilBase { @BeforeEach diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java index d33359dbecf55..8afb60d6139e0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java @@ -30,7 +30,9 @@ import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,6 +92,8 @@ * * */ +@ParameterizedClass(name="bulk-delete-{0}") +@MethodSource("params") @SuppressWarnings("ThrowableNotThrown") public class ITestPartialRenamesDeletes extends AbstractS3ATestBase { @@ -164,7 +168,7 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase { private Path noReadDir; /** delete policy: single or multi? */ - private boolean multiDelete; + private final boolean multiDelete; /** * Configuration for the assume role FS. @@ -198,12 +202,10 @@ public static Collection params() { /** * Constructor. - * @param pMultiDelete single vs multi delete in the role FS? + * @param multiDelete single vs multi delete in the role FS? 
*/ - public void initITestPartialRenamesDeletes(final boolean pMultiDelete) - throws Exception { - this.multiDelete = pMultiDelete; - setup(); + public ITestPartialRenamesDeletes(final boolean multiDelete) { + this.multiDelete = multiDelete; } /** @@ -215,6 +217,7 @@ public void initITestPartialRenamesDeletes(final boolean pMultiDelete) * @throws Exception failure */ @Override + @BeforeEach public void setup() throws Exception { super.setup(); assumeRoleTests(); @@ -336,10 +339,8 @@ private Path uniquePath() throws IOException { /** * This is here to verify role and path setup. */ - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testCannotTouchUnderRODir(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testCannotTouchUnderRODir() throws Throwable { forbidden("touching the empty child " + readOnlyChild, "", () -> { @@ -347,10 +348,8 @@ public void testCannotTouchUnderRODir(boolean pMultiDelete) throws Throwable { return readOnlyChild; }); } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testCannotReadUnderNoReadDir(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testCannotReadUnderNoReadDir() throws Throwable { Path path = new Path(noReadDir, "unreadable.txt"); createFile(getFileSystem(), path, true, "readonly".getBytes()); forbidden("trying to read " + path, @@ -358,10 +357,8 @@ public void testCannotReadUnderNoReadDir(boolean pMultiDelete) throws Throwable () -> readUTF8(roleFS, path, -1)); } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testMultiDeleteOptionPropagated(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testMultiDeleteOptionPropagated() throws Throwable { describe("Verify the test parameter propagates to the store context"); StoreContext ctx = roleFS.createStoreContext(); Assertions.assertThat(ctx.isMultiObjectDeleteEnabled()) @@ -372,10 +369,8 @@ public void testMultiDeleteOptionPropagated(boolean pMultiDelete) throws Throwab /** * Execute a sequence of rename operations with access locked down. */ - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testRenameParentPathNotWriteable(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testRenameParentPathNotWriteable() throws Throwable { describe("rename with parent paths not writeable; multi=%s", multiDelete); final Configuration conf = createAssumedRoleConfig(); bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW, @@ -406,10 +401,8 @@ public void testRenameParentPathNotWriteable(boolean pMultiDelete) throws Throwa roleFS.delete(writableDir, true); } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testRenameSingleFileFailsInDelete(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testRenameSingleFileFailsInDelete() throws Throwable { describe("rename with source read only; multi=%s", multiDelete); Path readOnlyFile = readOnlyChild; @@ -454,10 +447,8 @@ public void testRenameSingleFileFailsInDelete(boolean pMultiDelete) throws Throw * it's a filesystem forever.
  • * */ - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testRenameDirFailsInDelete(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testRenameDirFailsInDelete() throws Throwable { describe("rename with source read only; multi=%s", multiDelete); // the full FS @@ -505,10 +496,8 @@ public void testRenameDirFailsInDelete(boolean pMultiDelete) throws Throwable { } } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testRenameFileFailsNoWrite(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testRenameFileFailsNoWrite() throws Throwable { describe("Try to rename to a write-only destination fails with src" + " & dest unchanged."); roleFS.mkdirs(writableDir); @@ -524,10 +513,8 @@ public void testRenameFileFailsNoWrite(boolean pMultiDelete) throws Throwable { assertPathDoesNotExist("rename destination", dest); } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testCopyDirFailsToReadOnlyDir(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testCopyDirFailsToReadOnlyDir() throws Throwable { describe("Try to copy to a read-only destination"); roleFS.mkdirs(writableDir); S3AFileSystem fs = getFileSystem(); @@ -542,10 +529,8 @@ public void testCopyDirFailsToReadOnlyDir(boolean pMultiDelete) throws Throwable writableDir, files.size()); } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testCopyFileFailsOnSourceRead(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testCopyFileFailsOnSourceRead() throws Throwable { describe("The source file isn't readable, so the COPY fails"); Path source = new Path(noReadDir, "source"); S3AFileSystem fs = getFileSystem(); @@ -557,10 +542,8 @@ public void testCopyFileFailsOnSourceRead(boolean pMultiDelete) throws Throwable assertPathDoesNotExist("rename destination", dest); } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testCopyDirFailsOnSourceRead(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testCopyDirFailsOnSourceRead() throws Throwable { describe("The source file isn't readable, so the COPY fails"); S3AFileSystem fs = getFileSystem(); List files = createFiles(fs, noReadDir, dirDepth, fileCount, @@ -578,10 +561,8 @@ public void testCopyDirFailsOnSourceRead(boolean pMultiDelete) throws Throwable * This verifies that failures in the delete fake dir stage. * are not visible. */ - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testPartialEmptyDirDelete(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testPartialEmptyDirDelete() throws Throwable { describe("delete an empty directory with parent dir r/o" + " multidelete=%s", multiDelete); @@ -602,10 +583,8 @@ public void testPartialEmptyDirDelete(boolean pMultiDelete) throws Throwable { * Have a directory with full R/W permissions, but then remove * write access underneath, and try to delete it. 
*/ - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testPartialDirDelete(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testPartialDirDelete() throws Throwable { describe("delete with part of the child tree read only;" + " multidelete=%s", multiDelete); @@ -752,10 +731,8 @@ private Set listFilesUnderPath(Path path, boolean recursive) *

    * See HADOOP-17621. */ - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete={0}") - public void testRenamePermissionRequirements(boolean pMultiDelete) throws Throwable { - initITestPartialRenamesDeletes(pMultiDelete); + @Test + public void testRenamePermissionRequirements() throws Throwable { describe("Verify rename() only needs s3:DeleteObject permission"); // close the existing roleFS IOUtils.cleanupWithLogger(LOG, roleFS); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3AConditionalCreateBehavior.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3AConditionalCreateBehavior.java index c20b6f7831d04..60041f8e0d845 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3AConditionalCreateBehavior.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3AConditionalCreateBehavior.java @@ -24,8 +24,8 @@ import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -51,7 +51,8 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept; import static org.assertj.core.api.Assumptions.assumeThat; -@RunWith(Parameterized.class) +@ParameterizedClass(name="conditionalCreateEnabled-{0}") +@MethodSource("data") public class ITestS3AConditionalCreateBehavior extends AbstractS3ATestBase { private static final byte[] SMALL_FILE_BYTES = dataset(TEST_FILE_LEN, 0, 255); @@ -62,7 +63,6 @@ public ITestS3AConditionalCreateBehavior(boolean conditionalCreateEnabled) { this.conditionalCreateEnabled = conditionalCreateEnabled; } - @Parameterized.Parameters public static Collection data() { return Arrays.asList(new Object[][]{ {true}, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3APutIfMatchAndIfNoneMatch.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3APutIfMatchAndIfNoneMatch.java index 951bad37bb640..2ce49f0d89597 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3APutIfMatchAndIfNoneMatch.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestS3APutIfMatchAndIfNoneMatch.java @@ -23,6 +23,7 @@ import java.nio.charset.StandardCharsets; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import software.amazon.awssdk.services.s3.model.S3Exception; @@ -97,6 +98,7 @@ public Configuration createConfiguration() { } @Override + @BeforeEach public void setup() throws Exception { super.setup(); Configuration conf = getConfiguration(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java index 1d8a36b64ec42..9549a0c85f0ca 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java @@ -24,6 +24,9 @@ import java.util.concurrent.atomic.AtomicLong; import org.assertj.core.api.Assertions; 
+import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -34,8 +37,6 @@ import org.apache.hadoop.fs.s3a.RemoteFileChangedException; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import static java.util.Objects.requireNonNull; @@ -62,6 +63,8 @@ * with the FS_S3A_CREATE_PERFORMANCE option. */ @SuppressWarnings("resource") +@ParameterizedClass(name="performance-{0}") +@MethodSource("params") public class ITestCreateFileCost extends AbstractS3ACostTest { /** @@ -79,14 +82,14 @@ public static Collection params() { /** * Flag for performance creation; all cost asserts need changing. */ - private boolean createPerformance; + private final boolean createPerformance; /** * Create. - * @param pCreatePerformance use the performance flag + * @param createPerformance use the performance flag */ - public void initITestCreateFileCost(final boolean pCreatePerformance) { - this.createPerformance = pCreatePerformance; + public ITestCreateFileCost(final boolean createPerformance) { + this.createPerformance = createPerformance; } /** @@ -108,10 +111,8 @@ public Configuration createConfiguration() { return conf; } - @MethodSource("params") - @ParameterizedTest - public void testCreateNoOverwrite(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateNoOverwrite() throws Throwable { describe("Test file creation without overwrite"); Path testFile = methodPath(); // when overwrite is false, the path is checked for existence. @@ -119,20 +120,16 @@ public void testCreateNoOverwrite(boolean pCreatePerformance) throws Throwable { expected(CREATE_FILE_NO_OVERWRITE)); } - @MethodSource("params") - @ParameterizedTest - public void testCreateOverwrite(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateOverwrite() throws Throwable { describe("Test file creation with overwrite"); Path testFile = methodPath(); // when overwrite is true: only the directory checks take place. 
create(testFile, true, expected(CREATE_FILE_OVERWRITE)); } - @MethodSource("params") - @ParameterizedTest - public void testCreateNoOverwriteFileExists(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateNoOverwriteFileExists() throws Throwable { describe("Test cost of create file failing with existing file"); Path testFile = file(methodPath()); @@ -147,10 +144,8 @@ public void testCreateNoOverwriteFileExists(boolean pCreatePerformance) throws T } } - @MethodSource("params") - @ParameterizedTest - public void testCreateFileOverDirNoOverwrite(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateFileOverDirNoOverwrite() throws Throwable { describe("Test cost of create file overwrite=false failing with existing dir"); Path testFile = dir(methodPath()); @@ -165,10 +160,8 @@ public void testCreateFileOverDirNoOverwrite(boolean pCreatePerformance) throws } } - @MethodSource("params") - @ParameterizedTest - public void testCreateFileOverDirWithOverwrite(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateFileOverDirWithOverwrite() throws Throwable { describe("Test cost of create file overwrite=false failing with existing dir"); Path testFile = dir(methodPath()); @@ -187,10 +180,8 @@ public void testCreateFileOverDirWithOverwrite(boolean pCreatePerformance) throw * Use the builder API. * on s3a this skips parent checks, always. */ - @MethodSource("params") - @ParameterizedTest - public void testCreateBuilderSequence(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateBuilderSequence() throws Throwable { describe("Test builder file creation cost"); Path testFile = methodPath(); dir(testFile.getParent()); @@ -216,10 +207,8 @@ public void testCreateBuilderSequence(boolean pCreatePerformance) throws Throwab } } - @MethodSource("params") - @ParameterizedTest - public void testCreateFilePerformanceFlag(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateFilePerformanceFlag() throws Throwable { describe("createFile with performance flag skips safety checks"); S3AFileSystem fs = getFileSystem(); @@ -245,10 +234,8 @@ public void testCreateFilePerformanceFlag(boolean pCreatePerformance) throws Thr .isGreaterThanOrEqualTo(1); } - @MethodSource("params") - @ParameterizedTest - public void testCreateFileRecursive(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateFileRecursive() throws Throwable { describe("createFile without performance flag performs overwrite safety checks"); S3AFileSystem fs = getFileSystem(); @@ -274,10 +261,8 @@ public void testCreateFileRecursive(boolean pCreatePerformance) throws Throwable .isEqualTo(custom); } - @MethodSource("params") - @ParameterizedTest - public void testCreateFileNonRecursive(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateFileNonRecursive() throws Throwable { describe("nonrecursive createFile does not check parents"); S3AFileSystem fs = getFileSystem(); @@ -287,10 +272,8 @@ public void testCreateFileNonRecursive(boolean pCreatePerformance) throws Throwa } - @MethodSource("params") - @ParameterizedTest - public void testCreateNonRecursive(boolean 
pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testCreateNonRecursive() throws Throwable { describe("nonrecursive createFile does not check parents"); S3AFileSystem fs = getFileSystem(); @@ -313,10 +296,8 @@ private FSDataOutputStream build(final FSDataOutputStreamBuilder builder) /** * Shows how the performance option allows the FS to become ill-formed. */ - @MethodSource("params") - @ParameterizedTest - public void testPerformanceFlagPermitsInvalidStores(boolean pCreatePerformance) throws Throwable { - initITestCreateFileCost(pCreatePerformance); + @Test + public void testPerformanceFlagPermitsInvalidStores() throws Throwable { describe("createFile with performance flag over a directory"); S3AFileSystem fs = getFileSystem(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java index 19384b04510ff..f1a62e2a362bc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java @@ -20,12 +20,9 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; import org.assertj.core.api.Assertions; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,55 +51,30 @@ public class ITestS3AMiscOperationCost extends AbstractS3ACostTest { private static final Logger LOG = LoggerFactory.getLogger(ITestS3AMiscOperationCost.class); - /** - * Parameter: should auditing be enabled? - */ - private boolean auditing; - - /** - * Parameterization. - */ - public static Collection params() { - return Arrays.asList(new Object[][]{ - {"auditing", true}, - {"unaudited", false} - }); - } - - public void initITestS3AMiscOperationCost(final String pName, - final boolean pAuditing) throws Exception { - this.auditing = pAuditing; - } - @Override public Configuration createConfiguration() { final Configuration conf = super.createConfiguration(); removeBaseAndBucketOverrides(conf, AUDIT_ENABLED); - conf.setBoolean(AUDIT_ENABLED, auditing); + conf.setBoolean(AUDIT_ENABLED, true); return conf; } /** - * Expected audit count when auditing is enabled; expect 0 - * when disabled. + * Expected audit count. * @param expected expected value. * @return the probe. */ protected OperationCostValidator.ExpectedProbe withAuditCount( final int expected) { - return probe(AUDIT_SPAN_CREATION, - auditing ? expected : 0); + return probe(AUDIT_SPAN_CREATION, expected); } /** * Common operation which should be low cost as possible. 
*/ - @MethodSource("params") - @ParameterizedTest(name = "{0}") - public void testMkdirOverDir(String pName, - boolean pAuditing) throws Throwable { - initITestS3AMiscOperationCost(pName, pAuditing); + @Test + public void testMkdirOverDir() throws Throwable { describe("create a dir over a dir"); S3AFileSystem fs = getFileSystem(); // create base dir with marker @@ -116,11 +88,8 @@ public void testMkdirOverDir(String pName, with(OBJECT_LIST_REQUEST, FILESTATUS_DIR_PROBE_L)); } - @MethodSource("params") - @ParameterizedTest(name = "{0}") - public void testGetContentSummaryRoot(String pName, - boolean pAuditing) throws Throwable { - initITestS3AMiscOperationCost(pName, pAuditing); + @Test + public void testGetContentSummaryRoot() throws Throwable { describe("getContentSummary on Root"); S3AFileSystem fs = getFileSystem(); @@ -129,11 +98,8 @@ public void testGetContentSummaryRoot(String pName, with(INVOCATION_GET_CONTENT_SUMMARY, 1)); } - @MethodSource("params") - @ParameterizedTest(name = "{0}") - public void testGetContentSummaryDir(String pName, - boolean pAuditing) throws Throwable { - initITestS3AMiscOperationCost(pName, pAuditing); + @Test + public void testGetContentSummaryDir() throws Throwable { describe("getContentSummary on test dir with children"); S3AFileSystem fs = getFileSystem(); Path baseDir = methodPath(); @@ -157,11 +123,8 @@ public void testGetContentSummaryDir(String pName, .isEqualTo(1); } - @MethodSource("params") - @ParameterizedTest(name = "{0}") - public void testGetContentMissingPath(String pName, - boolean pAuditing) throws Throwable { - initITestS3AMiscOperationCost(pName, pAuditing); + @Test + public void testGetContentMissingPath() throws Throwable { describe("getContentSummary on a missing path"); Path baseDir = methodPath(); verifyMetricsIntercepting(FileNotFoundException.class, diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java index f9bdba69808e3..a9e454deab165 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ILoadTestS3ABulkDeleteThrottling.java @@ -29,9 +29,14 @@ import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import software.amazon.awssdk.services.s3.model.ObjectIdentifier; + +import org.apache.hadoop.test.tags.LoadTest; +import org.apache.hadoop.test.tags.ScaleTest; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.assertj.core.api.Assertions; @@ -68,6 +73,10 @@ * Note: UA field includes the configuration tested for the benefit * of anyone looking through the server logs. 
*/ +@LoadTest +@ScaleTest +@ParameterizedClass(name="bulk-delete-aws-retry={0}-requests={2}-size={1}") +@MethodSource("params") @TestMethodOrder(MethodOrderer.Alphanumeric.class) public class ILoadTestS3ABulkDeleteThrottling extends S3AScaleTestBase { @@ -99,9 +108,9 @@ public class ILoadTestS3ABulkDeleteThrottling extends S3AScaleTestBase { private File dataDir; - private boolean throttle; - private int pageSize; - private int requests; + private final boolean throttle; + private final int pageSize; + private final int requests; /** * Test array for parameterized test runs. @@ -123,21 +132,20 @@ public static Collection params() { /** * Parameterized constructor. - * @param pThrottle AWS client throttle on/off - * @param pPageSize Page size - * @param pRequests request count; + * @param throttle AWS client throttle on/off + * @param pageSize Page size + * @param requests request count; */ - public void initILoadTestS3ABulkDeleteThrottling( - final boolean pThrottle, - final int pPageSize, - final int pRequests) throws Exception { - this.throttle = pThrottle; + public ILoadTestS3ABulkDeleteThrottling( + final boolean throttle, + final int pageSize, + final int requests) { + this.throttle = throttle; Preconditions.checkArgument(pageSize > 0, "page size too low %s", pageSize); - this.pageSize = pPageSize; - this.requests = pRequests; - setup(); + this.pageSize = pageSize; + this.requests = requests; } @Override @@ -160,9 +168,10 @@ protected Configuration createScaleConfiguration() { } @Override + @BeforeEach public void setup() throws Exception { - final Configuration conf = getConf(); super.setup(); + final Configuration conf = getConf(); assumeTrue(conf.getBoolean(ENABLE_MULTI_DELETE, true), "multipart delete disabled"); dataDir = GenericTestUtils.getTestDir("throttling"); @@ -177,19 +186,13 @@ public void setup() throws Exception { } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete-aws-retry={0}-requests={2}-size={1}") - public void test_010_Reset(final boolean pThrottle, - final int pPageSize, final int pRequests) throws Throwable { - initILoadTestS3ABulkDeleteThrottling(pThrottle, pPageSize, pRequests); + @Test + public void test_010_Reset() throws Throwable { testWasThrottled = false; } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete-aws-retry={0}-requests={2}-size={1}") - public void test_020_DeleteThrottling(final boolean pThrottle, - final int pPageSize, final int pRequests) throws Throwable { - initILoadTestS3ABulkDeleteThrottling(pThrottle, pPageSize, pRequests); + @Test + public void test_020_DeleteThrottling() throws Throwable { describe("test how S3 reacts to massive multipart deletion requests"); final File results = deleteFiles(requests, pageSize); LOG.info("Test run completed against {}:\n see {}", getFileSystem(), @@ -201,11 +204,8 @@ public void test_020_DeleteThrottling(final boolean pThrottle, } } - @MethodSource("params") - @ParameterizedTest(name = "bulk-delete-aws-retry={0}-requests={2}-size={1}") - public void test_030_Sleep(final boolean pThrottle, - final int pPageSize, final int pRequests) throws Throwable { - initILoadTestS3ABulkDeleteThrottling(pThrottle, pPageSize, pRequests); + @Test + public void test_030_Sleep() throws Throwable { maybeSleep(); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ABlockOutputStreamInterruption.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ABlockOutputStreamInterruption.java index 9b16e66c1f3b7..c04261efc1390 
100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ABlockOutputStreamInterruption.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ABlockOutputStreamInterruption.java @@ -28,7 +28,9 @@ import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import org.apache.hadoop.conf.Configuration; @@ -83,6 +85,8 @@ * Marked as a scale test even though it tries to aggressively abort streams being written * and should, if working, complete fast. */ +@ParameterizedClass(name = "{0}-{1}") +@MethodSource("params") public class ITestS3ABlockOutputStreamInterruption extends S3AScaleTestBase { public static final int MAX_RETRIES_IN_SDK = 2; @@ -104,23 +108,22 @@ public static Collection params() { /** * Buffer type. */ - private String bufferType; + private final String bufferType; /** * How many blocks can a stream have uploading? */ - private int activeBlocks; + private final int activeBlocks; /** * Constructor. - * @param pBufferType buffer type - * @param pActiveBlocks number of active blocks which can be uploaded + * @param bufferType buffer type + * @param activeBlocks number of active blocks which can be uploaded */ - public void initITestS3ABlockOutputStreamInterruption(final String pBufferType, - int pActiveBlocks) throws Exception { - this.bufferType = requireNonNull(pBufferType); - this.activeBlocks = pActiveBlocks; - setup(); + public ITestS3ABlockOutputStreamInterruption(final String bufferType, + int activeBlocks) { + this.bufferType = requireNonNull(bufferType); + this.activeBlocks = activeBlocks; } /** @@ -164,6 +167,7 @@ protected Configuration createScaleConfiguration() { * Setup MUST set up the evaluator before the FS is created. */ @Override + @BeforeEach public void setup() throws Exception { SdkFaultInjector.resetFaultInjector(); super.setup(); @@ -179,11 +183,8 @@ public void teardown() throws Exception { super.teardown(); } - @MethodSource("params") - @ParameterizedTest(name = "{0}-{1}") - public void testInterruptMultipart(String pBufferType, - int pActiveBlocks) throws Throwable { - initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); + @Test + public void testInterruptMultipart() throws Throwable { describe("Interrupt a thread performing close() on a multipart upload"); interruptMultipartUpload(methodPath(), 6 * _1MB); @@ -228,11 +229,8 @@ private void interruptMultipartUpload(final Path path, int len) throws Exception * then go on to simulate an NPE in the part upload and verify * that this does not get escalated. */ - @MethodSource("params") - @ParameterizedTest(name = "{0}-{1}") - public void testAbortDuringUpload(String pBufferType, - int pActiveBlocks) throws Throwable { - initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); + @Test + public void testAbortDuringUpload() throws Throwable { describe("Abort during multipart upload"); int len = 6 * _1MB; final byte[] dataset = dataset(len, 'a', 'z' - 'a'); @@ -286,11 +284,8 @@ public void testAbortDuringUpload(String pBufferType, * Test that a part upload failure is propagated to * the close() call. 
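
A recurring detail in these hunks: overridden setup() and teardown() methods regain their @BeforeEach/@AfterEach annotations. JUnit Jupiter inherits lifecycle (and @Test) annotations from a superclass method only as long as that method is not overridden, so an un-annotated override silently stops being run. A stripped-down illustration, with invented class names:

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

abstract class ScaleTestBaseExample {

  @BeforeEach
  public void setup() throws Exception {
    // shared wiring: configuration, filesystem binding, metrics reset...
  }
}

class InterruptionTestExample extends ScaleTestBaseExample {

  // @BeforeEach must be repeated here: Jupiter treats an un-annotated
  // override as a plain method, so without it this setup() never runs.
  @Override
  @BeforeEach
  public void setup() throws Exception {
    super.setup();
    // subclass-specific preparation, e.g. resetting a fault injector
  }

  @Test
  public void testUsesPreparedState() {
    // runs only after setup() for this class has completed
  }
}
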
*/ - @MethodSource("params") - @ParameterizedTest(name = "{0}-{1}") - public void testPartUploadFailure(String pBufferType, - int pActiveBlocks) throws Throwable { - initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); + @Test + public void testPartUploadFailure() throws Throwable { describe("Trigger a failure during a multipart upload"); int len = 6 * _1MB; final byte[] dataset = dataset(len, 'a', 'z' - 'a'); @@ -339,11 +334,8 @@ private static void assertBytesTransferred( /** * Write a small dataset and interrupt the close() operation. */ - @MethodSource("params") - @ParameterizedTest(name = "{0}-{1}") - public void testInterruptMagicWrite(String pBufferType, - int pActiveBlocks) throws Throwable { - initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); + @Test + public void testInterruptMagicWrite() throws Throwable { describe("Interrupt a thread performing close() on a magic upload"); // write a smaller file to a magic path and assert multipart outcome @@ -354,11 +346,8 @@ public void testInterruptMagicWrite(String pBufferType, /** * Write a small dataset and interrupt the abort of the upload. */ - @MethodSource("params") - @ParameterizedTest(name = "{0}-{1}") - public void testInterruptWhenAbortingAnUpload(String pBufferType, - int pActiveBlocks) throws Throwable { - initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); + @Test + public void testInterruptWhenAbortingAnUpload() throws Throwable { describe("Interrupt a thread performing close() on a magic upload"); // fail more than the SDK will retry @@ -384,11 +373,8 @@ public void testInterruptWhenAbortingAnUpload(String pBufferType, * an {@code InterruptedIOException} and the count of interrupted events * to increase. */ - @MethodSource("params") - @ParameterizedTest(name = "{0}-{1}") - public void testInterruptSimplePut(String pBufferType, - int pActiveBlocks) throws Throwable { - initITestS3ABlockOutputStreamInterruption(pBufferType, pActiveBlocks); + @Test + public void testInterruptSimplePut() throws Throwable { describe("Interrupt simple object PUT"); // dataset is less than one block diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java index 6c83163786751..96c59c266a647 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AConcurrentOps.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.test.tags.ScaleTest; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterEach; @@ -51,6 +52,7 @@ /** * Tests concurrent operations on a single S3AFileSystem instance.
*/ +@ScaleTest public class ITestS3AConcurrentOps extends S3AScaleTestBase { private static final Logger LOG = LoggerFactory.getLogger( ITestS3AConcurrentOps.class); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java index 7b83442ed9519..5b2c9cd974260 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ACreatePerformance.java @@ -20,6 +20,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.test.tags.ScaleTest; + import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -32,6 +34,7 @@ /** * Tests for create(): performance and/or load testing. */ +@ScaleTest public class ITestS3ACreatePerformance extends S3AScaleTestBase { private static final Logger LOG = LoggerFactory.getLogger( ITestS3ADirectoryPerformance.class); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteFilesOneByOne.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteFilesOneByOne.java index 10dfa65a15098..f3fe05d9801d0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteFilesOneByOne.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteFilesOneByOne.java @@ -20,10 +20,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.Constants; +import org.apache.hadoop.test.tags.ScaleTest; /** * Tests file deletion with multi-delete disabled. */ +@ScaleTest public class ITestS3ADeleteFilesOneByOne extends ITestS3ADeleteManyFiles { @Override diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java index eebb72f349c3c..bba485b0c647e 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADeleteManyFiles.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.s3a.S3AFileSystem; import org.apache.hadoop.fs.s3a.S3ATestUtils; +import org.apache.hadoop.test.tags.ScaleTest; import org.apache.hadoop.util.DurationInfo; import org.assertj.core.api.Assertions; @@ -47,6 +48,7 @@ * issue multiple delete requests during a delete sequence - so test that * operation more efficiently.
*/ +@ScaleTest public class ITestS3ADeleteManyFiles extends S3AScaleTestBase { private static final Logger LOG = diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java index dcbf61574fb9d..6a8ba42c9e67c 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.s3a.api.RequestFactory; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.fs.store.audit.AuditSpan; +import org.apache.hadoop.test.tags.ScaleTest; import org.apache.hadoop.util.functional.RemoteIterators; import org.junit.jupiter.api.Test; @@ -68,6 +69,7 @@ /** * Test the performance of listing files/directories. */ +@ScaleTest public class ITestS3ADirectoryPerformance extends S3AScaleTestBase { private static final Logger LOG = LoggerFactory.getLogger( ITestS3ADirectoryPerformance.class); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesArrayBlocks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesArrayBlocks.java index d6f15c8c84476..33dfdc6db6aff 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesArrayBlocks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesArrayBlocks.java @@ -19,10 +19,12 @@ package org.apache.hadoop.fs.s3a.scale; import org.apache.hadoop.fs.s3a.Constants; +import org.apache.hadoop.test.tags.ScaleTest; /** * Use {@link Constants#FAST_UPLOAD_BUFFER_ARRAY} for buffering. */ +@ScaleTest public class ITestS3AHugeFilesArrayBlocks extends AbstractSTestS3AHugeFiles { protected String getBlockOutputBufferName() { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesByteBufferBlocks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesByteBufferBlocks.java index 1e74d715b88fa..3b7b4caae9d8b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesByteBufferBlocks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesByteBufferBlocks.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.test.tags.ScaleTest; import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BYTEBUFFER; @@ -33,6 +34,7 @@ * This also renames by parent directory, so validates parent * dir renaming of huge files. 
*/ +@ScaleTest public class ITestS3AHugeFilesByteBufferBlocks extends AbstractSTestS3AHugeFiles { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesDiskBlocks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesDiskBlocks.java index 6020f4c5f8f80..29fef37e7d2e7 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesDiskBlocks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesDiskBlocks.java @@ -19,11 +19,13 @@ package org.apache.hadoop.fs.s3a.scale; import org.apache.hadoop.fs.s3a.Constants; +import org.apache.hadoop.test.tags.ScaleTest; /** * Use {@link Constants#FAST_UPLOAD_BUFFER_DISK} for buffering. * Also uses direct buffers for the vector IO. */ +@ScaleTest public class ITestS3AHugeFilesDiskBlocks extends AbstractSTestS3AHugeFiles { protected String getBlockOutputBufferName() { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java index 7031bd937b14f..819ee069da845 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesEncryption.java @@ -27,6 +27,8 @@ import org.apache.hadoop.fs.s3a.EncryptionTestUtils; import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.test.tags.ScaleTest; + import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.fs.s3a.Constants.S3_ENCRYPTION_ALGORITHM; @@ -43,6 +45,7 @@ * is set in the configuration. The testing bucket must be configured with this * same key else test might fail. */ +@ScaleTest public class ITestS3AHugeFilesEncryption extends AbstractSTestS3AHugeFiles { @BeforeEach diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesNoMultipart.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesNoMultipart.java index 3ee8bdbf128e7..f9b3ddee54c08 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesNoMultipart.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesNoMultipart.java @@ -19,9 +19,11 @@ package org.apache.hadoop.fs.s3a.scale; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.Test; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.s3a.Constants; +import org.apache.hadoop.test.tags.ScaleTest; import static org.apache.hadoop.fs.contract.ContractTestUtils.IO_CHUNK_BUFFER_SIZE; import static org.apache.hadoop.fs.s3a.Constants.CONNECTION_EXPECT_CONTINUE; @@ -37,6 +39,7 @@ * Use a single PUT for the whole upload/rename/delete workflow; include verification * that the transfer manager will fail fast unless the multipart threshold is huge. */ +@ScaleTest public class ITestS3AHugeFilesNoMultipart extends AbstractSTestS3AHugeFiles { public static final String SINGLE_PUT_REQUEST_TIMEOUT = "1h"; @@ -98,6 +101,7 @@ protected Configuration createScaleConfiguration() { /** * Verify multipart copy is disabled. 
*/ + @Test @Override public void test_030_postCreationAssertions() throws Throwable { super.test_030_postCreationAssertions(); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java index 401db0969c657..870b8199b568a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesSSECDiskBlocks.java @@ -22,6 +22,8 @@ import org.apache.hadoop.fs.s3a.AWSUnsupportedFeatureException; import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.S3AEncryptionMethods; +import org.apache.hadoop.test.tags.ScaleTest; + import org.junit.jupiter.api.BeforeEach; import java.nio.file.AccessDeniedException; @@ -40,6 +42,7 @@ * and tests huge files operations with SSE-C encryption enabled. * Skipped if the SSE tests are disabled. */ +@ScaleTest public class ITestS3AHugeFilesSSECDiskBlocks extends ITestS3AHugeFilesDiskBlocks { diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesStorageClass.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesStorageClass.java index 9fb78ad2cbd99..068af769dac4f 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesStorageClass.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AHugeFilesStorageClass.java @@ -20,6 +20,9 @@ import java.io.IOException; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -29,6 +32,7 @@ import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.s3a.Constants; import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.test.tags.ScaleTest; import static org.apache.hadoop.fs.contract.ContractTestUtils.bandwidth; import static org.apache.hadoop.fs.contract.ContractTestUtils.toHuman; @@ -42,6 +46,8 @@ * Class to verify that {@link Constants#STORAGE_CLASS} is set correctly * for creating and renaming huge files with multipart upload requests. 
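
The storage-class suite that follows is pinned to alphanumeric method order: its test_0NN methods form a pipeline in which each stage consumes the huge file produced by the previous one, which is also why every override re-declares @Test. A minimal sketch of the ordering idea, with invented names; note that MethodOrderer.Alphanumeric is deprecated in recent JUnit releases, with MethodOrderer.MethodName as the documented replacement:

import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;

// Illustration only: numbered method names plus a deterministic orderer
// enforce a create -> verify -> rename pipeline across test cases.
@TestMethodOrder(MethodOrderer.MethodName.class)
class HugeFilePipelineExample {

  @Test
  void test_010_createHugeFile() {
    // write the file all later stages depend on
  }

  @Test
  void test_030_postCreationAssertions() {
    // validate what test_010 produced
  }

  @Test
  void test_100_renameHugeFile() {
    // rename the file created in test_010
  }
}
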
*/ +@ScaleTest +@TestMethodOrder(MethodOrderer.Alphanumeric.class) public class ITestS3AHugeFilesStorageClass extends AbstractSTestS3AHugeFiles { private static final Logger LOG = LoggerFactory.getLogger(ITestS3AHugeFilesStorageClass.class); @@ -62,12 +68,14 @@ protected String getBlockOutputBufferName() { return Constants.FAST_UPLOAD_BUFFER_ARRAY; } + @Test @Override public void test_010_CreateHugeFile() throws IOException { super.test_010_CreateHugeFile(); assertStorageClass(getPathOfFileToCreate()); } + @Test @Override public void test_030_postCreationAssertions() throws Throwable { super.test_030_postCreationAssertions(); @@ -75,20 +83,24 @@ public void test_030_postCreationAssertions() throws Throwable { } @Override + @Test public void test_040_PositionedReadHugeFile() throws Throwable { skipQuietly("PositionedReadHugeFile"); } + @Test @Override public void test_050_readHugeFile() throws Throwable { skipQuietly("readHugeFile"); } + @Test @Override public void test_090_verifyRenameSourceEncryption() throws IOException { skipQuietly("verifyRenameSourceEncryption"); } + @Test @Override public void test_100_renameHugeFile() throws Throwable { Path hugefile = getHugefile(); @@ -110,6 +122,7 @@ public void test_100_renameHugeFile() throws Throwable { assertStorageClass(hugefileRenamed); } + @Test @Override public void test_110_verifyRenameDestEncryption() throws IOException { skipQuietly("verifyRenameDestEncryption"); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java index c401c8205392a..fd36b7400a2a6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AInputStreamPerformance.java @@ -39,6 +39,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressionCodecFactory; +import org.apache.hadoop.test.tags.ScaleTest; import org.apache.hadoop.util.LineReader; import org.assertj.core.api.Assertions; @@ -79,6 +80,7 @@ /** * Look at the performance of S3A input stream reads. */ +@ScaleTest public class ITestS3AInputStreamPerformance extends S3AScaleTestBase { private static final Logger LOG = LoggerFactory.getLogger( ITestS3AInputStreamPerformance.class); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java index f3d5d5515e3ec..cee917de0a486 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3AMultipartUploadSizeLimits.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.s3a.Statistic; import org.apache.hadoop.fs.s3a.auth.ProgressCounter; import org.apache.hadoop.fs.s3a.commit.impl.CommitOperations; +import org.apache.hadoop.test.tags.ScaleTest; import static org.apache.hadoop.fs.StreamCapabilities.ABORTABLE_STREAM; import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile; @@ -57,6 +58,7 @@ /** * Testing S3 multipart upload for S3A.
*/ +@ScaleTest public class ITestS3AMultipartUploadSizeLimits extends S3AScaleTestBase { public static final int MPU_SIZE = 5 * _1MB; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java index e9886e33912c7..50a1ac0f921dc 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/S3AScaleTestBase.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.s3a.AbstractS3ATestBase; import org.apache.hadoop.fs.s3a.S3ATestConstants; import org.apache.hadoop.fs.s3a.Statistic; +import org.apache.hadoop.test.tags.ScaleTest; import org.junit.jupiter.api.BeforeEach; import org.slf4j.Logger; @@ -58,6 +59,7 @@ * very bad form in Java code (indeed, in C++ it is actually permitted; * the base class implementations get invoked instead). */ +@ScaleTest public class S3AScaleTestBase extends AbstractS3ATestBase { public static final int _1KB = 1024; diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AContractStreamIOStatistics.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AContractStreamIOStatistics.java index da2a39a986ea4..4165f7a6c9cb9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AContractStreamIOStatistics.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/ITestS3AContractStreamIOStatistics.java @@ -21,11 +21,14 @@ import java.util.Arrays; import java.util.List; +import org.junit.jupiter.api.Test; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractStreamIOStatisticsTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.apache.hadoop.fs.contract.s3a.S3AContract; import org.apache.hadoop.fs.statistics.StreamStatisticNames; +import org.apache.hadoop.test.tags.IntegrationTest; import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfAnalyticsAcceleratorEnabled; import static org.apache.hadoop.fs.statistics.StreamStatisticNames.*; @@ -33,6 +36,7 @@ /** * Test the S3A Streams IOStatistics support. 
*/ +@IntegrationTest public class ITestS3AContractStreamIOStatistics extends AbstractContractStreamIOStatisticsTest { @@ -79,6 +83,7 @@ public List outputStreamStatisticKeys() { STREAM_WRITE_EXCEPTIONS); } + @Test @Override public void testInputStreamStatisticRead() throws Throwable { // Analytics accelerator currently does not support IOStatistics, this will be added as diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/TestErrorCodeMapping.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/TestErrorCodeMapping.java index e5e59831bf5fc..002df40040d8b 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/TestErrorCodeMapping.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/statistics/TestErrorCodeMapping.java @@ -23,7 +23,9 @@ import org.apache.hadoop.fs.s3a.statistics.impl.StatisticsFromAwsSdkImpl; import org.apache.hadoop.test.AbstractHadoopTestBase; -import org.junit.jupiter.params.ParameterizedTest; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedClass; import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_400_BAD_REQUEST; @@ -42,6 +44,8 @@ /** * Test mapping logic of {@link StatisticsFromAwsSdkImpl}. */ +@ParameterizedClass(name = "http {0} to {1}") +@MethodSource("params") public class TestErrorCodeMapping extends AbstractHadoopTestBase { /** @@ -61,19 +65,17 @@ public static Collection params() { }); } - private int code; + private final int code; - private String name; + private final String name; - public void initTestErrorCodeMapping(final int pCode, final String pName) { - this.code = pCode; - this.name = pName; + public TestErrorCodeMapping(final int code, final String name) { + this.code = code; + this.name = name; } - @ParameterizedTest(name = "http {0} to {1}") - @MethodSource("params") - public void testMapping(int pCode, String pName) throws Throwable { - initTestErrorCodeMapping(pCode, pName); + @Test + public void testMapping() { assertThat(mapErrorStatusCodeToStatisticName(code)) .describedAs("Mapping of status code %d", code) .isEqualTo(name); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/AbstractMarkerToolTest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/AbstractMarkerToolTest.java index d5f91ac0fa686..c160d7831f8f6 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/AbstractMarkerToolTest.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/AbstractMarkerToolTest.java @@ -24,6 +24,7 @@ import java.util.List; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.AfterEach; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,6 +72,7 @@ protected Configuration createConfiguration() { } @Override + @AfterEach public void teardown() throws Exception { // do this ourselves to avoid audits teardown failing // when surplus markers are found diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java index f9861e06bcc63..e9bef36e3b6c0 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java +++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerToolRootOperations.java @@ -26,6 +26,7 @@ import org.junit.jupiter.api.TestMethodOrder; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.tags.RootFilesystemTest; import static org.apache.hadoop.fs.s3a.S3ATestUtils.maybeSkipRootTests; import static org.apache.hadoop.fs.s3a.tools.MarkerTool.AUDIT; @@ -36,7 +37,8 @@ /** * Marker tool tests against the root FS; run in the sequential phase. */ +@RootFilesystemTest @TestMethodOrder(MethodOrderer.Alphanumeric.class) public class ITestMarkerToolRootOperations extends AbstractMarkerToolTest { private Path rootPath; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractVectoredRead.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractVectoredRead.java index 88d542b47176b..7670a49a1d36c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractVectoredRead.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAbfsFileSystemContractVectoredRead.java @@ -21,17 +21,22 @@ import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest; import org.apache.hadoop.fs.contract.AbstractFSContract; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedClass; +import org.junit.jupiter.params.provider.MethodSource; /** * Contract test for vectored reads through ABFS connector. */ +@ParameterizedClass(name="buffer-{0}") +@MethodSource("params") public class ITestAbfsFileSystemContractVectoredRead extends AbstractContractVectoredReadTest { private final boolean isSecure; private final ABFSContractTestBinding binding; - public ITestAbfsFileSystemContractVectoredRead() throws Exception { + public ITestAbfsFileSystemContractVectoredRead(final String bufferType) throws Exception { + super(bufferType); this.binding = new ABFSContractTestBinding(); this.isSecure = binding.isSecureMode(); }
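
The ABFS change above is the same conversion applied throughout this patch: per-method @ParameterizedTest runs plus an initXxx() binding method become class-level parameterization, where @ParameterizedClass (introduced in JUnit Jupiter 5.13) runs the whole class once per parameter set and injects the values through the constructor into final fields. A self-contained sketch of the pattern, with invented class and parameter names:

import java.util.Arrays;
import java.util.Collection;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedClass;
import org.junit.jupiter.params.provider.MethodSource;

import static org.assertj.core.api.Assertions.assertThat;

@ParameterizedClass(name = "buffer-{0}")
@MethodSource("params")
public class BufferTypeParameterizedExample {

  /** One Object[] per run of the entire class. */
  public static Collection<Object[]> params() {
    return Arrays.asList(new Object[][]{
        {"array", 2},
        {"bytebuffer", 4},
    });
  }

  private final String bufferType;
  private final int activeBlocks;

  // Constructor injection: called once per parameter set, before any
  // @Test method, which is what lets the fields be final.
  public BufferTypeParameterizedExample(String bufferType, int activeBlocks) {
    this.bufferType = bufferType;
    this.activeBlocks = activeBlocks;
  }

  @Test
  public void testParametersBound() {
    assertThat(bufferType).isNotBlank();
    assertThat(activeBlocks).isPositive();
  }
}
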